Compare commits
2 commits
phase4-fou
...
dev
| Author | SHA1 | Date | |
|---|---|---|---|
| f0268d12bf | |||
| fc8ebe53e1 |
359 changed files with 11 additions and 66056 deletions
74
go.mod
74
go.mod
|
|
@ -2,30 +2,17 @@ module forge.lthn.ai/core/go
|
||||||
|
|
||||||
go 1.25.5
|
go 1.25.5
|
||||||
|
|
||||||
|
require forge.lthn.ai/core/go-crypt v0.0.0
|
||||||
|
|
||||||
require (
|
require (
|
||||||
code.gitea.io/sdk/gitea v0.23.2
|
|
||||||
codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0
|
|
||||||
github.com/ProtonMail/go-crypto v1.3.0
|
|
||||||
github.com/Snider/Borg v0.2.0
|
github.com/Snider/Borg v0.2.0
|
||||||
github.com/aws/aws-sdk-go-v2 v1.41.1
|
github.com/aws/aws-sdk-go-v2 v1.41.1
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0
|
||||||
github.com/getkin/kin-openapi v0.133.0
|
|
||||||
github.com/gorilla/websocket v1.5.3
|
github.com/gorilla/websocket v1.5.3
|
||||||
github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1
|
|
||||||
github.com/leaanthony/debme v1.2.1
|
|
||||||
github.com/leaanthony/gosod v1.0.4
|
|
||||||
github.com/marcboeker/go-duckdb v1.8.5
|
|
||||||
github.com/modelcontextprotocol/go-sdk v1.3.0
|
|
||||||
github.com/oasdiff/oasdiff v1.11.10
|
|
||||||
github.com/ollama/ollama v0.16.1
|
|
||||||
github.com/parquet-go/parquet-go v0.27.0
|
|
||||||
github.com/qdrant/go-client v1.16.2
|
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/spf13/viper v1.21.0
|
github.com/spf13/viper v1.21.0
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
github.com/unpoller/unifi/v5 v5.18.0
|
|
||||||
golang.org/x/crypto v0.48.0
|
golang.org/x/crypto v0.48.0
|
||||||
golang.org/x/net v0.50.0
|
|
||||||
golang.org/x/term v0.40.0
|
golang.org/x/term v0.40.0
|
||||||
golang.org/x/text v0.34.0
|
golang.org/x/text v0.34.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
|
@ -33,11 +20,7 @@ require (
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go v0.123.0 // indirect
|
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||||
github.com/42wim/httpsig v1.2.3 // indirect
|
|
||||||
github.com/TwiN/go-color v1.4.1 // indirect
|
|
||||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
|
||||||
github.com/apache/arrow-go/v18 v18.5.1 // indirect
|
|
||||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||||
|
|
@ -47,75 +30,32 @@ require (
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
|
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
|
||||||
github.com/aws/smithy-go v1.24.0 // indirect
|
github.com/aws/smithy-go v1.24.0 // indirect
|
||||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
|
||||||
github.com/brianvoe/gofakeit/v6 v6.28.0 // indirect
|
|
||||||
github.com/buger/jsonparser v1.1.1 // indirect
|
|
||||||
github.com/cloudflare/circl v1.6.3 // indirect
|
github.com/cloudflare/circl v1.6.3 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/davidmz/go-pageant v1.0.2 // indirect
|
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||||
github.com/go-fed/httpsig v1.1.0 // indirect
|
|
||||||
github.com/go-openapi/jsonpointer v0.22.4 // indirect
|
|
||||||
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
|
|
||||||
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
|
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
|
||||||
github.com/goccy/go-json v0.10.5 // indirect
|
github.com/google/go-cmp v0.7.0 // indirect
|
||||||
github.com/gofrs/flock v0.12.1 // indirect
|
|
||||||
github.com/google/flatbuffers v25.12.19+incompatible // indirect
|
|
||||||
github.com/google/jsonschema-go v0.4.2 // indirect
|
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
github.com/hashicorp/go-version v1.8.0 // indirect
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
|
||||||
github.com/klauspost/compress v1.18.4 // indirect
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
|
||||||
github.com/mailru/easyjson v0.9.1 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
|
||||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
|
|
||||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
|
|
||||||
github.com/parquet-go/bitpack v1.0.0 // indirect
|
|
||||||
github.com/parquet-go/jsonlite v1.4.0 // indirect
|
|
||||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||||
github.com/perimeterx/marshmallow v1.1.5 // indirect
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.25 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||||
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
github.com/sagikazarmark/locafero v0.12.0 // indirect
|
||||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
|
||||||
github.com/spf13/afero v1.15.0 // indirect
|
github.com/spf13/afero v1.15.0 // indirect
|
||||||
github.com/spf13/cast v1.10.0 // indirect
|
github.com/spf13/cast v1.10.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.10 // indirect
|
github.com/spf13/pflag v1.0.10 // indirect
|
||||||
github.com/subosito/gotenv v1.6.0 // indirect
|
github.com/subosito/gotenv v1.6.0 // indirect
|
||||||
github.com/tidwall/gjson v1.18.0 // indirect
|
|
||||||
github.com/tidwall/match v1.2.0 // indirect
|
|
||||||
github.com/tidwall/pretty v1.2.1 // indirect
|
|
||||||
github.com/tidwall/sjson v1.2.5 // indirect
|
|
||||||
github.com/twpayne/go-geom v1.6.1 // indirect
|
|
||||||
github.com/ugorji/go/codec v1.3.1 // indirect
|
|
||||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
|
||||||
github.com/wI2L/jsondiff v0.7.0 // indirect
|
|
||||||
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
|
|
||||||
github.com/woodsbury/decimal128 v1.4.0 // indirect
|
|
||||||
github.com/yargevad/filepathx v1.0.0 // indirect
|
|
||||||
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
|
|
||||||
github.com/zeebo/xxh3 v1.1.0 // indirect
|
|
||||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||||
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
|
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect
|
||||||
golang.org/x/mod v0.33.0 // indirect
|
|
||||||
golang.org/x/oauth2 v0.35.0 // indirect
|
|
||||||
golang.org/x/sync v0.19.0 // indirect
|
|
||||||
golang.org/x/sys v0.41.0 // indirect
|
golang.org/x/sys v0.41.0 // indirect
|
||||||
golang.org/x/telemetry v0.0.0-20260213145524-e0ab670178e1 // indirect
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
golang.org/x/tools v0.42.0 // indirect
|
|
||||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
|
||||||
gonum.org/v1/gonum v0.17.0 // indirect
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba // indirect
|
|
||||||
google.golang.org/grpc v1.78.0 // indirect
|
|
||||||
google.golang.org/protobuf v1.36.11 // indirect
|
|
||||||
modernc.org/libc v1.67.7 // indirect
|
modernc.org/libc v1.67.7 // indirect
|
||||||
modernc.org/mathutil v1.7.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.11.0 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
|
replace forge.lthn.ai/core/go-crypt => ../go-crypt
|
||||||
|
|
|
||||||
205
go.sum
205
go.sum
|
|
@ -1,29 +1,7 @@
|
||||||
cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE=
|
|
||||||
cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU=
|
|
||||||
code.gitea.io/sdk/gitea v0.23.2 h1:iJB1FDmLegwfwjX8gotBDHdPSbk/ZR8V9VmEJaVsJYg=
|
|
||||||
code.gitea.io/sdk/gitea v0.23.2/go.mod h1:yyF5+GhljqvA30sRDreoyHILruNiy4ASufugzYg0VHM=
|
|
||||||
codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0 h1:HTCWpzyWQOHDWt3LzI6/d2jvUDsw/vgGRWm/8BTvcqI=
|
|
||||||
codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v2 v2.2.0/go.mod h1:ZglEEDj+qkxYUb+SQIeqGtFxQrbaMYqIOgahNKb7uxs=
|
|
||||||
github.com/42wim/httpsig v1.2.3 h1:xb0YyWhkYj57SPtfSttIobJUPJZB9as1nsfo7KWVcEs=
|
|
||||||
github.com/42wim/httpsig v1.2.3/go.mod h1:nZq9OlYKDrUBhptd77IHx4/sZZD+IxTBADvAPI9G/EM=
|
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
|
||||||
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
|
||||||
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
|
||||||
github.com/Snider/Borg v0.2.0 h1:iCyDhY4WTXi39+FexRwXbn2YpZ2U9FUXVXDZk9xRCXQ=
|
github.com/Snider/Borg v0.2.0 h1:iCyDhY4WTXi39+FexRwXbn2YpZ2U9FUXVXDZk9xRCXQ=
|
||||||
github.com/Snider/Borg v0.2.0/go.mod h1:TqlKnfRo9okioHbgrZPfWjQsztBV0Nfskz4Om1/vdMY=
|
github.com/Snider/Borg v0.2.0/go.mod h1:TqlKnfRo9okioHbgrZPfWjQsztBV0Nfskz4Om1/vdMY=
|
||||||
github.com/TwiN/go-color v1.4.1 h1:mqG0P/KBgHKVqmtL5ye7K0/Gr4l6hTksPgTgMk3mUzc=
|
|
||||||
github.com/TwiN/go-color v1.4.1/go.mod h1:WcPf/jtiW95WBIsEeY1Lc/b8aaWoiqQpu5cf8WFxu+s=
|
|
||||||
github.com/alecthomas/assert/v2 v2.10.0 h1:jjRCHsj6hBJhkmhznrCzoNpbA3zqy0fYiUcYZP/GkPY=
|
|
||||||
github.com/alecthomas/assert/v2 v2.10.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
|
||||||
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
|
|
||||||
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
|
||||||
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
|
|
||||||
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
|
|
||||||
github.com/apache/arrow-go/v18 v18.5.1 h1:yaQ6zxMGgf9YCYw4/oaeOU3AULySDlAYDOcnr4LdHdI=
|
|
||||||
github.com/apache/arrow-go/v18 v18.5.1/go.mod h1:OCCJsmdq8AsRm8FkBSSmYTwL/s4zHW9CqxeBxEytkNE=
|
|
||||||
github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc=
|
|
||||||
github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g=
|
|
||||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||||
|
|
@ -46,142 +24,46 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIM
|
||||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
|
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
|
||||||
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
|
||||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
|
||||||
github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=
|
|
||||||
github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=
|
|
||||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
|
||||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
|
||||||
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
|
github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=
|
||||||
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
|
github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
|
|
||||||
github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
|
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||||
github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ=
|
|
||||||
github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE=
|
|
||||||
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
|
|
||||||
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
|
|
||||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
|
||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
|
||||||
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
|
|
||||||
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
|
|
||||||
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
|
|
||||||
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
|
|
||||||
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
|
|
||||||
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
|
|
||||||
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
|
||||||
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
|
||||||
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
|
||||||
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
|
||||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
|
||||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
|
||||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
|
||||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
|
||||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
|
||||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
|
||||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
|
||||||
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
|
|
||||||
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/google/flatbuffers v25.12.19+incompatible h1:haMV2JRRJCe1998HeW/p0X9UaMTK6SDo0ffLn2+DbLs=
|
|
||||||
github.com/google/flatbuffers v25.12.19+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8=
|
|
||||||
github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
|
|
||||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
|
|
||||||
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
|
||||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
|
|
||||||
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
|
|
||||||
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
|
|
||||||
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
|
||||||
github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1 h1:x1cSEj4Ug5mpuZgUHLvUmlc5r//KHFn6iYiRSrRcVy4=
|
|
||||||
github.com/kluctl/go-embed-python v0.0.0-3.13.1-20241219-1/go.mod h1:3ebNU9QBrNpUO+Hj6bHaGpkh5pymDHQ+wwVPHTE4mCE=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/leaanthony/debme v1.2.1 h1:9Tgwf+kjcrbMQ4WnPcEIUcQuIZYqdWftzZkBr+i/oOc=
|
|
||||||
github.com/leaanthony/debme v1.2.1/go.mod h1:3V+sCm5tYAgQymvSOfYQ5Xx2JCr+OXiD9Jkw3otUjiA=
|
|
||||||
github.com/leaanthony/gosod v1.0.4 h1:YLAbVyd591MRffDgxUOU1NwLhT9T1/YiwjKZpkNFeaI=
|
|
||||||
github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw=
|
|
||||||
github.com/leaanthony/slicer v1.5.0/go.mod h1:FwrApmf8gOrpzEWM2J/9Lh79tyq8KTX5AzRtwV7m4AY=
|
|
||||||
github.com/leaanthony/slicer v1.6.0 h1:1RFP5uiPJvT93TAHi+ipd3NACobkW53yUiBqZheE/Js=
|
|
||||||
github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8=
|
|
||||||
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
|
|
||||||
github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
|
||||||
github.com/marcboeker/go-duckdb v1.8.5 h1:tkYp+TANippy0DaIOP5OEfBEwbUINqiFqgwMQ44jME0=
|
|
||||||
github.com/marcboeker/go-duckdb v1.8.5/go.mod h1:6mK7+WQE4P4u5AFLvVBmhFxY5fvhymFptghgJX6B+/8=
|
|
||||||
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
|
||||||
github.com/matryer/is v1.4.1 h1:55ehd8zaGABKLXQUe2awZ99BD/PTc2ls+KV/dXphgEQ=
|
|
||||||
github.com/matryer/is v1.4.1/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
|
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
|
|
||||||
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
|
|
||||||
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
|
|
||||||
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
|
|
||||||
github.com/modelcontextprotocol/go-sdk v1.3.0 h1:gMfZkv3DzQF5q/DcQePo5rahEY+sguyPfXDfNBcT0Zs=
|
|
||||||
github.com/modelcontextprotocol/go-sdk v1.3.0/go.mod h1:AnQ//Qc6+4nIyyrB4cxBU7UW9VibK4iOZBeyP/rF1IE=
|
|
||||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
|
||||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/oasdiff/oasdiff v1.11.10 h1:4I9VrktUoHmwydkJqVOC7Bd6BXKu9dc4UUP3PIu1VjM=
|
|
||||||
github.com/oasdiff/oasdiff v1.11.10/go.mod h1:GXARzmqBKN8lZHsTQD35ZM41ePbu6JdAZza4sRMeEKg=
|
|
||||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY=
|
|
||||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
|
|
||||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
|
|
||||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
|
|
||||||
github.com/ollama/ollama v0.16.1 h1:DIxnLdS0om3hb7HheJqj6+ZnPCCMWmy/vyUxiQgRYoI=
|
|
||||||
github.com/ollama/ollama v0.16.1/go.mod h1:FEk95NbAJJZk+t7cLh+bPGTul72j1O3PLLlYNV3FVZ0=
|
|
||||||
github.com/parquet-go/bitpack v1.0.0 h1:AUqzlKzPPXf2bCdjfj4sTeacrUwsT7NlcYDMUQxPcQA=
|
|
||||||
github.com/parquet-go/bitpack v1.0.0/go.mod h1:XnVk9TH+O40eOOmvpAVZ7K2ocQFrQwysLMnc6M/8lgs=
|
|
||||||
github.com/parquet-go/jsonlite v1.4.0 h1:RTG7prqfO0HD5egejU8MUDBN8oToMj55cgSV1I0zNW4=
|
|
||||||
github.com/parquet-go/jsonlite v1.4.0/go.mod h1:nDjpkpL4EOtqs6NQugUsi0Rleq9sW/OtC1NnZEnxzF0=
|
|
||||||
github.com/parquet-go/parquet-go v0.27.0 h1:vHWK2xaHbj+v1DYps03yDRpEsdtOeKbhiXUaixoPb3g=
|
|
||||||
github.com/parquet-go/parquet-go v0.27.0/go.mod h1:navtkAYr2LGoJVp141oXPlO/sxLvaOe3la2JEoD8+rg=
|
|
||||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||||
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
|
|
||||||
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0=
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/qdrant/go-client v1.16.2 h1:UUMJJfvXTByhwhH1DwWdbkhZ2cTdvSqVkXSIfBrVWSg=
|
|
||||||
github.com/qdrant/go-client v1.16.2/go.mod h1:I+EL3h4HRoRTeHtbfOd/4kDXwCukZfkd41j/9wryGkw=
|
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||||
|
|
@ -189,8 +71,6 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
|
github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
|
||||||
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
|
github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
|
||||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
|
||||||
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
|
||||||
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
|
||||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||||
|
|
@ -202,111 +82,32 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
|
||||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
|
||||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
|
||||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
|
||||||
github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM=
|
|
||||||
github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
|
||||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
|
||||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
|
||||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
|
||||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
|
||||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
|
||||||
github.com/twpayne/go-geom v1.6.1 h1:iLE+Opv0Ihm/ABIcvQFGIiFBXd76oBIar9drAwHFhR4=
|
|
||||||
github.com/twpayne/go-geom v1.6.1/go.mod h1:Kr+Nly6BswFsKM5sd31YaoWS5PeDDH2NftJTK7Gd028=
|
|
||||||
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
|
|
||||||
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
|
|
||||||
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
|
|
||||||
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
|
||||||
github.com/unpoller/unifi/v5 v5.18.0 h1:i9xecLeI9CU6m+5++TIm+zhdGS9f8KCUz8PuuzO7sSQ=
|
|
||||||
github.com/unpoller/unifi/v5 v5.18.0/go.mod h1:vSIXIclPG9dpKxUp+pavfgENHWaTZXvDg7F036R1YCo=
|
|
||||||
github.com/wI2L/jsondiff v0.7.0 h1:1lH1G37GhBPqCfp/lrs91rf/2j3DktX6qYAKZkLuCQQ=
|
|
||||||
github.com/wI2L/jsondiff v0.7.0/go.mod h1:KAEIojdQq66oJiHhDyQez2x+sRit0vIzC9KeK0yizxM=
|
|
||||||
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
|
|
||||||
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
|
|
||||||
github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc=
|
|
||||||
github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU=
|
|
||||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
|
||||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
|
||||||
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
|
|
||||||
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
|
|
||||||
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
|
|
||||||
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
|
|
||||||
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
|
|
||||||
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
|
|
||||||
github.com/zeebo/xxh3 v1.1.0 h1:s7DLGDK45Dyfg7++yxI0khrfwq9661w9EN78eP/UZVs=
|
|
||||||
github.com/zeebo/xxh3 v1.1.0/go.mod h1:IisAie1LELR4xhVinxWS5+zf1lA4p0MW4T+w+W07F5s=
|
|
||||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
|
||||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
|
||||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
|
||||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
|
||||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
|
||||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
|
||||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
|
||||||
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
|
||||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
|
||||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
|
||||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||||
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o=
|
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o=
|
||||||
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
|
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
|
||||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
||||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
|
||||||
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
|
|
||||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
|
||||||
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
|
|
||||||
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/telemetry v0.0.0-20260213145524-e0ab670178e1 h1:QNaHp8YvpPswfDNxlCmJyeesxbGOgaKf41iT9/QrErY=
|
|
||||||
golang.org/x/telemetry v0.0.0-20260213145524-e0ab670178e1/go.mod h1:NuITXsA9cTiqnXtVk+/wrBT2Ja4X5hsfGOYRJ6kgYjs=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
|
|
||||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
|
|
||||||
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
|
|
||||||
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba h1:UKgtfRM7Yh93Sya0Fo8ZzhDP4qBckrrxEr2oF5UIVb8=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
|
||||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
|
||||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
|
||||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
|
||||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||||
|
|
|
||||||
|
|
@ -1,87 +0,0 @@
|
||||||
package agentci
|
|
||||||
|
|
||||||
import (
	"bytes"
	"context"
	"strings"

	"forge.lthn.ai/core/go/pkg/jobrunner"
)
|
|
||||||
|
|
||||||
// RunMode determines the execution strategy for a dispatched task.
type RunMode string

const (
	// ModeStandard runs the task once on the primary model.
	ModeStandard RunMode = "standard"
	// ModeDual runs the Clotho Protocol: a second, independent
	// verification pass whose output is compared with the primary run.
	ModeDual RunMode = "dual"
)
|
|
||||||
|
|
||||||
// Spinner is the Clotho orchestrator that determines the fate of each task.
|
|
||||||
type Spinner struct {
|
|
||||||
Config ClothoConfig
|
|
||||||
Agents map[string]AgentConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSpinner creates a new Clotho orchestrator.
|
|
||||||
func NewSpinner(cfg ClothoConfig, agents map[string]AgentConfig) *Spinner {
|
|
||||||
return &Spinner{
|
|
||||||
Config: cfg,
|
|
||||||
Agents: agents,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeterminePlan decides if a signal requires dual-run verification based on
|
|
||||||
// the global strategy, agent configuration, and repository criticality.
|
|
||||||
func (s *Spinner) DeterminePlan(signal *jobrunner.PipelineSignal, agentName string) RunMode {
|
|
||||||
if s.Config.Strategy != "clotho-verified" {
|
|
||||||
return ModeStandard
|
|
||||||
}
|
|
||||||
|
|
||||||
agent, ok := s.Agents[agentName]
|
|
||||||
if !ok {
|
|
||||||
return ModeStandard
|
|
||||||
}
|
|
||||||
if agent.DualRun {
|
|
||||||
return ModeDual
|
|
||||||
}
|
|
||||||
|
|
||||||
// Protect critical repos with dual-run (Axiom 1).
|
|
||||||
if signal.RepoName == "core" || strings.Contains(signal.RepoName, "security") {
|
|
||||||
return ModeDual
|
|
||||||
}
|
|
||||||
|
|
||||||
return ModeStandard
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVerifierModel returns the model for the secondary "signed" verification run.
|
|
||||||
func (s *Spinner) GetVerifierModel(agentName string) string {
|
|
||||||
agent, ok := s.Agents[agentName]
|
|
||||||
if !ok || agent.VerifyModel == "" {
|
|
||||||
return "gemini-1.5-pro"
|
|
||||||
}
|
|
||||||
return agent.VerifyModel
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindByForgejoUser resolves a Forgejo username to the agent config key and config.
|
|
||||||
// This decouples agent naming (mythological roles) from Forgejo identity.
|
|
||||||
func (s *Spinner) FindByForgejoUser(forgejoUser string) (string, AgentConfig, bool) {
|
|
||||||
if forgejoUser == "" {
|
|
||||||
return "", AgentConfig{}, false
|
|
||||||
}
|
|
||||||
// Direct match on config key first.
|
|
||||||
if agent, ok := s.Agents[forgejoUser]; ok {
|
|
||||||
return forgejoUser, agent, true
|
|
||||||
}
|
|
||||||
// Search by ForgejoUser field.
|
|
||||||
for name, agent := range s.Agents {
|
|
||||||
if agent.ForgejoUser != "" && agent.ForgejoUser == forgejoUser {
|
|
||||||
return name, agent, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", AgentConfig{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Weave compares primary and verifier outputs. Returns true if they converge.
|
|
||||||
// This is a placeholder for future semantic diff logic.
|
|
||||||
func (s *Spinner) Weave(ctx context.Context, primaryOutput, signedOutput []byte) (bool, error) {
|
|
||||||
return string(primaryOutput) == string(signedOutput), nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,144 +0,0 @@
|
||||||
// Package agentci provides configuration, security, and orchestration for AgentCI dispatch targets.
|
|
||||||
package agentci
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/config"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AgentConfig represents a single agent machine in the config file.
type AgentConfig struct {
	// Host is the dispatch target, e.g. "claude@192.168.0.201".
	Host string `yaml:"host" mapstructure:"host"`
	// QueueDir is the work-queue directory on the agent host.
	QueueDir string `yaml:"queue_dir" mapstructure:"queue_dir"`
	// ForgejoUser is the agent's Forgejo identity.
	ForgejoUser string `yaml:"forgejo_user" mapstructure:"forgejo_user"`
	// Model is the primary AI model.
	Model string `yaml:"model" mapstructure:"model"`
	// Runner is the runner binary: claude, codex, gemini.
	Runner string `yaml:"runner" mapstructure:"runner"`
	// VerifyModel is the secondary model for dual-run verification.
	VerifyModel string `yaml:"verify_model" mapstructure:"verify_model"`
	// SecurityLevel is "low" or "high".
	SecurityLevel string `yaml:"security_level" mapstructure:"security_level"`
	// Roles lists the roles this agent may take on.
	Roles []string `yaml:"roles" mapstructure:"roles"`
	// DualRun opts this agent into dual-run verification.
	DualRun bool `yaml:"dual_run" mapstructure:"dual_run"`
	// Active marks the agent as available for dispatch.
	Active bool `yaml:"active" mapstructure:"active"`
}
|
|
||||||
|
|
||||||
// ClothoConfig controls the orchestration strategy.
type ClothoConfig struct {
	// Strategy is "direct" (single run) or "clotho-verified" (dual-run).
	Strategy string `yaml:"strategy" mapstructure:"strategy"`
	// ValidationThreshold is the divergence limit in the range 0.0-1.0.
	ValidationThreshold float64 `yaml:"validation_threshold" mapstructure:"validation_threshold"`
	// SigningKeyPath locates the key used to sign verified runs.
	SigningKeyPath string `yaml:"signing_key_path" mapstructure:"signing_key_path"`
}
|
|
||||||
|
|
||||||
// LoadAgents reads agent targets from config and returns a map of AgentConfig.
|
|
||||||
// Returns an empty map (not an error) if no agents are configured.
|
|
||||||
func LoadAgents(cfg *config.Config) (map[string]AgentConfig, error) {
|
|
||||||
var agents map[string]AgentConfig
|
|
||||||
if err := cfg.Get("agentci.agents", &agents); err != nil {
|
|
||||||
return map[string]AgentConfig{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate and apply defaults.
|
|
||||||
for name, ac := range agents {
|
|
||||||
if !ac.Active {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if ac.Host == "" {
|
|
||||||
return nil, fmt.Errorf("agent %q: host is required", name)
|
|
||||||
}
|
|
||||||
if ac.QueueDir == "" {
|
|
||||||
ac.QueueDir = "/home/claude/ai-work/queue"
|
|
||||||
}
|
|
||||||
if ac.Model == "" {
|
|
||||||
ac.Model = "sonnet"
|
|
||||||
}
|
|
||||||
if ac.Runner == "" {
|
|
||||||
ac.Runner = "claude"
|
|
||||||
}
|
|
||||||
agents[name] = ac
|
|
||||||
}
|
|
||||||
|
|
||||||
return agents, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadActiveAgents returns only active agents.
|
|
||||||
func LoadActiveAgents(cfg *config.Config) (map[string]AgentConfig, error) {
|
|
||||||
all, err := LoadAgents(cfg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
active := make(map[string]AgentConfig)
|
|
||||||
for name, ac := range all {
|
|
||||||
if ac.Active {
|
|
||||||
active[name] = ac
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return active, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadClothoConfig loads the Clotho orchestrator settings.
|
|
||||||
// Returns sensible defaults if no config is present.
|
|
||||||
func LoadClothoConfig(cfg *config.Config) (ClothoConfig, error) {
|
|
||||||
var cc ClothoConfig
|
|
||||||
if err := cfg.Get("agentci.clotho", &cc); err != nil {
|
|
||||||
return ClothoConfig{
|
|
||||||
Strategy: "direct",
|
|
||||||
ValidationThreshold: 0.85,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
if cc.Strategy == "" {
|
|
||||||
cc.Strategy = "direct"
|
|
||||||
}
|
|
||||||
if cc.ValidationThreshold == 0 {
|
|
||||||
cc.ValidationThreshold = 0.85
|
|
||||||
}
|
|
||||||
return cc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveAgent writes an agent config entry to the config file.
|
|
||||||
func SaveAgent(cfg *config.Config, name string, ac AgentConfig) error {
|
|
||||||
key := fmt.Sprintf("agentci.agents.%s", name)
|
|
||||||
data := map[string]any{
|
|
||||||
"host": ac.Host,
|
|
||||||
"queue_dir": ac.QueueDir,
|
|
||||||
"forgejo_user": ac.ForgejoUser,
|
|
||||||
"active": ac.Active,
|
|
||||||
"dual_run": ac.DualRun,
|
|
||||||
}
|
|
||||||
if ac.Model != "" {
|
|
||||||
data["model"] = ac.Model
|
|
||||||
}
|
|
||||||
if ac.Runner != "" {
|
|
||||||
data["runner"] = ac.Runner
|
|
||||||
}
|
|
||||||
if ac.VerifyModel != "" {
|
|
||||||
data["verify_model"] = ac.VerifyModel
|
|
||||||
}
|
|
||||||
if ac.SecurityLevel != "" {
|
|
||||||
data["security_level"] = ac.SecurityLevel
|
|
||||||
}
|
|
||||||
if len(ac.Roles) > 0 {
|
|
||||||
data["roles"] = ac.Roles
|
|
||||||
}
|
|
||||||
return cfg.Set(key, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveAgent removes an agent from the config file.
|
|
||||||
func RemoveAgent(cfg *config.Config, name string) error {
|
|
||||||
var agents map[string]AgentConfig
|
|
||||||
if err := cfg.Get("agentci.agents", &agents); err != nil {
|
|
||||||
return fmt.Errorf("no agents configured")
|
|
||||||
}
|
|
||||||
if _, ok := agents[name]; !ok {
|
|
||||||
return fmt.Errorf("agent %q not found", name)
|
|
||||||
}
|
|
||||||
delete(agents, name)
|
|
||||||
return cfg.Set("agentci.agents", agents)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListAgents returns all configured agents (active and inactive).
|
|
||||||
func ListAgents(cfg *config.Config) (map[string]AgentConfig, error) {
|
|
||||||
var agents map[string]AgentConfig
|
|
||||||
if err := cfg.Get("agentci.agents", &agents); err != nil {
|
|
||||||
return map[string]AgentConfig{}, nil
|
|
||||||
}
|
|
||||||
return agents, nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,329 +0,0 @@
|
||||||
package agentci
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/config"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func newTestConfig(t *testing.T, yaml string) *config.Config {
|
|
||||||
t.Helper()
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
if yaml != "" {
|
|
||||||
m.Files["/tmp/test/config.yaml"] = yaml
|
|
||||||
}
|
|
||||||
cfg, err := config.New(config.WithMedium(m), config.WithPath("/tmp/test/config.yaml"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Good(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
darbs-claude:
|
|
||||||
host: claude@192.168.0.201
|
|
||||||
queue_dir: /home/claude/ai-work/queue
|
|
||||||
forgejo_user: darbs-claude
|
|
||||||
model: sonnet
|
|
||||||
runner: claude
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
agents, err := LoadAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, agents, 1)
|
|
||||||
|
|
||||||
agent := agents["darbs-claude"]
|
|
||||||
assert.Equal(t, "claude@192.168.0.201", agent.Host)
|
|
||||||
assert.Equal(t, "/home/claude/ai-work/queue", agent.QueueDir)
|
|
||||||
assert.Equal(t, "sonnet", agent.Model)
|
|
||||||
assert.Equal(t, "claude", agent.Runner)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Good_MultipleAgents(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
darbs-claude:
|
|
||||||
host: claude@192.168.0.201
|
|
||||||
queue_dir: /home/claude/ai-work/queue
|
|
||||||
active: true
|
|
||||||
local-codex:
|
|
||||||
host: localhost
|
|
||||||
queue_dir: /home/claude/ai-work/queue
|
|
||||||
runner: codex
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
agents, err := LoadAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, agents, 2)
|
|
||||||
assert.Contains(t, agents, "darbs-claude")
|
|
||||||
assert.Contains(t, agents, "local-codex")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Good_SkipsInactive(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
active-agent:
|
|
||||||
host: claude@10.0.0.1
|
|
||||||
active: true
|
|
||||||
offline-agent:
|
|
||||||
host: claude@10.0.0.2
|
|
||||||
active: false
|
|
||||||
`)
|
|
||||||
agents, err := LoadAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Both are returned, but only active-agent has defaults applied.
|
|
||||||
assert.Len(t, agents, 2)
|
|
||||||
assert.Contains(t, agents, "active-agent")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadActiveAgents_Good(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
active-agent:
|
|
||||||
host: claude@10.0.0.1
|
|
||||||
active: true
|
|
||||||
offline-agent:
|
|
||||||
host: claude@10.0.0.2
|
|
||||||
active: false
|
|
||||||
`)
|
|
||||||
active, err := LoadActiveAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, active, 1)
|
|
||||||
assert.Contains(t, active, "active-agent")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Good_Defaults(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
minimal:
|
|
||||||
host: claude@10.0.0.1
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
agents, err := LoadAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, agents, 1)
|
|
||||||
|
|
||||||
agent := agents["minimal"]
|
|
||||||
assert.Equal(t, "/home/claude/ai-work/queue", agent.QueueDir)
|
|
||||||
assert.Equal(t, "sonnet", agent.Model)
|
|
||||||
assert.Equal(t, "claude", agent.Runner)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Good_NoConfig(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
agents, err := LoadAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, agents)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Bad_MissingHost(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
broken:
|
|
||||||
queue_dir: /tmp
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
_, err := LoadAgents(cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "host is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadAgents_Good_WithDualRun(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
gemini-agent:
|
|
||||||
host: localhost
|
|
||||||
runner: gemini
|
|
||||||
model: gemini-2.0-flash
|
|
||||||
verify_model: gemini-1.5-pro
|
|
||||||
dual_run: true
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
agents, err := LoadAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
agent := agents["gemini-agent"]
|
|
||||||
assert.Equal(t, "gemini", agent.Runner)
|
|
||||||
assert.Equal(t, "gemini-2.0-flash", agent.Model)
|
|
||||||
assert.Equal(t, "gemini-1.5-pro", agent.VerifyModel)
|
|
||||||
assert.True(t, agent.DualRun)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadClothoConfig_Good(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
clotho:
|
|
||||||
strategy: clotho-verified
|
|
||||||
validation_threshold: 0.9
|
|
||||||
signing_key_path: /etc/core/keys/clotho.pub
|
|
||||||
`)
|
|
||||||
cc, err := LoadClothoConfig(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "clotho-verified", cc.Strategy)
|
|
||||||
assert.Equal(t, 0.9, cc.ValidationThreshold)
|
|
||||||
assert.Equal(t, "/etc/core/keys/clotho.pub", cc.SigningKeyPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadClothoConfig_Good_Defaults(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
cc, err := LoadClothoConfig(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "direct", cc.Strategy)
|
|
||||||
assert.Equal(t, 0.85, cc.ValidationThreshold)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveAgent_Good(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
|
|
||||||
err := SaveAgent(cfg, "new-agent", AgentConfig{
|
|
||||||
Host: "claude@10.0.0.5",
|
|
||||||
QueueDir: "/home/claude/ai-work/queue",
|
|
||||||
ForgejoUser: "new-agent",
|
|
||||||
Model: "haiku",
|
|
||||||
Runner: "claude",
|
|
||||||
Active: true,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
agents, err := ListAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Contains(t, agents, "new-agent")
|
|
||||||
assert.Equal(t, "claude@10.0.0.5", agents["new-agent"].Host)
|
|
||||||
assert.Equal(t, "haiku", agents["new-agent"].Model)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveAgent_Good_WithDualRun(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
|
|
||||||
err := SaveAgent(cfg, "verified-agent", AgentConfig{
|
|
||||||
Host: "claude@10.0.0.5",
|
|
||||||
Model: "gemini-2.0-flash",
|
|
||||||
VerifyModel: "gemini-1.5-pro",
|
|
||||||
DualRun: true,
|
|
||||||
Active: true,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
agents, err := ListAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Contains(t, agents, "verified-agent")
|
|
||||||
assert.True(t, agents["verified-agent"].DualRun)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveAgent_Good_OmitsEmptyOptionals(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
|
|
||||||
err := SaveAgent(cfg, "minimal", AgentConfig{
|
|
||||||
Host: "claude@10.0.0.1",
|
|
||||||
Active: true,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
agents, err := ListAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Contains(t, agents, "minimal")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRemoveAgent_Good(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
to-remove:
|
|
||||||
host: claude@10.0.0.1
|
|
||||||
active: true
|
|
||||||
to-keep:
|
|
||||||
host: claude@10.0.0.2
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
err := RemoveAgent(cfg, "to-remove")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
agents, err := ListAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotContains(t, agents, "to-remove")
|
|
||||||
assert.Contains(t, agents, "to-keep")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRemoveAgent_Bad_NotFound(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
existing:
|
|
||||||
host: claude@10.0.0.1
|
|
||||||
active: true
|
|
||||||
`)
|
|
||||||
err := RemoveAgent(cfg, "nonexistent")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRemoveAgent_Bad_NoAgents(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
err := RemoveAgent(cfg, "anything")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "no agents configured")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListAgents_Good(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, `
|
|
||||||
agentci:
|
|
||||||
agents:
|
|
||||||
agent-a:
|
|
||||||
host: claude@10.0.0.1
|
|
||||||
active: true
|
|
||||||
agent-b:
|
|
||||||
host: claude@10.0.0.2
|
|
||||||
active: false
|
|
||||||
`)
|
|
||||||
agents, err := ListAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, agents, 2)
|
|
||||||
assert.True(t, agents["agent-a"].Active)
|
|
||||||
assert.False(t, agents["agent-b"].Active)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestListAgents_Good_Empty(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
agents, err := ListAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, agents)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRoundTrip_SaveThenLoad(t *testing.T) {
|
|
||||||
cfg := newTestConfig(t, "")
|
|
||||||
|
|
||||||
err := SaveAgent(cfg, "alpha", AgentConfig{
|
|
||||||
Host: "claude@alpha",
|
|
||||||
QueueDir: "/home/claude/work/queue",
|
|
||||||
ForgejoUser: "alpha-bot",
|
|
||||||
Model: "opus",
|
|
||||||
Runner: "claude",
|
|
||||||
Active: true,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = SaveAgent(cfg, "beta", AgentConfig{
|
|
||||||
Host: "claude@beta",
|
|
||||||
ForgejoUser: "beta-bot",
|
|
||||||
Runner: "codex",
|
|
||||||
Active: true,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
agents, err := LoadActiveAgents(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, agents, 2)
|
|
||||||
assert.Equal(t, "claude@alpha", agents["alpha"].Host)
|
|
||||||
assert.Equal(t, "opus", agents["alpha"].Model)
|
|
||||||
assert.Equal(t, "codex", agents["beta"].Runner)
|
|
||||||
}
|
|
||||||
|
|
@ -1,49 +0,0 @@
|
||||||
package agentci
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var safeNameRegex = regexp.MustCompile(`^[a-zA-Z0-9\-\_\.]+$`)
|
|
||||||
|
|
||||||
// SanitizePath ensures a filename or directory name is safe and prevents path traversal.
|
|
||||||
// Returns filepath.Base of the input after validation.
|
|
||||||
func SanitizePath(input string) (string, error) {
|
|
||||||
base := filepath.Base(input)
|
|
||||||
if !safeNameRegex.MatchString(base) {
|
|
||||||
return "", fmt.Errorf("invalid characters in path element: %s", input)
|
|
||||||
}
|
|
||||||
if base == "." || base == ".." || base == "/" {
|
|
||||||
return "", fmt.Errorf("invalid path element: %s", base)
|
|
||||||
}
|
|
||||||
return base, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// EscapeShellArg wraps a string in single quotes for safe remote shell insertion.
|
|
||||||
// Prefer exec.Command arguments over constructing shell strings where possible.
|
|
||||||
func EscapeShellArg(arg string) string {
|
|
||||||
return "'" + strings.ReplaceAll(arg, "'", "'\\''") + "'"
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecureSSHCommand creates an SSH exec.Cmd with strict host key checking and batch mode.
|
|
||||||
func SecureSSHCommand(host string, remoteCmd string) *exec.Cmd {
|
|
||||||
return exec.Command("ssh",
|
|
||||||
"-o", "StrictHostKeyChecking=yes",
|
|
||||||
"-o", "BatchMode=yes",
|
|
||||||
"-o", "ConnectTimeout=10",
|
|
||||||
host,
|
|
||||||
remoteCmd,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaskToken returns a masked version of a token for safe logging.
|
|
||||||
func MaskToken(token string) string {
|
|
||||||
if len(token) < 8 {
|
|
||||||
return "*****"
|
|
||||||
}
|
|
||||||
return token[:4] + "****" + token[len(token)-4:]
|
|
||||||
}
|
|
||||||
|
|
@ -1,299 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AllowanceStatus indicates the current state of an agent's quota.
|
|
||||||
type AllowanceStatus string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// AllowanceOK indicates the agent has remaining quota.
|
|
||||||
AllowanceOK AllowanceStatus = "ok"
|
|
||||||
// AllowanceWarning indicates the agent is at 80%+ usage.
|
|
||||||
AllowanceWarning AllowanceStatus = "warning"
|
|
||||||
// AllowanceExceeded indicates the agent has exceeded its quota.
|
|
||||||
AllowanceExceeded AllowanceStatus = "exceeded"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AgentAllowance defines the quota limits for a single agent.
|
|
||||||
type AgentAllowance struct {
|
|
||||||
// AgentID is the unique identifier for the agent.
|
|
||||||
AgentID string `json:"agent_id" yaml:"agent_id"`
|
|
||||||
// DailyTokenLimit is the maximum tokens (in+out) per 24h. 0 means unlimited.
|
|
||||||
DailyTokenLimit int64 `json:"daily_token_limit" yaml:"daily_token_limit"`
|
|
||||||
// DailyJobLimit is the maximum jobs per 24h. 0 means unlimited.
|
|
||||||
DailyJobLimit int `json:"daily_job_limit" yaml:"daily_job_limit"`
|
|
||||||
// ConcurrentJobs is the maximum simultaneous jobs. 0 means unlimited.
|
|
||||||
ConcurrentJobs int `json:"concurrent_jobs" yaml:"concurrent_jobs"`
|
|
||||||
// MaxJobDuration is the maximum job duration before kill. 0 means unlimited.
|
|
||||||
MaxJobDuration time.Duration `json:"max_job_duration" yaml:"max_job_duration"`
|
|
||||||
// ModelAllowlist restricts which models this agent can use. Empty means all.
|
|
||||||
ModelAllowlist []string `json:"model_allowlist,omitempty" yaml:"model_allowlist"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModelQuota defines global per-model limits across all agents.
|
|
||||||
type ModelQuota struct {
|
|
||||||
// Model is the model identifier (e.g. "claude-sonnet-4-5-20250929").
|
|
||||||
Model string `json:"model" yaml:"model"`
|
|
||||||
// DailyTokenBudget is the total tokens across all agents per 24h.
|
|
||||||
DailyTokenBudget int64 `json:"daily_token_budget" yaml:"daily_token_budget"`
|
|
||||||
// HourlyRateLimit is the max requests per hour.
|
|
||||||
HourlyRateLimit int `json:"hourly_rate_limit" yaml:"hourly_rate_limit"`
|
|
||||||
// CostCeiling stops all usage if cumulative cost exceeds this (in cents).
|
|
||||||
CostCeiling int64 `json:"cost_ceiling" yaml:"cost_ceiling"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RepoLimit defines per-repository rate limits.
|
|
||||||
type RepoLimit struct {
|
|
||||||
// Repo is the repository identifier (e.g. "owner/repo").
|
|
||||||
Repo string `json:"repo" yaml:"repo"`
|
|
||||||
// MaxDailyPRs is the maximum PRs per day. 0 means unlimited.
|
|
||||||
MaxDailyPRs int `json:"max_daily_prs" yaml:"max_daily_prs"`
|
|
||||||
// MaxDailyIssues is the maximum issues per day. 0 means unlimited.
|
|
||||||
MaxDailyIssues int `json:"max_daily_issues" yaml:"max_daily_issues"`
|
|
||||||
// CooldownAfterFailure is the wait time after a failure before retrying.
|
|
||||||
CooldownAfterFailure time.Duration `json:"cooldown_after_failure" yaml:"cooldown_after_failure"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UsageRecord tracks an agent's current usage within a quota period.
|
|
||||||
type UsageRecord struct {
|
|
||||||
// AgentID is the agent this record belongs to.
|
|
||||||
AgentID string `json:"agent_id"`
|
|
||||||
// TokensUsed is the total tokens consumed in the current period.
|
|
||||||
TokensUsed int64 `json:"tokens_used"`
|
|
||||||
// JobsStarted is the total jobs started in the current period.
|
|
||||||
JobsStarted int `json:"jobs_started"`
|
|
||||||
// ActiveJobs is the number of currently running jobs.
|
|
||||||
ActiveJobs int `json:"active_jobs"`
|
|
||||||
// PeriodStart is when the current quota period began.
|
|
||||||
PeriodStart time.Time `json:"period_start"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// QuotaCheckResult is the outcome of a pre-dispatch allowance check.
|
|
||||||
type QuotaCheckResult struct {
|
|
||||||
// Allowed indicates whether the agent may proceed.
|
|
||||||
Allowed bool `json:"allowed"`
|
|
||||||
// Status is the current allowance state.
|
|
||||||
Status AllowanceStatus `json:"status"`
|
|
||||||
// Remaining is the number of tokens remaining in the period.
|
|
||||||
RemainingTokens int64 `json:"remaining_tokens"`
|
|
||||||
// RemainingJobs is the number of jobs remaining in the period.
|
|
||||||
RemainingJobs int `json:"remaining_jobs"`
|
|
||||||
// Reason explains why the check failed (if !Allowed).
|
|
||||||
Reason string `json:"reason,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// QuotaEvent represents a change in quota usage, used for recovery.
|
|
||||||
type QuotaEvent string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// QuotaEventJobStarted deducts quota when a job begins.
|
|
||||||
QuotaEventJobStarted QuotaEvent = "job_started"
|
|
||||||
// QuotaEventJobCompleted deducts nothing (already counted).
|
|
||||||
QuotaEventJobCompleted QuotaEvent = "job_completed"
|
|
||||||
// QuotaEventJobFailed returns 50% of token quota.
|
|
||||||
QuotaEventJobFailed QuotaEvent = "job_failed"
|
|
||||||
// QuotaEventJobCancelled returns 100% of token quota.
|
|
||||||
QuotaEventJobCancelled QuotaEvent = "job_cancelled"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UsageReport is emitted by the agent runner to report token consumption.
|
|
||||||
type UsageReport struct {
|
|
||||||
// AgentID is the agent that consumed tokens.
|
|
||||||
AgentID string `json:"agent_id"`
|
|
||||||
// JobID identifies the specific job.
|
|
||||||
JobID string `json:"job_id"`
|
|
||||||
// Model is the model used.
|
|
||||||
Model string `json:"model"`
|
|
||||||
// TokensIn is the number of input tokens consumed.
|
|
||||||
TokensIn int64 `json:"tokens_in"`
|
|
||||||
// TokensOut is the number of output tokens consumed.
|
|
||||||
TokensOut int64 `json:"tokens_out"`
|
|
||||||
// Event is the type of quota event.
|
|
||||||
Event QuotaEvent `json:"event"`
|
|
||||||
// Timestamp is when the usage occurred.
|
|
||||||
Timestamp time.Time `json:"timestamp"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllowanceStore is the interface for persisting and querying allowance data.
|
|
||||||
// Implementations may use Redis, SQLite, or any backing store.
|
|
||||||
type AllowanceStore interface {
|
|
||||||
// GetAllowance returns the quota limits for an agent.
|
|
||||||
GetAllowance(agentID string) (*AgentAllowance, error)
|
|
||||||
// SetAllowance persists quota limits for an agent.
|
|
||||||
SetAllowance(a *AgentAllowance) error
|
|
||||||
// GetUsage returns the current usage record for an agent.
|
|
||||||
GetUsage(agentID string) (*UsageRecord, error)
|
|
||||||
// IncrementUsage atomically adds to an agent's usage counters.
|
|
||||||
IncrementUsage(agentID string, tokens int64, jobs int) error
|
|
||||||
// DecrementActiveJobs reduces the active job count by 1.
|
|
||||||
DecrementActiveJobs(agentID string) error
|
|
||||||
// ReturnTokens adds tokens back to the agent's remaining quota.
|
|
||||||
ReturnTokens(agentID string, tokens int64) error
|
|
||||||
// ResetUsage clears usage counters for an agent (daily reset).
|
|
||||||
ResetUsage(agentID string) error
|
|
||||||
// GetModelQuota returns global limits for a model.
|
|
||||||
GetModelQuota(model string) (*ModelQuota, error)
|
|
||||||
// GetModelUsage returns current token usage for a model.
|
|
||||||
GetModelUsage(model string) (int64, error)
|
|
||||||
// IncrementModelUsage atomically adds to a model's usage counter.
|
|
||||||
IncrementModelUsage(model string, tokens int64) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemoryStore is an in-memory AllowanceStore for testing and single-node use.
|
|
||||||
type MemoryStore struct {
|
|
||||||
mu sync.RWMutex
|
|
||||||
allowances map[string]*AgentAllowance
|
|
||||||
usage map[string]*UsageRecord
|
|
||||||
modelQuotas map[string]*ModelQuota
|
|
||||||
modelUsage map[string]int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMemoryStore creates a new in-memory allowance store.
|
|
||||||
func NewMemoryStore() *MemoryStore {
|
|
||||||
return &MemoryStore{
|
|
||||||
allowances: make(map[string]*AgentAllowance),
|
|
||||||
usage: make(map[string]*UsageRecord),
|
|
||||||
modelQuotas: make(map[string]*ModelQuota),
|
|
||||||
modelUsage: make(map[string]int64),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAllowance returns the quota limits for an agent.
|
|
||||||
func (m *MemoryStore) GetAllowance(agentID string) (*AgentAllowance, error) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
a, ok := m.allowances[agentID]
|
|
||||||
if !ok {
|
|
||||||
return nil, &APIError{Code: 404, Message: "allowance not found for agent: " + agentID}
|
|
||||||
}
|
|
||||||
cp := *a
|
|
||||||
return &cp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAllowance persists quota limits for an agent.
|
|
||||||
func (m *MemoryStore) SetAllowance(a *AgentAllowance) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
cp := *a
|
|
||||||
m.allowances[a.AgentID] = &cp
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUsage returns the current usage record for an agent.
|
|
||||||
func (m *MemoryStore) GetUsage(agentID string) (*UsageRecord, error) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
u, ok := m.usage[agentID]
|
|
||||||
if !ok {
|
|
||||||
return &UsageRecord{
|
|
||||||
AgentID: agentID,
|
|
||||||
PeriodStart: startOfDay(time.Now().UTC()),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
cp := *u
|
|
||||||
return &cp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncrementUsage atomically adds to an agent's usage counters.
|
|
||||||
func (m *MemoryStore) IncrementUsage(agentID string, tokens int64, jobs int) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
u, ok := m.usage[agentID]
|
|
||||||
if !ok {
|
|
||||||
u = &UsageRecord{
|
|
||||||
AgentID: agentID,
|
|
||||||
PeriodStart: startOfDay(time.Now().UTC()),
|
|
||||||
}
|
|
||||||
m.usage[agentID] = u
|
|
||||||
}
|
|
||||||
u.TokensUsed += tokens
|
|
||||||
u.JobsStarted += jobs
|
|
||||||
if jobs > 0 {
|
|
||||||
u.ActiveJobs += jobs
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecrementActiveJobs reduces the active job count by 1.
|
|
||||||
func (m *MemoryStore) DecrementActiveJobs(agentID string) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
u, ok := m.usage[agentID]
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if u.ActiveJobs > 0 {
|
|
||||||
u.ActiveJobs--
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReturnTokens adds tokens back to the agent's remaining quota.
|
|
||||||
func (m *MemoryStore) ReturnTokens(agentID string, tokens int64) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
u, ok := m.usage[agentID]
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
u.TokensUsed -= tokens
|
|
||||||
if u.TokensUsed < 0 {
|
|
||||||
u.TokensUsed = 0
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResetUsage clears usage counters for an agent.
|
|
||||||
func (m *MemoryStore) ResetUsage(agentID string) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
m.usage[agentID] = &UsageRecord{
|
|
||||||
AgentID: agentID,
|
|
||||||
PeriodStart: startOfDay(time.Now().UTC()),
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetModelQuota returns global limits for a model.
|
|
||||||
func (m *MemoryStore) GetModelQuota(model string) (*ModelQuota, error) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
q, ok := m.modelQuotas[model]
|
|
||||||
if !ok {
|
|
||||||
return nil, &APIError{Code: 404, Message: "model quota not found: " + model}
|
|
||||||
}
|
|
||||||
cp := *q
|
|
||||||
return &cp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetModelUsage returns current token usage for a model.
|
|
||||||
func (m *MemoryStore) GetModelUsage(model string) (int64, error) {
|
|
||||||
m.mu.RLock()
|
|
||||||
defer m.mu.RUnlock()
|
|
||||||
return m.modelUsage[model], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncrementModelUsage atomically adds to a model's usage counter.
|
|
||||||
func (m *MemoryStore) IncrementModelUsage(model string, tokens int64) error {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
m.modelUsage[model] += tokens
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetModelQuota sets global limits for a model (used in testing).
|
|
||||||
func (m *MemoryStore) SetModelQuota(q *ModelQuota) {
|
|
||||||
m.mu.Lock()
|
|
||||||
defer m.mu.Unlock()
|
|
||||||
cp := *q
|
|
||||||
m.modelQuotas[q.Model] = &cp
|
|
||||||
}
|
|
||||||
|
|
||||||
// startOfDay returns midnight UTC for the given time.
|
|
||||||
func startOfDay(t time.Time) time.Time {
|
|
||||||
y, mo, d := t.Date()
|
|
||||||
return time.Date(y, mo, d, 0, 0, 0, 0, time.UTC)
|
|
||||||
}
|
|
||||||
|
|
@ -1,176 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"slices"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AllowanceService enforces agent quota limits. It provides pre-dispatch checks,
|
|
||||||
// runtime usage recording, and quota recovery for failed/cancelled jobs.
|
|
||||||
type AllowanceService struct {
|
|
||||||
store AllowanceStore
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAllowanceService creates a new AllowanceService with the given store.
|
|
||||||
func NewAllowanceService(store AllowanceStore) *AllowanceService {
|
|
||||||
return &AllowanceService{store: store}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check performs a pre-dispatch allowance check for the given agent and model.
|
|
||||||
// It verifies daily token limits, daily job limits, concurrent job limits, and
|
|
||||||
// model allowlists. Returns a QuotaCheckResult indicating whether the agent may proceed.
|
|
||||||
func (s *AllowanceService) Check(agentID, model string) (*QuotaCheckResult, error) {
|
|
||||||
const op = "AllowanceService.Check"
|
|
||||||
|
|
||||||
allowance, err := s.store.GetAllowance(agentID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "failed to get allowance", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
usage, err := s.store.GetUsage(agentID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "failed to get usage", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
result := &QuotaCheckResult{
|
|
||||||
Allowed: true,
|
|
||||||
Status: AllowanceOK,
|
|
||||||
RemainingTokens: -1, // unlimited
|
|
||||||
RemainingJobs: -1, // unlimited
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check model allowlist
|
|
||||||
if len(allowance.ModelAllowlist) > 0 && model != "" {
|
|
||||||
if !slices.Contains(allowance.ModelAllowlist, model) {
|
|
||||||
result.Allowed = false
|
|
||||||
result.Status = AllowanceExceeded
|
|
||||||
result.Reason = "model not in allowlist: " + model
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check daily token limit
|
|
||||||
if allowance.DailyTokenLimit > 0 {
|
|
||||||
remaining := allowance.DailyTokenLimit - usage.TokensUsed
|
|
||||||
result.RemainingTokens = remaining
|
|
||||||
if remaining <= 0 {
|
|
||||||
result.Allowed = false
|
|
||||||
result.Status = AllowanceExceeded
|
|
||||||
result.Reason = "daily token limit exceeded"
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
ratio := float64(usage.TokensUsed) / float64(allowance.DailyTokenLimit)
|
|
||||||
if ratio >= 0.8 {
|
|
||||||
result.Status = AllowanceWarning
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check daily job limit
|
|
||||||
if allowance.DailyJobLimit > 0 {
|
|
||||||
remaining := allowance.DailyJobLimit - usage.JobsStarted
|
|
||||||
result.RemainingJobs = remaining
|
|
||||||
if remaining <= 0 {
|
|
||||||
result.Allowed = false
|
|
||||||
result.Status = AllowanceExceeded
|
|
||||||
result.Reason = "daily job limit exceeded"
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check concurrent jobs
|
|
||||||
if allowance.ConcurrentJobs > 0 && usage.ActiveJobs >= allowance.ConcurrentJobs {
|
|
||||||
result.Allowed = false
|
|
||||||
result.Status = AllowanceExceeded
|
|
||||||
result.Reason = "concurrent job limit reached"
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check global model quota
|
|
||||||
if model != "" {
|
|
||||||
modelQuota, err := s.store.GetModelQuota(model)
|
|
||||||
if err == nil && modelQuota.DailyTokenBudget > 0 {
|
|
||||||
modelUsage, err := s.store.GetModelUsage(model)
|
|
||||||
if err == nil && modelUsage >= modelQuota.DailyTokenBudget {
|
|
||||||
result.Allowed = false
|
|
||||||
result.Status = AllowanceExceeded
|
|
||||||
result.Reason = "global model token budget exceeded for: " + model
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RecordUsage processes a usage report, updating counters and handling quota recovery.
|
|
||||||
func (s *AllowanceService) RecordUsage(report UsageReport) error {
|
|
||||||
const op = "AllowanceService.RecordUsage"
|
|
||||||
|
|
||||||
totalTokens := report.TokensIn + report.TokensOut
|
|
||||||
|
|
||||||
switch report.Event {
|
|
||||||
case QuotaEventJobStarted:
|
|
||||||
if err := s.store.IncrementUsage(report.AgentID, 0, 1); err != nil {
|
|
||||||
return log.E(op, "failed to increment job count", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
case QuotaEventJobCompleted:
|
|
||||||
if err := s.store.IncrementUsage(report.AgentID, totalTokens, 0); err != nil {
|
|
||||||
return log.E(op, "failed to record token usage", err)
|
|
||||||
}
|
|
||||||
if err := s.store.DecrementActiveJobs(report.AgentID); err != nil {
|
|
||||||
return log.E(op, "failed to decrement active jobs", err)
|
|
||||||
}
|
|
||||||
// Record model-level usage
|
|
||||||
if report.Model != "" {
|
|
||||||
if err := s.store.IncrementModelUsage(report.Model, totalTokens); err != nil {
|
|
||||||
return log.E(op, "failed to record model usage", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case QuotaEventJobFailed:
|
|
||||||
// Record partial usage, return 50% of tokens
|
|
||||||
if err := s.store.IncrementUsage(report.AgentID, totalTokens, 0); err != nil {
|
|
||||||
return log.E(op, "failed to record token usage", err)
|
|
||||||
}
|
|
||||||
if err := s.store.DecrementActiveJobs(report.AgentID); err != nil {
|
|
||||||
return log.E(op, "failed to decrement active jobs", err)
|
|
||||||
}
|
|
||||||
returnAmount := totalTokens / 2
|
|
||||||
if returnAmount > 0 {
|
|
||||||
if err := s.store.ReturnTokens(report.AgentID, returnAmount); err != nil {
|
|
||||||
return log.E(op, "failed to return tokens", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Still record model-level usage (net of return)
|
|
||||||
if report.Model != "" {
|
|
||||||
if err := s.store.IncrementModelUsage(report.Model, totalTokens-returnAmount); err != nil {
|
|
||||||
return log.E(op, "failed to record model usage", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case QuotaEventJobCancelled:
|
|
||||||
// Return 100% of tokens
|
|
||||||
if err := s.store.DecrementActiveJobs(report.AgentID); err != nil {
|
|
||||||
return log.E(op, "failed to decrement active jobs", err)
|
|
||||||
}
|
|
||||||
if totalTokens > 0 {
|
|
||||||
if err := s.store.ReturnTokens(report.AgentID, totalTokens); err != nil {
|
|
||||||
return log.E(op, "failed to return tokens", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// No model-level usage for cancelled jobs
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResetAgent clears daily usage counters for the given agent (midnight reset).
|
|
||||||
func (s *AllowanceService) ResetAgent(agentID string) error {
|
|
||||||
const op = "AllowanceService.ResetAgent"
|
|
||||||
if err := s.store.ResetUsage(agentID); err != nil {
|
|
||||||
return log.E(op, "failed to reset usage", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,407 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// --- MemoryStore tests ---
|
|
||||||
|
|
||||||
func TestMemoryStore_SetGetAllowance_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
a := &AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
DailyTokenLimit: 100000,
|
|
||||||
DailyJobLimit: 10,
|
|
||||||
ConcurrentJobs: 2,
|
|
||||||
MaxJobDuration: 30 * time.Minute,
|
|
||||||
ModelAllowlist: []string{"claude-sonnet-4-5-20250929"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := store.SetAllowance(a)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
got, err := store.GetAllowance("agent-1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, a.AgentID, got.AgentID)
|
|
||||||
assert.Equal(t, a.DailyTokenLimit, got.DailyTokenLimit)
|
|
||||||
assert.Equal(t, a.DailyJobLimit, got.DailyJobLimit)
|
|
||||||
assert.Equal(t, a.ConcurrentJobs, got.ConcurrentJobs)
|
|
||||||
assert.Equal(t, a.ModelAllowlist, got.ModelAllowlist)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_GetAllowance_Bad_NotFound(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
_, err := store.GetAllowance("nonexistent")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_IncrementUsage_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
err := store.IncrementUsage("agent-1", 5000, 1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, err := store.GetUsage("agent-1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(5000), usage.TokensUsed)
|
|
||||||
assert.Equal(t, 1, usage.JobsStarted)
|
|
||||||
assert.Equal(t, 1, usage.ActiveJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_DecrementActiveJobs_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
_ = store.IncrementUsage("agent-1", 0, 2)
|
|
||||||
_ = store.DecrementActiveJobs("agent-1")
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, 1, usage.ActiveJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_DecrementActiveJobs_Good_FloorAtZero(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
_ = store.DecrementActiveJobs("agent-1") // no-op, no usage record
|
|
||||||
_ = store.IncrementUsage("agent-1", 0, 0)
|
|
||||||
_ = store.DecrementActiveJobs("agent-1") // should stay at 0
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, 0, usage.ActiveJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_ReturnTokens_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
_ = store.IncrementUsage("agent-1", 10000, 0)
|
|
||||||
err := store.ReturnTokens("agent-1", 5000)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, int64(5000), usage.TokensUsed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_ReturnTokens_Good_FloorAtZero(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
_ = store.IncrementUsage("agent-1", 1000, 0)
|
|
||||||
_ = store.ReturnTokens("agent-1", 5000) // more than used
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, int64(0), usage.TokensUsed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_ResetUsage_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
_ = store.IncrementUsage("agent-1", 50000, 5)
|
|
||||||
err := store.ResetUsage("agent-1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, int64(0), usage.TokensUsed)
|
|
||||||
assert.Equal(t, 0, usage.JobsStarted)
|
|
||||||
assert.Equal(t, 0, usage.ActiveJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMemoryStore_ModelUsage_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
|
|
||||||
_ = store.IncrementModelUsage("claude-sonnet", 10000)
|
|
||||||
_ = store.IncrementModelUsage("claude-sonnet", 5000)
|
|
||||||
|
|
||||||
usage, err := store.GetModelUsage("claude-sonnet")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(15000), usage)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- AllowanceService.Check tests ---
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
DailyTokenLimit: 100000,
|
|
||||||
DailyJobLimit: 10,
|
|
||||||
ConcurrentJobs: 2,
|
|
||||||
})
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, result.Allowed)
|
|
||||||
assert.Equal(t, AllowanceOK, result.Status)
|
|
||||||
assert.Equal(t, int64(100000), result.RemainingTokens)
|
|
||||||
assert.Equal(t, 10, result.RemainingJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Good_Warning(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
DailyTokenLimit: 100000,
|
|
||||||
})
|
|
||||||
_ = store.IncrementUsage("agent-1", 85000, 0)
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, result.Allowed)
|
|
||||||
assert.Equal(t, AllowanceWarning, result.Status)
|
|
||||||
assert.Equal(t, int64(15000), result.RemainingTokens)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Bad_TokenLimitExceeded(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
DailyTokenLimit: 100000,
|
|
||||||
})
|
|
||||||
_ = store.IncrementUsage("agent-1", 100001, 0)
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, result.Allowed)
|
|
||||||
assert.Equal(t, AllowanceExceeded, result.Status)
|
|
||||||
assert.Contains(t, result.Reason, "daily token limit")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Bad_JobLimitExceeded(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
DailyJobLimit: 5,
|
|
||||||
})
|
|
||||||
_ = store.IncrementUsage("agent-1", 0, 5)
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, result.Allowed)
|
|
||||||
assert.Contains(t, result.Reason, "daily job limit")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Bad_ConcurrentLimitReached(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
ConcurrentJobs: 1,
|
|
||||||
})
|
|
||||||
_ = store.IncrementUsage("agent-1", 0, 1) // 1 active job
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, result.Allowed)
|
|
||||||
assert.Contains(t, result.Reason, "concurrent job limit")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Bad_ModelNotInAllowlist(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
ModelAllowlist: []string{"claude-sonnet-4-5-20250929"},
|
|
||||||
})
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "claude-opus-4-6")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, result.Allowed)
|
|
||||||
assert.Contains(t, result.Reason, "model not in allowlist")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Good_ModelInAllowlist(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
ModelAllowlist: []string{"claude-sonnet-4-5-20250929", "claude-haiku-4-5-20251001"},
|
|
||||||
})
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "claude-sonnet-4-5-20250929")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, result.Allowed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Good_EmptyModelSkipsCheck(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
ModelAllowlist: []string{"claude-sonnet-4-5-20250929"},
|
|
||||||
})
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, result.Allowed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Bad_GlobalModelBudgetExceeded(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.SetAllowance(&AgentAllowance{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
})
|
|
||||||
store.SetModelQuota(&ModelQuota{
|
|
||||||
Model: "claude-opus-4-6",
|
|
||||||
DailyTokenBudget: 500000,
|
|
||||||
})
|
|
||||||
_ = store.IncrementModelUsage("claude-opus-4-6", 500001)
|
|
||||||
|
|
||||||
result, err := svc.Check("agent-1", "claude-opus-4-6")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.False(t, result.Allowed)
|
|
||||||
assert.Contains(t, result.Reason, "global model token budget")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceCheck_Bad_NoAllowance(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_, err := svc.Check("unknown-agent", "")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- AllowanceService.RecordUsage tests ---
|
|
||||||
|
|
||||||
func TestAllowanceServiceRecordUsage_Good_JobStarted(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
err := svc.RecordUsage(UsageReport{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
JobID: "job-1",
|
|
||||||
Event: QuotaEventJobStarted,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, 1, usage.JobsStarted)
|
|
||||||
assert.Equal(t, 1, usage.ActiveJobs)
|
|
||||||
assert.Equal(t, int64(0), usage.TokensUsed)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceRecordUsage_Good_JobCompleted(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
// Start a job first
|
|
||||||
_ = svc.RecordUsage(UsageReport{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
JobID: "job-1",
|
|
||||||
Event: QuotaEventJobStarted,
|
|
||||||
})
|
|
||||||
|
|
||||||
err := svc.RecordUsage(UsageReport{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
JobID: "job-1",
|
|
||||||
Model: "claude-sonnet",
|
|
||||||
TokensIn: 1000,
|
|
||||||
TokensOut: 500,
|
|
||||||
Event: QuotaEventJobCompleted,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, int64(1500), usage.TokensUsed)
|
|
||||||
assert.Equal(t, 0, usage.ActiveJobs)
|
|
||||||
|
|
||||||
modelUsage, _ := store.GetModelUsage("claude-sonnet")
|
|
||||||
assert.Equal(t, int64(1500), modelUsage)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceRecordUsage_Good_JobFailed_ReturnsHalf(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = svc.RecordUsage(UsageReport{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
JobID: "job-1",
|
|
||||||
Event: QuotaEventJobStarted,
|
|
||||||
})
|
|
||||||
|
|
||||||
err := svc.RecordUsage(UsageReport{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
JobID: "job-1",
|
|
||||||
Model: "claude-sonnet",
|
|
||||||
TokensIn: 1000,
|
|
||||||
TokensOut: 1000,
|
|
||||||
Event: QuotaEventJobFailed,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
// 2000 tokens used, 1000 returned (50%) = 1000 net
|
|
||||||
assert.Equal(t, int64(1000), usage.TokensUsed)
|
|
||||||
assert.Equal(t, 0, usage.ActiveJobs)
|
|
||||||
|
|
||||||
// Model sees net usage (2000 - 1000 = 1000)
|
|
||||||
modelUsage, _ := store.GetModelUsage("claude-sonnet")
|
|
||||||
assert.Equal(t, int64(1000), modelUsage)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAllowanceServiceRecordUsage_Good_JobCancelled_ReturnsAll(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.IncrementUsage("agent-1", 5000, 1) // simulate pre-existing usage
|
|
||||||
|
|
||||||
err := svc.RecordUsage(UsageReport{
|
|
||||||
AgentID: "agent-1",
|
|
||||||
JobID: "job-1",
|
|
||||||
TokensIn: 500,
|
|
||||||
TokensOut: 500,
|
|
||||||
Event: QuotaEventJobCancelled,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
// 5000 pre-existing - 1000 returned = 4000
|
|
||||||
assert.Equal(t, int64(4000), usage.TokensUsed)
|
|
||||||
assert.Equal(t, 0, usage.ActiveJobs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- AllowanceService.ResetAgent tests ---
|
|
||||||
|
|
||||||
func TestAllowanceServiceResetAgent_Good(t *testing.T) {
|
|
||||||
store := NewMemoryStore()
|
|
||||||
svc := NewAllowanceService(store)
|
|
||||||
|
|
||||||
_ = store.IncrementUsage("agent-1", 50000, 5)
|
|
||||||
|
|
||||||
err := svc.ResetAgent("agent-1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
usage, _ := store.GetUsage("agent-1")
|
|
||||||
assert.Equal(t, int64(0), usage.TokensUsed)
|
|
||||||
assert.Equal(t, 0, usage.JobsStarted)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- startOfDay helper test ---
|
|
||||||
|
|
||||||
func TestStartOfDay_Good(t *testing.T) {
|
|
||||||
input := time.Date(2026, 2, 10, 15, 30, 45, 0, time.UTC)
|
|
||||||
expected := time.Date(2026, 2, 10, 0, 0, 0, 0, time.UTC)
|
|
||||||
assert.Equal(t, expected, startOfDay(input))
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- AllowanceStatus tests ---
|
|
||||||
|
|
||||||
func TestAllowanceStatus_Good_Values(t *testing.T) {
|
|
||||||
assert.Equal(t, AllowanceStatus("ok"), AllowanceOK)
|
|
||||||
assert.Equal(t, AllowanceStatus("warning"), AllowanceWarning)
|
|
||||||
assert.Equal(t, AllowanceStatus("exceeded"), AllowanceExceeded)
|
|
||||||
}
|
|
||||||
|
|
@ -1,322 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Client is the API client for the core-agentic service.
|
|
||||||
type Client struct {
|
|
||||||
// BaseURL is the base URL of the API server.
|
|
||||||
BaseURL string
|
|
||||||
// Token is the authentication token.
|
|
||||||
Token string
|
|
||||||
// HTTPClient is the HTTP client used for requests.
|
|
||||||
HTTPClient *http.Client
|
|
||||||
// AgentID is the identifier for this agent when claiming tasks.
|
|
||||||
AgentID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient creates a new agentic API client with the given base URL and token.
|
|
||||||
func NewClient(baseURL, token string) *Client {
|
|
||||||
return &Client{
|
|
||||||
BaseURL: strings.TrimSuffix(baseURL, "/"),
|
|
||||||
Token: token,
|
|
||||||
HTTPClient: &http.Client{
|
|
||||||
Timeout: 30 * time.Second,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClientFromConfig creates a new client from a Config struct.
|
|
||||||
func NewClientFromConfig(cfg *Config) *Client {
|
|
||||||
client := NewClient(cfg.BaseURL, cfg.Token)
|
|
||||||
client.AgentID = cfg.AgentID
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListTasks retrieves a list of tasks matching the given options.
|
|
||||||
func (c *Client) ListTasks(ctx context.Context, opts ListOptions) ([]Task, error) {
|
|
||||||
const op = "agentic.Client.ListTasks"
|
|
||||||
|
|
||||||
// Build query parameters
|
|
||||||
params := url.Values{}
|
|
||||||
if opts.Status != "" {
|
|
||||||
params.Set("status", string(opts.Status))
|
|
||||||
}
|
|
||||||
if opts.Priority != "" {
|
|
||||||
params.Set("priority", string(opts.Priority))
|
|
||||||
}
|
|
||||||
if opts.Project != "" {
|
|
||||||
params.Set("project", opts.Project)
|
|
||||||
}
|
|
||||||
if opts.ClaimedBy != "" {
|
|
||||||
params.Set("claimed_by", opts.ClaimedBy)
|
|
||||||
}
|
|
||||||
if opts.Limit > 0 {
|
|
||||||
params.Set("limit", strconv.Itoa(opts.Limit))
|
|
||||||
}
|
|
||||||
if len(opts.Labels) > 0 {
|
|
||||||
params.Set("labels", strings.Join(opts.Labels, ","))
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint := c.BaseURL + "/api/tasks"
|
|
||||||
if len(params) > 0 {
|
|
||||||
endpoint += "?" + params.Encode()
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "failed to create request", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setHeaders(req)
|
|
||||||
|
|
||||||
resp, err := c.HTTPClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if err := c.checkResponse(resp); err != nil {
|
|
||||||
return nil, log.E(op, "API error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var tasks []Task
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&tasks); err != nil {
|
|
||||||
return nil, log.E(op, "failed to decode response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return tasks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTask retrieves a single task by its ID.
|
|
||||||
func (c *Client) GetTask(ctx context.Context, id string) (*Task, error) {
|
|
||||||
const op = "agentic.Client.GetTask"
|
|
||||||
|
|
||||||
if id == "" {
|
|
||||||
return nil, log.E(op, "task ID is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint := fmt.Sprintf("%s/api/tasks/%s", c.BaseURL, url.PathEscape(id))
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "failed to create request", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setHeaders(req)
|
|
||||||
|
|
||||||
resp, err := c.HTTPClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if err := c.checkResponse(resp); err != nil {
|
|
||||||
return nil, log.E(op, "API error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var task Task
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&task); err != nil {
|
|
||||||
return nil, log.E(op, "failed to decode response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &task, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClaimTask claims a task for the current agent.
|
|
||||||
func (c *Client) ClaimTask(ctx context.Context, id string) (*Task, error) {
|
|
||||||
const op = "agentic.Client.ClaimTask"
|
|
||||||
|
|
||||||
if id == "" {
|
|
||||||
return nil, log.E(op, "task ID is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint := fmt.Sprintf("%s/api/tasks/%s/claim", c.BaseURL, url.PathEscape(id))
|
|
||||||
|
|
||||||
// Include agent ID in the claim request if available
|
|
||||||
var body io.Reader
|
|
||||||
if c.AgentID != "" {
|
|
||||||
data, _ := json.Marshal(map[string]string{"agent_id": c.AgentID})
|
|
||||||
body = bytes.NewReader(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "failed to create request", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setHeaders(req)
|
|
||||||
if body != nil {
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.HTTPClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if err := c.checkResponse(resp); err != nil {
|
|
||||||
return nil, log.E(op, "API error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read body once to allow multiple decode attempts
|
|
||||||
bodyData, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, log.E(op, "failed to read response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try decoding as ClaimResponse first
|
|
||||||
var result ClaimResponse
|
|
||||||
if err := json.Unmarshal(bodyData, &result); err == nil && result.Task != nil {
|
|
||||||
return result.Task, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try decoding as just a Task for simpler API responses
|
|
||||||
var task Task
|
|
||||||
if err := json.Unmarshal(bodyData, &task); err != nil {
|
|
||||||
return nil, log.E(op, "failed to decode response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &task, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateTask updates a task with new status, progress, or notes.
|
|
||||||
func (c *Client) UpdateTask(ctx context.Context, id string, update TaskUpdate) error {
|
|
||||||
const op = "agentic.Client.UpdateTask"
|
|
||||||
|
|
||||||
if id == "" {
|
|
||||||
return log.E(op, "task ID is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint := fmt.Sprintf("%s/api/tasks/%s", c.BaseURL, url.PathEscape(id))
|
|
||||||
|
|
||||||
data, err := json.Marshal(update)
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "failed to marshal update", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPatch, endpoint, bytes.NewReader(data))
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "failed to create request", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setHeaders(req)
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp, err := c.HTTPClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if err := c.checkResponse(resp); err != nil {
|
|
||||||
return log.E(op, "API error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompleteTask marks a task as completed with the given result.
|
|
||||||
func (c *Client) CompleteTask(ctx context.Context, id string, result TaskResult) error {
|
|
||||||
const op = "agentic.Client.CompleteTask"
|
|
||||||
|
|
||||||
if id == "" {
|
|
||||||
return log.E(op, "task ID is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint := fmt.Sprintf("%s/api/tasks/%s/complete", c.BaseURL, url.PathEscape(id))
|
|
||||||
|
|
||||||
data, err := json.Marshal(result)
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "failed to marshal result", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(data))
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "failed to create request", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setHeaders(req)
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp, err := c.HTTPClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if err := c.checkResponse(resp); err != nil {
|
|
||||||
return log.E(op, "API error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setHeaders adds common headers to the request.
|
|
||||||
func (c *Client) setHeaders(req *http.Request) {
|
|
||||||
req.Header.Set("Authorization", "Bearer "+c.Token)
|
|
||||||
req.Header.Set("Accept", "application/json")
|
|
||||||
req.Header.Set("User-Agent", "core-agentic-client/1.0")
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkResponse checks if the response indicates an error.
|
|
||||||
func (c *Client) checkResponse(resp *http.Response) error {
|
|
||||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
body, _ := io.ReadAll(resp.Body)
|
|
||||||
|
|
||||||
// Try to parse as APIError
|
|
||||||
var apiErr APIError
|
|
||||||
if err := json.Unmarshal(body, &apiErr); err == nil && apiErr.Message != "" {
|
|
||||||
apiErr.Code = resp.StatusCode
|
|
||||||
return &apiErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return generic error
|
|
||||||
return &APIError{
|
|
||||||
Code: resp.StatusCode,
|
|
||||||
Message: fmt.Sprintf("HTTP %d: %s", resp.StatusCode, http.StatusText(resp.StatusCode)),
|
|
||||||
Details: string(body),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ping tests the connection to the API server.
|
|
||||||
func (c *Client) Ping(ctx context.Context) error {
|
|
||||||
const op = "agentic.Client.Ping"
|
|
||||||
|
|
||||||
endpoint := c.BaseURL + "/api/health"
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "failed to create request", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setHeaders(req)
|
|
||||||
|
|
||||||
resp, err := c.HTTPClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if resp.StatusCode >= 400 {
|
|
||||||
return log.E(op, fmt.Sprintf("server returned status %d", resp.StatusCode), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,356 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Test fixtures
|
|
||||||
var testTask = Task{
|
|
||||||
ID: "task-123",
|
|
||||||
Title: "Implement feature X",
|
|
||||||
Description: "Add the new feature X to the system",
|
|
||||||
Priority: PriorityHigh,
|
|
||||||
Status: StatusPending,
|
|
||||||
Labels: []string{"feature", "backend"},
|
|
||||||
Files: []string{"pkg/feature/feature.go"},
|
|
||||||
CreatedAt: time.Now().Add(-24 * time.Hour),
|
|
||||||
Project: "core",
|
|
||||||
}
|
|
||||||
|
|
||||||
var testTasks = []Task{
|
|
||||||
testTask,
|
|
||||||
{
|
|
||||||
ID: "task-456",
|
|
||||||
Title: "Fix bug Y",
|
|
||||||
Description: "Fix the bug in component Y",
|
|
||||||
Priority: PriorityCritical,
|
|
||||||
Status: StatusPending,
|
|
||||||
Labels: []string{"bug", "urgent"},
|
|
||||||
CreatedAt: time.Now().Add(-2 * time.Hour),
|
|
||||||
Project: "core",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewClient_Good(t *testing.T) {
|
|
||||||
client := NewClient("https://api.example.com", "test-token")
|
|
||||||
|
|
||||||
assert.Equal(t, "https://api.example.com", client.BaseURL)
|
|
||||||
assert.Equal(t, "test-token", client.Token)
|
|
||||||
assert.NotNil(t, client.HTTPClient)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewClient_Good_TrailingSlash(t *testing.T) {
|
|
||||||
client := NewClient("https://api.example.com/", "test-token")
|
|
||||||
|
|
||||||
assert.Equal(t, "https://api.example.com", client.BaseURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewClientFromConfig_Good(t *testing.T) {
|
|
||||||
cfg := &Config{
|
|
||||||
BaseURL: "https://api.example.com",
|
|
||||||
Token: "config-token",
|
|
||||||
AgentID: "agent-001",
|
|
||||||
}
|
|
||||||
|
|
||||||
client := NewClientFromConfig(cfg)
|
|
||||||
|
|
||||||
assert.Equal(t, "https://api.example.com", client.BaseURL)
|
|
||||||
assert.Equal(t, "config-token", client.Token)
|
|
||||||
assert.Equal(t, "agent-001", client.AgentID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ListTasks_Good(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
assert.Equal(t, http.MethodGet, r.Method)
|
|
||||||
assert.Equal(t, "/api/tasks", r.URL.Path)
|
|
||||||
assert.Equal(t, "Bearer test-token", r.Header.Get("Authorization"))
|
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
_ = json.NewEncoder(w).Encode(testTasks)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
tasks, err := client.ListTasks(context.Background(), ListOptions{})
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, tasks, 2)
|
|
||||||
assert.Equal(t, "task-123", tasks[0].ID)
|
|
||||||
assert.Equal(t, "task-456", tasks[1].ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ListTasks_Good_WithFilters(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
query := r.URL.Query()
|
|
||||||
assert.Equal(t, "pending", query.Get("status"))
|
|
||||||
assert.Equal(t, "high", query.Get("priority"))
|
|
||||||
assert.Equal(t, "core", query.Get("project"))
|
|
||||||
assert.Equal(t, "10", query.Get("limit"))
|
|
||||||
assert.Equal(t, "bug,urgent", query.Get("labels"))
|
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
_ = json.NewEncoder(w).Encode([]Task{testTask})
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
opts := ListOptions{
|
|
||||||
Status: StatusPending,
|
|
||||||
Priority: PriorityHigh,
|
|
||||||
Project: "core",
|
|
||||||
Limit: 10,
|
|
||||||
Labels: []string{"bug", "urgent"},
|
|
||||||
}
|
|
||||||
|
|
||||||
tasks, err := client.ListTasks(context.Background(), opts)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, tasks, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ListTasks_Bad_ServerError(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
_ = json.NewEncoder(w).Encode(APIError{Message: "internal error"})
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
tasks, err := client.ListTasks(context.Background(), ListOptions{})
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, tasks)
|
|
||||||
assert.Contains(t, err.Error(), "internal error")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_GetTask_Good(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
assert.Equal(t, http.MethodGet, r.Method)
|
|
||||||
assert.Equal(t, "/api/tasks/task-123", r.URL.Path)
|
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
_ = json.NewEncoder(w).Encode(testTask)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
task, err := client.GetTask(context.Background(), "task-123")
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "task-123", task.ID)
|
|
||||||
assert.Equal(t, "Implement feature X", task.Title)
|
|
||||||
assert.Equal(t, PriorityHigh, task.Priority)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_GetTask_Bad_EmptyID(t *testing.T) {
|
|
||||||
client := NewClient("https://api.example.com", "test-token")
|
|
||||||
task, err := client.GetTask(context.Background(), "")
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, task)
|
|
||||||
assert.Contains(t, err.Error(), "task ID is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_GetTask_Bad_NotFound(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusNotFound)
|
|
||||||
_ = json.NewEncoder(w).Encode(APIError{Message: "task not found"})
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
task, err := client.GetTask(context.Background(), "nonexistent")
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, task)
|
|
||||||
assert.Contains(t, err.Error(), "task not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ClaimTask_Good(t *testing.T) {
|
|
||||||
claimedTask := testTask
|
|
||||||
claimedTask.Status = StatusInProgress
|
|
||||||
claimedTask.ClaimedBy = "agent-001"
|
|
||||||
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
assert.Equal(t, http.MethodPost, r.Method)
|
|
||||||
assert.Equal(t, "/api/tasks/task-123/claim", r.URL.Path)
|
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
_ = json.NewEncoder(w).Encode(ClaimResponse{Task: &claimedTask})
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
client.AgentID = "agent-001"
|
|
||||||
task, err := client.ClaimTask(context.Background(), "task-123")
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, StatusInProgress, task.Status)
|
|
||||||
assert.Equal(t, "agent-001", task.ClaimedBy)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ClaimTask_Good_SimpleResponse(t *testing.T) {
|
|
||||||
// Some APIs might return just the task without wrapping
|
|
||||||
claimedTask := testTask
|
|
||||||
claimedTask.Status = StatusInProgress
|
|
||||||
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
_ = json.NewEncoder(w).Encode(claimedTask)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
task, err := client.ClaimTask(context.Background(), "task-123")
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "task-123", task.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ClaimTask_Bad_EmptyID(t *testing.T) {
|
|
||||||
client := NewClient("https://api.example.com", "test-token")
|
|
||||||
task, err := client.ClaimTask(context.Background(), "")
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, task)
|
|
||||||
assert.Contains(t, err.Error(), "task ID is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_ClaimTask_Bad_AlreadyClaimed(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusConflict)
|
|
||||||
_ = json.NewEncoder(w).Encode(APIError{Message: "task already claimed"})
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
task, err := client.ClaimTask(context.Background(), "task-123")
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, task)
|
|
||||||
assert.Contains(t, err.Error(), "task already claimed")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_UpdateTask_Good(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
assert.Equal(t, http.MethodPatch, r.Method)
|
|
||||||
assert.Equal(t, "/api/tasks/task-123", r.URL.Path)
|
|
||||||
assert.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
|
||||||
|
|
||||||
var update TaskUpdate
|
|
||||||
err := json.NewDecoder(r.Body).Decode(&update)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, StatusInProgress, update.Status)
|
|
||||||
assert.Equal(t, 50, update.Progress)
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
err := client.UpdateTask(context.Background(), "task-123", TaskUpdate{
|
|
||||||
Status: StatusInProgress,
|
|
||||||
Progress: 50,
|
|
||||||
Notes: "Making progress",
|
|
||||||
})
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_UpdateTask_Bad_EmptyID(t *testing.T) {
|
|
||||||
client := NewClient("https://api.example.com", "test-token")
|
|
||||||
err := client.UpdateTask(context.Background(), "", TaskUpdate{})
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "task ID is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_CompleteTask_Good(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
assert.Equal(t, http.MethodPost, r.Method)
|
|
||||||
assert.Equal(t, "/api/tasks/task-123/complete", r.URL.Path)
|
|
||||||
|
|
||||||
var result TaskResult
|
|
||||||
err := json.NewDecoder(r.Body).Decode(&result)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, result.Success)
|
|
||||||
assert.Equal(t, "Feature implemented", result.Output)
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
err := client.CompleteTask(context.Background(), "task-123", TaskResult{
|
|
||||||
Success: true,
|
|
||||||
Output: "Feature implemented",
|
|
||||||
Artifacts: []string{"pkg/feature/feature.go"},
|
|
||||||
})
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_CompleteTask_Bad_EmptyID(t *testing.T) {
|
|
||||||
client := NewClient("https://api.example.com", "test-token")
|
|
||||||
err := client.CompleteTask(context.Background(), "", TaskResult{})
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "task ID is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_Ping_Good(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
assert.Equal(t, "/api/health", r.URL.Path)
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
client := NewClient(server.URL, "test-token")
|
|
||||||
err := client.Ping(context.Background())
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClient_Ping_Bad_ServerDown(t *testing.T) {
|
|
||||||
client := NewClient("http://localhost:99999", "test-token")
|
|
||||||
client.HTTPClient.Timeout = 100 * time.Millisecond
|
|
||||||
|
|
||||||
err := client.Ping(context.Background())
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "request failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAPIError_Error_Good(t *testing.T) {
|
|
||||||
err := &APIError{
|
|
||||||
Code: 404,
|
|
||||||
Message: "task not found",
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, "task not found", err.Error())
|
|
||||||
|
|
||||||
err.Details = "task-123 does not exist"
|
|
||||||
assert.Equal(t, "task not found: task-123 does not exist", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTaskStatus_Good(t *testing.T) {
|
|
||||||
assert.Equal(t, TaskStatus("pending"), StatusPending)
|
|
||||||
assert.Equal(t, TaskStatus("in_progress"), StatusInProgress)
|
|
||||||
assert.Equal(t, TaskStatus("completed"), StatusCompleted)
|
|
||||||
assert.Equal(t, TaskStatus("blocked"), StatusBlocked)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTaskPriority_Good(t *testing.T) {
|
|
||||||
assert.Equal(t, TaskPriority("critical"), PriorityCritical)
|
|
||||||
assert.Equal(t, TaskPriority("high"), PriorityHigh)
|
|
||||||
assert.Equal(t, TaskPriority("medium"), PriorityMedium)
|
|
||||||
assert.Equal(t, TaskPriority("low"), PriorityLow)
|
|
||||||
}
|
|
||||||
|
|
@ -1,338 +0,0 @@
|
||||||
// Package agentic provides AI collaboration features for task management.
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PROptions contains options for creating a pull request.
//
// All fields are optional: CreatePR substitutes task-derived values for an
// empty Title or Body, and omits the corresponding gh flags for the rest.
type PROptions struct {
	// Title is the PR title.
	Title string `json:"title"`
	// Body is the PR description.
	Body string `json:"body"`
	// Draft marks the PR as a draft.
	Draft bool `json:"draft"`
	// Labels are labels to add to the PR.
	Labels []string `json:"labels"`
	// Base is the base branch (defaults to main).
	Base string `json:"base"`
}
|
|
||||||
|
|
||||||
// AutoCommit creates a git commit with a task reference.
|
|
||||||
// The commit message follows the format:
|
|
||||||
//
|
|
||||||
// feat(scope): description
|
|
||||||
//
|
|
||||||
// Task: #123
|
|
||||||
// Co-Authored-By: Claude <noreply@anthropic.com>
|
|
||||||
func AutoCommit(ctx context.Context, task *Task, dir string, message string) error {
|
|
||||||
const op = "agentic.AutoCommit"
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return log.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if message == "" {
|
|
||||||
return log.E(op, "commit message is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build full commit message
|
|
||||||
fullMessage := buildCommitMessage(task, message)
|
|
||||||
|
|
||||||
// Stage all changes
|
|
||||||
if _, err := runGitCommandCtx(ctx, dir, "add", "-A"); err != nil {
|
|
||||||
return log.E(op, "failed to stage changes", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create commit
|
|
||||||
if _, err := runGitCommandCtx(ctx, dir, "commit", "-m", fullMessage); err != nil {
|
|
||||||
return log.E(op, "failed to create commit", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCommitMessage formats a commit message with task reference.
|
|
||||||
func buildCommitMessage(task *Task, message string) string {
|
|
||||||
var sb strings.Builder
|
|
||||||
|
|
||||||
// Write the main message
|
|
||||||
sb.WriteString(message)
|
|
||||||
sb.WriteString("\n\n")
|
|
||||||
|
|
||||||
// Add task reference
|
|
||||||
sb.WriteString("Task: #")
|
|
||||||
sb.WriteString(task.ID)
|
|
||||||
sb.WriteString("\n")
|
|
||||||
|
|
||||||
// Add co-author
|
|
||||||
sb.WriteString("Co-Authored-By: Claude <noreply@anthropic.com>\n")
|
|
||||||
|
|
||||||
return sb.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreatePR creates a pull request using the gh CLI.
|
|
||||||
func CreatePR(ctx context.Context, task *Task, dir string, opts PROptions) (string, error) {
|
|
||||||
const op = "agentic.CreatePR"
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return "", log.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build title if not provided
|
|
||||||
title := opts.Title
|
|
||||||
if title == "" {
|
|
||||||
title = task.Title
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build body if not provided
|
|
||||||
body := opts.Body
|
|
||||||
if body == "" {
|
|
||||||
body = buildPRBody(task)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build gh command arguments
|
|
||||||
args := []string{"pr", "create", "--title", title, "--body", body}
|
|
||||||
|
|
||||||
if opts.Draft {
|
|
||||||
args = append(args, "--draft")
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Base != "" {
|
|
||||||
args = append(args, "--base", opts.Base)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, label := range opts.Labels {
|
|
||||||
args = append(args, "--label", label)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run gh pr create
|
|
||||||
output, err := runCommandCtx(ctx, dir, "gh", args...)
|
|
||||||
if err != nil {
|
|
||||||
return "", log.E(op, "failed to create PR", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract PR URL from output
|
|
||||||
prURL := strings.TrimSpace(output)
|
|
||||||
|
|
||||||
return prURL, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildPRBody creates a PR body from task details.
|
|
||||||
func buildPRBody(task *Task) string {
|
|
||||||
var sb strings.Builder
|
|
||||||
|
|
||||||
sb.WriteString("## Summary\n\n")
|
|
||||||
sb.WriteString(task.Description)
|
|
||||||
sb.WriteString("\n\n")
|
|
||||||
|
|
||||||
sb.WriteString("## Task Reference\n\n")
|
|
||||||
sb.WriteString("- Task ID: #")
|
|
||||||
sb.WriteString(task.ID)
|
|
||||||
sb.WriteString("\n")
|
|
||||||
sb.WriteString("- Priority: ")
|
|
||||||
sb.WriteString(string(task.Priority))
|
|
||||||
sb.WriteString("\n")
|
|
||||||
|
|
||||||
if len(task.Labels) > 0 {
|
|
||||||
sb.WriteString("- Labels: ")
|
|
||||||
sb.WriteString(strings.Join(task.Labels, ", "))
|
|
||||||
sb.WriteString("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
sb.WriteString("\n---\n")
|
|
||||||
sb.WriteString("Generated with AI assistance\n")
|
|
||||||
|
|
||||||
return sb.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncStatus syncs the task status back to the agentic service.
|
|
||||||
func SyncStatus(ctx context.Context, client *Client, task *Task, update TaskUpdate) error {
|
|
||||||
const op = "agentic.SyncStatus"
|
|
||||||
|
|
||||||
if client == nil {
|
|
||||||
return log.E(op, "client is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return log.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return client.UpdateTask(ctx, task.ID, update)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CommitAndSync commits changes and syncs task status.
|
|
||||||
func CommitAndSync(ctx context.Context, client *Client, task *Task, dir string, message string, progress int) error {
|
|
||||||
const op = "agentic.CommitAndSync"
|
|
||||||
|
|
||||||
// Create commit
|
|
||||||
if err := AutoCommit(ctx, task, dir, message); err != nil {
|
|
||||||
return log.E(op, "failed to commit", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync status if client provided
|
|
||||||
if client != nil {
|
|
||||||
update := TaskUpdate{
|
|
||||||
Status: StatusInProgress,
|
|
||||||
Progress: progress,
|
|
||||||
Notes: "Committed: " + message,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := SyncStatus(ctx, client, task, update); err != nil {
|
|
||||||
// Log but don't fail on sync errors
|
|
||||||
return log.E(op, "commit succeeded but sync failed", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushChanges pushes committed changes to the remote.
|
|
||||||
func PushChanges(ctx context.Context, dir string) error {
|
|
||||||
const op = "agentic.PushChanges"
|
|
||||||
|
|
||||||
_, err := runGitCommandCtx(ctx, dir, "push")
|
|
||||||
if err != nil {
|
|
||||||
return log.E(op, "failed to push changes", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateBranch creates a new branch for the task.
|
|
||||||
func CreateBranch(ctx context.Context, task *Task, dir string) (string, error) {
|
|
||||||
const op = "agentic.CreateBranch"
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return "", log.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate branch name from task
|
|
||||||
branchName := generateBranchName(task)
|
|
||||||
|
|
||||||
// Create and checkout branch
|
|
||||||
_, err := runGitCommandCtx(ctx, dir, "checkout", "-b", branchName)
|
|
||||||
if err != nil {
|
|
||||||
return "", log.E(op, "failed to create branch", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return branchName, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateBranchName creates a branch name from task details.
|
|
||||||
func generateBranchName(task *Task) string {
|
|
||||||
// Determine prefix based on labels
|
|
||||||
prefix := "feat"
|
|
||||||
for _, label := range task.Labels {
|
|
||||||
switch strings.ToLower(label) {
|
|
||||||
case "bug", "bugfix", "fix":
|
|
||||||
prefix = "fix"
|
|
||||||
case "docs", "documentation":
|
|
||||||
prefix = "docs"
|
|
||||||
case "refactor":
|
|
||||||
prefix = "refactor"
|
|
||||||
case "test", "tests":
|
|
||||||
prefix = "test"
|
|
||||||
case "chore":
|
|
||||||
prefix = "chore"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize title for branch name
|
|
||||||
title := strings.ToLower(task.Title)
|
|
||||||
title = strings.Map(func(r rune) rune {
|
|
||||||
if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
if r == ' ' || r == '-' || r == '_' {
|
|
||||||
return '-'
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}, title)
|
|
||||||
|
|
||||||
// Remove consecutive dashes
|
|
||||||
for strings.Contains(title, "--") {
|
|
||||||
title = strings.ReplaceAll(title, "--", "-")
|
|
||||||
}
|
|
||||||
title = strings.Trim(title, "-")
|
|
||||||
|
|
||||||
// Truncate if too long
|
|
||||||
if len(title) > 40 {
|
|
||||||
title = title[:40]
|
|
||||||
title = strings.TrimRight(title, "-")
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("%s/%s-%s", prefix, task.ID, title)
|
|
||||||
}
|
|
||||||
|
|
||||||
// runGitCommandCtx invokes git in dir with the given arguments, returning
// the command's stdout. It is a thin convenience wrapper over runCommandCtx.
func runGitCommandCtx(ctx context.Context, dir string, args ...string) (string, error) {
	return runCommandCtx(ctx, dir, "git", args...)
}
|
|
||||||
|
|
||||||
// runCommandCtx runs an arbitrary command with context.
|
|
||||||
func runCommandCtx(ctx context.Context, dir string, command string, args ...string) (string, error) {
|
|
||||||
cmd := exec.CommandContext(ctx, command, args...)
|
|
||||||
cmd.Dir = dir
|
|
||||||
|
|
||||||
var stdout, stderr bytes.Buffer
|
|
||||||
cmd.Stdout = &stdout
|
|
||||||
cmd.Stderr = &stderr
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
if stderr.Len() > 0 {
|
|
||||||
return "", fmt.Errorf("%w: %s", err, stderr.String())
|
|
||||||
}
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return stdout.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCurrentBranch returns the current git branch name.
|
|
||||||
func GetCurrentBranch(ctx context.Context, dir string) (string, error) {
|
|
||||||
const op = "agentic.GetCurrentBranch"
|
|
||||||
|
|
||||||
output, err := runGitCommandCtx(ctx, dir, "rev-parse", "--abbrev-ref", "HEAD")
|
|
||||||
if err != nil {
|
|
||||||
return "", log.E(op, "failed to get current branch", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSpace(output), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasUncommittedChanges checks if there are uncommitted changes.
|
|
||||||
func HasUncommittedChanges(ctx context.Context, dir string) (bool, error) {
|
|
||||||
const op = "agentic.HasUncommittedChanges"
|
|
||||||
|
|
||||||
output, err := runGitCommandCtx(ctx, dir, "status", "--porcelain")
|
|
||||||
if err != nil {
|
|
||||||
return false, log.E(op, "failed to get git status", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSpace(output) != "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDiff returns the current diff for staged and unstaged changes.
|
|
||||||
func GetDiff(ctx context.Context, dir string, staged bool) (string, error) {
|
|
||||||
const op = "agentic.GetDiff"
|
|
||||||
|
|
||||||
args := []string{"diff"}
|
|
||||||
if staged {
|
|
||||||
args = append(args, "--staged")
|
|
||||||
}
|
|
||||||
|
|
||||||
output, err := runGitCommandCtx(ctx, dir, args...)
|
|
||||||
if err != nil {
|
|
||||||
return "", log.E(op, "failed to get diff", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return output, nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,199 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBuildCommitMessage(t *testing.T) {
|
|
||||||
task := &Task{
|
|
||||||
ID: "ABC123",
|
|
||||||
Title: "Test Task",
|
|
||||||
}
|
|
||||||
|
|
||||||
message := buildCommitMessage(task, "add new feature")
|
|
||||||
|
|
||||||
assert.Contains(t, message, "add new feature")
|
|
||||||
assert.Contains(t, message, "Task: #ABC123")
|
|
||||||
assert.Contains(t, message, "Co-Authored-By: Claude <noreply@anthropic.com>")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuildPRBody(t *testing.T) {
|
|
||||||
task := &Task{
|
|
||||||
ID: "PR-456",
|
|
||||||
Title: "Add authentication",
|
|
||||||
Description: "Implement user authentication with OAuth2",
|
|
||||||
Priority: PriorityHigh,
|
|
||||||
Labels: []string{"enhancement", "security"},
|
|
||||||
}
|
|
||||||
|
|
||||||
body := buildPRBody(task)
|
|
||||||
|
|
||||||
assert.Contains(t, body, "## Summary")
|
|
||||||
assert.Contains(t, body, "Implement user authentication with OAuth2")
|
|
||||||
assert.Contains(t, body, "## Task Reference")
|
|
||||||
assert.Contains(t, body, "Task ID: #PR-456")
|
|
||||||
assert.Contains(t, body, "Priority: high")
|
|
||||||
assert.Contains(t, body, "Labels: enhancement, security")
|
|
||||||
assert.Contains(t, body, "Generated with AI assistance")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuildPRBody_NoLabels(t *testing.T) {
|
|
||||||
task := &Task{
|
|
||||||
ID: "PR-789",
|
|
||||||
Title: "Fix bug",
|
|
||||||
Description: "Fix the login bug",
|
|
||||||
Priority: PriorityMedium,
|
|
||||||
Labels: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
body := buildPRBody(task)
|
|
||||||
|
|
||||||
assert.Contains(t, body, "## Summary")
|
|
||||||
assert.Contains(t, body, "Fix the login bug")
|
|
||||||
assert.NotContains(t, body, "Labels:")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestGenerateBranchName covers the three behaviours of generateBranchName:
// prefix selection from labels, slug sanitisation of the title, and
// truncation of over-long slugs.
func TestGenerateBranchName(t *testing.T) {
	tests := []struct {
		name     string
		task     *Task
		expected string
	}{
		{
			name: "feature task",
			task: &Task{
				ID:     "123",
				Title:  "Add user authentication",
				Labels: []string{"enhancement"},
			},
			expected: "feat/123-add-user-authentication",
		},
		{
			name: "bug fix task",
			task: &Task{
				ID:     "456",
				Title:  "Fix login error",
				Labels: []string{"bug"},
			},
			expected: "fix/456-fix-login-error",
		},
		{
			name: "docs task",
			task: &Task{
				ID:     "789",
				Title:  "Update README",
				Labels: []string{"documentation"},
			},
			expected: "docs/789-update-readme",
		},
		{
			name: "refactor task",
			task: &Task{
				ID:     "101",
				Title:  "Refactor auth module",
				Labels: []string{"refactor"},
			},
			expected: "refactor/101-refactor-auth-module",
		},
		{
			name: "test task",
			task: &Task{
				ID:     "202",
				Title:  "Add unit tests",
				Labels: []string{"test"},
			},
			expected: "test/202-add-unit-tests",
		},
		{
			name: "chore task",
			task: &Task{
				ID:     "303",
				Title:  "Update dependencies",
				Labels: []string{"chore"},
			},
			expected: "chore/303-update-dependencies",
		},
		{
			// Slug is capped at 40 bytes, then trailing dashes trimmed.
			name: "long title truncated",
			task: &Task{
				ID:     "404",
				Title:  "This is a very long title that should be truncated to fit the branch name limit",
				Labels: nil,
			},
			expected: "feat/404-this-is-a-very-long-title-that-should-be",
		},
		{
			// Anything outside [a-z0-9 -_] is dropped entirely.
			name: "special characters removed",
			task: &Task{
				ID:     "505",
				Title:  "Fix: user's auth (OAuth2) [important]",
				Labels: nil,
			},
			expected: "feat/505-fix-users-auth-oauth2-important",
		},
		{
			name: "no labels defaults to feat",
			task: &Task{
				ID:     "606",
				Title:  "New feature",
				Labels: nil,
			},
			expected: "feat/606-new-feature",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := generateBranchName(tt.task)
			assert.Equal(t, tt.expected, result)
		})
	}
}
|
|
||||||
|
|
||||||
func TestAutoCommit_Bad_NilTask(t *testing.T) {
|
|
||||||
err := AutoCommit(context.TODO(), nil, ".", "test message")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "task is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAutoCommit_Bad_EmptyMessage(t *testing.T) {
|
|
||||||
task := &Task{ID: "123", Title: "Test"}
|
|
||||||
err := AutoCommit(context.TODO(), task, ".", "")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "commit message is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSyncStatus_Bad_NilClient(t *testing.T) {
|
|
||||||
task := &Task{ID: "123", Title: "Test"}
|
|
||||||
update := TaskUpdate{Status: StatusInProgress}
|
|
||||||
|
|
||||||
err := SyncStatus(context.TODO(), nil, task, update)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "client is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSyncStatus_Bad_NilTask(t *testing.T) {
|
|
||||||
client := &Client{BaseURL: "http://test"}
|
|
||||||
update := TaskUpdate{Status: StatusInProgress}
|
|
||||||
|
|
||||||
err := SyncStatus(context.TODO(), client, nil, update)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "task is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateBranch_Bad_NilTask(t *testing.T) {
|
|
||||||
branch, err := CreateBranch(context.TODO(), nil, ".")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Empty(t, branch)
|
|
||||||
assert.Contains(t, err.Error(), "task is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreatePR_Bad_NilTask(t *testing.T) {
|
|
||||||
url, err := CreatePR(context.TODO(), nil, ".", PROptions{})
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Empty(t, url)
|
|
||||||
assert.Contains(t, err.Error(), "task is required")
|
|
||||||
}
|
|
||||||
|
|
@ -1,197 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
errors "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"gopkg.in/yaml.v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config holds the configuration for connecting to the core-agentic service.
//
// It is populated by LoadConfig from (in precedence order) AGENTIC_*
// environment variables, a local .env file, and ~/.core/agentic.yaml.
type Config struct {
	// BaseURL is the URL of the core-agentic API server.
	BaseURL string `yaml:"base_url" json:"base_url"`
	// Token is the authentication token for API requests.
	Token string `yaml:"token" json:"token"`
	// DefaultProject is the project to use when none is specified.
	DefaultProject string `yaml:"default_project" json:"default_project"`
	// AgentID is the identifier for this agent (optional, used for claiming tasks).
	AgentID string `yaml:"agent_id" json:"agent_id"`
}
|
|
||||||
|
|
||||||
// File and endpoint defaults for the agentic configuration.
const (
	// configFileName is the name of the YAML config file under ~/.core.
	configFileName = "agentic.yaml"

	// envFileName is the name of the environment file.
	envFileName = ".env"

	// DefaultBaseURL is the default API endpoint if none is configured.
	DefaultBaseURL = "https://api.core-agentic.dev"
)
|
|
||||||
|
|
||||||
// LoadConfig loads the agentic configuration from the specified directory.
|
|
||||||
// It first checks for a .env file, then falls back to ~/.core/agentic.yaml.
|
|
||||||
// If dir is empty, it checks the current directory first.
|
|
||||||
//
|
|
||||||
// Environment variables take precedence:
|
|
||||||
// - AGENTIC_BASE_URL: API base URL
|
|
||||||
// - AGENTIC_TOKEN: Authentication token
|
|
||||||
// - AGENTIC_PROJECT: Default project
|
|
||||||
// - AGENTIC_AGENT_ID: Agent identifier
|
|
||||||
func LoadConfig(dir string) (*Config, error) {
|
|
||||||
cfg := &Config{
|
|
||||||
BaseURL: DefaultBaseURL,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try loading from .env file in the specified directory
|
|
||||||
if dir != "" {
|
|
||||||
envPath := filepath.Join(dir, envFileName)
|
|
||||||
if err := loadEnvFile(envPath, cfg); err == nil {
|
|
||||||
// Successfully loaded from .env
|
|
||||||
applyEnvOverrides(cfg)
|
|
||||||
if cfg.Token != "" {
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try loading from current directory .env
|
|
||||||
if dir == "" {
|
|
||||||
cwd, err := os.Getwd()
|
|
||||||
if err == nil {
|
|
||||||
envPath := filepath.Join(cwd, envFileName)
|
|
||||||
if err := loadEnvFile(envPath, cfg); err == nil {
|
|
||||||
applyEnvOverrides(cfg)
|
|
||||||
if cfg.Token != "" {
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try loading from ~/.core/agentic.yaml
|
|
||||||
homeDir, err := os.UserHomeDir()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.E("agentic.LoadConfig", "failed to get home directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
configPath := filepath.Join(homeDir, ".core", configFileName)
|
|
||||||
if err := loadYAMLConfig(configPath, cfg); err != nil && !os.IsNotExist(err) {
|
|
||||||
return nil, errors.E("agentic.LoadConfig", "failed to load config", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply environment variable overrides
|
|
||||||
applyEnvOverrides(cfg)
|
|
||||||
|
|
||||||
// Validate configuration
|
|
||||||
if cfg.Token == "" {
|
|
||||||
return nil, errors.E("agentic.LoadConfig", "no authentication token configured", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadEnvFile reads a .env file and extracts agentic configuration.
|
|
||||||
func loadEnvFile(path string, cfg *Config) error {
|
|
||||||
content, err := io.Local.Read(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, line := range strings.Split(content, "\n") {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
|
|
||||||
// Skip empty lines and comments
|
|
||||||
if line == "" || strings.HasPrefix(line, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse KEY=value
|
|
||||||
parts := strings.SplitN(line, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
key := strings.TrimSpace(parts[0])
|
|
||||||
value := strings.TrimSpace(parts[1])
|
|
||||||
|
|
||||||
// Remove quotes if present
|
|
||||||
value = strings.Trim(value, `"'`)
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case "AGENTIC_BASE_URL":
|
|
||||||
cfg.BaseURL = value
|
|
||||||
case "AGENTIC_TOKEN":
|
|
||||||
cfg.Token = value
|
|
||||||
case "AGENTIC_PROJECT":
|
|
||||||
cfg.DefaultProject = value
|
|
||||||
case "AGENTIC_AGENT_ID":
|
|
||||||
cfg.AgentID = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadYAMLConfig reads configuration from a YAML file.
|
|
||||||
func loadYAMLConfig(path string, cfg *Config) error {
|
|
||||||
content, err := io.Local.Read(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return yaml.Unmarshal([]byte(content), cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// applyEnvOverrides applies environment variable overrides to the config.
|
|
||||||
func applyEnvOverrides(cfg *Config) {
|
|
||||||
if v := os.Getenv("AGENTIC_BASE_URL"); v != "" {
|
|
||||||
cfg.BaseURL = v
|
|
||||||
}
|
|
||||||
if v := os.Getenv("AGENTIC_TOKEN"); v != "" {
|
|
||||||
cfg.Token = v
|
|
||||||
}
|
|
||||||
if v := os.Getenv("AGENTIC_PROJECT"); v != "" {
|
|
||||||
cfg.DefaultProject = v
|
|
||||||
}
|
|
||||||
if v := os.Getenv("AGENTIC_AGENT_ID"); v != "" {
|
|
||||||
cfg.AgentID = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveConfig saves the configuration to ~/.core/agentic.yaml.
|
|
||||||
func SaveConfig(cfg *Config) error {
|
|
||||||
homeDir, err := os.UserHomeDir()
|
|
||||||
if err != nil {
|
|
||||||
return errors.E("agentic.SaveConfig", "failed to get home directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
configDir := filepath.Join(homeDir, ".core")
|
|
||||||
if err := io.Local.EnsureDir(configDir); err != nil {
|
|
||||||
return errors.E("agentic.SaveConfig", "failed to create config directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
configPath := filepath.Join(configDir, configFileName)
|
|
||||||
|
|
||||||
data, err := yaml.Marshal(cfg)
|
|
||||||
if err != nil {
|
|
||||||
return errors.E("agentic.SaveConfig", "failed to marshal config", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := io.Local.Write(configPath, string(data)); err != nil {
|
|
||||||
return errors.E("agentic.SaveConfig", "failed to write config file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigPath returns the path to the config file in the user's home directory.
|
|
||||||
func ConfigPath() (string, error) {
|
|
||||||
homeDir, err := os.UserHomeDir()
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.E("agentic.ConfigPath", "failed to get home directory", err)
|
|
||||||
}
|
|
||||||
return filepath.Join(homeDir, ".core", configFileName), nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,185 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestLoadConfig_Good_FromEnvFile(t *testing.T) {
|
|
||||||
// Create temp directory with .env file
|
|
||||||
tmpDir, err := os.MkdirTemp("", "agentic-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
|
||||||
|
|
||||||
envContent := `
|
|
||||||
AGENTIC_BASE_URL=https://test.api.com
|
|
||||||
AGENTIC_TOKEN=test-token-123
|
|
||||||
AGENTIC_PROJECT=my-project
|
|
||||||
AGENTIC_AGENT_ID=agent-001
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(tmpDir, ".env"), []byte(envContent), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(tmpDir)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "https://test.api.com", cfg.BaseURL)
|
|
||||||
assert.Equal(t, "test-token-123", cfg.Token)
|
|
||||||
assert.Equal(t, "my-project", cfg.DefaultProject)
|
|
||||||
assert.Equal(t, "agent-001", cfg.AgentID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Good_FromEnvVars(t *testing.T) {
|
|
||||||
// Create temp directory with .env file (partial config)
|
|
||||||
tmpDir, err := os.MkdirTemp("", "agentic-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
|
||||||
|
|
||||||
envContent := `
|
|
||||||
AGENTIC_TOKEN=env-file-token
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(tmpDir, ".env"), []byte(envContent), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Set environment variables that should override
|
|
||||||
_ = os.Setenv("AGENTIC_BASE_URL", "https://env-override.com")
|
|
||||||
_ = os.Setenv("AGENTIC_TOKEN", "env-override-token")
|
|
||||||
defer func() {
|
|
||||||
_ = os.Unsetenv("AGENTIC_BASE_URL")
|
|
||||||
_ = os.Unsetenv("AGENTIC_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(tmpDir)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "https://env-override.com", cfg.BaseURL)
|
|
||||||
assert.Equal(t, "env-override-token", cfg.Token)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Bad_NoToken(t *testing.T) {
|
|
||||||
// Create temp directory without config
|
|
||||||
tmpDir, err := os.MkdirTemp("", "agentic-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
|
||||||
|
|
||||||
// Create empty .env
|
|
||||||
err = os.WriteFile(filepath.Join(tmpDir, ".env"), []byte(""), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Ensure no env vars are set
|
|
||||||
_ = os.Unsetenv("AGENTIC_TOKEN")
|
|
||||||
_ = os.Unsetenv("AGENTIC_BASE_URL")
|
|
||||||
|
|
||||||
_, err = LoadConfig(tmpDir)
|
|
||||||
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "no authentication token")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Good_EnvFileWithQuotes(t *testing.T) {
|
|
||||||
tmpDir, err := os.MkdirTemp("", "agentic-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
|
||||||
|
|
||||||
// Test with quoted values
|
|
||||||
envContent := `
|
|
||||||
AGENTIC_TOKEN="quoted-token"
|
|
||||||
AGENTIC_BASE_URL='single-quoted-url'
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(tmpDir, ".env"), []byte(envContent), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(tmpDir)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "quoted-token", cfg.Token)
|
|
||||||
assert.Equal(t, "single-quoted-url", cfg.BaseURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Good_EnvFileWithComments(t *testing.T) {
|
|
||||||
tmpDir, err := os.MkdirTemp("", "agentic-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
|
||||||
|
|
||||||
envContent := `
|
|
||||||
# This is a comment
|
|
||||||
AGENTIC_TOKEN=token-with-comments
|
|
||||||
|
|
||||||
# Another comment
|
|
||||||
AGENTIC_PROJECT=commented-project
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(tmpDir, ".env"), []byte(envContent), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(tmpDir)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "token-with-comments", cfg.Token)
|
|
||||||
assert.Equal(t, "commented-project", cfg.DefaultProject)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveConfig_Good(t *testing.T) {
|
|
||||||
// Create temp home directory
|
|
||||||
tmpHome, err := os.MkdirTemp("", "agentic-home")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpHome) }()
|
|
||||||
|
|
||||||
// Override HOME for the test
|
|
||||||
originalHome := os.Getenv("HOME")
|
|
||||||
_ = os.Setenv("HOME", tmpHome)
|
|
||||||
defer func() { _ = os.Setenv("HOME", originalHome) }()
|
|
||||||
|
|
||||||
cfg := &Config{
|
|
||||||
BaseURL: "https://saved.api.com",
|
|
||||||
Token: "saved-token",
|
|
||||||
DefaultProject: "saved-project",
|
|
||||||
AgentID: "saved-agent",
|
|
||||||
}
|
|
||||||
|
|
||||||
err = SaveConfig(cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify file was created
|
|
||||||
configPath := filepath.Join(tmpHome, ".core", "agentic.yaml")
|
|
||||||
_, err = os.Stat(configPath)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Read back the config
|
|
||||||
data, err := os.ReadFile(configPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Contains(t, string(data), "saved.api.com")
|
|
||||||
assert.Contains(t, string(data), "saved-token")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigPath_Good(t *testing.T) {
|
|
||||||
path, err := ConfigPath()
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Contains(t, path, ".core")
|
|
||||||
assert.Contains(t, path, "agentic.yaml")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Good_DefaultBaseURL(t *testing.T) {
|
|
||||||
tmpDir, err := os.MkdirTemp("", "agentic-test")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
|
||||||
|
|
||||||
// Only provide token, should use default base URL
|
|
||||||
envContent := `
|
|
||||||
AGENTIC_TOKEN=test-token
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(tmpDir, ".env"), []byte(envContent), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Clear any env overrides
|
|
||||||
_ = os.Unsetenv("AGENTIC_BASE_URL")
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(tmpDir)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, DefaultBaseURL, cfg.BaseURL)
|
|
||||||
}
|
|
||||||
|
|
@ -1,335 +0,0 @@
|
||||||
// Package agentic provides AI collaboration features for task management.
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
errors "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FileContent represents the content of a file for AI context.
|
|
||||||
type FileContent struct {
|
|
||||||
// Path is the relative path to the file.
|
|
||||||
Path string `json:"path"`
|
|
||||||
// Content is the file content.
|
|
||||||
Content string `json:"content"`
|
|
||||||
// Language is the detected programming language.
|
|
||||||
Language string `json:"language"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TaskContext contains gathered context for AI collaboration.
|
|
||||||
type TaskContext struct {
|
|
||||||
// Task is the task being worked on.
|
|
||||||
Task *Task `json:"task"`
|
|
||||||
// Files is a list of relevant file contents.
|
|
||||||
Files []FileContent `json:"files"`
|
|
||||||
// GitStatus is the current git status output.
|
|
||||||
GitStatus string `json:"git_status"`
|
|
||||||
// RecentCommits is the recent commit log.
|
|
||||||
RecentCommits string `json:"recent_commits"`
|
|
||||||
// RelatedCode contains code snippets related to the task.
|
|
||||||
RelatedCode []FileContent `json:"related_code"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildTaskContext gathers context for AI collaboration on a task.
|
|
||||||
func BuildTaskContext(task *Task, dir string) (*TaskContext, error) {
|
|
||||||
const op = "agentic.BuildTaskContext"
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return nil, errors.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if dir == "" {
|
|
||||||
cwd, err := os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.E(op, "failed to get working directory", err)
|
|
||||||
}
|
|
||||||
dir = cwd
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := &TaskContext{
|
|
||||||
Task: task,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gather files mentioned in the task
|
|
||||||
files, err := GatherRelatedFiles(task, dir)
|
|
||||||
if err != nil {
|
|
||||||
// Non-fatal: continue without files
|
|
||||||
files = nil
|
|
||||||
}
|
|
||||||
ctx.Files = files
|
|
||||||
|
|
||||||
// Get git status
|
|
||||||
gitStatus, _ := runGitCommand(dir, "status", "--porcelain")
|
|
||||||
ctx.GitStatus = gitStatus
|
|
||||||
|
|
||||||
// Get recent commits
|
|
||||||
recentCommits, _ := runGitCommand(dir, "log", "--oneline", "-10")
|
|
||||||
ctx.RecentCommits = recentCommits
|
|
||||||
|
|
||||||
// Find related code by searching for keywords
|
|
||||||
relatedCode, err := findRelatedCode(task, dir)
|
|
||||||
if err != nil {
|
|
||||||
relatedCode = nil
|
|
||||||
}
|
|
||||||
ctx.RelatedCode = relatedCode
|
|
||||||
|
|
||||||
return ctx, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GatherRelatedFiles reads files mentioned in the task.
|
|
||||||
func GatherRelatedFiles(task *Task, dir string) ([]FileContent, error) {
|
|
||||||
const op = "agentic.GatherRelatedFiles"
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return nil, errors.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var files []FileContent
|
|
||||||
|
|
||||||
// Read files explicitly mentioned in the task
|
|
||||||
for _, relPath := range task.Files {
|
|
||||||
fullPath := filepath.Join(dir, relPath)
|
|
||||||
|
|
||||||
content, err := io.Local.Read(fullPath)
|
|
||||||
if err != nil {
|
|
||||||
// Skip files that don't exist
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
files = append(files, FileContent{
|
|
||||||
Path: relPath,
|
|
||||||
Content: content,
|
|
||||||
Language: detectLanguage(relPath),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findRelatedCode searches for code related to the task by keywords.
|
|
||||||
func findRelatedCode(task *Task, dir string) ([]FileContent, error) {
|
|
||||||
const op = "agentic.findRelatedCode"
|
|
||||||
|
|
||||||
if task == nil {
|
|
||||||
return nil, errors.E(op, "task is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract keywords from title and description
|
|
||||||
keywords := extractKeywords(task.Title + " " + task.Description)
|
|
||||||
if len(keywords) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var files []FileContent
|
|
||||||
seen := make(map[string]bool)
|
|
||||||
|
|
||||||
// Search for each keyword using git grep
|
|
||||||
for _, keyword := range keywords {
|
|
||||||
if len(keyword) < 3 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
output, err := runGitCommand(dir, "grep", "-l", "-i", keyword, "--", "*.go", "*.ts", "*.js", "*.py")
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse matched files
|
|
||||||
for _, line := range strings.Split(output, "\n") {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" || seen[line] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen[line] = true
|
|
||||||
|
|
||||||
// Limit to 10 related files
|
|
||||||
if len(files) >= 10 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
fullPath := filepath.Join(dir, line)
|
|
||||||
content, err := io.Local.Read(fullPath)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Truncate large files
|
|
||||||
if len(content) > 5000 {
|
|
||||||
content = content[:5000] + "\n... (truncated)"
|
|
||||||
}
|
|
||||||
|
|
||||||
files = append(files, FileContent{
|
|
||||||
Path: line,
|
|
||||||
Content: content,
|
|
||||||
Language: detectLanguage(line),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(files) >= 10 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractKeywords extracts meaningful words from text for searching.
|
|
||||||
func extractKeywords(text string) []string {
|
|
||||||
// Remove common words and extract identifiers
|
|
||||||
text = strings.ToLower(text)
|
|
||||||
|
|
||||||
// Split by non-alphanumeric characters
|
|
||||||
re := regexp.MustCompile(`[^a-zA-Z0-9]+`)
|
|
||||||
words := re.Split(text, -1)
|
|
||||||
|
|
||||||
// Filter stop words and short words
|
|
||||||
stopWords := map[string]bool{
|
|
||||||
"the": true, "a": true, "an": true, "and": true, "or": true, "but": true,
|
|
||||||
"in": true, "on": true, "at": true, "to": true, "for": true, "of": true,
|
|
||||||
"with": true, "by": true, "from": true, "is": true, "are": true, "was": true,
|
|
||||||
"be": true, "been": true, "being": true, "have": true, "has": true, "had": true,
|
|
||||||
"do": true, "does": true, "did": true, "will": true, "would": true, "could": true,
|
|
||||||
"should": true, "may": true, "might": true, "must": true, "shall": true,
|
|
||||||
"this": true, "that": true, "these": true, "those": true, "it": true,
|
|
||||||
"add": true, "create": true, "update": true, "fix": true, "remove": true,
|
|
||||||
"implement": true, "new": true, "file": true, "code": true,
|
|
||||||
}
|
|
||||||
|
|
||||||
var keywords []string
|
|
||||||
for _, word := range words {
|
|
||||||
word = strings.TrimSpace(word)
|
|
||||||
if len(word) >= 3 && !stopWords[word] {
|
|
||||||
keywords = append(keywords, word)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit to first 5 keywords
|
|
||||||
if len(keywords) > 5 {
|
|
||||||
keywords = keywords[:5]
|
|
||||||
}
|
|
||||||
|
|
||||||
return keywords
|
|
||||||
}
|
|
||||||
|
|
||||||
// detectLanguage detects the programming language from a file extension.
|
|
||||||
func detectLanguage(path string) string {
|
|
||||||
ext := strings.ToLower(filepath.Ext(path))
|
|
||||||
|
|
||||||
languages := map[string]string{
|
|
||||||
".go": "go",
|
|
||||||
".ts": "typescript",
|
|
||||||
".tsx": "typescript",
|
|
||||||
".js": "javascript",
|
|
||||||
".jsx": "javascript",
|
|
||||||
".py": "python",
|
|
||||||
".rs": "rust",
|
|
||||||
".java": "java",
|
|
||||||
".kt": "kotlin",
|
|
||||||
".swift": "swift",
|
|
||||||
".c": "c",
|
|
||||||
".cpp": "cpp",
|
|
||||||
".h": "c",
|
|
||||||
".hpp": "cpp",
|
|
||||||
".rb": "ruby",
|
|
||||||
".php": "php",
|
|
||||||
".cs": "csharp",
|
|
||||||
".fs": "fsharp",
|
|
||||||
".scala": "scala",
|
|
||||||
".sh": "bash",
|
|
||||||
".bash": "bash",
|
|
||||||
".zsh": "zsh",
|
|
||||||
".yaml": "yaml",
|
|
||||||
".yml": "yaml",
|
|
||||||
".json": "json",
|
|
||||||
".xml": "xml",
|
|
||||||
".html": "html",
|
|
||||||
".css": "css",
|
|
||||||
".scss": "scss",
|
|
||||||
".sql": "sql",
|
|
||||||
".md": "markdown",
|
|
||||||
}
|
|
||||||
|
|
||||||
if lang, ok := languages[ext]; ok {
|
|
||||||
return lang
|
|
||||||
}
|
|
||||||
return "text"
|
|
||||||
}
|
|
||||||
|
|
||||||
// runGitCommand runs a git command and returns the output.
|
|
||||||
func runGitCommand(dir string, args ...string) (string, error) {
|
|
||||||
cmd := exec.Command("git", args...)
|
|
||||||
cmd.Dir = dir
|
|
||||||
|
|
||||||
var stdout, stderr bytes.Buffer
|
|
||||||
cmd.Stdout = &stdout
|
|
||||||
cmd.Stderr = &stderr
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return stdout.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatContext formats the TaskContext for AI consumption.
|
|
||||||
func (tc *TaskContext) FormatContext() string {
|
|
||||||
var sb strings.Builder
|
|
||||||
|
|
||||||
sb.WriteString("# Task Context\n\n")
|
|
||||||
|
|
||||||
// Task info
|
|
||||||
sb.WriteString("## Task\n")
|
|
||||||
sb.WriteString("ID: " + tc.Task.ID + "\n")
|
|
||||||
sb.WriteString("Title: " + tc.Task.Title + "\n")
|
|
||||||
sb.WriteString("Priority: " + string(tc.Task.Priority) + "\n")
|
|
||||||
sb.WriteString("Status: " + string(tc.Task.Status) + "\n")
|
|
||||||
sb.WriteString("\n### Description\n")
|
|
||||||
sb.WriteString(tc.Task.Description + "\n\n")
|
|
||||||
|
|
||||||
// Files
|
|
||||||
if len(tc.Files) > 0 {
|
|
||||||
sb.WriteString("## Task Files\n")
|
|
||||||
for _, f := range tc.Files {
|
|
||||||
sb.WriteString("### " + f.Path + " (" + f.Language + ")\n")
|
|
||||||
sb.WriteString("```" + f.Language + "\n")
|
|
||||||
sb.WriteString(f.Content)
|
|
||||||
sb.WriteString("\n```\n\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Git status
|
|
||||||
if tc.GitStatus != "" {
|
|
||||||
sb.WriteString("## Git Status\n")
|
|
||||||
sb.WriteString("```\n")
|
|
||||||
sb.WriteString(tc.GitStatus)
|
|
||||||
sb.WriteString("\n```\n\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recent commits
|
|
||||||
if tc.RecentCommits != "" {
|
|
||||||
sb.WriteString("## Recent Commits\n")
|
|
||||||
sb.WriteString("```\n")
|
|
||||||
sb.WriteString(tc.RecentCommits)
|
|
||||||
sb.WriteString("\n```\n\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Related code
|
|
||||||
if len(tc.RelatedCode) > 0 {
|
|
||||||
sb.WriteString("## Related Code\n")
|
|
||||||
for _, f := range tc.RelatedCode {
|
|
||||||
sb.WriteString("### " + f.Path + " (" + f.Language + ")\n")
|
|
||||||
sb.WriteString("```" + f.Language + "\n")
|
|
||||||
sb.WriteString(f.Content)
|
|
||||||
sb.WriteString("\n```\n\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return sb.String()
|
|
||||||
}
|
|
||||||
|
|
@ -1,214 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBuildTaskContext_Good(t *testing.T) {
|
|
||||||
// Create a temp directory with some files
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
|
|
||||||
// Create a test file
|
|
||||||
testFile := filepath.Join(tmpDir, "main.go")
|
|
||||||
err := os.WriteFile(testFile, []byte("package main\n\nfunc main() {}\n"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
task := &Task{
|
|
||||||
ID: "test-123",
|
|
||||||
Title: "Test Task",
|
|
||||||
Description: "A test task description",
|
|
||||||
Priority: PriorityMedium,
|
|
||||||
Status: StatusPending,
|
|
||||||
Files: []string{"main.go"},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, err := BuildTaskContext(task, tmpDir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotNil(t, ctx)
|
|
||||||
assert.Equal(t, task, ctx.Task)
|
|
||||||
assert.Len(t, ctx.Files, 1)
|
|
||||||
assert.Equal(t, "main.go", ctx.Files[0].Path)
|
|
||||||
assert.Equal(t, "go", ctx.Files[0].Language)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuildTaskContext_Bad_NilTask(t *testing.T) {
|
|
||||||
ctx, err := BuildTaskContext(nil, ".")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, ctx)
|
|
||||||
assert.Contains(t, err.Error(), "task is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGatherRelatedFiles_Good(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
|
|
||||||
// Create test files
|
|
||||||
files := map[string]string{
|
|
||||||
"app.go": "package app\n\nfunc Run() {}\n",
|
|
||||||
"config.ts": "export const config = {};\n",
|
|
||||||
"README.md": "# Project\n",
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, content := range files {
|
|
||||||
path := filepath.Join(tmpDir, name)
|
|
||||||
err := os.WriteFile(path, []byte(content), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
task := &Task{
|
|
||||||
ID: "task-1",
|
|
||||||
Title: "Test",
|
|
||||||
Files: []string{"app.go", "config.ts"},
|
|
||||||
}
|
|
||||||
|
|
||||||
gathered, err := GatherRelatedFiles(task, tmpDir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, gathered, 2)
|
|
||||||
|
|
||||||
// Check languages detected correctly
|
|
||||||
foundGo := false
|
|
||||||
foundTS := false
|
|
||||||
for _, f := range gathered {
|
|
||||||
if f.Path == "app.go" {
|
|
||||||
foundGo = true
|
|
||||||
assert.Equal(t, "go", f.Language)
|
|
||||||
}
|
|
||||||
if f.Path == "config.ts" {
|
|
||||||
foundTS = true
|
|
||||||
assert.Equal(t, "typescript", f.Language)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.True(t, foundGo, "should find app.go")
|
|
||||||
assert.True(t, foundTS, "should find config.ts")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGatherRelatedFiles_Bad_NilTask(t *testing.T) {
|
|
||||||
files, err := GatherRelatedFiles(nil, ".")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, files)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGatherRelatedFiles_Good_MissingFiles(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
|
|
||||||
task := &Task{
|
|
||||||
ID: "task-1",
|
|
||||||
Title: "Test",
|
|
||||||
Files: []string{"nonexistent.go", "also-missing.ts"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should not error, just return empty list
|
|
||||||
gathered, err := GatherRelatedFiles(task, tmpDir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Empty(t, gathered)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDetectLanguage(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
path string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"main.go", "go"},
|
|
||||||
{"app.ts", "typescript"},
|
|
||||||
{"app.tsx", "typescript"},
|
|
||||||
{"script.js", "javascript"},
|
|
||||||
{"script.jsx", "javascript"},
|
|
||||||
{"main.py", "python"},
|
|
||||||
{"lib.rs", "rust"},
|
|
||||||
{"App.java", "java"},
|
|
||||||
{"config.yaml", "yaml"},
|
|
||||||
{"config.yml", "yaml"},
|
|
||||||
{"data.json", "json"},
|
|
||||||
{"index.html", "html"},
|
|
||||||
{"styles.css", "css"},
|
|
||||||
{"styles.scss", "scss"},
|
|
||||||
{"query.sql", "sql"},
|
|
||||||
{"README.md", "markdown"},
|
|
||||||
{"unknown.xyz", "text"},
|
|
||||||
{"", "text"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.path, func(t *testing.T) {
|
|
||||||
result := detectLanguage(tt.path)
|
|
||||||
assert.Equal(t, tt.expected, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExtractKeywords(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
text string
|
|
||||||
expected int // minimum number of keywords expected
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "simple title",
|
|
||||||
text: "Add user authentication feature",
|
|
||||||
expected: 2,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "with stop words",
|
|
||||||
text: "The quick brown fox jumps over the lazy dog",
|
|
||||||
expected: 3,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "technical text",
|
|
||||||
text: "Implement OAuth2 authentication with JWT tokens",
|
|
||||||
expected: 3,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "empty",
|
|
||||||
text: "",
|
|
||||||
expected: 0,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "only stop words",
|
|
||||||
text: "the a an and or but in on at",
|
|
||||||
expected: 0,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
keywords := extractKeywords(tt.text)
|
|
||||||
assert.GreaterOrEqual(t, len(keywords), tt.expected)
|
|
||||||
// Keywords should not exceed 5
|
|
||||||
assert.LessOrEqual(t, len(keywords), 5)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTaskContext_FormatContext(t *testing.T) {
|
|
||||||
task := &Task{
|
|
||||||
ID: "test-456",
|
|
||||||
Title: "Test Formatting",
|
|
||||||
Description: "This is a test description",
|
|
||||||
Priority: PriorityHigh,
|
|
||||||
Status: StatusInProgress,
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := &TaskContext{
|
|
||||||
Task: task,
|
|
||||||
Files: []FileContent{{Path: "main.go", Content: "package main", Language: "go"}},
|
|
||||||
GitStatus: " M main.go",
|
|
||||||
RecentCommits: "abc123 Initial commit",
|
|
||||||
RelatedCode: []FileContent{{Path: "util.go", Content: "package util", Language: "go"}},
|
|
||||||
}
|
|
||||||
|
|
||||||
formatted := ctx.FormatContext()
|
|
||||||
|
|
||||||
assert.Contains(t, formatted, "# Task Context")
|
|
||||||
assert.Contains(t, formatted, "test-456")
|
|
||||||
assert.Contains(t, formatted, "Test Formatting")
|
|
||||||
assert.Contains(t, formatted, "## Task Files")
|
|
||||||
assert.Contains(t, formatted, "## Git Status")
|
|
||||||
assert.Contains(t, formatted, "## Recent Commits")
|
|
||||||
assert.Contains(t, formatted, "## Related Code")
|
|
||||||
}
|
|
||||||
|
|
@ -1,19 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"embed"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:embed prompts/*.md
|
|
||||||
var promptsFS embed.FS
|
|
||||||
|
|
||||||
// Prompt returns the content of an embedded prompt file.
|
|
||||||
// Name should be without the .md extension (e.g., "commit").
|
|
||||||
func Prompt(name string) string {
|
|
||||||
data, err := promptsFS.ReadFile("prompts/" + name + ".md")
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(string(data))
|
|
||||||
}
|
|
||||||
|
|
@ -1,44 +0,0 @@
|
||||||
# Commit Instructions
|
|
||||||
|
|
||||||
## Context
|
|
||||||
|
|
||||||
When asked to commit changes in these repositories:
|
|
||||||
|
|
||||||
1. **All changes are pre-approved** - Both tracked (modified) and untracked files have been reviewed and approved for commit
|
|
||||||
2. **Include everything** - Commit ALL modified files AND all untracked files (including new directories)
|
|
||||||
3. **Don't skip files** - Do not leave out untracked files assuming they need separate review
|
|
||||||
4. **Single commit preferred** - Combine all changes into one cohesive commit unless explicitly told otherwise
|
|
||||||
|
|
||||||
The user has already validated these changes. Proceed with confidence.
|
|
||||||
|
|
||||||
## Handling Ignored Files
|
|
||||||
|
|
||||||
If you see untracked directories that should typically be ignored (like `node_modules/`, `vendor/`, `.cache/`, `dist/`, `build/`):
|
|
||||||
|
|
||||||
1. **Fix the .gitignore** - Create or update `.gitignore` to exclude these directories
|
|
||||||
2. **Commit the .gitignore** - Include this fix in your commit
|
|
||||||
3. **Don't ask** - Just fix it and commit
|
|
||||||
|
|
||||||
Common patterns to add to .gitignore:
|
|
||||||
```
|
|
||||||
node_modules/
|
|
||||||
vendor/
|
|
||||||
.cache/
|
|
||||||
dist/
|
|
||||||
build/
|
|
||||||
*.log
|
|
||||||
.env
|
|
||||||
.DS_Store
|
|
||||||
```
|
|
||||||
|
|
||||||
## Commit Message Style
|
|
||||||
|
|
||||||
- Use conventional commit format: `type(scope): description`
|
|
||||||
- Common types: `refactor`, `feat`, `fix`, `docs`, `chore`
|
|
||||||
- Keep the first line under 72 characters
|
|
||||||
- Add body for complex changes explaining the "why"
|
|
||||||
- Include `Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>`
|
|
||||||
|
|
||||||
## Task
|
|
||||||
|
|
||||||
Review the uncommitted changes and create an appropriate commit. Be concise.
|
|
||||||
|
|
@ -1,142 +0,0 @@
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/framework"
|
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Tasks for AI service
|
|
||||||
|
|
||||||
// TaskCommit requests Claude to create a commit.
|
|
||||||
type TaskCommit struct {
|
|
||||||
Path string
|
|
||||||
Name string
|
|
||||||
CanEdit bool // allow Write/Edit tools
|
|
||||||
}
|
|
||||||
|
|
||||||
// TaskPrompt sends a custom prompt to Claude.
|
|
||||||
type TaskPrompt struct {
|
|
||||||
Prompt string
|
|
||||||
WorkDir string
|
|
||||||
AllowedTools []string
|
|
||||||
|
|
||||||
taskID string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TaskPrompt) SetTaskID(id string) { t.taskID = id }
|
|
||||||
func (t *TaskPrompt) GetTaskID() string { return t.taskID }
|
|
||||||
|
|
||||||
// ServiceOptions for configuring the AI service.
|
|
||||||
type ServiceOptions struct {
|
|
||||||
DefaultTools []string
|
|
||||||
AllowEdit bool // global permission for Write/Edit tools
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultServiceOptions returns sensible defaults.
|
|
||||||
func DefaultServiceOptions() ServiceOptions {
|
|
||||||
return ServiceOptions{
|
|
||||||
DefaultTools: []string{"Bash", "Read", "Glob", "Grep"},
|
|
||||||
AllowEdit: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Service provides AI/Claude operations as a Core service.
|
|
||||||
type Service struct {
|
|
||||||
*framework.ServiceRuntime[ServiceOptions]
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewService creates an AI service factory.
|
|
||||||
func NewService(opts ServiceOptions) func(*framework.Core) (any, error) {
|
|
||||||
return func(c *framework.Core) (any, error) {
|
|
||||||
return &Service{
|
|
||||||
ServiceRuntime: framework.NewServiceRuntime(c, opts),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnStartup registers task handlers.
|
|
||||||
func (s *Service) OnStartup(ctx context.Context) error {
|
|
||||||
s.Core().RegisterTask(s.handleTask)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) handleTask(c *framework.Core, t framework.Task) (any, bool, error) {
|
|
||||||
switch m := t.(type) {
|
|
||||||
case TaskCommit:
|
|
||||||
err := s.doCommit(m)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("agentic: commit task failed", "err", err, "path", m.Path)
|
|
||||||
}
|
|
||||||
return nil, true, err
|
|
||||||
|
|
||||||
case TaskPrompt:
|
|
||||||
err := s.doPrompt(m)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("agentic: prompt task failed", "err", err)
|
|
||||||
}
|
|
||||||
return nil, true, err
|
|
||||||
}
|
|
||||||
return nil, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) doCommit(task TaskCommit) error {
|
|
||||||
prompt := Prompt("commit")
|
|
||||||
|
|
||||||
tools := []string{"Bash", "Read", "Glob", "Grep"}
|
|
||||||
if task.CanEdit {
|
|
||||||
tools = []string{"Bash", "Read", "Write", "Edit", "Glob", "Grep"}
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(context.Background(), "claude", "-p", prompt, "--allowedTools", strings.Join(tools, ","))
|
|
||||||
cmd.Dir = task.Path
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
cmd.Stdin = os.Stdin
|
|
||||||
|
|
||||||
return cmd.Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Service) doPrompt(task TaskPrompt) error {
|
|
||||||
if task.taskID != "" {
|
|
||||||
s.Core().Progress(task.taskID, 0.1, "Starting Claude...", &task)
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := s.Opts()
|
|
||||||
tools := opts.DefaultTools
|
|
||||||
if len(tools) == 0 {
|
|
||||||
tools = []string{"Bash", "Read", "Glob", "Grep"}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(task.AllowedTools) > 0 {
|
|
||||||
tools = task.AllowedTools
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(context.Background(), "claude", "-p", task.Prompt, "--allowedTools", strings.Join(tools, ","))
|
|
||||||
if task.WorkDir != "" {
|
|
||||||
cmd.Dir = task.WorkDir
|
|
||||||
}
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
cmd.Stdin = os.Stdin
|
|
||||||
|
|
||||||
if task.taskID != "" {
|
|
||||||
s.Core().Progress(task.taskID, 0.5, "Running Claude prompt...", &task)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := cmd.Run()
|
|
||||||
|
|
||||||
if task.taskID != "" {
|
|
||||||
if err != nil {
|
|
||||||
s.Core().Progress(task.taskID, 1.0, "Failed: "+err.Error(), &task)
|
|
||||||
} else {
|
|
||||||
s.Core().Progress(task.taskID, 1.0, "Completed", &task)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
@ -1,140 +0,0 @@
|
||||||
// Package agentic provides an API client for core-agentic, an AI-assisted task
|
|
||||||
// management service. It enables developers and AI agents to discover, claim,
|
|
||||||
// and complete development tasks.
|
|
||||||
package agentic
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TaskStatus represents the state of a task in the system.
|
|
||||||
type TaskStatus string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// StatusPending indicates the task is available to be claimed.
|
|
||||||
StatusPending TaskStatus = "pending"
|
|
||||||
// StatusInProgress indicates the task has been claimed and is being worked on.
|
|
||||||
StatusInProgress TaskStatus = "in_progress"
|
|
||||||
// StatusCompleted indicates the task has been successfully completed.
|
|
||||||
StatusCompleted TaskStatus = "completed"
|
|
||||||
// StatusBlocked indicates the task cannot proceed due to dependencies.
|
|
||||||
StatusBlocked TaskStatus = "blocked"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TaskPriority represents the urgency level of a task.
|
|
||||||
type TaskPriority string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// PriorityCritical indicates the task requires immediate attention.
|
|
||||||
PriorityCritical TaskPriority = "critical"
|
|
||||||
// PriorityHigh indicates the task is important and should be addressed soon.
|
|
||||||
PriorityHigh TaskPriority = "high"
|
|
||||||
// PriorityMedium indicates the task has normal priority.
|
|
||||||
PriorityMedium TaskPriority = "medium"
|
|
||||||
// PriorityLow indicates the task can be addressed when time permits.
|
|
||||||
PriorityLow TaskPriority = "low"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Task represents a development task in the core-agentic system.
// It is the JSON wire format for the task-queue API; fields tagged
// omitempty are dropped from the payload when empty.
type Task struct {
	// ID is the unique identifier for the task.
	ID string `json:"id"`
	// Title is the short description of the task.
	Title string `json:"title"`
	// Description provides detailed information about what needs to be done.
	Description string `json:"description"`
	// Priority indicates the urgency of the task.
	Priority TaskPriority `json:"priority"`
	// Status indicates the current state of the task.
	Status TaskStatus `json:"status"`
	// Labels are tags used to categorize the task.
	Labels []string `json:"labels,omitempty"`
	// Files lists the files that are relevant to this task.
	Files []string `json:"files,omitempty"`
	// CreatedAt is when the task was created.
	CreatedAt time.Time `json:"created_at"`
	// UpdatedAt is when the task was last modified.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// ClaimedBy is the identifier of the agent or developer who claimed the task.
	ClaimedBy string `json:"claimed_by,omitempty"`
	// ClaimedAt is when the task was claimed (nil while unclaimed).
	ClaimedAt *time.Time `json:"claimed_at,omitempty"`
	// Project is the project this task belongs to.
	Project string `json:"project,omitempty"`
	// Dependencies lists task IDs that must be completed before this task.
	Dependencies []string `json:"dependencies,omitempty"`
	// Blockers lists task IDs that this task is blocking.
	Blockers []string `json:"blockers,omitempty"`
}
|
|
||||||
|
|
||||||
// TaskUpdate contains fields that can be updated on a task.
// Zero-valued fields are omitted from the JSON payload.
type TaskUpdate struct {
	// Status is the new status for the task.
	Status TaskStatus `json:"status,omitempty"`
	// Progress is a percentage (0-100) indicating completion.
	Progress int `json:"progress,omitempty"`
	// Notes are additional comments about the update.
	Notes string `json:"notes,omitempty"`
}
|
|
||||||
|
|
||||||
// TaskResult contains the outcome of a completed task.
// Only Success is always present in the JSON payload.
type TaskResult struct {
	// Success indicates whether the task was completed successfully.
	Success bool `json:"success"`
	// Output is the result or summary of the completed work.
	Output string `json:"output,omitempty"`
	// Artifacts are files or resources produced by the task.
	Artifacts []string `json:"artifacts,omitempty"`
	// ErrorMessage contains details if the task failed.
	ErrorMessage string `json:"error_message,omitempty"`
}
|
|
||||||
|
|
||||||
// ListOptions specifies filters for listing tasks.
// Zero-valued fields are omitted from the JSON payload and therefore
// apply no filter.
type ListOptions struct {
	// Status filters tasks by their current status.
	Status TaskStatus `json:"status,omitempty"`
	// Labels filters tasks that have all specified labels.
	Labels []string `json:"labels,omitempty"`
	// Priority filters tasks by priority level.
	Priority TaskPriority `json:"priority,omitempty"`
	// Limit is the maximum number of tasks to return.
	Limit int `json:"limit,omitempty"`
	// Project filters tasks by project.
	Project string `json:"project,omitempty"`
	// ClaimedBy filters tasks claimed by a specific agent.
	ClaimedBy string `json:"claimed_by,omitempty"`
}
|
|
||||||
|
|
||||||
// APIError represents an error response from the API.
// It satisfies the error interface via the Error method.
type APIError struct {
	// Code is the HTTP status code.
	Code int `json:"code"`
	// Message is the error description.
	Message string `json:"message"`
	// Details provides additional context about the error.
	Details string `json:"details,omitempty"`
}
|
|
||||||
|
|
||||||
// Error implements the error interface for APIError.
|
|
||||||
func (e *APIError) Error() string {
|
|
||||||
if e.Details != "" {
|
|
||||||
return e.Message + ": " + e.Details
|
|
||||||
}
|
|
||||||
return e.Message
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClaimResponse is returned when a task is successfully claimed.
type ClaimResponse struct {
	// Task is the claimed task with updated fields.
	Task *Task `json:"task"`
	// Message provides additional context about the claim.
	Message string `json:"message,omitempty"`
}
|
|
||||||
|
|
||||||
// CompleteResponse is returned when a task is completed.
type CompleteResponse struct {
	// Task is the completed task with final status.
	Task *Task `json:"task"`
	// Message provides additional context about the completion.
	Message string `json:"message,omitempty"`
}
|
|
||||||
11
pkg/ai/ai.go
11
pkg/ai/ai.go
|
|
@ -1,11 +0,0 @@
|
||||||
// Package ai provides the unified AI package for the core CLI.
|
|
||||||
//
|
|
||||||
// It composes functionality from pkg/rag (vector search) and pkg/agentic
|
|
||||||
// (task management) into a single public API surface. New AI features
|
|
||||||
// should be added here; existing packages remain importable but pkg/ai
|
|
||||||
// is the canonical entry point.
|
|
||||||
//
|
|
||||||
// Sub-packages composed:
|
|
||||||
// - pkg/rag: Qdrant vector database + Ollama embeddings
|
|
||||||
// - pkg/agentic: Task queue client and context building
|
|
||||||
package ai
|
|
||||||
|
|
@ -1,171 +0,0 @@
|
||||||
package ai
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Event represents a recorded AI/security metric event.
// Record serializes each event as one JSON object per line (JSONL)
// in a daily file; Summary aggregates events by Type, Repo and AgentID.
type Event struct {
	// Type is the event category used for aggregation.
	Type string `json:"type"`
	// Timestamp is when the event occurred; Record fills it if zero.
	Timestamp time.Time `json:"timestamp"`
	// AgentID identifies the agent that produced the event, if any.
	AgentID string `json:"agent_id,omitempty"`
	// Repo is the repository the event relates to, if any.
	Repo string `json:"repo,omitempty"`
	// Duration is how long the underlying operation took, if measured.
	Duration time.Duration `json:"duration,omitempty"`
	// Data holds arbitrary event-specific payload.
	Data map[string]any `json:"data,omitempty"`
}
|
|
||||||
|
|
||||||
// metricsDir returns the base directory for metrics storage,
// ~/.core/ai/metrics under the current user's home directory.
func metricsDir() (string, error) {
	base, err := os.UserHomeDir()
	if err != nil {
		return "", fmt.Errorf("get home directory: %w", err)
	}
	return filepath.Join(base, ".core", "ai", "metrics"), nil
}
|
|
||||||
|
|
||||||
// metricsFilePath returns the JSONL file path for the given date.
|
|
||||||
func metricsFilePath(dir string, t time.Time) string {
|
|
||||||
return filepath.Join(dir, t.Format("2006-01-02")+".jsonl")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record appends an event to the daily JSONL file at
|
|
||||||
// ~/.core/ai/metrics/YYYY-MM-DD.jsonl.
|
|
||||||
func Record(event Event) (err error) {
|
|
||||||
if event.Timestamp.IsZero() {
|
|
||||||
event.Timestamp = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
dir, err := metricsDir()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
|
||||||
return fmt.Errorf("create metrics directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
path := metricsFilePath(dir, event.Timestamp)
|
|
||||||
|
|
||||||
f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("open metrics file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if cerr := f.Close(); cerr != nil && err == nil {
|
|
||||||
err = fmt.Errorf("close metrics file: %w", cerr)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
data, err := json.Marshal(event)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("marshal event: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := f.Write(append(data, '\n')); err != nil {
|
|
||||||
return fmt.Errorf("write event: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadEvents reads events from JSONL files within the given time range.
|
|
||||||
func ReadEvents(since time.Time) ([]Event, error) {
|
|
||||||
dir, err := metricsDir()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var events []Event
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
// Iterate each day from since to now.
|
|
||||||
for d := time.Date(since.Year(), since.Month(), since.Day(), 0, 0, 0, 0, time.Local); !d.After(now); d = d.AddDate(0, 0, 1) {
|
|
||||||
path := metricsFilePath(dir, d)
|
|
||||||
|
|
||||||
dayEvents, err := readMetricsFile(path, since)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
events = append(events, dayEvents...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return events, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readMetricsFile reads events from a single JSONL file, returning only those at or after since.
|
|
||||||
func readMetricsFile(path string, since time.Time) ([]Event, error) {
|
|
||||||
f, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("open metrics file %s: %w", path, err)
|
|
||||||
}
|
|
||||||
defer func() { _ = f.Close() }()
|
|
||||||
|
|
||||||
var events []Event
|
|
||||||
scanner := bufio.NewScanner(f)
|
|
||||||
for scanner.Scan() {
|
|
||||||
var ev Event
|
|
||||||
if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil {
|
|
||||||
continue // skip malformed lines
|
|
||||||
}
|
|
||||||
if !ev.Timestamp.Before(since) {
|
|
||||||
events = append(events, ev)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return nil, fmt.Errorf("read metrics file %s: %w", path, err)
|
|
||||||
}
|
|
||||||
return events, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Summary aggregates events into counts by type, repo, and agent.
|
|
||||||
func Summary(events []Event) map[string]any {
|
|
||||||
byType := make(map[string]int)
|
|
||||||
byRepo := make(map[string]int)
|
|
||||||
byAgent := make(map[string]int)
|
|
||||||
|
|
||||||
for _, ev := range events {
|
|
||||||
byType[ev.Type]++
|
|
||||||
if ev.Repo != "" {
|
|
||||||
byRepo[ev.Repo]++
|
|
||||||
}
|
|
||||||
if ev.AgentID != "" {
|
|
||||||
byAgent[ev.AgentID]++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return map[string]any{
|
|
||||||
"total": len(events),
|
|
||||||
"by_type": sortedMap(byType),
|
|
||||||
"by_repo": sortedMap(byRepo),
|
|
||||||
"by_agent": sortedMap(byAgent),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sortedMap returns a slice of {"key","count"} pairs sorted by count
// descending. Ties are broken by key ascending so the output is
// deterministic — without the tie-break, Go's random map iteration
// order made equal-count entries come out in a different order each run.
func sortedMap(m map[string]int) []map[string]any {
	type entry struct {
		key   string
		count int
	}
	entries := make([]entry, 0, len(m))
	for k, v := range m {
		entries = append(entries, entry{k, v})
	}
	sort.Slice(entries, func(i, j int) bool {
		if entries[i].count != entries[j].count {
			return entries[i].count > entries[j].count
		}
		return entries[i].key < entries[j].key
	})
	result := make([]map[string]any, len(entries))
	for i, e := range entries {
		result[i] = map[string]any{"key": e.key, "count": e.count}
	}
	return result
}
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
package ai
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/rag"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TaskInfo carries the minimal task data needed for RAG queries,
// avoiding a direct dependency on pkg/agentic (which imports pkg/ai).
type TaskInfo struct {
	// Title is the short task summary; combined with Description to
	// build the RAG query text.
	Title string
	// Description is the detailed task text.
	Description string
}
|
|
||||||
|
|
||||||
// QueryRAGForTask queries Qdrant for documentation relevant to a task.
|
|
||||||
// It builds a query from the task title and description, queries with
|
|
||||||
// sensible defaults, and returns formatted context. Returns "" on any
|
|
||||||
// error (e.g. Qdrant/Ollama not running) for graceful degradation.
|
|
||||||
func QueryRAGForTask(task TaskInfo) string {
|
|
||||||
query := task.Title + " " + task.Description
|
|
||||||
|
|
||||||
// Truncate to 500 runes to keep the embedding focused.
|
|
||||||
runes := []rune(query)
|
|
||||||
if len(runes) > 500 {
|
|
||||||
query = string(runes[:500])
|
|
||||||
}
|
|
||||||
|
|
||||||
qdrantCfg := rag.DefaultQdrantConfig()
|
|
||||||
qdrantClient, err := rag.NewQdrantClient(qdrantCfg)
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
defer func() { _ = qdrantClient.Close() }()
|
|
||||||
|
|
||||||
ollamaCfg := rag.DefaultOllamaConfig()
|
|
||||||
ollamaClient, err := rag.NewOllamaClient(ollamaCfg)
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
queryCfg := rag.QueryConfig{
|
|
||||||
Collection: "hostuk-docs",
|
|
||||||
Limit: 3,
|
|
||||||
Threshold: 0.5,
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := rag.Query(ctx, qdrantClient, ollamaClient, query, queryCfg)
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return rag.FormatResultsContext(results)
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -1,438 +0,0 @@
|
||||||
package ansible
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
|
||||||
"gopkg.in/yaml.v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Parser handles Ansible YAML parsing.
type Parser struct {
	// basePath is the root directory used to resolve role search paths.
	basePath string
	// vars accumulates variables merged from plays, role defaults, and
	// role vars files as parsing proceeds.
	vars map[string]any
}
|
|
||||||
|
|
||||||
// NewParser creates a new Ansible parser.
|
|
||||||
func NewParser(basePath string) *Parser {
|
|
||||||
return &Parser{
|
|
||||||
basePath: basePath,
|
|
||||||
vars: make(map[string]any),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParsePlaybook parses an Ansible playbook file.
|
|
||||||
func (p *Parser) ParsePlaybook(path string) ([]Play, error) {
|
|
||||||
data, err := os.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("read playbook: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var plays []Play
|
|
||||||
if err := yaml.Unmarshal(data, &plays); err != nil {
|
|
||||||
return nil, fmt.Errorf("parse playbook: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process each play
|
|
||||||
for i := range plays {
|
|
||||||
if err := p.processPlay(&plays[i]); err != nil {
|
|
||||||
return nil, fmt.Errorf("process play %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return plays, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseInventory parses an Ansible inventory file.
|
|
||||||
func (p *Parser) ParseInventory(path string) (*Inventory, error) {
|
|
||||||
data, err := os.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("read inventory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var inv Inventory
|
|
||||||
if err := yaml.Unmarshal(data, &inv); err != nil {
|
|
||||||
return nil, fmt.Errorf("parse inventory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &inv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseTasks parses a tasks file (used by include_tasks).
|
|
||||||
func (p *Parser) ParseTasks(path string) ([]Task, error) {
|
|
||||||
data, err := os.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("read tasks: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var tasks []Task
|
|
||||||
if err := yaml.Unmarshal(data, &tasks); err != nil {
|
|
||||||
return nil, fmt.Errorf("parse tasks: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range tasks {
|
|
||||||
if err := p.extractModule(&tasks[i]); err != nil {
|
|
||||||
return nil, fmt.Errorf("task %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return tasks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseRole parses a role and returns its tasks.
// The role's tasks file (tasksFrom, default "main.yml") is located by
// probing a fixed list of conventional directory layouts; the first
// existing path wins. Before parsing, role defaults are merged into the
// parser vars without overriding existing keys, while role vars always
// override — matching Ansible's defaults < vars precedence.
func (p *Parser) ParseRole(name string, tasksFrom string) ([]Task, error) {
	if tasksFrom == "" {
		tasksFrom = "main.yml"
	}

	// Search paths for roles (in order of precedence)
	searchPaths := []string{
		// Relative to playbook
		filepath.Join(p.basePath, "roles", name, "tasks", tasksFrom),
		// Parent directory roles
		filepath.Join(filepath.Dir(p.basePath), "roles", name, "tasks", tasksFrom),
		// Sibling roles directory
		filepath.Join(p.basePath, "..", "roles", name, "tasks", tasksFrom),
		// playbooks/roles pattern
		filepath.Join(p.basePath, "playbooks", "roles", name, "tasks", tasksFrom),
		// Common DevOps structure
		filepath.Join(filepath.Dir(filepath.Dir(p.basePath)), "roles", name, "tasks", tasksFrom),
	}

	var tasksPath string
	for _, sp := range searchPaths {
		// Clean the path to resolve .. segments
		sp = filepath.Clean(sp)
		if _, err := os.Stat(sp); err == nil {
			tasksPath = sp
			break
		}
	}

	if tasksPath == "" {
		return nil, log.E("parser.ParseRole", fmt.Sprintf("role %s not found in search paths: %v", name, searchPaths), nil)
	}

	// Load role defaults (lowest precedence: never overrides an
	// already-set variable). Read/parse failures are deliberately
	// ignored — defaults are optional.
	defaultsPath := filepath.Join(filepath.Dir(filepath.Dir(tasksPath)), "defaults", "main.yml")
	if data, err := os.ReadFile(defaultsPath); err == nil {
		var defaults map[string]any
		if yaml.Unmarshal(data, &defaults) == nil {
			for k, v := range defaults {
				if _, exists := p.vars[k]; !exists {
					p.vars[k] = v
				}
			}
		}
	}

	// Load role vars (higher precedence: overrides existing keys).
	// Also optional; failures ignored.
	varsPath := filepath.Join(filepath.Dir(filepath.Dir(tasksPath)), "vars", "main.yml")
	if data, err := os.ReadFile(varsPath); err == nil {
		var roleVars map[string]any
		if yaml.Unmarshal(data, &roleVars) == nil {
			for k, v := range roleVars {
				p.vars[k] = v
			}
		}
	}

	return p.ParseTasks(tasksPath)
}
|
|
||||||
|
|
||||||
// processPlay processes a play and extracts modules from tasks.
|
|
||||||
func (p *Parser) processPlay(play *Play) error {
|
|
||||||
// Merge play vars
|
|
||||||
for k, v := range play.Vars {
|
|
||||||
p.vars[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range play.PreTasks {
|
|
||||||
if err := p.extractModule(&play.PreTasks[i]); err != nil {
|
|
||||||
return fmt.Errorf("pre_task %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range play.Tasks {
|
|
||||||
if err := p.extractModule(&play.Tasks[i]); err != nil {
|
|
||||||
return fmt.Errorf("task %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range play.PostTasks {
|
|
||||||
if err := p.extractModule(&play.PostTasks[i]); err != nil {
|
|
||||||
return fmt.Errorf("post_task %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range play.Handlers {
|
|
||||||
if err := p.extractModule(&play.Handlers[i]); err != nil {
|
|
||||||
return fmt.Errorf("handler %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractModule recurses into a task's block/rescue/always child lists
// so that every nested task is visited. For a leaf task this is
// currently a no-op and always returns nil: the actual module-name/args
// extraction happens in Task.UnmarshalYAML during YAML decoding, not
// here.
func (p *Parser) extractModule(task *Task) error {
	// First, unmarshal the raw YAML to get all keys
	// This is a workaround since we need to find the module key dynamically

	// Handle block tasks
	for i := range task.Block {
		if err := p.extractModule(&task.Block[i]); err != nil {
			return err
		}
	}
	for i := range task.Rescue {
		if err := p.extractModule(&task.Rescue[i]); err != nil {
			return err
		}
	}
	for i := range task.Always {
		if err := p.extractModule(&task.Always[i]); err != nil {
			return err
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// UnmarshalYAML implements custom YAML unmarshaling for Task.
// It decodes the node twice: once into a raw map (kept in t.raw) and
// once into the struct fields. It then scans the map for the first key
// that is not a known task directive and looks like a module, recording
// it as t.Module with its arguments in t.Args. Finally, with_items is
// folded into Loop when Loop is unset.
func (t *Task) UnmarshalYAML(node *yaml.Node) error {
	// First decode known fields
	type rawTask Task
	var raw rawTask

	// Create a map to capture all fields
	var m map[string]any
	if err := node.Decode(&m); err != nil {
		return err
	}

	// Decode into struct
	if err := node.Decode(&raw); err != nil {
		return err
	}
	*t = Task(raw)
	t.raw = m

	// Find the module key. knownKeys lists task directives that are
	// never modules and are skipped during the search.
	knownKeys := map[string]bool{
		"name": true, "register": true, "when": true, "loop": true,
		"loop_control": true, "vars": true, "environment": true,
		"changed_when": true, "failed_when": true, "ignore_errors": true,
		"no_log": true, "become": true, "become_user": true,
		"delegate_to": true, "run_once": true, "tags": true,
		"block": true, "rescue": true, "always": true, "notify": true,
		"retries": true, "delay": true, "until": true,
		"include_tasks": true, "import_tasks": true,
		"include_role": true, "import_role": true,
		"with_items": true, "with_dict": true, "with_file": true,
	}

	// NOTE(review): map iteration order is random, so if a task somehow
	// contained two module-like keys, which one wins is nondeterministic.
	for key, val := range m {
		if knownKeys[key] {
			continue
		}

		// Check if this is a module
		if isModule(key) {
			t.Module = key
			t.Args = make(map[string]any)

			switch v := val.(type) {
			case string:
				// Free-form args (e.g., shell: echo hello)
				t.Args["_raw_params"] = v
			case map[string]any:
				t.Args = v
			case nil:
				// Module with no args
			default:
				// Non-string, non-map scalar (e.g. list) kept raw.
				t.Args["_raw_params"] = v
			}
			break
		}
	}

	// Handle with_items as loop (legacy syntax; explicit loop wins)
	if items, ok := m["with_items"]; ok && t.Loop == nil {
		t.Loop = items
	}

	return nil
}
|
|
||||||
|
|
||||||
// isModule checks if a key is a known module.
|
|
||||||
func isModule(key string) bool {
|
|
||||||
for _, m := range KnownModules {
|
|
||||||
if key == m {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Also check without ansible.builtin. prefix
|
|
||||||
if strings.HasPrefix(m, "ansible.builtin.") {
|
|
||||||
if key == strings.TrimPrefix(m, "ansible.builtin.") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Accept any key with dots (likely a module)
|
|
||||||
return strings.Contains(key, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
// NormalizeModule normalizes a module name to its canonical form by
// prefixing bare (dot-free) names with ansible.builtin.
func NormalizeModule(name string) string {
	if strings.Contains(name, ".") {
		return name
	}
	return "ansible.builtin." + name
}
|
|
||||||
|
|
||||||
// GetHosts returns hosts matching a pattern from inventory.
|
|
||||||
func GetHosts(inv *Inventory, pattern string) []string {
|
|
||||||
if pattern == "all" {
|
|
||||||
return getAllHosts(inv.All)
|
|
||||||
}
|
|
||||||
if pattern == "localhost" {
|
|
||||||
return []string{"localhost"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if it's a group name
|
|
||||||
hosts := getGroupHosts(inv.All, pattern)
|
|
||||||
if len(hosts) > 0 {
|
|
||||||
return hosts
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if it's a specific host
|
|
||||||
if hasHost(inv.All, pattern) {
|
|
||||||
return []string{pattern}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle patterns with : (intersection/union)
|
|
||||||
// For now, just return empty
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getAllHosts(group *InventoryGroup) []string {
|
|
||||||
if group == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var hosts []string
|
|
||||||
for name := range group.Hosts {
|
|
||||||
hosts = append(hosts, name)
|
|
||||||
}
|
|
||||||
for _, child := range group.Children {
|
|
||||||
hosts = append(hosts, getAllHosts(child)...)
|
|
||||||
}
|
|
||||||
return hosts
|
|
||||||
}
|
|
||||||
|
|
||||||
func getGroupHosts(group *InventoryGroup, name string) []string {
|
|
||||||
if group == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check children for the group name
|
|
||||||
if child, ok := group.Children[name]; ok {
|
|
||||||
return getAllHosts(child)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recurse
|
|
||||||
for _, child := range group.Children {
|
|
||||||
if hosts := getGroupHosts(child, name); len(hosts) > 0 {
|
|
||||||
return hosts
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasHost(group *InventoryGroup, name string) bool {
|
|
||||||
if group == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := group.Hosts[name]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, child := range group.Children {
|
|
||||||
if hasHost(child, name) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHostVars returns variables for a specific host.
|
|
||||||
func GetHostVars(inv *Inventory, hostname string) map[string]any {
|
|
||||||
vars := make(map[string]any)
|
|
||||||
|
|
||||||
// Collect vars from all levels
|
|
||||||
collectHostVars(inv.All, hostname, vars)
|
|
||||||
|
|
||||||
return vars
|
|
||||||
}
|
|
||||||
|
|
||||||
// collectHostVars walks the inventory tree looking for hostname and
// merges variables into vars. Within a matching group the order is:
// group vars, then the host's ansible_* connection fields, then the
// host's own vars — later writes win. Ancestor-group vars are applied
// on the way back up only for keys not already set, so more specific
// (deeper) values take precedence. Reports whether the host was found
// in this group or any descendant.
func collectHostVars(group *InventoryGroup, hostname string, vars map[string]any) bool {
	if group == nil {
		return false
	}

	// Check if host is in this group
	found := false
	if host, ok := group.Hosts[hostname]; ok {
		found = true
		// Apply group vars first
		for k, v := range group.Vars {
			vars[k] = v
		}
		// Then host vars (host entry may be nil for a bare hostname)
		if host != nil {
			if host.AnsibleHost != "" {
				vars["ansible_host"] = host.AnsibleHost
			}
			if host.AnsiblePort != 0 {
				vars["ansible_port"] = host.AnsiblePort
			}
			if host.AnsibleUser != "" {
				vars["ansible_user"] = host.AnsibleUser
			}
			if host.AnsiblePassword != "" {
				vars["ansible_password"] = host.AnsiblePassword
			}
			if host.AnsibleSSHPrivateKeyFile != "" {
				vars["ansible_ssh_private_key_file"] = host.AnsibleSSHPrivateKeyFile
			}
			if host.AnsibleConnection != "" {
				vars["ansible_connection"] = host.AnsibleConnection
			}
			for k, v := range host.Vars {
				vars[k] = v
			}
		}
	}

	// Check children
	for _, child := range group.Children {
		if collectHostVars(child, hostname, vars) {
			// Apply this group's vars (parent vars) without overriding
			// values already set at a deeper level.
			for k, v := range group.Vars {
				if _, exists := vars[k]; !exists {
					vars[k] = v
				}
			}
			found = true
		}
	}

	return found
}
|
|
||||||
|
|
@ -1,451 +0,0 @@
|
||||||
package ansible
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
|
||||||
"golang.org/x/crypto/ssh"
|
|
||||||
"golang.org/x/crypto/ssh/knownhosts"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SSHClient handles SSH connections to remote hosts.
// The connection is established lazily by Connect and reused until
// Close. Construct via NewSSHClient; the zero value has no defaults
// applied.
type SSHClient struct {
	// host is the remote host name or address.
	host string
	// port is the remote TCP port.
	port int
	// user is the login user.
	user string
	// password is used for password/keyboard-interactive auth.
	password string
	// keyFile is the path to a private key file; "~" is expanded.
	keyFile string
	// client is the live SSH connection; nil until Connect succeeds.
	client *ssh.Client
	// mu guards client across Connect/Close.
	mu sync.Mutex
	// become enables running remote commands via sudo.
	become bool
	// becomeUser is the sudo target user.
	becomeUser string
	// becomePass is the sudo password.
	becomePass string
	// timeout is the SSH handshake timeout.
	timeout time.Duration
}
|
|
||||||
|
|
||||||
// SSHConfig holds SSH connection configuration.
// Zero values for Port, User, and Timeout are replaced with defaults
// (22, "root", 30s) by NewSSHClient.
type SSHConfig struct {
	// Host is the remote host name or address.
	Host string
	// Port is the TCP port (default 22).
	Port int
	// User is the login user (default "root").
	User string
	// Password enables password and keyboard-interactive auth.
	Password string
	// KeyFile is the path to a private key; "~" is expanded.
	KeyFile string
	// Become runs remote commands via sudo.
	Become bool
	// BecomeUser is the sudo target user.
	BecomeUser string
	// BecomePass is the sudo password.
	BecomePass string
	// Timeout is the SSH handshake timeout (default 30s).
	Timeout time.Duration
}
|
|
||||||
|
|
||||||
// NewSSHClient creates a new SSH client.
|
|
||||||
func NewSSHClient(cfg SSHConfig) (*SSHClient, error) {
|
|
||||||
if cfg.Port == 0 {
|
|
||||||
cfg.Port = 22
|
|
||||||
}
|
|
||||||
if cfg.User == "" {
|
|
||||||
cfg.User = "root"
|
|
||||||
}
|
|
||||||
if cfg.Timeout == 0 {
|
|
||||||
cfg.Timeout = 30 * time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
client := &SSHClient{
|
|
||||||
host: cfg.Host,
|
|
||||||
port: cfg.Port,
|
|
||||||
user: cfg.User,
|
|
||||||
password: cfg.Password,
|
|
||||||
keyFile: cfg.KeyFile,
|
|
||||||
become: cfg.Become,
|
|
||||||
becomeUser: cfg.BecomeUser,
|
|
||||||
becomePass: cfg.BecomePass,
|
|
||||||
timeout: cfg.Timeout,
|
|
||||||
}
|
|
||||||
|
|
||||||
return client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect establishes the SSH connection. It is idempotent: if a client
// already exists the call returns immediately. Authentication methods
// are gathered in order: the configured key file, then default keys
// (~/.ssh/id_ed25519, ~/.ssh/id_rsa), then password plus
// keyboard-interactive. Host keys are verified strictly against
// ~/.ssh/known_hosts, which is created empty if missing — so a host not
// yet in known_hosts will fail verification until its key is added.
func (c *SSHClient) Connect(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.client != nil {
		return nil
	}

	var authMethods []ssh.AuthMethod

	// Try key-based auth first. Unreadable/unparsable keys are skipped
	// silently so we can fall through to other methods.
	if c.keyFile != "" {
		keyPath := c.keyFile
		if strings.HasPrefix(keyPath, "~") {
			home, _ := os.UserHomeDir()
			keyPath = filepath.Join(home, keyPath[1:])
		}

		if key, err := os.ReadFile(keyPath); err == nil {
			if signer, err := ssh.ParsePrivateKey(key); err == nil {
				authMethods = append(authMethods, ssh.PublicKeys(signer))
			}
		}
	}

	// Try default SSH keys only when the configured key yielded nothing.
	if len(authMethods) == 0 {
		home, _ := os.UserHomeDir()
		defaultKeys := []string{
			filepath.Join(home, ".ssh", "id_ed25519"),
			filepath.Join(home, ".ssh", "id_rsa"),
		}
		for _, keyPath := range defaultKeys {
			if key, err := os.ReadFile(keyPath); err == nil {
				if signer, err := ssh.ParsePrivateKey(key); err == nil {
					authMethods = append(authMethods, ssh.PublicKeys(signer))
					break
				}
			}
		}
	}

	// Fall back to password auth; keyboard-interactive answers every
	// prompt with the same password.
	if c.password != "" {
		authMethods = append(authMethods, ssh.Password(c.password))
		authMethods = append(authMethods, ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
			answers := make([]string, len(questions))
			for i := range questions {
				answers[i] = c.password
			}
			return answers, nil
		}))
	}

	if len(authMethods) == 0 {
		return log.E("ssh.Connect", "no authentication method available", nil)
	}

	// Host key verification
	var hostKeyCallback ssh.HostKeyCallback

	home, err := os.UserHomeDir()
	if err != nil {
		return log.E("ssh.Connect", "failed to get user home dir", err)
	}
	knownHostsPath := filepath.Join(home, ".ssh", "known_hosts")

	// Ensure known_hosts file exists so knownhosts.New can load it.
	if _, err := os.Stat(knownHostsPath); os.IsNotExist(err) {
		if err := os.MkdirAll(filepath.Dir(knownHostsPath), 0700); err != nil {
			return log.E("ssh.Connect", "failed to create .ssh dir", err)
		}
		if err := os.WriteFile(knownHostsPath, nil, 0600); err != nil {
			return log.E("ssh.Connect", "failed to create known_hosts file", err)
		}
	}

	cb, err := knownhosts.New(knownHostsPath)
	if err != nil {
		return log.E("ssh.Connect", "failed to load known_hosts", err)
	}
	hostKeyCallback = cb

	config := &ssh.ClientConfig{
		User:            c.user,
		Auth:            authMethods,
		HostKeyCallback: hostKeyCallback,
		Timeout:         c.timeout,
	}

	addr := fmt.Sprintf("%s:%d", c.host, c.port)

	// Connect with context timeout (the TCP dial honors ctx; the SSH
	// handshake honors config.Timeout).
	var d net.Dialer
	conn, err := d.DialContext(ctx, "tcp", addr)
	if err != nil {
		return log.E("ssh.Connect", fmt.Sprintf("dial %s", addr), err)
	}

	sshConn, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
	if err != nil {
		// conn is closed by NewClientConn on error
		return log.E("ssh.Connect", fmt.Sprintf("ssh connect %s", addr), err)
	}

	c.client = ssh.NewClient(sshConn, chans, reqs)
	return nil
}
|
|
||||||
|
|
||||||
// Close closes the SSH connection.
|
|
||||||
func (c *SSHClient) Close() error {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
if c.client != nil {
|
|
||||||
err := c.client.Close()
|
|
||||||
c.client = nil
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run executes a command on the remote host.
|
|
||||||
func (c *SSHClient) Run(ctx context.Context, cmd string) (stdout, stderr string, exitCode int, err error) {
|
|
||||||
if err := c.Connect(ctx); err != nil {
|
|
||||||
return "", "", -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
session, err := c.client.NewSession()
|
|
||||||
if err != nil {
|
|
||||||
return "", "", -1, log.E("ssh.Run", "new session", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = session.Close() }()
|
|
||||||
|
|
||||||
var stdoutBuf, stderrBuf bytes.Buffer
|
|
||||||
session.Stdout = &stdoutBuf
|
|
||||||
session.Stderr = &stderrBuf
|
|
||||||
|
|
||||||
// Apply become if needed
|
|
||||||
if c.become {
|
|
||||||
becomeUser := c.becomeUser
|
|
||||||
if becomeUser == "" {
|
|
||||||
becomeUser = "root"
|
|
||||||
}
|
|
||||||
// Escape single quotes in the command
|
|
||||||
escapedCmd := strings.ReplaceAll(cmd, "'", "'\\''")
|
|
||||||
if c.becomePass != "" {
|
|
||||||
// Use sudo with password via stdin (-S flag)
|
|
||||||
// We launch a goroutine to write the password to stdin
|
|
||||||
cmd = fmt.Sprintf("sudo -S -u %s bash -c '%s'", becomeUser, escapedCmd)
|
|
||||||
stdin, err := session.StdinPipe()
|
|
||||||
if err != nil {
|
|
||||||
return "", "", -1, log.E("ssh.Run", "stdin pipe", err)
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
defer func() { _ = stdin.Close() }()
|
|
||||||
_, _ = io.WriteString(stdin, c.becomePass+"\n")
|
|
||||||
}()
|
|
||||||
} else if c.password != "" {
|
|
||||||
// Try using connection password for sudo
|
|
||||||
cmd = fmt.Sprintf("sudo -S -u %s bash -c '%s'", becomeUser, escapedCmd)
|
|
||||||
stdin, err := session.StdinPipe()
|
|
||||||
if err != nil {
|
|
||||||
return "", "", -1, log.E("ssh.Run", "stdin pipe", err)
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
defer func() { _ = stdin.Close() }()
|
|
||||||
_, _ = io.WriteString(stdin, c.password+"\n")
|
|
||||||
}()
|
|
||||||
} else {
|
|
||||||
// Try passwordless sudo
|
|
||||||
cmd = fmt.Sprintf("sudo -n -u %s bash -c '%s'", becomeUser, escapedCmd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run with context
|
|
||||||
done := make(chan error, 1)
|
|
||||||
go func() {
|
|
||||||
done <- session.Run(cmd)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
_ = session.Signal(ssh.SIGKILL)
|
|
||||||
return "", "", -1, ctx.Err()
|
|
||||||
case err := <-done:
|
|
||||||
exitCode = 0
|
|
||||||
if err != nil {
|
|
||||||
if exitErr, ok := err.(*ssh.ExitError); ok {
|
|
||||||
exitCode = exitErr.ExitStatus()
|
|
||||||
} else {
|
|
||||||
return stdoutBuf.String(), stderrBuf.String(), -1, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return stdoutBuf.String(), stderrBuf.String(), exitCode, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunScript runs a script on the remote host.
|
|
||||||
func (c *SSHClient) RunScript(ctx context.Context, script string) (stdout, stderr string, exitCode int, err error) {
|
|
||||||
// Escape the script for heredoc
|
|
||||||
cmd := fmt.Sprintf("bash <<'ANSIBLE_SCRIPT_EOF'\n%s\nANSIBLE_SCRIPT_EOF", script)
|
|
||||||
return c.Run(ctx, cmd)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Upload copies a file to the remote host.
|
|
||||||
func (c *SSHClient) Upload(ctx context.Context, local io.Reader, remote string, mode os.FileMode) error {
|
|
||||||
if err := c.Connect(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read content
|
|
||||||
content, err := io.ReadAll(local)
|
|
||||||
if err != nil {
|
|
||||||
return log.E("ssh.Upload", "read content", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create parent directory
|
|
||||||
dir := filepath.Dir(remote)
|
|
||||||
dirCmd := fmt.Sprintf("mkdir -p %q", dir)
|
|
||||||
if c.become {
|
|
||||||
dirCmd = fmt.Sprintf("sudo mkdir -p %q", dir)
|
|
||||||
}
|
|
||||||
if _, _, _, err := c.Run(ctx, dirCmd); err != nil {
|
|
||||||
return log.E("ssh.Upload", "create parent dir", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use cat to write the file (simpler than SCP)
|
|
||||||
writeCmd := fmt.Sprintf("cat > %q && chmod %o %q", remote, mode, remote)
|
|
||||||
|
|
||||||
// If become is needed, we construct a command that reads password then content from stdin
|
|
||||||
// But we need to be careful with handling stdin for sudo + cat.
|
|
||||||
// We'll use a session with piped stdin.
|
|
||||||
|
|
||||||
session2, err := c.client.NewSession()
|
|
||||||
if err != nil {
|
|
||||||
return log.E("ssh.Upload", "new session for write", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = session2.Close() }()
|
|
||||||
|
|
||||||
stdin, err := session2.StdinPipe()
|
|
||||||
if err != nil {
|
|
||||||
return log.E("ssh.Upload", "stdin pipe", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var stderrBuf bytes.Buffer
|
|
||||||
session2.Stderr = &stderrBuf
|
|
||||||
|
|
||||||
if c.become {
|
|
||||||
becomeUser := c.becomeUser
|
|
||||||
if becomeUser == "" {
|
|
||||||
becomeUser = "root"
|
|
||||||
}
|
|
||||||
|
|
||||||
pass := c.becomePass
|
|
||||||
if pass == "" {
|
|
||||||
pass = c.password
|
|
||||||
}
|
|
||||||
|
|
||||||
if pass != "" {
|
|
||||||
// Use sudo -S with password from stdin
|
|
||||||
writeCmd = fmt.Sprintf("sudo -S -u %s bash -c 'cat > %q && chmod %o %q'",
|
|
||||||
becomeUser, remote, mode, remote)
|
|
||||||
} else {
|
|
||||||
// Use passwordless sudo (sudo -n) to avoid consuming file content as password
|
|
||||||
writeCmd = fmt.Sprintf("sudo -n -u %s bash -c 'cat > %q && chmod %o %q'",
|
|
||||||
becomeUser, remote, mode, remote)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := session2.Start(writeCmd); err != nil {
|
|
||||||
return log.E("ssh.Upload", "start write", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer func() { _ = stdin.Close() }()
|
|
||||||
if pass != "" {
|
|
||||||
_, _ = io.WriteString(stdin, pass+"\n")
|
|
||||||
}
|
|
||||||
_, _ = stdin.Write(content)
|
|
||||||
}()
|
|
||||||
} else {
|
|
||||||
// Normal write
|
|
||||||
if err := session2.Start(writeCmd); err != nil {
|
|
||||||
return log.E("ssh.Upload", "start write", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer func() { _ = stdin.Close() }()
|
|
||||||
_, _ = stdin.Write(content)
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := session2.Wait(); err != nil {
|
|
||||||
return log.E("ssh.Upload", fmt.Sprintf("write failed (stderr: %s)", stderrBuf.String()), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Download copies a file from the remote host.
|
|
||||||
func (c *SSHClient) Download(ctx context.Context, remote string) ([]byte, error) {
|
|
||||||
if err := c.Connect(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := fmt.Sprintf("cat %q", remote)
|
|
||||||
|
|
||||||
stdout, stderr, exitCode, err := c.Run(ctx, cmd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if exitCode != 0 {
|
|
||||||
return nil, log.E("ssh.Download", fmt.Sprintf("cat failed: %s", stderr), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return []byte(stdout), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileExists checks if a file exists on the remote host.
|
|
||||||
func (c *SSHClient) FileExists(ctx context.Context, path string) (bool, error) {
|
|
||||||
cmd := fmt.Sprintf("test -e %q && echo yes || echo no", path)
|
|
||||||
stdout, _, exitCode, err := c.Run(ctx, cmd)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
if exitCode != 0 {
|
|
||||||
// test command failed but didn't error - file doesn't exist
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(stdout) == "yes", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stat returns file info from the remote host.
|
|
||||||
func (c *SSHClient) Stat(ctx context.Context, path string) (map[string]any, error) {
|
|
||||||
// Simple approach - get basic file info
|
|
||||||
cmd := fmt.Sprintf(`
|
|
||||||
if [ -e %q ]; then
|
|
||||||
if [ -d %q ]; then
|
|
||||||
echo "exists=true isdir=true"
|
|
||||||
else
|
|
||||||
echo "exists=true isdir=false"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "exists=false"
|
|
||||||
fi
|
|
||||||
`, path, path)
|
|
||||||
|
|
||||||
stdout, _, _, err := c.Run(ctx, cmd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result := make(map[string]any)
|
|
||||||
parts := strings.Fields(strings.TrimSpace(stdout))
|
|
||||||
for _, part := range parts {
|
|
||||||
kv := strings.SplitN(part, "=", 2)
|
|
||||||
if len(kv) == 2 {
|
|
||||||
result[kv[0]] = kv[1] == "true"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBecome enables privilege escalation.
|
|
||||||
func (c *SSHClient) SetBecome(become bool, user, password string) {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.become = become
|
|
||||||
if user != "" {
|
|
||||||
c.becomeUser = user
|
|
||||||
}
|
|
||||||
if password != "" {
|
|
||||||
c.becomePass = password
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
package ansible
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNewSSHClient(t *testing.T) {
|
|
||||||
cfg := SSHConfig{
|
|
||||||
Host: "localhost",
|
|
||||||
Port: 2222,
|
|
||||||
User: "root",
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := NewSSHClient(cfg)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, client)
|
|
||||||
assert.Equal(t, "localhost", client.host)
|
|
||||||
assert.Equal(t, 2222, client.port)
|
|
||||||
assert.Equal(t, "root", client.user)
|
|
||||||
assert.Equal(t, 30*time.Second, client.timeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSSHConfig_Defaults(t *testing.T) {
|
|
||||||
cfg := SSHConfig{
|
|
||||||
Host: "localhost",
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := NewSSHClient(cfg)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 22, client.port)
|
|
||||||
assert.Equal(t, "root", client.user)
|
|
||||||
assert.Equal(t, 30*time.Second, client.timeout)
|
|
||||||
}
|
|
||||||
|
|
@ -1,258 +0,0 @@
|
||||||
package ansible
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Playbook represents an Ansible playbook.
|
|
||||||
type Playbook struct {
|
|
||||||
Plays []Play `yaml:",inline"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Play represents a single play in a playbook.
|
|
||||||
type Play struct {
|
|
||||||
Name string `yaml:"name"`
|
|
||||||
Hosts string `yaml:"hosts"`
|
|
||||||
Connection string `yaml:"connection,omitempty"`
|
|
||||||
Become bool `yaml:"become,omitempty"`
|
|
||||||
BecomeUser string `yaml:"become_user,omitempty"`
|
|
||||||
GatherFacts *bool `yaml:"gather_facts,omitempty"`
|
|
||||||
Vars map[string]any `yaml:"vars,omitempty"`
|
|
||||||
PreTasks []Task `yaml:"pre_tasks,omitempty"`
|
|
||||||
Tasks []Task `yaml:"tasks,omitempty"`
|
|
||||||
PostTasks []Task `yaml:"post_tasks,omitempty"`
|
|
||||||
Roles []RoleRef `yaml:"roles,omitempty"`
|
|
||||||
Handlers []Task `yaml:"handlers,omitempty"`
|
|
||||||
Tags []string `yaml:"tags,omitempty"`
|
|
||||||
Environment map[string]string `yaml:"environment,omitempty"`
|
|
||||||
Serial any `yaml:"serial,omitempty"` // int or string
|
|
||||||
MaxFailPercent int `yaml:"max_fail_percentage,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// RoleRef represents a role reference in a play.
|
|
||||||
type RoleRef struct {
|
|
||||||
Role string `yaml:"role,omitempty"`
|
|
||||||
Name string `yaml:"name,omitempty"` // Alternative to role
|
|
||||||
TasksFrom string `yaml:"tasks_from,omitempty"`
|
|
||||||
Vars map[string]any `yaml:"vars,omitempty"`
|
|
||||||
When any `yaml:"when,omitempty"`
|
|
||||||
Tags []string `yaml:"tags,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalYAML handles both string and struct role refs.
|
|
||||||
func (r *RoleRef) UnmarshalYAML(unmarshal func(any) error) error {
|
|
||||||
// Try string first
|
|
||||||
var s string
|
|
||||||
if err := unmarshal(&s); err == nil {
|
|
||||||
r.Role = s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try struct
|
|
||||||
type rawRoleRef RoleRef
|
|
||||||
var raw rawRoleRef
|
|
||||||
if err := unmarshal(&raw); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*r = RoleRef(raw)
|
|
||||||
if r.Role == "" && r.Name != "" {
|
|
||||||
r.Role = r.Name
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Task represents an Ansible task.
|
|
||||||
type Task struct {
|
|
||||||
Name string `yaml:"name,omitempty"`
|
|
||||||
Module string `yaml:"-"` // Derived from the module key
|
|
||||||
Args map[string]any `yaml:"-"` // Module arguments
|
|
||||||
Register string `yaml:"register,omitempty"`
|
|
||||||
When any `yaml:"when,omitempty"` // string or []string
|
|
||||||
Loop any `yaml:"loop,omitempty"` // string or []any
|
|
||||||
LoopControl *LoopControl `yaml:"loop_control,omitempty"`
|
|
||||||
Vars map[string]any `yaml:"vars,omitempty"`
|
|
||||||
Environment map[string]string `yaml:"environment,omitempty"`
|
|
||||||
ChangedWhen any `yaml:"changed_when,omitempty"`
|
|
||||||
FailedWhen any `yaml:"failed_when,omitempty"`
|
|
||||||
IgnoreErrors bool `yaml:"ignore_errors,omitempty"`
|
|
||||||
NoLog bool `yaml:"no_log,omitempty"`
|
|
||||||
Become *bool `yaml:"become,omitempty"`
|
|
||||||
BecomeUser string `yaml:"become_user,omitempty"`
|
|
||||||
Delegate string `yaml:"delegate_to,omitempty"`
|
|
||||||
RunOnce bool `yaml:"run_once,omitempty"`
|
|
||||||
Tags []string `yaml:"tags,omitempty"`
|
|
||||||
Block []Task `yaml:"block,omitempty"`
|
|
||||||
Rescue []Task `yaml:"rescue,omitempty"`
|
|
||||||
Always []Task `yaml:"always,omitempty"`
|
|
||||||
Notify any `yaml:"notify,omitempty"` // string or []string
|
|
||||||
Retries int `yaml:"retries,omitempty"`
|
|
||||||
Delay int `yaml:"delay,omitempty"`
|
|
||||||
Until string `yaml:"until,omitempty"`
|
|
||||||
|
|
||||||
// Include/import directives
|
|
||||||
IncludeTasks string `yaml:"include_tasks,omitempty"`
|
|
||||||
ImportTasks string `yaml:"import_tasks,omitempty"`
|
|
||||||
IncludeRole *struct {
|
|
||||||
Name string `yaml:"name"`
|
|
||||||
TasksFrom string `yaml:"tasks_from,omitempty"`
|
|
||||||
Vars map[string]any `yaml:"vars,omitempty"`
|
|
||||||
} `yaml:"include_role,omitempty"`
|
|
||||||
ImportRole *struct {
|
|
||||||
Name string `yaml:"name"`
|
|
||||||
TasksFrom string `yaml:"tasks_from,omitempty"`
|
|
||||||
Vars map[string]any `yaml:"vars,omitempty"`
|
|
||||||
} `yaml:"import_role,omitempty"`
|
|
||||||
|
|
||||||
// Raw YAML for module extraction
|
|
||||||
raw map[string]any
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoopControl controls loop behavior.
|
|
||||||
type LoopControl struct {
|
|
||||||
LoopVar string `yaml:"loop_var,omitempty"`
|
|
||||||
IndexVar string `yaml:"index_var,omitempty"`
|
|
||||||
Label string `yaml:"label,omitempty"`
|
|
||||||
Pause int `yaml:"pause,omitempty"`
|
|
||||||
Extended bool `yaml:"extended,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TaskResult holds the result of executing a task.
|
|
||||||
type TaskResult struct {
|
|
||||||
Changed bool `json:"changed"`
|
|
||||||
Failed bool `json:"failed"`
|
|
||||||
Skipped bool `json:"skipped"`
|
|
||||||
Msg string `json:"msg,omitempty"`
|
|
||||||
Stdout string `json:"stdout,omitempty"`
|
|
||||||
Stderr string `json:"stderr,omitempty"`
|
|
||||||
RC int `json:"rc,omitempty"`
|
|
||||||
Results []TaskResult `json:"results,omitempty"` // For loops
|
|
||||||
Data map[string]any `json:"data,omitempty"` // Module-specific data
|
|
||||||
Duration time.Duration `json:"duration,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inventory represents Ansible inventory.
|
|
||||||
type Inventory struct {
|
|
||||||
All *InventoryGroup `yaml:"all"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InventoryGroup represents a group in inventory.
|
|
||||||
type InventoryGroup struct {
|
|
||||||
Hosts map[string]*Host `yaml:"hosts,omitempty"`
|
|
||||||
Children map[string]*InventoryGroup `yaml:"children,omitempty"`
|
|
||||||
Vars map[string]any `yaml:"vars,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Host represents a host in inventory.
|
|
||||||
type Host struct {
|
|
||||||
AnsibleHost string `yaml:"ansible_host,omitempty"`
|
|
||||||
AnsiblePort int `yaml:"ansible_port,omitempty"`
|
|
||||||
AnsibleUser string `yaml:"ansible_user,omitempty"`
|
|
||||||
AnsiblePassword string `yaml:"ansible_password,omitempty"`
|
|
||||||
AnsibleSSHPrivateKeyFile string `yaml:"ansible_ssh_private_key_file,omitempty"`
|
|
||||||
AnsibleConnection string `yaml:"ansible_connection,omitempty"`
|
|
||||||
AnsibleBecomePassword string `yaml:"ansible_become_password,omitempty"`
|
|
||||||
|
|
||||||
// Custom vars
|
|
||||||
Vars map[string]any `yaml:",inline"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Facts holds gathered facts about a host.
|
|
||||||
type Facts struct {
|
|
||||||
Hostname string `json:"ansible_hostname"`
|
|
||||||
FQDN string `json:"ansible_fqdn"`
|
|
||||||
OS string `json:"ansible_os_family"`
|
|
||||||
Distribution string `json:"ansible_distribution"`
|
|
||||||
Version string `json:"ansible_distribution_version"`
|
|
||||||
Architecture string `json:"ansible_architecture"`
|
|
||||||
Kernel string `json:"ansible_kernel"`
|
|
||||||
Memory int64 `json:"ansible_memtotal_mb"`
|
|
||||||
CPUs int `json:"ansible_processor_vcpus"`
|
|
||||||
IPv4 string `json:"ansible_default_ipv4_address"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Known Ansible modules
|
|
||||||
var KnownModules = []string{
|
|
||||||
// Builtin
|
|
||||||
"ansible.builtin.shell",
|
|
||||||
"ansible.builtin.command",
|
|
||||||
"ansible.builtin.raw",
|
|
||||||
"ansible.builtin.script",
|
|
||||||
"ansible.builtin.copy",
|
|
||||||
"ansible.builtin.template",
|
|
||||||
"ansible.builtin.file",
|
|
||||||
"ansible.builtin.lineinfile",
|
|
||||||
"ansible.builtin.blockinfile",
|
|
||||||
"ansible.builtin.stat",
|
|
||||||
"ansible.builtin.slurp",
|
|
||||||
"ansible.builtin.fetch",
|
|
||||||
"ansible.builtin.get_url",
|
|
||||||
"ansible.builtin.uri",
|
|
||||||
"ansible.builtin.apt",
|
|
||||||
"ansible.builtin.apt_key",
|
|
||||||
"ansible.builtin.apt_repository",
|
|
||||||
"ansible.builtin.yum",
|
|
||||||
"ansible.builtin.dnf",
|
|
||||||
"ansible.builtin.package",
|
|
||||||
"ansible.builtin.pip",
|
|
||||||
"ansible.builtin.service",
|
|
||||||
"ansible.builtin.systemd",
|
|
||||||
"ansible.builtin.user",
|
|
||||||
"ansible.builtin.group",
|
|
||||||
"ansible.builtin.cron",
|
|
||||||
"ansible.builtin.git",
|
|
||||||
"ansible.builtin.unarchive",
|
|
||||||
"ansible.builtin.archive",
|
|
||||||
"ansible.builtin.debug",
|
|
||||||
"ansible.builtin.fail",
|
|
||||||
"ansible.builtin.assert",
|
|
||||||
"ansible.builtin.pause",
|
|
||||||
"ansible.builtin.wait_for",
|
|
||||||
"ansible.builtin.set_fact",
|
|
||||||
"ansible.builtin.include_vars",
|
|
||||||
"ansible.builtin.add_host",
|
|
||||||
"ansible.builtin.group_by",
|
|
||||||
"ansible.builtin.meta",
|
|
||||||
"ansible.builtin.setup",
|
|
||||||
|
|
||||||
// Short forms (legacy)
|
|
||||||
"shell",
|
|
||||||
"command",
|
|
||||||
"raw",
|
|
||||||
"script",
|
|
||||||
"copy",
|
|
||||||
"template",
|
|
||||||
"file",
|
|
||||||
"lineinfile",
|
|
||||||
"blockinfile",
|
|
||||||
"stat",
|
|
||||||
"slurp",
|
|
||||||
"fetch",
|
|
||||||
"get_url",
|
|
||||||
"uri",
|
|
||||||
"apt",
|
|
||||||
"apt_key",
|
|
||||||
"apt_repository",
|
|
||||||
"yum",
|
|
||||||
"dnf",
|
|
||||||
"package",
|
|
||||||
"pip",
|
|
||||||
"service",
|
|
||||||
"systemd",
|
|
||||||
"user",
|
|
||||||
"group",
|
|
||||||
"cron",
|
|
||||||
"git",
|
|
||||||
"unarchive",
|
|
||||||
"archive",
|
|
||||||
"debug",
|
|
||||||
"fail",
|
|
||||||
"assert",
|
|
||||||
"pause",
|
|
||||||
"wait_for",
|
|
||||||
"set_fact",
|
|
||||||
"include_vars",
|
|
||||||
"add_host",
|
|
||||||
"group_by",
|
|
||||||
"meta",
|
|
||||||
"setup",
|
|
||||||
}
|
|
||||||
455
pkg/auth/auth.go
455
pkg/auth/auth.go
|
|
@ -1,455 +0,0 @@
|
||||||
// Package auth implements OpenPGP challenge-response authentication with
|
|
||||||
// support for both online (HTTP) and air-gapped (file-based) transport.
|
|
||||||
//
|
|
||||||
// Ported from dAppServer's mod-auth/lethean.service.ts.
|
|
||||||
//
|
|
||||||
// Authentication Flow (Online):
|
|
||||||
//
|
|
||||||
// 1. Client sends public key to server
|
|
||||||
// 2. Server generates a random nonce, encrypts it with client's public key
|
|
||||||
// 3. Client decrypts the nonce and signs it with their private key
|
|
||||||
// 4. Server verifies the signature, creates a session token
|
|
||||||
//
|
|
||||||
// Authentication Flow (Air-Gapped / Courier):
|
|
||||||
//
|
|
||||||
// Same crypto but challenge/response are exchanged via files on a Medium.
|
|
||||||
//
|
|
||||||
// Storage Layout (via Medium):
|
|
||||||
//
|
|
||||||
// users/
|
|
||||||
// {userID}.pub PGP public key (armored)
|
|
||||||
// {userID}.key PGP private key (armored, password-encrypted)
|
|
||||||
// {userID}.rev Revocation certificate (placeholder)
|
|
||||||
// {userID}.json User metadata (encrypted with user's public key)
|
|
||||||
// {userID}.lthn LTHN password hash
|
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
coreerr "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/crypt/lthn"
|
|
||||||
"forge.lthn.ai/core/go/pkg/crypt/pgp"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Default durations for challenge and session lifetimes.
|
|
||||||
const (
|
|
||||||
DefaultChallengeTTL = 5 * time.Minute
|
|
||||||
DefaultSessionTTL = 24 * time.Hour
|
|
||||||
nonceBytes = 32
|
|
||||||
)
|
|
||||||
|
|
||||||
// protectedUsers lists usernames that cannot be deleted.
|
|
||||||
// The "server" user holds the server keypair; deleting it would
|
|
||||||
// permanently destroy all joining data and require a full rebuild.
|
|
||||||
var protectedUsers = map[string]bool{
|
|
||||||
"server": true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// User represents a registered user with PGP credentials.
|
|
||||||
type User struct {
|
|
||||||
PublicKey string `json:"public_key"`
|
|
||||||
KeyID string `json:"key_id"`
|
|
||||||
Fingerprint string `json:"fingerprint"`
|
|
||||||
PasswordHash string `json:"password_hash"` // LTHN hash
|
|
||||||
Created time.Time `json:"created"`
|
|
||||||
LastLogin time.Time `json:"last_login"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Challenge is a PGP-encrypted nonce sent to a client during authentication.
|
|
||||||
type Challenge struct {
|
|
||||||
Nonce []byte `json:"nonce"`
|
|
||||||
Encrypted string `json:"encrypted"` // PGP-encrypted nonce (armored)
|
|
||||||
ExpiresAt time.Time `json:"expires_at"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Session represents an authenticated session.
|
|
||||||
type Session struct {
|
|
||||||
Token string `json:"token"`
|
|
||||||
UserID string `json:"user_id"`
|
|
||||||
ExpiresAt time.Time `json:"expires_at"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Option configures an Authenticator.
|
|
||||||
type Option func(*Authenticator)
|
|
||||||
|
|
||||||
// WithChallengeTTL sets the lifetime of a challenge before it expires.
|
|
||||||
func WithChallengeTTL(d time.Duration) Option {
|
|
||||||
return func(a *Authenticator) {
|
|
||||||
a.challengeTTL = d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithSessionTTL sets the lifetime of a session before it expires.
|
|
||||||
func WithSessionTTL(d time.Duration) Option {
|
|
||||||
return func(a *Authenticator) {
|
|
||||||
a.sessionTTL = d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Authenticator manages PGP-based challenge-response authentication.
|
|
||||||
// All user data and keys are persisted through an io.Medium, which may
|
|
||||||
// be backed by disk, memory (MockMedium), or any other storage backend.
|
|
||||||
type Authenticator struct {
|
|
||||||
medium io.Medium
|
|
||||||
sessions map[string]*Session
|
|
||||||
challenges map[string]*Challenge // userID -> pending challenge
|
|
||||||
mu sync.RWMutex
|
|
||||||
challengeTTL time.Duration
|
|
||||||
sessionTTL time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates an Authenticator that persists user data via the given Medium.
|
|
||||||
func New(m io.Medium, opts ...Option) *Authenticator {
|
|
||||||
a := &Authenticator{
|
|
||||||
medium: m,
|
|
||||||
sessions: make(map[string]*Session),
|
|
||||||
challenges: make(map[string]*Challenge),
|
|
||||||
challengeTTL: DefaultChallengeTTL,
|
|
||||||
sessionTTL: DefaultSessionTTL,
|
|
||||||
}
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(a)
|
|
||||||
}
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
// userPath returns the storage path for a user artifact.
|
|
||||||
func userPath(userID, ext string) string {
|
|
||||||
return "users/" + userID + ext
|
|
||||||
}
|
|
||||||
|
|
||||||
// Register creates a new user account. It hashes the username with LTHN to
|
|
||||||
// produce a userID, generates a PGP keypair (protected by the given password),
|
|
||||||
// and persists the public key, private key, revocation placeholder, password
|
|
||||||
// hash, and encrypted metadata via the Medium.
|
|
||||||
func (a *Authenticator) Register(username, password string) (*User, error) {
|
|
||||||
const op = "auth.Register"
|
|
||||||
|
|
||||||
userID := lthn.Hash(username)
|
|
||||||
|
|
||||||
// Check if user already exists
|
|
||||||
if a.medium.IsFile(userPath(userID, ".pub")) {
|
|
||||||
return nil, coreerr.E(op, "user already exists", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure users directory exists
|
|
||||||
if err := a.medium.EnsureDir("users"); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to create users directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate PGP keypair
|
|
||||||
kp, err := pgp.CreateKeyPair(userID, userID+"@auth.local", password)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to create PGP keypair", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store public key
|
|
||||||
if err := a.medium.Write(userPath(userID, ".pub"), kp.PublicKey); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to write public key", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store private key (already encrypted by PGP if password is non-empty)
|
|
||||||
if err := a.medium.Write(userPath(userID, ".key"), kp.PrivateKey); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to write private key", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store revocation certificate placeholder
|
|
||||||
if err := a.medium.Write(userPath(userID, ".rev"), "REVOCATION_PLACEHOLDER"); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to write revocation certificate", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store LTHN password hash
|
|
||||||
passwordHash := lthn.Hash(password)
|
|
||||||
if err := a.medium.Write(userPath(userID, ".lthn"), passwordHash); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to write password hash", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build user metadata
|
|
||||||
now := time.Now()
|
|
||||||
user := &User{
|
|
||||||
PublicKey: kp.PublicKey,
|
|
||||||
KeyID: userID,
|
|
||||||
Fingerprint: lthn.Hash(kp.PublicKey),
|
|
||||||
PasswordHash: passwordHash,
|
|
||||||
Created: now,
|
|
||||||
LastLogin: time.Time{},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt metadata with the user's public key and store
|
|
||||||
metaJSON, err := json.Marshal(user)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to marshal user metadata", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
encMeta, err := pgp.Encrypt(metaJSON, kp.PublicKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to encrypt user metadata", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := a.medium.Write(userPath(userID, ".json"), string(encMeta)); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to write user metadata", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return user, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateChallenge generates a cryptographic challenge for the given user.
|
|
||||||
// A random nonce is created and encrypted with the user's PGP public key.
|
|
||||||
// The client must decrypt the nonce and sign it to prove key ownership.
|
|
||||||
func (a *Authenticator) CreateChallenge(userID string) (*Challenge, error) {
|
|
||||||
const op = "auth.CreateChallenge"
|
|
||||||
|
|
||||||
// Read user's public key
|
|
||||||
pubKey, err := a.medium.Read(userPath(userID, ".pub"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "user not found", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate random nonce
|
|
||||||
nonce := make([]byte, nonceBytes)
|
|
||||||
if _, err := rand.Read(nonce); err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to generate nonce", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt nonce with user's public key
|
|
||||||
encrypted, err := pgp.Encrypt(nonce, pubKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to encrypt nonce", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
challenge := &Challenge{
|
|
||||||
Nonce: nonce,
|
|
||||||
Encrypted: string(encrypted),
|
|
||||||
ExpiresAt: time.Now().Add(a.challengeTTL),
|
|
||||||
}
|
|
||||||
|
|
||||||
a.mu.Lock()
|
|
||||||
a.challenges[userID] = challenge
|
|
||||||
a.mu.Unlock()
|
|
||||||
|
|
||||||
return challenge, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateResponse verifies a signed nonce from the client. The client must
|
|
||||||
// have decrypted the challenge nonce and signed it with their private key.
|
|
||||||
// On success, a new session is created and returned.
|
|
||||||
func (a *Authenticator) ValidateResponse(userID string, signedNonce []byte) (*Session, error) {
|
|
||||||
const op = "auth.ValidateResponse"
|
|
||||||
|
|
||||||
a.mu.Lock()
|
|
||||||
challenge, exists := a.challenges[userID]
|
|
||||||
if exists {
|
|
||||||
delete(a.challenges, userID)
|
|
||||||
}
|
|
||||||
a.mu.Unlock()
|
|
||||||
|
|
||||||
if !exists {
|
|
||||||
return nil, coreerr.E(op, "no pending challenge for user", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check challenge expiry
|
|
||||||
if time.Now().After(challenge.ExpiresAt) {
|
|
||||||
return nil, coreerr.E(op, "challenge expired", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read user's public key
|
|
||||||
pubKey, err := a.medium.Read(userPath(userID, ".pub"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "user not found", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify signature over the original nonce
|
|
||||||
if err := pgp.Verify(challenge.Nonce, signedNonce, pubKey); err != nil {
|
|
||||||
return nil, coreerr.E(op, "signature verification failed", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return a.createSession(userID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateSession checks whether a token maps to a valid, non-expired session.
|
|
||||||
func (a *Authenticator) ValidateSession(token string) (*Session, error) {
|
|
||||||
const op = "auth.ValidateSession"
|
|
||||||
|
|
||||||
a.mu.RLock()
|
|
||||||
session, exists := a.sessions[token]
|
|
||||||
a.mu.RUnlock()
|
|
||||||
|
|
||||||
if !exists {
|
|
||||||
return nil, coreerr.E(op, "session not found", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if time.Now().After(session.ExpiresAt) {
|
|
||||||
a.mu.Lock()
|
|
||||||
delete(a.sessions, token)
|
|
||||||
a.mu.Unlock()
|
|
||||||
return nil, coreerr.E(op, "session expired", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return session, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RefreshSession extends the expiry of an existing valid session.
|
|
||||||
func (a *Authenticator) RefreshSession(token string) (*Session, error) {
|
|
||||||
const op = "auth.RefreshSession"
|
|
||||||
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
|
|
||||||
session, exists := a.sessions[token]
|
|
||||||
if !exists {
|
|
||||||
return nil, coreerr.E(op, "session not found", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if time.Now().After(session.ExpiresAt) {
|
|
||||||
delete(a.sessions, token)
|
|
||||||
return nil, coreerr.E(op, "session expired", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
session.ExpiresAt = time.Now().Add(a.sessionTTL)
|
|
||||||
return session, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RevokeSession removes a session, invalidating the token immediately.
|
|
||||||
func (a *Authenticator) RevokeSession(token string) error {
|
|
||||||
const op = "auth.RevokeSession"
|
|
||||||
|
|
||||||
a.mu.Lock()
|
|
||||||
defer a.mu.Unlock()
|
|
||||||
|
|
||||||
if _, exists := a.sessions[token]; !exists {
|
|
||||||
return coreerr.E(op, "session not found", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(a.sessions, token)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteUser removes a user and all associated keys from storage.
|
|
||||||
// The "server" user is protected and cannot be deleted (mirroring the
|
|
||||||
// original TypeScript implementation's safeguard).
|
|
||||||
func (a *Authenticator) DeleteUser(userID string) error {
|
|
||||||
const op = "auth.DeleteUser"
|
|
||||||
|
|
||||||
// Protect special users
|
|
||||||
if protectedUsers[userID] {
|
|
||||||
return coreerr.E(op, "cannot delete protected user", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check user exists
|
|
||||||
if !a.medium.IsFile(userPath(userID, ".pub")) {
|
|
||||||
return coreerr.E(op, "user not found", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove all artifacts
|
|
||||||
extensions := []string{".pub", ".key", ".rev", ".json", ".lthn"}
|
|
||||||
for _, ext := range extensions {
|
|
||||||
p := userPath(userID, ext)
|
|
||||||
if a.medium.IsFile(p) {
|
|
||||||
if err := a.medium.Delete(p); err != nil {
|
|
||||||
return coreerr.E(op, "failed to delete "+ext, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Revoke any active sessions for this user
|
|
||||||
a.mu.Lock()
|
|
||||||
for token, session := range a.sessions {
|
|
||||||
if session.UserID == userID {
|
|
||||||
delete(a.sessions, token)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
a.mu.Unlock()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Login performs password-based authentication as a convenience method.
|
|
||||||
// It verifies the password against the stored LTHN hash and, on success,
|
|
||||||
// creates a new session. This bypasses the PGP challenge-response flow.
|
|
||||||
func (a *Authenticator) Login(userID, password string) (*Session, error) {
|
|
||||||
const op = "auth.Login"
|
|
||||||
|
|
||||||
// Read stored password hash
|
|
||||||
storedHash, err := a.medium.Read(userPath(userID, ".lthn"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "user not found", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify password
|
|
||||||
if !lthn.Verify(password, storedHash) {
|
|
||||||
return nil, coreerr.E(op, "invalid password", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return a.createSession(userID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteChallengeFile writes an encrypted challenge to a file for air-gapped
|
|
||||||
// (courier) transport. The challenge is created and then its encrypted nonce
|
|
||||||
// is written to the specified path on the Medium.
|
|
||||||
func (a *Authenticator) WriteChallengeFile(userID, path string) error {
|
|
||||||
const op = "auth.WriteChallengeFile"
|
|
||||||
|
|
||||||
challenge, err := a.CreateChallenge(userID)
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E(op, "failed to create challenge", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := json.Marshal(challenge)
|
|
||||||
if err != nil {
|
|
||||||
return coreerr.E(op, "failed to marshal challenge", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := a.medium.Write(path, string(data)); err != nil {
|
|
||||||
return coreerr.E(op, "failed to write challenge file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadResponseFile reads a signed response from a file and validates it,
|
|
||||||
// completing the air-gapped authentication flow. The file must contain the
|
|
||||||
// raw PGP signature bytes (armored).
|
|
||||||
func (a *Authenticator) ReadResponseFile(userID, path string) (*Session, error) {
|
|
||||||
const op = "auth.ReadResponseFile"
|
|
||||||
|
|
||||||
content, err := a.medium.Read(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to read response file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
session, err := a.ValidateResponse(userID, []byte(content))
|
|
||||||
if err != nil {
|
|
||||||
return nil, coreerr.E(op, "failed to validate response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return session, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// createSession generates a cryptographically random session token and
|
|
||||||
// stores the session in the in-memory session map.
|
|
||||||
func (a *Authenticator) createSession(userID string) (*Session, error) {
|
|
||||||
tokenBytes := make([]byte, 32)
|
|
||||||
if _, err := rand.Read(tokenBytes); err != nil {
|
|
||||||
return nil, fmt.Errorf("auth: failed to generate session token: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
session := &Session{
|
|
||||||
Token: hex.EncodeToString(tokenBytes),
|
|
||||||
UserID: userID,
|
|
||||||
ExpiresAt: time.Now().Add(a.sessionTTL),
|
|
||||||
}
|
|
||||||
|
|
||||||
a.mu.Lock()
|
|
||||||
a.sessions[session.Token] = session
|
|
||||||
a.mu.Unlock()
|
|
||||||
|
|
||||||
return session, nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,581 +0,0 @@
|
||||||
package auth
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/crypt/lthn"
|
|
||||||
"forge.lthn.ai/core/go/pkg/crypt/pgp"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// helper creates a fresh Authenticator backed by MockMedium.
|
|
||||||
func newTestAuth(opts ...Option) (*Authenticator, *io.MockMedium) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
a := New(m, opts...)
|
|
||||||
return a, m
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Register ---
|
|
||||||
|
|
||||||
func TestRegister_Good(t *testing.T) {
|
|
||||||
a, m := newTestAuth()
|
|
||||||
|
|
||||||
user, err := a.Register("alice", "hunter2")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, user)
|
|
||||||
|
|
||||||
userID := lthn.Hash("alice")
|
|
||||||
|
|
||||||
// Verify public key is stored
|
|
||||||
assert.True(t, m.IsFile(userPath(userID, ".pub")))
|
|
||||||
assert.True(t, m.IsFile(userPath(userID, ".key")))
|
|
||||||
assert.True(t, m.IsFile(userPath(userID, ".rev")))
|
|
||||||
assert.True(t, m.IsFile(userPath(userID, ".json")))
|
|
||||||
assert.True(t, m.IsFile(userPath(userID, ".lthn")))
|
|
||||||
|
|
||||||
// Verify user fields
|
|
||||||
assert.NotEmpty(t, user.PublicKey)
|
|
||||||
assert.Equal(t, userID, user.KeyID)
|
|
||||||
assert.NotEmpty(t, user.Fingerprint)
|
|
||||||
assert.Equal(t, lthn.Hash("hunter2"), user.PasswordHash)
|
|
||||||
assert.False(t, user.Created.IsZero())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRegister_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Register first time succeeds
|
|
||||||
_, err := a.Register("bob", "pass1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Duplicate registration should fail
|
|
||||||
_, err = a.Register("bob", "pass2")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "user already exists")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRegister_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Empty username/password should still work (PGP allows it)
|
|
||||||
user, err := a.Register("", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, user)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- CreateChallenge ---
|
|
||||||
|
|
||||||
func TestCreateChallenge_Good(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
user, err := a.Register("charlie", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
challenge, err := a.CreateChallenge(user.KeyID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, challenge)
|
|
||||||
|
|
||||||
assert.Len(t, challenge.Nonce, nonceBytes)
|
|
||||||
assert.NotEmpty(t, challenge.Encrypted)
|
|
||||||
assert.True(t, challenge.ExpiresAt.After(time.Now()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateChallenge_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Challenge for non-existent user
|
|
||||||
_, err := a.CreateChallenge("nonexistent-user-id")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "user not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateChallenge_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Empty userID
|
|
||||||
_, err := a.CreateChallenge("")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- ValidateResponse (full challenge-response flow) ---
|
|
||||||
|
|
||||||
func TestValidateResponse_Good(t *testing.T) {
|
|
||||||
a, m := newTestAuth()
|
|
||||||
|
|
||||||
// Register user
|
|
||||||
_, err := a.Register("dave", "password123")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
userID := lthn.Hash("dave")
|
|
||||||
|
|
||||||
// Create challenge
|
|
||||||
challenge, err := a.CreateChallenge(userID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Client-side: decrypt nonce, then sign it
|
|
||||||
privKey, err := m.Read(userPath(userID, ".key"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
decryptedNonce, err := pgp.Decrypt([]byte(challenge.Encrypted), privKey, "password123")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, challenge.Nonce, decryptedNonce)
|
|
||||||
|
|
||||||
signedNonce, err := pgp.Sign(decryptedNonce, privKey, "password123")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Validate response
|
|
||||||
session, err := a.ValidateResponse(userID, signedNonce)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, session)
|
|
||||||
|
|
||||||
assert.NotEmpty(t, session.Token)
|
|
||||||
assert.Equal(t, userID, session.UserID)
|
|
||||||
assert.True(t, session.ExpiresAt.After(time.Now()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateResponse_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("eve", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("eve")
|
|
||||||
|
|
||||||
// No pending challenge
|
|
||||||
_, err = a.ValidateResponse(userID, []byte("fake-signature"))
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "no pending challenge")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateResponse_Ugly(t *testing.T) {
|
|
||||||
a, m := newTestAuth(WithChallengeTTL(1 * time.Millisecond))
|
|
||||||
|
|
||||||
_, err := a.Register("frank", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("frank")
|
|
||||||
|
|
||||||
// Create challenge and let it expire
|
|
||||||
challenge, err := a.CreateChallenge(userID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
time.Sleep(5 * time.Millisecond)
|
|
||||||
|
|
||||||
// Sign with valid key but expired challenge
|
|
||||||
privKey, err := m.Read(userPath(userID, ".key"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
signedNonce, err := pgp.Sign(challenge.Nonce, privKey, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = a.ValidateResponse(userID, signedNonce)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "challenge expired")
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- ValidateSession ---
|
|
||||||
|
|
||||||
func TestValidateSession_Good(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("grace", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("grace")
|
|
||||||
|
|
||||||
session, err := a.Login(userID, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
validated, err := a.ValidateSession(session.Token)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, session.Token, validated.Token)
|
|
||||||
assert.Equal(t, userID, validated.UserID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateSession_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.ValidateSession("nonexistent-token")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "session not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateSession_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth(WithSessionTTL(1 * time.Millisecond))
|
|
||||||
|
|
||||||
_, err := a.Register("heidi", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("heidi")
|
|
||||||
|
|
||||||
session, err := a.Login(userID, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
time.Sleep(5 * time.Millisecond)
|
|
||||||
|
|
||||||
_, err = a.ValidateSession(session.Token)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "session expired")
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- RefreshSession ---
|
|
||||||
|
|
||||||
func TestRefreshSession_Good(t *testing.T) {
|
|
||||||
a, _ := newTestAuth(WithSessionTTL(1 * time.Hour))
|
|
||||||
|
|
||||||
_, err := a.Register("ivan", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("ivan")
|
|
||||||
|
|
||||||
session, err := a.Login(userID, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
originalExpiry := session.ExpiresAt
|
|
||||||
|
|
||||||
// Small delay to ensure time moves forward
|
|
||||||
time.Sleep(2 * time.Millisecond)
|
|
||||||
|
|
||||||
refreshed, err := a.RefreshSession(session.Token)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, refreshed.ExpiresAt.After(originalExpiry))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRefreshSession_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.RefreshSession("nonexistent-token")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "session not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRefreshSession_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth(WithSessionTTL(1 * time.Millisecond))
|
|
||||||
|
|
||||||
_, err := a.Register("judy", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("judy")
|
|
||||||
|
|
||||||
session, err := a.Login(userID, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
time.Sleep(5 * time.Millisecond)
|
|
||||||
|
|
||||||
_, err = a.RefreshSession(session.Token)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "session expired")
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- RevokeSession ---
|
|
||||||
|
|
||||||
func TestRevokeSession_Good(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("karl", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("karl")
|
|
||||||
|
|
||||||
session, err := a.Login(userID, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = a.RevokeSession(session.Token)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Token should no longer be valid
|
|
||||||
_, err = a.ValidateSession(session.Token)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRevokeSession_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
err := a.RevokeSession("nonexistent-token")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "session not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRevokeSession_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Revoke empty token
|
|
||||||
err := a.RevokeSession("")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- DeleteUser ---
|
|
||||||
|
|
||||||
func TestDeleteUser_Good(t *testing.T) {
|
|
||||||
a, m := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("larry", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("larry")
|
|
||||||
|
|
||||||
// Also create a session that should be cleaned up
|
|
||||||
_, err = a.Login(userID, "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = a.DeleteUser(userID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// All files should be gone
|
|
||||||
assert.False(t, m.IsFile(userPath(userID, ".pub")))
|
|
||||||
assert.False(t, m.IsFile(userPath(userID, ".key")))
|
|
||||||
assert.False(t, m.IsFile(userPath(userID, ".rev")))
|
|
||||||
assert.False(t, m.IsFile(userPath(userID, ".json")))
|
|
||||||
assert.False(t, m.IsFile(userPath(userID, ".lthn")))
|
|
||||||
|
|
||||||
// Session should be gone
|
|
||||||
a.mu.RLock()
|
|
||||||
sessionCount := 0
|
|
||||||
for _, s := range a.sessions {
|
|
||||||
if s.UserID == userID {
|
|
||||||
sessionCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
a.mu.RUnlock()
|
|
||||||
assert.Equal(t, 0, sessionCount)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDeleteUser_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Protected user "server" cannot be deleted
|
|
||||||
err := a.DeleteUser("server")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "cannot delete protected user")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDeleteUser_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Non-existent user
|
|
||||||
err := a.DeleteUser("nonexistent-user-id")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "user not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Login ---
|
|
||||||
|
|
||||||
func TestLogin_Good(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("mallory", "secret")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("mallory")
|
|
||||||
|
|
||||||
session, err := a.Login(userID, "secret")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, session)
|
|
||||||
|
|
||||||
assert.NotEmpty(t, session.Token)
|
|
||||||
assert.Equal(t, userID, session.UserID)
|
|
||||||
assert.True(t, session.ExpiresAt.After(time.Now()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLogin_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("nancy", "correct-password")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("nancy")
|
|
||||||
|
|
||||||
// Wrong password
|
|
||||||
_, err = a.Login(userID, "wrong-password")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "invalid password")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLogin_Ugly(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Login for non-existent user
|
|
||||||
_, err := a.Login("nonexistent-user-id", "pass")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "user not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- WriteChallengeFile / ReadResponseFile (Air-Gapped) ---
|
|
||||||
|
|
||||||
func TestAirGappedFlow_Good(t *testing.T) {
|
|
||||||
a, m := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("oscar", "airgap-pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("oscar")
|
|
||||||
|
|
||||||
// Write challenge to file
|
|
||||||
challengePath := "transfer/challenge.json"
|
|
||||||
err = a.WriteChallengeFile(userID, challengePath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, m.IsFile(challengePath))
|
|
||||||
|
|
||||||
// Read challenge file to get the encrypted nonce (simulating courier)
|
|
||||||
challengeData, err := m.Read(challengePath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var challenge Challenge
|
|
||||||
err = json.Unmarshal([]byte(challengeData), &challenge)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Client-side: decrypt nonce and sign it
|
|
||||||
privKey, err := m.Read(userPath(userID, ".key"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
decryptedNonce, err := pgp.Decrypt([]byte(challenge.Encrypted), privKey, "airgap-pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
signedNonce, err := pgp.Sign(decryptedNonce, privKey, "airgap-pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Write signed response to file
|
|
||||||
responsePath := "transfer/response.sig"
|
|
||||||
err = m.Write(responsePath, string(signedNonce))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Server reads response file
|
|
||||||
session, err := a.ReadResponseFile(userID, responsePath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, session)
|
|
||||||
|
|
||||||
assert.NotEmpty(t, session.Token)
|
|
||||||
assert.Equal(t, userID, session.UserID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteChallengeFile_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Challenge for non-existent user
|
|
||||||
err := a.WriteChallengeFile("nonexistent-user", "challenge.json")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadResponseFile_Bad(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
// Response file does not exist
|
|
||||||
_, err := a.ReadResponseFile("some-user", "nonexistent-file.sig")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadResponseFile_Ugly(t *testing.T) {
|
|
||||||
a, m := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("peggy", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("peggy")
|
|
||||||
|
|
||||||
// Create a challenge
|
|
||||||
_, err = a.CreateChallenge(userID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Write garbage to response file
|
|
||||||
responsePath := "transfer/bad-response.sig"
|
|
||||||
err = m.Write(responsePath, "not-a-valid-signature")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = a.ReadResponseFile(userID, responsePath)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Options ---
|
|
||||||
|
|
||||||
func TestWithChallengeTTL_Good(t *testing.T) {
|
|
||||||
ttl := 30 * time.Second
|
|
||||||
a, _ := newTestAuth(WithChallengeTTL(ttl))
|
|
||||||
assert.Equal(t, ttl, a.challengeTTL)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWithSessionTTL_Good(t *testing.T) {
|
|
||||||
ttl := 2 * time.Hour
|
|
||||||
a, _ := newTestAuth(WithSessionTTL(ttl))
|
|
||||||
assert.Equal(t, ttl, a.sessionTTL)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Full Round-Trip (Online Flow) ---
|
|
||||||
|
|
||||||
func TestFullRoundTrip_Good(t *testing.T) {
|
|
||||||
a, m := newTestAuth()
|
|
||||||
|
|
||||||
// 1. Register
|
|
||||||
user, err := a.Register("quinn", "roundtrip-pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, user)
|
|
||||||
|
|
||||||
userID := lthn.Hash("quinn")
|
|
||||||
|
|
||||||
// 2. Create challenge
|
|
||||||
challenge, err := a.CreateChallenge(userID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// 3. Client decrypts + signs
|
|
||||||
privKey, err := m.Read(userPath(userID, ".key"))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
nonce, err := pgp.Decrypt([]byte(challenge.Encrypted), privKey, "roundtrip-pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
sig, err := pgp.Sign(nonce, privKey, "roundtrip-pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// 4. Server validates, issues session
|
|
||||||
session, err := a.ValidateResponse(userID, sig)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, session)
|
|
||||||
|
|
||||||
// 5. Validate session
|
|
||||||
validated, err := a.ValidateSession(session.Token)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, session.Token, validated.Token)
|
|
||||||
|
|
||||||
// 6. Refresh session
|
|
||||||
refreshed, err := a.RefreshSession(session.Token)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, session.Token, refreshed.Token)
|
|
||||||
|
|
||||||
// 7. Revoke session
|
|
||||||
err = a.RevokeSession(session.Token)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// 8. Session should be invalid now
|
|
||||||
_, err = a.ValidateSession(session.Token)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// --- Concurrent Access ---
|
|
||||||
|
|
||||||
func TestConcurrentSessions_Good(t *testing.T) {
|
|
||||||
a, _ := newTestAuth()
|
|
||||||
|
|
||||||
_, err := a.Register("ruth", "pass")
|
|
||||||
require.NoError(t, err)
|
|
||||||
userID := lthn.Hash("ruth")
|
|
||||||
|
|
||||||
// Create multiple sessions concurrently
|
|
||||||
const n = 10
|
|
||||||
sessions := make(chan *Session, n)
|
|
||||||
errs := make(chan error, n)
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
go func() {
|
|
||||||
s, err := a.Login(userID, "pass")
|
|
||||||
if err != nil {
|
|
||||||
errs <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sessions <- s
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
select {
|
|
||||||
case s := <-sessions:
|
|
||||||
require.NotNil(t, s)
|
|
||||||
// Validate each session
|
|
||||||
_, err := a.ValidateSession(s.Token)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
case err := <-errs:
|
|
||||||
t.Fatalf("concurrent login failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,297 +0,0 @@
|
||||||
// Package build provides project type detection and cross-compilation for the Core build system.
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"archive/zip"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/Snider/Borg/pkg/compress"
|
|
||||||
io_interface "forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ArchiveFormat specifies the compression format for archives.
|
|
||||||
type ArchiveFormat string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ArchiveFormatGzip uses tar.gz (gzip compression) - widely compatible.
|
|
||||||
ArchiveFormatGzip ArchiveFormat = "gz"
|
|
||||||
// ArchiveFormatXZ uses tar.xz (xz/LZMA2 compression) - better compression ratio.
|
|
||||||
ArchiveFormatXZ ArchiveFormat = "xz"
|
|
||||||
// ArchiveFormatZip uses zip - for Windows.
|
|
||||||
ArchiveFormatZip ArchiveFormat = "zip"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Archive creates an archive for a single artifact using gzip compression.
|
|
||||||
// Uses tar.gz for linux/darwin and zip for windows.
|
|
||||||
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.gz).
|
|
||||||
// Returns a new Artifact with Path pointing to the archive.
|
|
||||||
func Archive(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
|
|
||||||
return ArchiveWithFormat(fs, artifact, ArchiveFormatGzip)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchiveXZ creates an archive for a single artifact using xz compression.
|
|
||||||
// Uses tar.xz for linux/darwin and zip for windows.
|
|
||||||
// Returns a new Artifact with Path pointing to the archive.
|
|
||||||
func ArchiveXZ(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
|
|
||||||
return ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchiveWithFormat creates an archive for a single artifact with the specified format.
|
|
||||||
// Uses tar.gz or tar.xz for linux/darwin and zip for windows.
|
|
||||||
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.xz).
|
|
||||||
// Returns a new Artifact with Path pointing to the archive.
|
|
||||||
func ArchiveWithFormat(fs io_interface.Medium, artifact Artifact, format ArchiveFormat) (Artifact, error) {
|
|
||||||
if artifact.Path == "" {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Archive: artifact path is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify the source file exists
|
|
||||||
info, err := fs.Stat(artifact.Path)
|
|
||||||
if err != nil {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Archive: source file not found: %w", err)
|
|
||||||
}
|
|
||||||
if info.IsDir() {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Archive: source path is a directory, expected file")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine archive type based on OS and format
|
|
||||||
var archivePath string
|
|
||||||
var archiveFunc func(fs io_interface.Medium, src, dst string) error
|
|
||||||
|
|
||||||
if artifact.OS == "windows" {
|
|
||||||
archivePath = archiveFilename(artifact, ".zip")
|
|
||||||
archiveFunc = createZipArchive
|
|
||||||
} else {
|
|
||||||
switch format {
|
|
||||||
case ArchiveFormatXZ:
|
|
||||||
archivePath = archiveFilename(artifact, ".tar.xz")
|
|
||||||
archiveFunc = createTarXzArchive
|
|
||||||
default:
|
|
||||||
archivePath = archiveFilename(artifact, ".tar.gz")
|
|
||||||
archiveFunc = createTarGzArchive
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the archive
|
|
||||||
if err := archiveFunc(fs, artifact.Path, archivePath); err != nil {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Archive: failed to create archive: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return Artifact{
|
|
||||||
Path: archivePath,
|
|
||||||
OS: artifact.OS,
|
|
||||||
Arch: artifact.Arch,
|
|
||||||
Checksum: artifact.Checksum,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchiveAll archives all artifacts using gzip compression.
|
|
||||||
// Returns a slice of new artifacts pointing to the archives.
|
|
||||||
func ArchiveAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
|
|
||||||
return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatGzip)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchiveAllXZ archives all artifacts using xz compression.
|
|
||||||
// Returns a slice of new artifacts pointing to the archives.
|
|
||||||
func ArchiveAllXZ(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
|
|
||||||
return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatXZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArchiveAllWithFormat archives all artifacts with the specified format.
|
|
||||||
// Returns a slice of new artifacts pointing to the archives.
|
|
||||||
func ArchiveAllWithFormat(fs io_interface.Medium, artifacts []Artifact, format ArchiveFormat) ([]Artifact, error) {
|
|
||||||
if len(artifacts) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var archived []Artifact
|
|
||||||
for _, artifact := range artifacts {
|
|
||||||
arch, err := ArchiveWithFormat(fs, artifact, format)
|
|
||||||
if err != nil {
|
|
||||||
return archived, fmt.Errorf("build.ArchiveAll: failed to archive %s: %w", artifact.Path, err)
|
|
||||||
}
|
|
||||||
archived = append(archived, arch)
|
|
||||||
}
|
|
||||||
|
|
||||||
return archived, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// archiveFilename generates the archive filename based on the artifact and extension.
|
|
||||||
// Format: dist/myapp_linux_amd64.tar.gz (binary name taken from artifact path).
|
|
||||||
func archiveFilename(artifact Artifact, ext string) string {
|
|
||||||
// Get the directory containing the binary (e.g., dist/linux_amd64)
|
|
||||||
dir := filepath.Dir(artifact.Path)
|
|
||||||
// Go up one level to the output directory (e.g., dist)
|
|
||||||
outputDir := filepath.Dir(dir)
|
|
||||||
|
|
||||||
// Get the binary name without extension
|
|
||||||
binaryName := filepath.Base(artifact.Path)
|
|
||||||
binaryName = strings.TrimSuffix(binaryName, ".exe")
|
|
||||||
|
|
||||||
// Construct archive name: myapp_linux_amd64.tar.gz
|
|
||||||
archiveName := fmt.Sprintf("%s_%s_%s%s", binaryName, artifact.OS, artifact.Arch, ext)
|
|
||||||
|
|
||||||
return filepath.Join(outputDir, archiveName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// createTarXzArchive creates a tar.xz archive containing a single file.
|
|
||||||
// Uses Borg's compress package for xz compression.
|
|
||||||
func createTarXzArchive(fs io_interface.Medium, src, dst string) error {
|
|
||||||
// Open the source file
|
|
||||||
srcFile, err := fs.Open(src)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open source file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = srcFile.Close() }()
|
|
||||||
|
|
||||||
srcInfo, err := srcFile.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to stat source file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create tar archive in memory
|
|
||||||
var tarBuf bytes.Buffer
|
|
||||||
tarWriter := tar.NewWriter(&tarBuf)
|
|
||||||
|
|
||||||
// Create tar header
|
|
||||||
header, err := tar.FileInfoHeader(srcInfo, "")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create tar header: %w", err)
|
|
||||||
}
|
|
||||||
header.Name = filepath.Base(src)
|
|
||||||
|
|
||||||
if err := tarWriter.WriteHeader(header); err != nil {
|
|
||||||
return fmt.Errorf("failed to write tar header: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.Copy(tarWriter, srcFile); err != nil {
|
|
||||||
return fmt.Errorf("failed to write file content to tar: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tarWriter.Close(); err != nil {
|
|
||||||
return fmt.Errorf("failed to close tar writer: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compress with xz using Borg
|
|
||||||
xzData, err := compress.Compress(tarBuf.Bytes(), "xz")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to compress with xz: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write to destination file
|
|
||||||
dstFile, err := fs.Create(dst)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create archive file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = dstFile.Close() }()
|
|
||||||
|
|
||||||
if _, err := dstFile.Write(xzData); err != nil {
|
|
||||||
return fmt.Errorf("failed to write archive file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// createTarGzArchive creates a tar.gz archive containing a single file.
|
|
||||||
func createTarGzArchive(fs io_interface.Medium, src, dst string) error {
|
|
||||||
// Open the source file
|
|
||||||
srcFile, err := fs.Open(src)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open source file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = srcFile.Close() }()
|
|
||||||
|
|
||||||
srcInfo, err := srcFile.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to stat source file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the destination file
|
|
||||||
dstFile, err := fs.Create(dst)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create archive file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = dstFile.Close() }()
|
|
||||||
|
|
||||||
// Create gzip writer
|
|
||||||
gzWriter := gzip.NewWriter(dstFile)
|
|
||||||
defer func() { _ = gzWriter.Close() }()
|
|
||||||
|
|
||||||
// Create tar writer
|
|
||||||
tarWriter := tar.NewWriter(gzWriter)
|
|
||||||
defer func() { _ = tarWriter.Close() }()
|
|
||||||
|
|
||||||
// Create tar header
|
|
||||||
header, err := tar.FileInfoHeader(srcInfo, "")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create tar header: %w", err)
|
|
||||||
}
|
|
||||||
// Use just the filename, not the full path
|
|
||||||
header.Name = filepath.Base(src)
|
|
||||||
|
|
||||||
// Write header
|
|
||||||
if err := tarWriter.WriteHeader(header); err != nil {
|
|
||||||
return fmt.Errorf("failed to write tar header: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write file content
|
|
||||||
if _, err := io.Copy(tarWriter, srcFile); err != nil {
|
|
||||||
return fmt.Errorf("failed to write file content to tar: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// createZipArchive creates a zip archive containing a single file.
|
|
||||||
func createZipArchive(fs io_interface.Medium, src, dst string) error {
|
|
||||||
// Open the source file
|
|
||||||
srcFile, err := fs.Open(src)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open source file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = srcFile.Close() }()
|
|
||||||
|
|
||||||
srcInfo, err := srcFile.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to stat source file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the destination file
|
|
||||||
dstFile, err := fs.Create(dst)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create archive file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = dstFile.Close() }()
|
|
||||||
|
|
||||||
// Create zip writer
|
|
||||||
zipWriter := zip.NewWriter(dstFile)
|
|
||||||
defer func() { _ = zipWriter.Close() }()
|
|
||||||
|
|
||||||
// Create zip header
|
|
||||||
header, err := zip.FileInfoHeader(srcInfo)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create zip header: %w", err)
|
|
||||||
}
|
|
||||||
// Use just the filename, not the full path
|
|
||||||
header.Name = filepath.Base(src)
|
|
||||||
header.Method = zip.Deflate
|
|
||||||
|
|
||||||
// Create file in archive
|
|
||||||
writer, err := zipWriter.CreateHeader(header)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create zip entry: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write file content
|
|
||||||
if _, err := io.Copy(writer, srcFile); err != nil {
|
|
||||||
return fmt.Errorf("failed to write file content to zip: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,397 +0,0 @@
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"archive/zip"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/Snider/Borg/pkg/compress"
|
|
||||||
io_interface "forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupArchiveTestFile creates a test binary file in a temp directory with the standard structure.
|
|
||||||
// Returns the path to the binary and the output directory.
|
|
||||||
func setupArchiveTestFile(t *testing.T, name, os_, arch string) (binaryPath string, outputDir string) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
outputDir = t.TempDir()
|
|
||||||
|
|
||||||
// Create platform directory: dist/os_arch
|
|
||||||
platformDir := filepath.Join(outputDir, os_+"_"+arch)
|
|
||||||
err := os.MkdirAll(platformDir, 0755)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create test binary
|
|
||||||
binaryPath = filepath.Join(platformDir, name)
|
|
||||||
content := []byte("#!/bin/bash\necho 'Hello, World!'\n")
|
|
||||||
err = os.WriteFile(binaryPath, content, 0755)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return binaryPath, outputDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestArchive_Good exercises the happy paths of Archive, ArchiveXZ and
// ArchiveWithFormat: per-OS archive selection (tar.gz / tar.xz / zip),
// naming scheme, field propagation, and archive contents.
func TestArchive_Good(t *testing.T) {
	fs := io_interface.Local
	t.Run("creates tar.gz for linux", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)

		// Verify archive was created
		expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.gz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		// Verify OS and Arch are preserved
		assert.Equal(t, "linux", result.OS)
		assert.Equal(t, "amd64", result.Arch)

		// Verify archive content
		verifyTarGzContent(t, result.Path, "myapp")
	})

	t.Run("creates tar.gz for darwin", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "darwin",
			Arch: "arm64",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)

		expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.gz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyTarGzContent(t, result.Path, "myapp")
	})

	t.Run("creates zip for windows", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "windows",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)

		// Windows archives should strip .exe from archive name
		expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		// The entry inside the zip keeps its .exe suffix.
		verifyZipContent(t, result.Path, "myapp.exe")
	})

	t.Run("preserves checksum field", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifact := Artifact{
			Path:     binaryPath,
			OS:       "linux",
			Arch:     "amd64",
			Checksum: "abc123",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)
		assert.Equal(t, "abc123", result.Checksum)
	})

	t.Run("creates tar.xz for linux with ArchiveXZ", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := ArchiveXZ(fs, artifact)
		require.NoError(t, err)

		expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.xz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyTarXzContent(t, result.Path, "myapp")
	})

	t.Run("creates tar.xz for darwin with ArchiveWithFormat", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "darwin",
			Arch: "arm64",
		}

		result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
		require.NoError(t, err)

		expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.xz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyTarXzContent(t, result.Path, "myapp")
	})

	t.Run("windows still uses zip even with xz format", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "windows",
			Arch: "amd64",
		}

		result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
		require.NoError(t, err)

		// Windows should still get .zip regardless of format
		expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyZipContent(t, result.Path, "myapp.exe")
	})
}
|
|
||||||
|
|
||||||
// TestArchive_Bad covers Archive's error paths: empty artifact path, a
// source file that does not exist, and a directory where a file is
// expected. Each case must return a non-nil error and an empty result.
func TestArchive_Bad(t *testing.T) {
	fs := io_interface.Local
	t.Run("returns error for empty path", func(t *testing.T) {
		artifact := Artifact{
			Path: "",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "artifact path is empty")
		assert.Empty(t, result.Path)
	})

	t.Run("returns error for non-existent file", func(t *testing.T) {
		artifact := Artifact{
			Path: "/nonexistent/path/binary",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "source file not found")
		assert.Empty(t, result.Path)
	})

	t.Run("returns error for directory path", func(t *testing.T) {
		dir := t.TempDir()

		artifact := Artifact{
			Path: dir,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "source path is a directory")
		assert.Empty(t, result.Path)
	})
}
|
|
||||||
|
|
||||||
// TestArchiveAll_Good verifies batch archiving across several platforms
// and the nil-result contract for empty and nil input slices.
func TestArchiveAll_Good(t *testing.T) {
	fs := io_interface.Local
	t.Run("archives multiple artifacts", func(t *testing.T) {
		outputDir := t.TempDir()

		// Create multiple binaries
		var artifacts []Artifact
		targets := []struct {
			os_  string
			arch string
		}{
			{"linux", "amd64"},
			{"linux", "arm64"},
			{"darwin", "arm64"},
			{"windows", "amd64"},
		}

		for _, target := range targets {
			platformDir := filepath.Join(outputDir, target.os_+"_"+target.arch)
			err := os.MkdirAll(platformDir, 0755)
			require.NoError(t, err)

			// Windows binaries carry an .exe suffix.
			name := "myapp"
			if target.os_ == "windows" {
				name = "myapp.exe"
			}

			binaryPath := filepath.Join(platformDir, name)
			err = os.WriteFile(binaryPath, []byte("binary content"), 0755)
			require.NoError(t, err)

			artifacts = append(artifacts, Artifact{
				Path: binaryPath,
				OS:   target.os_,
				Arch: target.arch,
			})
		}

		results, err := ArchiveAll(fs, artifacts)
		require.NoError(t, err)
		require.Len(t, results, 4)

		// Verify all archives were created; results are expected to be in
		// the same order as the inputs.
		for i, result := range results {
			assert.FileExists(t, result.Path)
			assert.Equal(t, artifacts[i].OS, result.OS)
			assert.Equal(t, artifacts[i].Arch, result.Arch)
		}
	})

	t.Run("returns nil for empty slice", func(t *testing.T) {
		results, err := ArchiveAll(fs, []Artifact{})
		assert.NoError(t, err)
		assert.Nil(t, results)
	})

	t.Run("returns nil for nil slice", func(t *testing.T) {
		results, err := ArchiveAll(fs, nil)
		assert.NoError(t, err)
		assert.Nil(t, results)
	})
}
|
|
||||||
|
|
||||||
// TestArchiveAll_Bad verifies that a failure mid-batch stops processing
// and still returns the artifacts that were archived before the failure.
func TestArchiveAll_Bad(t *testing.T) {
	fs := io_interface.Local
	t.Run("returns partial results on error", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifacts := []Artifact{
			{Path: binaryPath, OS: "linux", Arch: "amd64"},
			{Path: "/nonexistent/binary", OS: "linux", Arch: "arm64"}, // This will fail
		}

		results, err := ArchiveAll(fs, artifacts)
		assert.Error(t, err)
		// Should have the first successful result
		assert.Len(t, results, 1)
		assert.FileExists(t, results[0].Path)
	})
}
|
|
||||||
|
|
||||||
// TestArchiveFilename_Good pins the archive naming scheme:
// <parent of platform dir>/<binary>_<os>_<arch><ext>, with any ".exe"
// suffix stripped from the binary name.
func TestArchiveFilename_Good(t *testing.T) {
	t.Run("generates correct tar.gz filename", func(t *testing.T) {
		artifact := Artifact{
			Path: "/output/linux_amd64/myapp",
			OS:   "linux",
			Arch: "amd64",
		}

		filename := archiveFilename(artifact, ".tar.gz")
		assert.Equal(t, "/output/myapp_linux_amd64.tar.gz", filename)
	})

	t.Run("generates correct zip filename", func(t *testing.T) {
		artifact := Artifact{
			Path: "/output/windows_amd64/myapp.exe",
			OS:   "windows",
			Arch: "amd64",
		}

		filename := archiveFilename(artifact, ".zip")
		assert.Equal(t, "/output/myapp_windows_amd64.zip", filename)
	})

	t.Run("handles nested output directories", func(t *testing.T) {
		artifact := Artifact{
			Path: "/project/dist/linux_arm64/cli",
			OS:   "linux",
			Arch: "arm64",
		}

		filename := archiveFilename(artifact, ".tar.gz")
		assert.Equal(t, "/project/dist/cli_linux_arm64.tar.gz", filename)
	})
}
|
|
||||||
|
|
||||||
// verifyTarGzContent opens a tar.gz file and verifies it contains the expected file.
|
|
||||||
func verifyTarGzContent(t *testing.T, archivePath, expectedName string) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
file, err := os.Open(archivePath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = file.Close() }()
|
|
||||||
|
|
||||||
gzReader, err := gzip.NewReader(file)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = gzReader.Close() }()
|
|
||||||
|
|
||||||
tarReader := tar.NewReader(gzReader)
|
|
||||||
|
|
||||||
header, err := tarReader.Next()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedName, header.Name)
|
|
||||||
|
|
||||||
// Verify there's only one file
|
|
||||||
_, err = tarReader.Next()
|
|
||||||
assert.Equal(t, io.EOF, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyZipContent opens a zip file and verifies it contains the expected file.
|
|
||||||
func verifyZipContent(t *testing.T, archivePath, expectedName string) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
reader, err := zip.OpenReader(archivePath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() { _ = reader.Close() }()
|
|
||||||
|
|
||||||
require.Len(t, reader.File, 1)
|
|
||||||
assert.Equal(t, expectedName, reader.File[0].Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// verifyTarXzContent opens a tar.xz file and verifies it contains the expected file.
|
|
||||||
func verifyTarXzContent(t *testing.T, archivePath, expectedName string) {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
// Read the xz-compressed file
|
|
||||||
xzData, err := os.ReadFile(archivePath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Decompress with Borg
|
|
||||||
tarData, err := compress.Decompress(xzData)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Read tar archive
|
|
||||||
tarReader := tar.NewReader(bytes.NewReader(tarData))
|
|
||||||
|
|
||||||
header, err := tarReader.Next()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedName, header.Name)
|
|
||||||
|
|
||||||
// Verify there's only one file
|
|
||||||
_, err = tarReader.Next()
|
|
||||||
assert.Equal(t, io.EOF, err)
|
|
||||||
}
|
|
||||||
|
|
@ -1,90 +0,0 @@
|
||||||
// Package build provides project type detection and cross-compilation for the Core build system.
|
|
||||||
// It supports Go, Wails, Node.js, and PHP projects with automatic detection based on
|
|
||||||
// marker files (go.mod, wails.json, package.json, composer.json).
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProjectType represents a detected project type. Detection is based on
// marker files in the project root (see the package comment).
type ProjectType string

// Project type constants for build detection.
const (
	// ProjectTypeGo indicates a standard Go project with go.mod.
	ProjectTypeGo ProjectType = "go"
	// ProjectTypeWails indicates a Wails desktop application.
	ProjectTypeWails ProjectType = "wails"
	// ProjectTypeNode indicates a Node.js project with package.json.
	ProjectTypeNode ProjectType = "node"
	// ProjectTypePHP indicates a PHP/Laravel project with composer.json.
	ProjectTypePHP ProjectType = "php"
	// ProjectTypeCPP indicates a C++ project with CMakeLists.txt.
	ProjectTypeCPP ProjectType = "cpp"
	// ProjectTypeDocker indicates a Docker-based project with Dockerfile.
	ProjectTypeDocker ProjectType = "docker"
	// ProjectTypeLinuxKit indicates a LinuxKit VM configuration.
	ProjectTypeLinuxKit ProjectType = "linuxkit"
	// ProjectTypeTaskfile indicates a project using Taskfile automation.
	ProjectTypeTaskfile ProjectType = "taskfile"
)
|
|
||||||
|
|
||||||
// Target represents a build target platform.
type Target struct {
	// OS is the target operating system (GOOS value, e.g. "linux").
	OS string
	// Arch is the target architecture (GOARCH value, e.g. "amd64").
	Arch string
}

// String returns the target in GOOS/GOARCH format, e.g. "linux/amd64".
func (t Target) String() string {
	return t.OS + "/" + t.Arch
}
|
|
||||||
|
|
||||||
// Artifact represents a build output file.
type Artifact struct {
	// Path is the location of the output file.
	Path string
	// OS is the operating system the artifact was built for.
	OS string
	// Arch is the architecture the artifact was built for.
	Arch string
	// Checksum is the artifact's checksum; may be empty if not computed.
	Checksum string
}
|
|
||||||
|
|
||||||
// Config holds build configuration, populated from .core/build.yaml
// and/or command-line flags.
type Config struct {
	// FS is the medium used for file operations.
	FS io.Medium
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// OutputDir is where build artifacts are placed.
	OutputDir string
	// Name is the output binary name.
	Name string
	// Version is the build version string.
	Version string
	// LDFlags are additional linker flags.
	LDFlags []string

	// Docker-specific config
	Dockerfile string            // Path to Dockerfile (default: Dockerfile)
	Registry   string            // Container registry (default: ghcr.io)
	Image      string            // Image name (owner/repo format)
	Tags       []string          // Additional tags to apply
	BuildArgs  map[string]string // Docker build arguments
	Push       bool              // Whether to push after build

	// LinuxKit-specific config
	LinuxKitConfig string   // Path to LinuxKit YAML config
	Formats        []string // Output formats (iso, qcow2, raw, vmdk)
}
|
|
||||||
|
|
||||||
// Builder defines the interface for project-specific build implementations.
// Detect is used for auto-detection: a builder reports whether it
// recognizes the project layout in the given directory (e.g. by its
// marker files), and Build then produces one artifact per target.
type Builder interface {
	// Name returns the builder's identifier.
	Name() string
	// Detect checks if this builder can handle the project in the given directory.
	Detect(fs io.Medium, dir string) (bool, error)
	// Build compiles the project for the specified targets.
	Build(ctx context.Context, cfg *Config, targets []Target) ([]Artifact, error)
}
|
|
||||||
|
|
@ -1,144 +0,0 @@
|
||||||
// Package buildcmd provides project build commands with auto-detection.
|
|
||||||
package buildcmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"embed"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/cli"
|
|
||||||
"forge.lthn.ai/core/go/pkg/i18n"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
// init registers the build command tree with the shared CLI registry at
// program start-up.
func init() {
	cli.RegisterCommands(AddBuildCommands)
}
|
|
||||||
|
|
||||||
// Style aliases from the shared cli package, renamed for local readability.
var (
	buildHeaderStyle  = cli.TitleStyle
	buildTargetStyle  = cli.ValueStyle
	buildSuccessStyle = cli.SuccessStyle
	buildErrorStyle   = cli.ErrorStyle
	buildDimStyle     = cli.DimStyle
)
|
|
||||||
|
|
||||||
// guiTemplate embeds the GUI project template tree (tmpl/gui) into the
// binary. The go:embed directive must stay immediately above the var.
//
//go:embed all:tmpl/gui
var guiTemplate embed.FS
|
|
||||||
|
|
||||||
// Flags for the main build command and its subcommands; all are bound to
// cobra flags in initBuildFlags.
var (
	buildType  string
	ciMode     bool
	targets    string
	outputDir  string
	doArchive  bool
	doChecksum bool
	verbose    bool

	// Docker/LinuxKit specific flags
	configPath string
	format     string
	push       bool
	imageName  string

	// Signing flags
	noSign   bool
	notarize bool

	// from-path subcommand
	fromPath string

	// pwa subcommand
	pwaURL string

	// sdk subcommand
	sdkSpec    string
	sdkLang    string
	sdkVersion string
	sdkDryRun  bool
)
|
|
||||||
|
|
||||||
// buildCmd is the root `build` command. It delegates to runProjectBuild,
// which auto-detects the project type and orchestrates the build.
var buildCmd = &cobra.Command{
	Use:   "build",
	Short: i18n.T("cmd.build.short"),
	Long:  i18n.T("cmd.build.long"),
	RunE: func(cmd *cobra.Command, args []string) error {
		return runProjectBuild(cmd.Context(), buildType, ciMode, targets, outputDir, doArchive, doChecksum, configPath, format, push, imageName, noSign, notarize, verbose)
	},
}
|
|
||||||
|
|
||||||
// fromPathCmd implements `build from-path`: build from a local static
// web app directory given via --path.
var fromPathCmd = &cobra.Command{
	Use:   "from-path",
	Short: i18n.T("cmd.build.from_path.short"),
	RunE: func(cmd *cobra.Command, args []string) error {
		// --path is mandatory; fail fast with the sentinel error.
		if fromPath == "" {
			return errPathRequired
		}
		return runBuild(fromPath)
	},
}
|
|
||||||
|
|
||||||
// pwaCmd implements `build pwa`: build from a live PWA URL given via
// --url.
var pwaCmd = &cobra.Command{
	Use:   "pwa",
	Short: i18n.T("cmd.build.pwa.short"),
	RunE: func(cmd *cobra.Command, args []string) error {
		// --url is mandatory; fail fast with the sentinel error.
		if pwaURL == "" {
			return errURLRequired
		}
		return runPwaBuild(pwaURL)
	},
}
|
|
||||||
|
|
||||||
// sdkBuildCmd implements `build sdk`: generate API SDKs from an OpenAPI
// spec, controlled by the --spec/--lang/--version/--dry-run flags.
var sdkBuildCmd = &cobra.Command{
	Use:   "sdk",
	Short: i18n.T("cmd.build.sdk.short"),
	Long:  i18n.T("cmd.build.sdk.long"),
	RunE: func(cmd *cobra.Command, args []string) error {
		return runBuildSDK(sdkSpec, sdkLang, sdkVersion, sdkDryRun)
	},
}
|
|
||||||
|
|
||||||
// initBuildFlags binds every command-line flag for buildCmd and its
// subcommands, then attaches the subcommands. All flag help text is
// looked up through i18n.
func initBuildFlags() {
	// Main build command flags
	buildCmd.Flags().StringVar(&buildType, "type", "", i18n.T("cmd.build.flag.type"))
	buildCmd.Flags().BoolVar(&ciMode, "ci", false, i18n.T("cmd.build.flag.ci"))
	buildCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, i18n.T("common.flag.verbose"))
	buildCmd.Flags().StringVar(&targets, "targets", "", i18n.T("cmd.build.flag.targets"))
	buildCmd.Flags().StringVar(&outputDir, "output", "", i18n.T("cmd.build.flag.output"))
	// Archiving and checksumming default to on.
	buildCmd.Flags().BoolVar(&doArchive, "archive", true, i18n.T("cmd.build.flag.archive"))
	buildCmd.Flags().BoolVar(&doChecksum, "checksum", true, i18n.T("cmd.build.flag.checksum"))

	// Docker/LinuxKit specific
	buildCmd.Flags().StringVar(&configPath, "config", "", i18n.T("cmd.build.flag.config"))
	buildCmd.Flags().StringVar(&format, "format", "", i18n.T("cmd.build.flag.format"))
	buildCmd.Flags().BoolVar(&push, "push", false, i18n.T("cmd.build.flag.push"))
	buildCmd.Flags().StringVar(&imageName, "image", "", i18n.T("cmd.build.flag.image"))

	// Signing flags
	buildCmd.Flags().BoolVar(&noSign, "no-sign", false, i18n.T("cmd.build.flag.no_sign"))
	buildCmd.Flags().BoolVar(&notarize, "notarize", false, i18n.T("cmd.build.flag.notarize"))

	// from-path subcommand flags
	fromPathCmd.Flags().StringVar(&fromPath, "path", "", i18n.T("cmd.build.from_path.flag.path"))

	// pwa subcommand flags
	pwaCmd.Flags().StringVar(&pwaURL, "url", "", i18n.T("cmd.build.pwa.flag.url"))

	// sdk subcommand flags
	sdkBuildCmd.Flags().StringVar(&sdkSpec, "spec", "", i18n.T("common.flag.spec"))
	sdkBuildCmd.Flags().StringVar(&sdkLang, "lang", "", i18n.T("cmd.build.sdk.flag.lang"))
	sdkBuildCmd.Flags().StringVar(&sdkVersion, "version", "", i18n.T("cmd.build.sdk.flag.version"))
	sdkBuildCmd.Flags().BoolVar(&sdkDryRun, "dry-run", false, i18n.T("cmd.build.sdk.flag.dry_run"))

	// Add subcommands
	buildCmd.AddCommand(fromPathCmd)
	buildCmd.AddCommand(pwaCmd)
	buildCmd.AddCommand(sdkBuildCmd)
}
|
|
||||||
|
|
||||||
// AddBuildCommands registers the 'build' command and all subcommands on
// the given root. It also attaches the release command to buildCmd via
// AddReleaseCommand before registering.
func AddBuildCommands(root *cobra.Command) {
	initBuildFlags()
	AddReleaseCommand(buildCmd)
	root.AddCommand(buildCmd)
}
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
||||||
// Package buildcmd provides project build commands with auto-detection.
|
|
||||||
//
|
|
||||||
// Supports building:
|
|
||||||
// - Go projects (standard and cross-compilation)
|
|
||||||
// - Wails desktop applications
|
|
||||||
// - Docker images
|
|
||||||
// - LinuxKit VM images
|
|
||||||
// - Taskfile-based projects
|
|
||||||
//
|
|
||||||
// Configuration via .core/build.yaml or command-line flags.
|
|
||||||
//
|
|
||||||
// Subcommands:
|
|
||||||
// - build: Auto-detect and build the current project
|
|
||||||
// - build from-path: Build from a local static web app directory
|
|
||||||
// - build pwa: Build from a live PWA URL
|
|
||||||
// - build sdk: Generate API SDKs from OpenAPI spec
|
|
||||||
package buildcmd
|
|
||||||
|
|
||||||
// Note: The AddBuildCommands function is defined in cmd_build.go
|
|
||||||
// This file exists for documentation purposes and maintains the original
|
|
||||||
// package documentation from commands.go.
|
|
||||||
|
|
@ -1,392 +0,0 @@
|
||||||
// cmd_project.go implements the main project build logic.
|
|
||||||
//
|
|
||||||
// This handles auto-detection of project types (Go, Wails, Docker, LinuxKit, Taskfile)
|
|
||||||
// and orchestrates the build process including signing, archiving, and checksums.
|
|
||||||
|
|
||||||
package buildcmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/build/builders"
|
|
||||||
"forge.lthn.ai/core/go/pkg/build/signing"
|
|
||||||
"forge.lthn.ai/core/go/pkg/i18n"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// runProjectBuild handles the main `core build` command with auto-detection.
//
// Pipeline: load .core/build.yaml -> detect (or accept) the project type ->
// resolve targets, output dir and binary name -> build -> optionally sign /
// notarize (macOS host only) -> optionally archive -> optionally checksum ->
// print results (JSON in CI mode, styled text otherwise).
//
// Parameters:
//   - buildType: explicit project type; empty means auto-detect via build.PrimaryType.
//   - ciMode: emit machine-readable JSON and suppress styled output.
//   - targetsFlag: comma-separated "os/arch" pairs; overrides config targets.
//   - outputDir: artifact directory; defaults to "dist", resolved under projectDir.
//   - doArchive / doChecksum: enable the archive and checksum stages.
//   - configPath: Dockerfile or LinuxKit config path (one flag reused for both).
//   - format: comma-separated LinuxKit output formats.
//   - push / imageName: Docker publish settings.
//   - noSign / notarize: override the config's signing behaviour.
//   - verbose: per-artifact progress output (non-CI only).
func runProjectBuild(ctx context.Context, buildType string, ciMode bool, targetsFlag string, outputDir string, doArchive bool, doChecksum bool, configPath string, format string, push bool, imageName string, noSign bool, notarize bool, verbose bool) error {
	// Use local filesystem as the default medium
	fs := io.Local

	// Get current working directory as project root
	projectDir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err)
	}

	// Load configuration from .core/build.yaml (or defaults)
	buildCfg, err := build.LoadConfig(fs, projectDir)
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "load config"}), err)
	}

	// Detect project type if not specified
	var projectType build.ProjectType
	if buildType != "" {
		projectType = build.ProjectType(buildType)
	} else {
		projectType, err = build.PrimaryType(fs, projectDir)
		if err != nil {
			return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "detect project type"}), err)
		}
		if projectType == "" {
			return fmt.Errorf("%s", i18n.T("cmd.build.error.no_project_type", map[string]interface{}{"Dir": projectDir}))
		}
	}

	// Determine targets: CLI flag wins, then config, then the host OS/arch.
	var buildTargets []build.Target
	if targetsFlag != "" {
		// Parse from command line
		buildTargets, err = parseTargets(targetsFlag)
		if err != nil {
			return err
		}
	} else if len(buildCfg.Targets) > 0 {
		// Use config targets
		buildTargets = buildCfg.ToTargets()
	} else {
		// Fall back to current OS/arch
		buildTargets = []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}
	}

	// Determine output directory
	if outputDir == "" {
		outputDir = "dist"
	}
	if !filepath.IsAbs(outputDir) {
		outputDir = filepath.Join(projectDir, outputDir)
	}
	outputDir = filepath.Clean(outputDir)

	// Ensure config path is absolute if provided
	if configPath != "" && !filepath.IsAbs(configPath) {
		configPath = filepath.Join(projectDir, configPath)
	}

	// Determine binary name: config binary -> project name -> directory name.
	binaryName := buildCfg.Project.Binary
	if binaryName == "" {
		binaryName = buildCfg.Project.Name
	}
	if binaryName == "" {
		binaryName = filepath.Base(projectDir)
	}

	// Print build info (verbose mode only)
	if verbose && !ciMode {
		fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.build")), i18n.T("cmd.build.building_project"))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.type"), buildTargetStyle.Render(string(projectType)))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.output"), buildTargetStyle.Render(outputDir))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.binary"), buildTargetStyle.Render(binaryName))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.targets"), buildTargetStyle.Render(formatTargets(buildTargets)))
		fmt.Println()
	}

	// Get the appropriate builder
	builder, err := getBuilder(projectType)
	if err != nil {
		return err
	}

	// Create build config for the builder
	cfg := &build.Config{
		FS:         fs,
		ProjectDir: projectDir,
		OutputDir:  outputDir,
		Name:       binaryName,
		// NOTE(review): Version is populated from Project.Name, not a version
		// field — confirm this is intentional.
		Version: buildCfg.Project.Name, // Could be enhanced with git describe
		LDFlags: buildCfg.Build.LDFlags,
		// Docker/LinuxKit specific
		Dockerfile:     configPath, // Reuse for Dockerfile path
		LinuxKitConfig: configPath,
		Push:           push,
		Image:          imageName,
	}

	// Parse formats for LinuxKit
	if format != "" {
		cfg.Formats = strings.Split(format, ",")
	}

	// Execute build
	artifacts, err := builder.Build(ctx, cfg, buildTargets)
	if err != nil {
		if !ciMode {
			fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
		}
		return err
	}

	if verbose && !ciMode {
		fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.built_artifacts", map[string]interface{}{"Count": len(artifacts)}))
		fmt.Println()
		for _, artifact := range artifacts {
			// Fall back to the absolute path when Rel fails.
			relPath, err := filepath.Rel(projectDir, artifact.Path)
			if err != nil {
				relPath = artifact.Path
			}
			fmt.Printf(" %s %s %s\n",
				buildSuccessStyle.Render("*"),
				buildTargetStyle.Render(relPath),
				buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)),
			)
		}
	}

	// Sign macOS binaries if enabled; CLI flags override the loaded config.
	signCfg := buildCfg.Sign
	if notarize {
		signCfg.MacOS.Notarize = true
	}
	if noSign {
		signCfg.Enabled = false
	}

	// Signing requires a darwin host (codesign/notarytool tooling).
	if signCfg.Enabled && runtime.GOOS == "darwin" {
		if verbose && !ciMode {
			fmt.Println()
			fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.sign")), i18n.T("cmd.build.signing_binaries"))
		}

		// Convert build.Artifact to signing.Artifact
		signingArtifacts := make([]signing.Artifact, len(artifacts))
		for i, a := range artifacts {
			signingArtifacts[i] = signing.Artifact{Path: a.Path, OS: a.OS, Arch: a.Arch}
		}

		if err := signing.SignBinaries(ctx, fs, signCfg, signingArtifacts); err != nil {
			if !ciMode {
				fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.signing_failed"), err)
			}
			return err
		}

		if signCfg.MacOS.Notarize {
			if err := signing.NotarizeBinaries(ctx, fs, signCfg, signingArtifacts); err != nil {
				if !ciMode {
					fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.notarization_failed"), err)
				}
				return err
			}
		}
	}

	// Archive artifacts if enabled
	var archivedArtifacts []build.Artifact
	if doArchive && len(artifacts) > 0 {
		if verbose && !ciMode {
			fmt.Println()
			fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.archive")), i18n.T("cmd.build.creating_archives"))
		}

		archivedArtifacts, err = build.ArchiveAll(fs, artifacts)
		if err != nil {
			if !ciMode {
				fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.archive_failed"), err)
			}
			return err
		}

		if verbose && !ciMode {
			for _, artifact := range archivedArtifacts {
				relPath, err := filepath.Rel(projectDir, artifact.Path)
				if err != nil {
					relPath = artifact.Path
				}
				fmt.Printf(" %s %s %s\n",
					buildSuccessStyle.Render("*"),
					buildTargetStyle.Render(relPath),
					buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)),
				)
			}
		}
	}

	// Compute checksums if enabled. Archives are preferred; raw binaries are
	// checksummed only when archiving is explicitly disabled.
	var checksummedArtifacts []build.Artifact
	if doChecksum && len(archivedArtifacts) > 0 {
		checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, archivedArtifacts, signCfg, ciMode, verbose)
		if err != nil {
			return err
		}
	} else if doChecksum && len(artifacts) > 0 && !doArchive {
		// Checksum raw binaries if archiving is disabled
		checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, artifacts, signCfg, ciMode, verbose)
		if err != nil {
			return err
		}
	}

	// Output results
	if ciMode {
		// Determine which artifacts to output (prefer checksummed > archived > raw)
		var outputArtifacts []build.Artifact
		if len(checksummedArtifacts) > 0 {
			outputArtifacts = checksummedArtifacts
		} else if len(archivedArtifacts) > 0 {
			outputArtifacts = archivedArtifacts
		} else {
			outputArtifacts = artifacts
		}

		// JSON output for CI
		output, err := json.MarshalIndent(outputArtifacts, "", " ")
		if err != nil {
			return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "marshal artifacts"}), err)
		}
		fmt.Println(string(output))
	} else if !verbose {
		// Minimal output: just success with artifact count
		fmt.Printf("%s %s %s\n",
			buildSuccessStyle.Render(i18n.T("common.label.success")),
			i18n.T("cmd.build.built_artifacts", map[string]interface{}{"Count": len(artifacts)}),
			buildDimStyle.Render(fmt.Sprintf("(%s)", outputDir)),
		)
	}

	return nil
}
|
|
||||||
|
|
||||||
// computeAndWriteChecksums computes checksums for artifacts and writes CHECKSUMS.txt.
//
// It returns the artifacts with their Checksum fields populated, writes
// CHECKSUMS.txt into outputDir, and GPG-signs that file when signing is
// enabled in signCfg. Error messages are printed only outside CI mode;
// the error is always returned to the caller either way.
func computeAndWriteChecksums(ctx context.Context, projectDir, outputDir string, artifacts []build.Artifact, signCfg signing.SignConfig, ciMode bool, verbose bool) ([]build.Artifact, error) {
	fs := io.Local
	if verbose && !ciMode {
		fmt.Println()
		fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.checksum")), i18n.T("cmd.build.computing_checksums"))
	}

	checksummedArtifacts, err := build.ChecksumAll(fs, artifacts)
	if err != nil {
		if !ciMode {
			fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.checksum_failed"), err)
		}
		return nil, err
	}

	// Write CHECKSUMS.txt
	checksumPath := filepath.Join(outputDir, "CHECKSUMS.txt")
	if err := build.WriteChecksumFile(fs, checksummedArtifacts, checksumPath); err != nil {
		if !ciMode {
			fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("common.error.failed", map[string]any{"Action": "write CHECKSUMS.txt"}), err)
		}
		return nil, err
	}

	// Sign checksums with GPG
	if signCfg.Enabled {
		if err := signing.SignChecksums(ctx, fs, signCfg, checksumPath); err != nil {
			if !ciMode {
				fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.gpg_signing_failed"), err)
			}
			return nil, err
		}
	}

	// Verbose summary: each artifact with its checksum, then the checksum file.
	if verbose && !ciMode {
		for _, artifact := range checksummedArtifacts {
			// Fall back to the absolute path when Rel fails.
			relPath, err := filepath.Rel(projectDir, artifact.Path)
			if err != nil {
				relPath = artifact.Path
			}
			fmt.Printf(" %s %s\n",
				buildSuccessStyle.Render("*"),
				buildTargetStyle.Render(relPath),
			)
			fmt.Printf(" %s\n", buildDimStyle.Render(artifact.Checksum))
		}

		relChecksumPath, err := filepath.Rel(projectDir, checksumPath)
		if err != nil {
			relChecksumPath = checksumPath
		}
		fmt.Printf(" %s %s\n",
			buildSuccessStyle.Render("*"),
			buildTargetStyle.Render(relChecksumPath),
		)
	}

	return checksummedArtifacts, nil
}
|
|
||||||
|
|
||||||
// parseTargets parses a comma-separated list of OS/arch pairs.
|
|
||||||
func parseTargets(targetsFlag string) ([]build.Target, error) {
|
|
||||||
parts := strings.Split(targetsFlag, ",")
|
|
||||||
var targets []build.Target
|
|
||||||
|
|
||||||
for _, part := range parts {
|
|
||||||
part = strings.TrimSpace(part)
|
|
||||||
if part == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
osArch := strings.Split(part, "/")
|
|
||||||
if len(osArch) != 2 {
|
|
||||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.invalid_target", map[string]interface{}{"Target": part}))
|
|
||||||
}
|
|
||||||
|
|
||||||
targets = append(targets, build.Target{
|
|
||||||
OS: strings.TrimSpace(osArch[0]),
|
|
||||||
Arch: strings.TrimSpace(osArch[1]),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(targets) == 0 {
|
|
||||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.no_targets"))
|
|
||||||
}
|
|
||||||
|
|
||||||
return targets, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatTargets returns a human-readable string of targets.
|
|
||||||
func formatTargets(targets []build.Target) string {
|
|
||||||
var parts []string
|
|
||||||
for _, t := range targets {
|
|
||||||
parts = append(parts, t.String())
|
|
||||||
}
|
|
||||||
return strings.Join(parts, ", ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// getBuilder returns the appropriate builder for the project type.
//
// Wails, Go, Docker, LinuxKit, Taskfile and C++ have concrete builders.
// Node and PHP are recognized types without an implementation yet and
// return an explanatory error; anything else fails as unsupported.
func getBuilder(projectType build.ProjectType) (build.Builder, error) {
	switch projectType {
	case build.ProjectTypeWails:
		return builders.NewWailsBuilder(), nil
	case build.ProjectTypeGo:
		return builders.NewGoBuilder(), nil
	case build.ProjectTypeDocker:
		return builders.NewDockerBuilder(), nil
	case build.ProjectTypeLinuxKit:
		return builders.NewLinuxKitBuilder(), nil
	case build.ProjectTypeTaskfile:
		return builders.NewTaskfileBuilder(), nil
	case build.ProjectTypeCPP:
		return builders.NewCPPBuilder(), nil
	case build.ProjectTypeNode:
		return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.node_not_implemented"))
	case build.ProjectTypePHP:
		return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.php_not_implemented"))
	default:
		return nil, fmt.Errorf("%s: %s", i18n.T("cmd.build.error.unsupported_type"), projectType)
	}
}
|
|
||||||
|
|
@ -1,324 +0,0 @@
|
||||||
// cmd_pwa.go implements PWA and legacy GUI build functionality.
|
|
||||||
//
|
|
||||||
// Supports building desktop applications from:
|
|
||||||
// - Local static web application directories
|
|
||||||
// - Live PWA URLs (downloads and packages)
|
|
||||||
|
|
||||||
package buildcmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/i18n"
|
|
||||||
"github.com/leaanthony/debme"
|
|
||||||
"github.com/leaanthony/gosod"
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error sentinels for build commands.
//
// NOTE(review): neither sentinel is referenced in this file's visible
// code — presumably the flag-validation paths for the from-path and pwa
// subcommands return them; verify against the command definitions.
var (
	// errPathRequired signals a missing --path flag.
	errPathRequired = errors.New("the --path flag is required")
	// errURLRequired signals a missing --url flag.
	errURLRequired = errors.New("the --url flag is required")
)
|
|
||||||
|
|
||||||
// runPwaBuild downloads a PWA from URL and builds it.
//
// The page and its manifest-listed assets are fetched into a fresh
// temporary directory, which is then handed to runBuild for packaging.
// The temp directory is intentionally NOT removed (see the commented-out
// defer) so failed builds can be inspected afterwards.
func runPwaBuild(pwaURL string) error {
	fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.starting"), pwaURL)

	tempDir, err := os.MkdirTemp("", "core-pwa-build-*")
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "create temporary directory"}), err)
	}
	// defer os.RemoveAll(tempDir) // Keep temp dir for debugging
	fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.downloading_to"), tempDir)

	if err := downloadPWA(pwaURL, tempDir); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "download PWA"}), err)
	}

	return runBuild(tempDir)
}
|
|
||||||
|
|
||||||
// downloadPWA fetches a PWA from a URL and saves assets locally.
|
|
||||||
func downloadPWA(baseURL, destDir string) error {
|
|
||||||
// Fetch the main HTML page
|
|
||||||
resp, err := http.Get(baseURL)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%s %s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch URL"}), baseURL, err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "read response body"}), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the manifest URL from the HTML
|
|
||||||
manifestURL, err := findManifestURL(string(body), baseURL)
|
|
||||||
if err != nil {
|
|
||||||
// If no manifest, it's not a PWA, but we can still try to package it as a simple site.
|
|
||||||
fmt.Printf("%s %s\n", i18n.T("common.label.warning"), i18n.T("cmd.build.pwa.no_manifest"))
|
|
||||||
if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil {
|
|
||||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.found_manifest"), manifestURL)
|
|
||||||
|
|
||||||
// Fetch and parse the manifest
|
|
||||||
manifest, err := fetchManifest(manifestURL)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch or parse manifest"}), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Download all assets listed in the manifest
|
|
||||||
assets := collectAssets(manifest, manifestURL)
|
|
||||||
for _, assetURL := range assets {
|
|
||||||
if err := downloadAsset(assetURL, destDir); err != nil {
|
|
||||||
fmt.Printf("%s %s %s: %v\n", i18n.T("common.label.warning"), i18n.T("common.error.failed", map[string]any{"Action": "download asset"}), assetURL, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Also save the root index.html
|
|
||||||
if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil {
|
|
||||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println(i18n.T("cmd.build.pwa.download_complete"))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findManifestURL extracts the manifest URL from HTML content.
|
|
||||||
func findManifestURL(htmlContent, baseURL string) (string, error) {
|
|
||||||
doc, err := html.Parse(strings.NewReader(htmlContent))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
var manifestPath string
|
|
||||||
var f func(*html.Node)
|
|
||||||
f = func(n *html.Node) {
|
|
||||||
if n.Type == html.ElementNode && n.Data == "link" {
|
|
||||||
var rel, href string
|
|
||||||
for _, a := range n.Attr {
|
|
||||||
if a.Key == "rel" {
|
|
||||||
rel = a.Val
|
|
||||||
}
|
|
||||||
if a.Key == "href" {
|
|
||||||
href = a.Val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if rel == "manifest" && href != "" {
|
|
||||||
manifestPath = href
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
f(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f(doc)
|
|
||||||
|
|
||||||
if manifestPath == "" {
|
|
||||||
return "", fmt.Errorf("%s", i18n.T("cmd.build.pwa.error.no_manifest_tag"))
|
|
||||||
}
|
|
||||||
|
|
||||||
base, err := url.Parse(baseURL)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
manifestURL, err := base.Parse(manifestPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return manifestURL.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchManifest downloads and parses a PWA manifest.
//
// A non-2xx response is reported explicitly; without this check an HTML
// error page would reach the JSON decoder and produce a confusing
// "invalid character" error instead of the real cause.
func fetchManifest(manifestURL string) (map[string]interface{}, error) {
	resp, err := http.Get(manifestURL)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("fetching manifest %s: HTTP %d", manifestURL, resp.StatusCode)
	}

	var manifest map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&manifest); err != nil {
		return nil, err
	}
	return manifest, nil
}
|
|
||||||
|
|
||||||
// collectAssets extracts asset URLs from a PWA manifest.
//
// It resolves the manifest's "start_url" and every "icons[].src" entry
// against manifestURL. Entries that fail to resolve are skipped, and an
// unparsable manifestURL yields no assets at all — the parse error was
// previously discarded, leaving a nil base URL that panicked on use.
func collectAssets(manifest map[string]interface{}, manifestURL string) []string {
	base, err := url.Parse(manifestURL)
	if err != nil {
		// No valid base URL means nothing can be resolved.
		return nil
	}

	var assets []string

	// Add start_url
	if startURL, ok := manifest["start_url"].(string); ok {
		if resolved, err := base.Parse(startURL); err == nil {
			assets = append(assets, resolved.String())
		}
	}

	// Add icons
	if icons, ok := manifest["icons"].([]interface{}); ok {
		for _, icon := range icons {
			iconMap, ok := icon.(map[string]interface{})
			if !ok {
				continue
			}
			if src, ok := iconMap["src"].(string); ok {
				if resolved, err := base.Parse(src); err == nil {
					assets = append(assets, resolved.String())
				}
			}
		}
	}

	return assets
}
|
|
||||||
|
|
||||||
// downloadAsset fetches a single asset and saves it under destDir,
// mirroring the asset's URL path.
//
// Security: the URL path originates from a remote manifest, so the
// resolved file path is verified to stay inside destDir before writing —
// a path containing ".." could otherwise escape the download directory.
// Non-2xx responses are rejected so error pages are not saved as assets.
func downloadAsset(assetURL, destDir string) error {
	resp, err := http.Get(assetURL)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("fetching asset %s: HTTP %d", assetURL, resp.StatusCode)
	}

	u, err := url.Parse(assetURL)
	if err != nil {
		return err
	}

	path := filepath.Join(destDir, filepath.FromSlash(u.Path))

	// filepath.Join cleans the path; reject anything that resolved outside
	// destDir (e.g. via ".." segments in the URL path).
	cleanDest := filepath.Clean(destDir)
	if path != cleanDest && !strings.HasPrefix(path, cleanDest+string(filepath.Separator)) {
		return fmt.Errorf("asset path %q escapes destination directory", u.Path)
	}

	if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
		return err
	}

	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() { _ = out.Close() }()

	_, err = io.Copy(out, resp.Body)
	return err
}
|
|
||||||
|
|
||||||
// runBuild builds a desktop application from a local directory.
//
// Steps: validate fromPath is a directory, regenerate the embedded GUI
// template under .core/build/app, copy the web assets into its html/
// subdirectory, then run `go mod tidy` and `go build` inside the
// generated project. The binary is named after the source directory, or
// "pwa-app" when building from a runPwaBuild temporary directory.
func runBuild(fromPath string) error {
	fmt.Printf("%s %s\n", i18n.T("cmd.build.from_path.starting"), fromPath)

	info, err := os.Stat(fromPath)
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.invalid_path"), err)
	}
	if !info.IsDir() {
		return fmt.Errorf("%s", i18n.T("cmd.build.from_path.error.must_be_directory"))
	}

	buildDir := ".core/build/app"
	htmlDir := filepath.Join(buildDir, "html")
	appName := filepath.Base(fromPath)
	// Temp dirs from runPwaBuild would give an unstable random app name.
	if strings.HasPrefix(appName, "core-pwa-build-") {
		appName = "pwa-app"
	}
	outputExe := appName

	// Start from a clean slate so stale template files can't leak in.
	if err := os.RemoveAll(buildDir); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "clean build directory"}), err)
	}

	// 1. Generate the project from the embedded template
	fmt.Println(i18n.T("cmd.build.from_path.generating_template"))
	templateFS, err := debme.FS(guiTemplate, "tmpl/gui")
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "anchor template filesystem"}), err)
	}
	sod := gosod.New(templateFS)
	if sod == nil {
		return fmt.Errorf("%s", i18n.T("common.error.failed", map[string]any{"Action": "create new sod instance"}))
	}

	templateData := map[string]string{"AppName": appName}
	if err := sod.Extract(buildDir, templateData); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "extract template"}), err)
	}

	// 2. Copy the user's web app files
	fmt.Println(i18n.T("cmd.build.from_path.copying_files"))
	if err := copyDir(fromPath, htmlDir); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "copy application files"}), err)
	}

	// 3. Compile the application
	fmt.Println(i18n.T("cmd.build.from_path.compiling"))

	// Run go mod tidy
	cmd := exec.Command("go", "mod", "tidy")
	cmd.Dir = buildDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_mod_tidy"), err)
	}

	// Run go build
	cmd = exec.Command("go", "build", "-o", outputExe)
	cmd.Dir = buildDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_build"), err)
	}

	fmt.Printf("\n%s %s/%s\n", i18n.T("cmd.build.from_path.success"), buildDir, outputExe)
	return nil
}
|
|
||||||
|
|
||||||
// copyDir recursively copies a directory from src to dst.
|
|
||||||
func copyDir(src, dst string) error {
|
|
||||||
return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
relPath, err := filepath.Rel(src, path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
dstPath := filepath.Join(dst, relPath)
|
|
||||||
|
|
||||||
if info.IsDir() {
|
|
||||||
return os.MkdirAll(dstPath, info.Mode())
|
|
||||||
}
|
|
||||||
|
|
||||||
srcFile, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() { _ = srcFile.Close() }()
|
|
||||||
|
|
||||||
dstFile, err := os.Create(dstPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() { _ = dstFile.Close() }()
|
|
||||||
|
|
||||||
_, err = io.Copy(dstFile, srcFile)
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,111 +0,0 @@
|
||||||
// cmd_release.go implements the release command: build + archive + publish in one step.
|
|
||||||
|
|
||||||
package buildcmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/cli"
|
|
||||||
"forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"forge.lthn.ai/core/go/pkg/i18n"
|
|
||||||
"forge.lthn.ai/core/go/pkg/release"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Flag variables for the release command, bound in init below.
var (
	releaseVersion     string // --version: overrides the configured release version
	releaseDraft       bool   // --draft: mark every publisher's release as a draft
	releasePrerelease  bool   // --prerelease: mark every publisher's release as a prerelease
	releaseGoForLaunch bool   // --we-are-go-for-launch: actually publish (default is dry run)
)
|
|
||||||
|
|
||||||
// releaseCmd implements `core build release`: build + archive + checksum +
// publish in one step. It runs as a dry run unless --we-are-go-for-launch
// is given.
var releaseCmd = &cli.Command{
	Use:   "release",
	Short: i18n.T("cmd.build.release.short"),
	Long:  i18n.T("cmd.build.release.long"),
	RunE: func(cmd *cli.Command, args []string) error {
		// dryRun is the inverse of the explicit launch flag.
		return runRelease(cmd.Context(), !releaseGoForLaunch, releaseVersion, releaseDraft, releasePrerelease)
	},
}
|
|
||||||
|
|
||||||
// init binds the release command's flags. Publishing is opt-in: without
// --we-are-go-for-launch the command performs a dry run.
func init() {
	releaseCmd.Flags().BoolVar(&releaseGoForLaunch, "we-are-go-for-launch", false, i18n.T("cmd.build.release.flag.go_for_launch"))
	releaseCmd.Flags().StringVar(&releaseVersion, "version", "", i18n.T("cmd.build.release.flag.version"))
	releaseCmd.Flags().BoolVar(&releaseDraft, "draft", false, i18n.T("cmd.build.release.flag.draft"))
	releaseCmd.Flags().BoolVar(&releasePrerelease, "prerelease", false, i18n.T("cmd.build.release.flag.prerelease"))
}
|
|
||||||
|
|
||||||
// AddReleaseCommand adds the release subcommand to the given build command.
func AddReleaseCommand(buildCmd *cli.Command) {
	buildCmd.AddCommand(releaseCmd)
}
|
|
||||||
|
|
||||||
// runRelease executes the full release workflow: build + archive + checksum + publish.
//
// A release config must exist in the current directory; CLI overrides
// (version, draft, prerelease) are applied to the loaded config before
// release.Run performs the pipeline. When dryRun is true nothing is
// published and the per-publisher summary is skipped.
func runRelease(ctx context.Context, dryRun bool, version string, draft, prerelease bool) error {
	// Get current directory
	projectDir, err := os.Getwd()
	if err != nil {
		return core.E("release", "get working directory", err)
	}

	// Check for release config; print a hint on how to create one.
	if !release.ConfigExists(projectDir) {
		cli.Print("%s %s\n",
			buildErrorStyle.Render(i18n.Label("error")),
			i18n.T("cmd.build.release.error.no_config"),
		)
		cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.hint.create_config")))
		return core.E("release", "config not found", nil)
	}

	// Load configuration
	cfg, err := release.LoadConfig(projectDir)
	if err != nil {
		return core.E("release", "load config", err)
	}

	// Apply CLI overrides
	if version != "" {
		cfg.SetVersion(version)
	}

	// Apply draft/prerelease overrides to all publishers
	if draft || prerelease {
		for i := range cfg.Publishers {
			if draft {
				cfg.Publishers[i].Draft = true
			}
			if prerelease {
				cfg.Publishers[i].Prerelease = true
			}
		}
	}

	// Print header
	cli.Print("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.release.label.release")), i18n.T("cmd.build.release.building_and_publishing"))
	if dryRun {
		cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.dry_run_hint")))
	}
	cli.Blank()

	// Run full release (build + archive + checksum + publish)
	rel, err := release.Run(ctx, cfg, dryRun)
	if err != nil {
		return err
	}

	// Print summary
	cli.Blank()
	cli.Print("%s %s\n", buildSuccessStyle.Render(i18n.T("i18n.done.pass")), i18n.T("cmd.build.release.completed"))
	cli.Print(" %s %s\n", i18n.Label("version"), buildTargetStyle.Render(rel.Version))
	cli.Print(" %s %d\n", i18n.T("cmd.build.release.label.artifacts"), len(rel.Artifacts))

	// Publisher summary only applies when something was actually published.
	if !dryRun {
		for _, pub := range cfg.Publishers {
			cli.Print(" %s %s\n", i18n.T("cmd.build.release.label.published"), buildTargetStyle.Render(pub.Type))
		}
	}

	return nil
}
|
|
||||||
|
|
@ -1,82 +0,0 @@
|
||||||
// cmd_sdk.go implements SDK generation from OpenAPI specifications.
|
|
||||||
//
|
|
||||||
// Generates typed API clients for TypeScript, Python, Go, and PHP
|
|
||||||
// from OpenAPI/Swagger specifications.
|
|
||||||
|
|
||||||
package buildcmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/sdk"
|
|
||||||
"forge.lthn.ai/core/go/pkg/i18n"
|
|
||||||
)
|
|
||||||
|
|
||||||
// runBuildSDK handles the `core build sdk` command.
|
|
||||||
func runBuildSDK(specPath, lang, version string, dryRun bool) error {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
projectDir, err := os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load config
|
|
||||||
config := sdk.DefaultConfig()
|
|
||||||
if specPath != "" {
|
|
||||||
config.Spec = specPath
|
|
||||||
}
|
|
||||||
|
|
||||||
s := sdk.New(projectDir, config)
|
|
||||||
if version != "" {
|
|
||||||
s.SetVersion(version)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.sdk.label")), i18n.T("cmd.build.sdk.generating"))
|
|
||||||
if dryRun {
|
|
||||||
fmt.Printf(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.sdk.dry_run_mode")))
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
|
|
||||||
// Detect spec
|
|
||||||
detectedSpec, err := s.DetectSpec()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s %s\n", i18n.T("common.label.spec"), buildTargetStyle.Render(detectedSpec))
|
|
||||||
|
|
||||||
if dryRun {
|
|
||||||
if lang != "" {
|
|
||||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.language_label"), buildTargetStyle.Render(lang))
|
|
||||||
} else {
|
|
||||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.languages_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", ")))
|
|
||||||
}
|
|
||||||
fmt.Println()
|
|
||||||
fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("cmd.build.label.ok")), i18n.T("cmd.build.sdk.would_generate"))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if lang != "" {
|
|
||||||
// Generate single language
|
|
||||||
if err := s.GenerateLanguage(ctx, lang); err != nil {
|
|
||||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(lang))
|
|
||||||
} else {
|
|
||||||
// Generate all
|
|
||||||
if err := s.Generate(ctx); err != nil {
|
|
||||||
fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", ")))
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println()
|
|
||||||
fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.sdk.complete"))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,7 +0,0 @@
|
||||||
module {{.AppName}}
|
|
||||||
|
|
||||||
go 1.21
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/wailsapp/wails/v3 v3.0.0-alpha.8
|
|
||||||
)
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
// This file ensures the 'html' directory is correctly embedded by the Go compiler.
|
|
||||||
|
|
@ -1,25 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"embed"
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/wailsapp/wails/v3/pkg/application"
|
|
||||||
)
|
|
||||||
|
|
||||||
// assets holds the embedded frontend files; the directive below pulls in
// everything under the html/ directory (including dotfiles, via "all:").
//
//go:embed all:html
var assets embed.FS
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
app := application.New(application.Options{
|
|
||||||
Name: "{{.AppName}}",
|
|
||||||
Description: "A web application enclaved by Core.",
|
|
||||||
Assets: application.AssetOptions{
|
|
||||||
FS: assets,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
if err := app.Run(); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,253 +0,0 @@
|
||||||
// Package builders provides build implementations for different project types.
|
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CPPBuilder implements the Builder interface for C++ projects using CMake + Conan.
// It wraps the Makefile-based build system from the .core/build submodule.
// The zero value is ready to use; NewCPPBuilder exists for convenience.
type CPPBuilder struct{}
|
|
||||||
|
|
||||||
// NewCPPBuilder creates a new CPPBuilder instance.
|
|
||||||
func NewCPPBuilder() *CPPBuilder {
|
|
||||||
return &CPPBuilder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the builder's identifier.
|
|
||||||
func (b *CPPBuilder) Name() string {
|
|
||||||
return "cpp"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect checks if this builder can handle the project in the given directory.
|
|
||||||
func (b *CPPBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
|
||||||
return build.IsCPPProject(fs, dir), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build compiles the C++ project using Make targets.
|
|
||||||
// The build flow is: make configure → make build → make package.
|
|
||||||
// Cross-compilation is handled via Conan profiles specified in .core/build.yaml.
|
|
||||||
func (b *CPPBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
|
||||||
if cfg == nil {
|
|
||||||
return nil, fmt.Errorf("builders.CPPBuilder.Build: config is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate make is available
|
|
||||||
if err := b.validateMake(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// For C++ projects, the Makefile handles everything.
|
|
||||||
// We don't iterate per-target like Go — the Makefile's configure + build
|
|
||||||
// produces binaries for the host platform, and cross-compilation uses
|
|
||||||
// named Conan profiles (e.g., make gcc-linux-armv8).
|
|
||||||
if len(targets) == 0 {
|
|
||||||
// Default to host platform
|
|
||||||
targets = []build.Target{{OS: runtime.GOOS, Arch: runtime.GOARCH}}
|
|
||||||
}
|
|
||||||
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
|
|
||||||
for _, target := range targets {
|
|
||||||
built, err := b.buildTarget(ctx, cfg, target)
|
|
||||||
if err != nil {
|
|
||||||
return artifacts, fmt.Errorf("builders.CPPBuilder.Build: %w", err)
|
|
||||||
}
|
|
||||||
artifacts = append(artifacts, built...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTarget compiles for a single target platform.
|
|
||||||
func (b *CPPBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
|
||||||
// Determine if this is a cross-compile or host build
|
|
||||||
isHostBuild := target.OS == runtime.GOOS && target.Arch == runtime.GOARCH
|
|
||||||
|
|
||||||
if isHostBuild {
|
|
||||||
return b.buildHost(ctx, cfg, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.buildCross(ctx, cfg, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildHost runs the standard make configure → make build → make package flow.
|
|
||||||
func (b *CPPBuilder) buildHost(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
|
||||||
fmt.Printf("Building C++ project for %s/%s (host)\n", target.OS, target.Arch)
|
|
||||||
|
|
||||||
// Step 1: Configure (runs conan install + cmake configure)
|
|
||||||
if err := b.runMake(ctx, cfg.ProjectDir, "configure"); err != nil {
|
|
||||||
return nil, fmt.Errorf("configure failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 2: Build
|
|
||||||
if err := b.runMake(ctx, cfg.ProjectDir, "build"); err != nil {
|
|
||||||
return nil, fmt.Errorf("build failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 3: Package
|
|
||||||
if err := b.runMake(ctx, cfg.ProjectDir, "package"); err != nil {
|
|
||||||
return nil, fmt.Errorf("package failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Discover artifacts from build/packages/
|
|
||||||
return b.findArtifacts(cfg.FS, cfg.ProjectDir, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildCross runs a cross-compilation using a Conan profile name.
|
|
||||||
// The Makefile supports profile targets like: make gcc-linux-armv8
|
|
||||||
func (b *CPPBuilder) buildCross(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
|
||||||
// Map target to a Conan profile name
|
|
||||||
profile := b.targetToProfile(target)
|
|
||||||
if profile == "" {
|
|
||||||
return nil, fmt.Errorf("no Conan profile mapped for target %s/%s", target.OS, target.Arch)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("Building C++ project for %s/%s (cross: %s)\n", target.OS, target.Arch, profile)
|
|
||||||
|
|
||||||
// The Makefile exposes each profile as a top-level target
|
|
||||||
if err := b.runMake(ctx, cfg.ProjectDir, profile); err != nil {
|
|
||||||
return nil, fmt.Errorf("cross-compile for %s failed: %w", profile, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.findArtifacts(cfg.FS, cfg.ProjectDir, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// runMake executes a make target in the project directory.
|
|
||||||
func (b *CPPBuilder) runMake(ctx context.Context, projectDir string, target string) error {
|
|
||||||
cmd := exec.CommandContext(ctx, "make", target)
|
|
||||||
cmd.Dir = projectDir
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
cmd.Env = os.Environ()
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return fmt.Errorf("make %s: %w", target, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findArtifacts searches for built packages in build/packages/.
|
|
||||||
func (b *CPPBuilder) findArtifacts(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) {
|
|
||||||
packagesDir := filepath.Join(projectDir, "build", "packages")
|
|
||||||
|
|
||||||
if !fs.IsDir(packagesDir) {
|
|
||||||
// Fall back to searching build/release/src/ for raw binaries
|
|
||||||
return b.findBinaries(fs, projectDir, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
entries, err := fs.List(packagesDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list packages directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
name := entry.Name()
|
|
||||||
// Skip checksum files and hidden files
|
|
||||||
if strings.HasSuffix(name, ".sha256") || strings.HasPrefix(name, ".") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: filepath.Join(packagesDir, name),
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findBinaries searches for compiled binaries in build/release/src/.
|
|
||||||
func (b *CPPBuilder) findBinaries(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) {
|
|
||||||
binDir := filepath.Join(projectDir, "build", "release", "src")
|
|
||||||
|
|
||||||
if !fs.IsDir(binDir) {
|
|
||||||
return nil, fmt.Errorf("no build output found in %s", binDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
entries, err := fs.List(binDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list build directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
name := entry.Name()
|
|
||||||
// Skip non-executable files (libraries, cmake files, etc.)
|
|
||||||
if strings.HasSuffix(name, ".a") || strings.HasSuffix(name, ".o") ||
|
|
||||||
strings.HasSuffix(name, ".cmake") || strings.HasPrefix(name, ".") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fullPath := filepath.Join(binDir, name)
|
|
||||||
|
|
||||||
// On Unix, check if file is executable
|
|
||||||
if target.OS != "windows" {
|
|
||||||
info, err := os.Stat(fullPath)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if info.Mode()&0111 == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: fullPath,
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// targetToProfile maps a build target to a Conan cross-compilation profile name.
|
|
||||||
// Profile names match those in .core/build/cmake/profiles/.
|
|
||||||
func (b *CPPBuilder) targetToProfile(target build.Target) string {
|
|
||||||
key := target.OS + "/" + target.Arch
|
|
||||||
profiles := map[string]string{
|
|
||||||
"linux/amd64": "gcc-linux-x86_64",
|
|
||||||
"linux/x86_64": "gcc-linux-x86_64",
|
|
||||||
"linux/arm64": "gcc-linux-armv8",
|
|
||||||
"linux/armv8": "gcc-linux-armv8",
|
|
||||||
"darwin/arm64": "apple-clang-armv8",
|
|
||||||
"darwin/armv8": "apple-clang-armv8",
|
|
||||||
"darwin/amd64": "apple-clang-x86_64",
|
|
||||||
"darwin/x86_64": "apple-clang-x86_64",
|
|
||||||
"windows/amd64": "msvc-194-x86_64",
|
|
||||||
"windows/x86_64": "msvc-194-x86_64",
|
|
||||||
}
|
|
||||||
|
|
||||||
return profiles[key]
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateMake checks if make is available.
|
|
||||||
func (b *CPPBuilder) validateMake() error {
|
|
||||||
if _, err := exec.LookPath("make"); err != nil {
|
|
||||||
return fmt.Errorf("cpp: make not found. Install build-essential (Linux) or Xcode Command Line Tools (macOS)")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure CPPBuilder implements the Builder interface.
// This fails at compile time if the method set drifts from build.Builder.
var _ build.Builder = (*CPPBuilder)(nil)
|
|
||||||
|
|
@ -1,149 +0,0 @@
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCPPBuilder_Name_Good(t *testing.T) {
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
assert.Equal(t, "cpp", builder.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCPPBuilder_Detect_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
|
|
||||||
t.Run("detects C++ project with CMakeLists.txt", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "CMakeLists.txt"), []byte("cmake_minimum_required(VERSION 3.16)"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for non-C++ project", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCPPBuilder_Build_Bad(t *testing.T) {
|
|
||||||
t.Run("returns error for nil config", func(t *testing.T) {
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
artifacts, err := builder.Build(nil, nil, []build.Target{{OS: "linux", Arch: "amd64"}})
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, artifacts)
|
|
||||||
assert.Contains(t, err.Error(), "config is nil")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCPPBuilder_TargetToProfile_Good(t *testing.T) {
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
os, arch string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"linux", "amd64", "gcc-linux-x86_64"},
|
|
||||||
{"linux", "x86_64", "gcc-linux-x86_64"},
|
|
||||||
{"linux", "arm64", "gcc-linux-armv8"},
|
|
||||||
{"darwin", "arm64", "apple-clang-armv8"},
|
|
||||||
{"darwin", "amd64", "apple-clang-x86_64"},
|
|
||||||
{"windows", "amd64", "msvc-194-x86_64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.os+"/"+tt.arch, func(t *testing.T) {
|
|
||||||
profile := builder.targetToProfile(build.Target{OS: tt.os, Arch: tt.arch})
|
|
||||||
assert.Equal(t, tt.expected, profile)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCPPBuilder_TargetToProfile_Bad(t *testing.T) {
|
|
||||||
builder := NewCPPBuilder()
|
|
||||||
|
|
||||||
t.Run("returns empty for unknown target", func(t *testing.T) {
|
|
||||||
profile := builder.targetToProfile(build.Target{OS: "plan9", Arch: "mips"})
|
|
||||||
assert.Empty(t, profile)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestCPPBuilder_FindArtifacts_Good exercises artifact discovery:
// packaged output under build/packages/ is preferred (checksum side-files
// excluded), with a fallback to executable files under build/release/src/
// when no package directory exists.
func TestCPPBuilder_FindArtifacts_Good(t *testing.T) {
	fs := io.Local

	t.Run("finds packages in build/packages", func(t *testing.T) {
		dir := t.TempDir()
		packagesDir := filepath.Join(dir, "build", "packages")
		require.NoError(t, os.MkdirAll(packagesDir, 0755))

		// Create mock package files
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz"), []byte("pkg"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz.sha256"), []byte("checksum"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.rpm"), []byte("rpm"), 0644))

		builder := NewCPPBuilder()
		target := build.Target{OS: "linux", Arch: "amd64"}
		artifacts, err := builder.findArtifacts(fs, dir, target)
		require.NoError(t, err)

		// Should find tar.xz and rpm but not sha256
		assert.Len(t, artifacts, 2)
		for _, a := range artifacts {
			assert.Equal(t, "linux", a.OS)
			assert.Equal(t, "amd64", a.Arch)
			assert.False(t, filepath.Ext(a.Path) == ".sha256")
		}
	})

	t.Run("falls back to binaries in build/release/src", func(t *testing.T) {
		dir := t.TempDir()
		binDir := filepath.Join(dir, "build", "release", "src")
		require.NoError(t, os.MkdirAll(binDir, 0755))

		// Create mock binary (executable)
		binPath := filepath.Join(binDir, "test-daemon")
		require.NoError(t, os.WriteFile(binPath, []byte("binary"), 0755))

		// Create a library (should be skipped)
		require.NoError(t, os.WriteFile(filepath.Join(binDir, "libcrypto.a"), []byte("lib"), 0644))

		builder := NewCPPBuilder()
		target := build.Target{OS: "linux", Arch: "amd64"}
		artifacts, err := builder.findArtifacts(fs, dir, target)
		require.NoError(t, err)

		// Should find the executable but not the library
		assert.Len(t, artifacts, 1)
		assert.Contains(t, artifacts[0].Path, "test-daemon")
	})
}
|
|
||||||
|
|
||||||
func TestCPPBuilder_Interface_Good(t *testing.T) {
|
|
||||||
var _ build.Builder = (*CPPBuilder)(nil)
|
|
||||||
var _ build.Builder = NewCPPBuilder()
|
|
||||||
}
|
|
||||||
|
|
@ -1,215 +0,0 @@
|
||||||
// Package builders provides build implementations for different project types.
|
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DockerBuilder builds Docker images via `docker buildx` (multi-platform
// builds, registry push, or OCI tarball output). The zero value is usable.
type DockerBuilder struct{}
|
|
||||||
|
|
||||||
// NewDockerBuilder creates a new Docker builder.
|
|
||||||
func NewDockerBuilder() *DockerBuilder {
|
|
||||||
return &DockerBuilder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the builder's identifier.
|
|
||||||
func (b *DockerBuilder) Name() string {
|
|
||||||
return "docker"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect checks if a Dockerfile exists in the directory.
|
|
||||||
func (b *DockerBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
|
||||||
dockerfilePath := filepath.Join(dir, "Dockerfile")
|
|
||||||
if fs.IsFile(dockerfilePath) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build builds Docker images for the specified targets.
|
|
||||||
func (b *DockerBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
|
||||||
// Validate docker CLI is available
|
|
||||||
if err := b.validateDockerCli(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure buildx is available
|
|
||||||
if err := b.ensureBuildx(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine Dockerfile path
|
|
||||||
dockerfile := cfg.Dockerfile
|
|
||||||
if dockerfile == "" {
|
|
||||||
dockerfile = filepath.Join(cfg.ProjectDir, "Dockerfile")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate Dockerfile exists
|
|
||||||
if !cfg.FS.IsFile(dockerfile) {
|
|
||||||
return nil, fmt.Errorf("docker.Build: Dockerfile not found: %s", dockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine image name
|
|
||||||
imageName := cfg.Image
|
|
||||||
if imageName == "" {
|
|
||||||
imageName = cfg.Name
|
|
||||||
}
|
|
||||||
if imageName == "" {
|
|
||||||
imageName = filepath.Base(cfg.ProjectDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build platform string from targets
|
|
||||||
var platforms []string
|
|
||||||
for _, t := range targets {
|
|
||||||
platforms = append(platforms, fmt.Sprintf("%s/%s", t.OS, t.Arch))
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no targets specified, use current platform
|
|
||||||
if len(platforms) == 0 {
|
|
||||||
platforms = []string{"linux/amd64"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine registry
|
|
||||||
registry := cfg.Registry
|
|
||||||
if registry == "" {
|
|
||||||
registry = "ghcr.io"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine tags
|
|
||||||
tags := cfg.Tags
|
|
||||||
if len(tags) == 0 {
|
|
||||||
tags = []string{"latest"}
|
|
||||||
if cfg.Version != "" {
|
|
||||||
tags = append(tags, cfg.Version)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build full image references
|
|
||||||
var imageRefs []string
|
|
||||||
for _, tag := range tags {
|
|
||||||
// Expand version template
|
|
||||||
expandedTag := strings.ReplaceAll(tag, "{{.Version}}", cfg.Version)
|
|
||||||
expandedTag = strings.ReplaceAll(expandedTag, "{{Version}}", cfg.Version)
|
|
||||||
|
|
||||||
if registry != "" {
|
|
||||||
imageRefs = append(imageRefs, fmt.Sprintf("%s/%s:%s", registry, imageName, expandedTag))
|
|
||||||
} else {
|
|
||||||
imageRefs = append(imageRefs, fmt.Sprintf("%s:%s", imageName, expandedTag))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build the docker buildx command
|
|
||||||
args := []string{"buildx", "build"}
|
|
||||||
|
|
||||||
// Multi-platform support
|
|
||||||
args = append(args, "--platform", strings.Join(platforms, ","))
|
|
||||||
|
|
||||||
// Add all tags
|
|
||||||
for _, ref := range imageRefs {
|
|
||||||
args = append(args, "-t", ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dockerfile path
|
|
||||||
args = append(args, "-f", dockerfile)
|
|
||||||
|
|
||||||
// Build arguments
|
|
||||||
for k, v := range cfg.BuildArgs {
|
|
||||||
expandedValue := strings.ReplaceAll(v, "{{.Version}}", cfg.Version)
|
|
||||||
expandedValue = strings.ReplaceAll(expandedValue, "{{Version}}", cfg.Version)
|
|
||||||
args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, expandedValue))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Always add VERSION build arg if version is set
|
|
||||||
if cfg.Version != "" {
|
|
||||||
args = append(args, "--build-arg", fmt.Sprintf("VERSION=%s", cfg.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output to local docker images or push
|
|
||||||
if cfg.Push {
|
|
||||||
args = append(args, "--push")
|
|
||||||
} else {
|
|
||||||
// For multi-platform builds without push, we need to load or output somewhere
|
|
||||||
if len(platforms) == 1 {
|
|
||||||
args = append(args, "--load")
|
|
||||||
} else {
|
|
||||||
// Multi-platform builds can't use --load, output to tarball
|
|
||||||
outputPath := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s.tar", imageName))
|
|
||||||
args = append(args, "--output", fmt.Sprintf("type=oci,dest=%s", outputPath))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build context (project directory)
|
|
||||||
args = append(args, cfg.ProjectDir)
|
|
||||||
|
|
||||||
// Create output directory
|
|
||||||
if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
|
|
||||||
return nil, fmt.Errorf("docker.Build: failed to create output directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute build
|
|
||||||
cmd := exec.CommandContext(ctx, "docker", args...)
|
|
||||||
cmd.Dir = cfg.ProjectDir
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
|
|
||||||
fmt.Printf("Building Docker image: %s\n", imageName)
|
|
||||||
fmt.Printf(" Platforms: %s\n", strings.Join(platforms, ", "))
|
|
||||||
fmt.Printf(" Tags: %s\n", strings.Join(imageRefs, ", "))
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("docker.Build: buildx build failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create artifacts for each platform
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
for _, t := range targets {
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: imageRefs[0], // Primary image reference
|
|
||||||
OS: t.OS,
|
|
||||||
Arch: t.Arch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateDockerCli checks if the docker CLI is available.
|
|
||||||
func (b *DockerBuilder) validateDockerCli() error {
|
|
||||||
cmd := exec.Command("docker", "--version")
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return fmt.Errorf("docker: docker CLI not found. Install it from https://docs.docker.com/get-docker/")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureBuildx ensures docker buildx is available and has a builder.
|
|
||||||
func (b *DockerBuilder) ensureBuildx(ctx context.Context) error {
|
|
||||||
// Check if buildx is available
|
|
||||||
cmd := exec.CommandContext(ctx, "docker", "buildx", "version")
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return fmt.Errorf("docker: buildx is not available. Install it from https://docs.docker.com/buildx/working-with-buildx/")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if we have a builder, create one if not
|
|
||||||
cmd = exec.CommandContext(ctx, "docker", "buildx", "inspect", "--bootstrap")
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
// Try to create a builder
|
|
||||||
cmd = exec.CommandContext(ctx, "docker", "buildx", "create", "--use", "--bootstrap")
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return fmt.Errorf("docker: failed to create buildx builder: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,129 +0,0 @@
|
||||||
// Package builders provides build implementations for different project types.
|
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GoBuilder implements the Builder interface for Go projects
// (detected via go.mod or wails.json — see Detect). The zero value is usable.
type GoBuilder struct{}
|
|
||||||
|
|
||||||
// NewGoBuilder creates a new GoBuilder instance.
|
|
||||||
func NewGoBuilder() *GoBuilder {
|
|
||||||
return &GoBuilder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the builder's identifier.
|
|
||||||
func (b *GoBuilder) Name() string {
|
|
||||||
return "go"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect checks if this builder can handle the project in the given directory.
|
|
||||||
// Uses IsGoProject from the build package which checks for go.mod or wails.json.
|
|
||||||
func (b *GoBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
|
||||||
return build.IsGoProject(fs, dir), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build compiles the Go project for the specified targets.
|
|
||||||
// It sets GOOS, GOARCH, and CGO_ENABLED environment variables,
|
|
||||||
// applies ldflags and trimpath, and runs go build.
|
|
||||||
func (b *GoBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
|
||||||
if cfg == nil {
|
|
||||||
return nil, fmt.Errorf("builders.GoBuilder.Build: config is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(targets) == 0 {
|
|
||||||
return nil, fmt.Errorf("builders.GoBuilder.Build: no targets specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure output directory exists
|
|
||||||
if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
|
|
||||||
return nil, fmt.Errorf("builders.GoBuilder.Build: failed to create output directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
|
|
||||||
for _, target := range targets {
|
|
||||||
artifact, err := b.buildTarget(ctx, cfg, target)
|
|
||||||
if err != nil {
|
|
||||||
return artifacts, fmt.Errorf("builders.GoBuilder.Build: failed to build %s: %w", target.String(), err)
|
|
||||||
}
|
|
||||||
artifacts = append(artifacts, artifact)
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildTarget compiles for a single target platform.
|
|
||||||
func (b *GoBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) {
|
|
||||||
// Determine output binary name
|
|
||||||
binaryName := cfg.Name
|
|
||||||
if binaryName == "" {
|
|
||||||
binaryName = filepath.Base(cfg.ProjectDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add .exe extension for Windows
|
|
||||||
if target.OS == "windows" && !strings.HasSuffix(binaryName, ".exe") {
|
|
||||||
binaryName += ".exe"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create platform-specific output path: output/os_arch/binary
|
|
||||||
platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
|
|
||||||
if err := cfg.FS.EnsureDir(platformDir); err != nil {
|
|
||||||
return build.Artifact{}, fmt.Errorf("failed to create platform directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
outputPath := filepath.Join(platformDir, binaryName)
|
|
||||||
|
|
||||||
// Build the go build arguments
|
|
||||||
args := []string{"build"}
|
|
||||||
|
|
||||||
// Add trimpath flag
|
|
||||||
args = append(args, "-trimpath")
|
|
||||||
|
|
||||||
// Add ldflags if specified
|
|
||||||
if len(cfg.LDFlags) > 0 {
|
|
||||||
ldflags := strings.Join(cfg.LDFlags, " ")
|
|
||||||
args = append(args, "-ldflags", ldflags)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add output path
|
|
||||||
args = append(args, "-o", outputPath)
|
|
||||||
|
|
||||||
// Add the project directory as the build target (current directory)
|
|
||||||
args = append(args, ".")
|
|
||||||
|
|
||||||
// Create the command
|
|
||||||
cmd := exec.CommandContext(ctx, "go", args...)
|
|
||||||
cmd.Dir = cfg.ProjectDir
|
|
||||||
|
|
||||||
// Set up environment
|
|
||||||
env := os.Environ()
|
|
||||||
env = append(env, fmt.Sprintf("GOOS=%s", target.OS))
|
|
||||||
env = append(env, fmt.Sprintf("GOARCH=%s", target.Arch))
|
|
||||||
env = append(env, "CGO_ENABLED=0") // CGO disabled by default for cross-compilation
|
|
||||||
cmd.Env = env
|
|
||||||
|
|
||||||
// Capture output for error messages
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return build.Artifact{}, fmt.Errorf("go build failed: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
return build.Artifact{
|
|
||||||
Path: outputPath,
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile-time assertion that *GoBuilder satisfies the build.Builder
// interface; fails the build immediately if the method set drifts.
var _ build.Builder = (*GoBuilder)(nil)
|
|
||||||
|
|
@ -1,398 +0,0 @@
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupGoTestProject creates a minimal Go project for testing.
|
|
||||||
func setupGoTestProject(t *testing.T) string {
|
|
||||||
t.Helper()
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
// Create a minimal go.mod
|
|
||||||
goMod := `module testproject
|
|
||||||
|
|
||||||
go 1.21
|
|
||||||
`
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create a minimal main.go
|
|
||||||
mainGo := `package main
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
println("hello")
|
|
||||||
}
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGoBuilder_Name_Good(t *testing.T) {
|
|
||||||
builder := NewGoBuilder()
|
|
||||||
assert.Equal(t, "go", builder.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGoBuilder_Detect_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("detects Go project with go.mod", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewGoBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects Wails project", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewGoBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for non-Go project", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
// Create a Node.js project instead
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewGoBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
builder := NewGoBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestGoBuilder_Build_Good covers the happy paths of GoBuilder.Build:
// single- and multi-target builds, Windows .exe naming, default binary
// naming, ldflags pass-through, and output-directory creation. All subtests
// shell out to the real `go` toolchain, so the whole test is skipped in
// -short mode.
func TestGoBuilder_Build_Good(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	t.Run("builds for current platform", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "testbinary",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify artifact properties
		artifact := artifacts[0]
		assert.Equal(t, runtime.GOOS, artifact.OS)
		assert.Equal(t, runtime.GOARCH, artifact.Arch)

		// Verify binary was created
		assert.FileExists(t, artifact.Path)

		// Verify the path is in the expected location
		expectedName := "testbinary"
		if runtime.GOOS == "windows" {
			expectedName += ".exe"
		}
		assert.Contains(t, artifact.Path, expectedName)
	})

	t.Run("builds multiple targets", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "multitest",
		}
		// Two linux targets: cross-compilation works everywhere with CGO off.
		targets := []build.Target{
			{OS: "linux", Arch: "amd64"},
			{OS: "linux", Arch: "arm64"},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 2)

		// Verify both artifacts were created
		for i, artifact := range artifacts {
			assert.Equal(t, targets[i].OS, artifact.OS)
			assert.Equal(t, targets[i].Arch, artifact.Arch)
			assert.FileExists(t, artifact.Path)
		}
	})

	t.Run("adds .exe extension for Windows", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "wintest",
		}
		targets := []build.Target{
			{OS: "windows", Arch: "amd64"},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify .exe extension
		assert.True(t, filepath.Ext(artifacts[0].Path) == ".exe")
		assert.FileExists(t, artifacts[0].Path)
	})

	t.Run("uses directory name when Name not specified", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "", // Empty name
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Binary should use the project directory base name
		baseName := filepath.Base(projectDir)
		if runtime.GOOS == "windows" {
			baseName += ".exe"
		}
		assert.Contains(t, artifacts[0].Path, baseName)
	})

	t.Run("applies ldflags", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "ldflagstest",
			LDFlags:    []string{"-s", "-w"}, // Strip debug info
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// Only asserts the build succeeds with ldflags present; it does not
		// inspect the binary for stripped symbols.
		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)
		assert.FileExists(t, artifacts[0].Path)
	})

	t.Run("creates output directory if missing", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := filepath.Join(t.TempDir(), "nested", "output")

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "nestedtest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)
		assert.FileExists(t, artifacts[0].Path)
		assert.DirExists(t, outputDir)
	})
}
|
|
||||||
|
|
||||||
// TestGoBuilder_Build_Bad covers the failure modes of GoBuilder.Build:
// nil config, empty target list, missing project directory, uncompilable
// source, partial failure across targets, and context cancellation.
// Subtests that invoke the `go` toolchain are skipped in -short mode.
func TestGoBuilder_Build_Bad(t *testing.T) {
	t.Run("returns error for nil config", func(t *testing.T) {
		builder := NewGoBuilder()

		artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "config is nil")
	})

	t.Run("returns error for empty targets", func(t *testing.T) {
		projectDir := setupGoTestProject(t)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}

		artifacts, err := builder.Build(context.Background(), cfg, []build.Target{})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "no targets specified")
	})

	t.Run("returns error for invalid project directory", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: "/nonexistent/path",
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})

	t.Run("returns error for invalid Go code", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		dir := t.TempDir()

		// Create go.mod
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test\n\ngo 1.21"), 0644)
		require.NoError(t, err)

		// Create invalid Go code
		err = os.WriteFile(filepath.Join(dir, "main.go"), []byte("this is not valid go code"), 0644)
		require.NoError(t, err)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: dir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "go build failed")
		assert.Empty(t, artifacts)
	})

	t.Run("returns partial artifacts on partial failure", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		// Create a project that will fail on one target
		// Using an invalid arch for linux
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "partialtest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH}, // This should succeed
			{OS: "linux", Arch: "invalid_arch"},      // This should fail
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		// Should return error for the failed build
		assert.Error(t, err)
		// Should have the successful artifact
		assert.Len(t, artifacts, 1)
	})

	t.Run("respects context cancellation", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		projectDir := setupGoTestProject(t)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "canceltest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// Create an already cancelled context
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		artifacts, err := builder.Build(ctx, cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})
}
|
|
||||||
|
|
||||||
func TestGoBuilder_Interface_Good(t *testing.T) {
|
|
||||||
// Verify GoBuilder implements Builder interface
|
|
||||||
var _ build.Builder = (*GoBuilder)(nil)
|
|
||||||
var _ build.Builder = NewGoBuilder()
|
|
||||||
}
|
|
||||||
|
|
@ -1,270 +0,0 @@
|
||||||
// Package builders provides build implementations for different project types.
|
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LinuxKitBuilder builds LinuxKit images.
// It is stateless and shells out to the external `linuxkit` CLI; see
// validateLinuxKitCli for the availability check.
type LinuxKitBuilder struct{}
|
|
||||||
|
|
||||||
// NewLinuxKitBuilder creates a new LinuxKit builder.
|
|
||||||
func NewLinuxKitBuilder() *LinuxKitBuilder {
|
|
||||||
return &LinuxKitBuilder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the builder's identifier.
|
|
||||||
func (b *LinuxKitBuilder) Name() string {
|
|
||||||
return "linuxkit"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect checks if a linuxkit.yml or .yml config exists in the directory.
|
|
||||||
func (b *LinuxKitBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
|
||||||
// Check for linuxkit.yml
|
|
||||||
if fs.IsFile(filepath.Join(dir, "linuxkit.yml")) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
// Check for .core/linuxkit/
|
|
||||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
|
||||||
if fs.IsDir(lkDir) {
|
|
||||||
entries, err := fs.List(lkDir)
|
|
||||||
if err == nil {
|
|
||||||
for _, entry := range entries {
|
|
||||||
if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build builds LinuxKit images for the specified targets.
//
// Config resolution order: cfg.LinuxKitConfig, then <ProjectDir>/linuxkit.yml,
// then the first *.yml under <ProjectDir>/.core/linuxkit/. Output formats
// default to "qcow2-bios" and the output directory to <ProjectDir>/dist.
// Non-Linux targets are skipped with a console message; one image is built
// per (linux target, format) pair. CLI output is streamed to this process's
// stdout/stderr.
func (b *LinuxKitBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	// Validate linuxkit CLI is available
	if err := b.validateLinuxKitCli(); err != nil {
		return nil, err
	}

	// Determine config file path
	configPath := cfg.LinuxKitConfig
	if configPath == "" {
		// Auto-detect
		if cfg.FS.IsFile(filepath.Join(cfg.ProjectDir, "linuxkit.yml")) {
			configPath = filepath.Join(cfg.ProjectDir, "linuxkit.yml")
		} else {
			// Look in .core/linuxkit/ — first .yml wins (directory order).
			lkDir := filepath.Join(cfg.ProjectDir, ".core", "linuxkit")
			if cfg.FS.IsDir(lkDir) {
				entries, err := cfg.FS.List(lkDir)
				if err == nil {
					for _, entry := range entries {
						if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") {
							configPath = filepath.Join(lkDir, entry.Name())
							break
						}
					}
				}
			}
		}
	}

	if configPath == "" {
		return nil, fmt.Errorf("linuxkit.Build: no LinuxKit config file found. Specify with --config or create linuxkit.yml")
	}

	// Validate config file exists
	if !cfg.FS.IsFile(configPath) {
		return nil, fmt.Errorf("linuxkit.Build: config file not found: %s", configPath)
	}

	// Determine output formats
	formats := cfg.Formats
	if len(formats) == 0 {
		formats = []string{"qcow2-bios"} // Default to QEMU-compatible format
	}

	// Create output directory
	outputDir := cfg.OutputDir
	if outputDir == "" {
		outputDir = filepath.Join(cfg.ProjectDir, "dist")
	}
	if err := cfg.FS.EnsureDir(outputDir); err != nil {
		return nil, fmt.Errorf("linuxkit.Build: failed to create output directory: %w", err)
	}

	// Determine base name from config file or project name
	baseName := cfg.Name
	if baseName == "" {
		baseName = strings.TrimSuffix(filepath.Base(configPath), ".yml")
	}

	// If no targets, default to linux/amd64
	if len(targets) == 0 {
		targets = []build.Target{{OS: "linux", Arch: "amd64"}}
	}

	var artifacts []build.Artifact

	// Build for each target and format
	for _, target := range targets {
		// LinuxKit only supports Linux
		if target.OS != "linux" {
			fmt.Printf("Skipping %s/%s (LinuxKit only supports Linux)\n", target.OS, target.Arch)
			continue
		}

		for _, format := range formats {
			outputName := fmt.Sprintf("%s-%s", baseName, target.Arch)

			args := b.buildLinuxKitArgs(configPath, format, outputName, outputDir, target.Arch)

			cmd := exec.CommandContext(ctx, "linuxkit", args...)
			cmd.Dir = cfg.ProjectDir
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr

			fmt.Printf("Building LinuxKit image: %s (%s, %s)\n", outputName, format, target.Arch)

			if err := cmd.Run(); err != nil {
				return nil, fmt.Errorf("linuxkit.Build: build failed for %s/%s: %w", target.Arch, format, err)
			}

			// Determine the actual output file path
			artifactPath := b.getArtifactPath(outputDir, outputName, format)

			// Verify the artifact was created; linuxkit's naming varies by
			// version/firmware variant, so fall back to a directory search.
			if !cfg.FS.Exists(artifactPath) {
				// Try alternate naming conventions
				artifactPath = b.findArtifact(cfg.FS, outputDir, outputName, format)
				if artifactPath == "" {
					return nil, fmt.Errorf("linuxkit.Build: artifact not found after build: expected %s", b.getArtifactPath(outputDir, outputName, format))
				}
			}

			artifacts = append(artifacts, build.Artifact{
				Path: artifactPath,
				OS:   target.OS,
				Arch: target.Arch,
			})
		}
	}

	return artifacts, nil
}
|
|
||||||
|
|
||||||
// buildLinuxKitArgs builds the arguments for linuxkit build command.
|
|
||||||
func (b *LinuxKitBuilder) buildLinuxKitArgs(configPath, format, outputName, outputDir, arch string) []string {
|
|
||||||
args := []string{"build"}
|
|
||||||
|
|
||||||
// Output format
|
|
||||||
args = append(args, "--format", format)
|
|
||||||
|
|
||||||
// Output name
|
|
||||||
args = append(args, "--name", outputName)
|
|
||||||
|
|
||||||
// Output directory
|
|
||||||
args = append(args, "--dir", outputDir)
|
|
||||||
|
|
||||||
// Architecture (if not amd64)
|
|
||||||
if arch != "amd64" {
|
|
||||||
args = append(args, "--arch", arch)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config file
|
|
||||||
args = append(args, configPath)
|
|
||||||
|
|
||||||
return args
|
|
||||||
}
|
|
||||||
|
|
||||||
// getArtifactPath returns the expected path of the built artifact.
|
|
||||||
func (b *LinuxKitBuilder) getArtifactPath(outputDir, outputName, format string) string {
|
|
||||||
ext := b.getFormatExtension(format)
|
|
||||||
return filepath.Join(outputDir, outputName+ext)
|
|
||||||
}
|
|
||||||
|
|
||||||
// findArtifact searches for the built artifact with various naming conventions.
|
|
||||||
func (b *LinuxKitBuilder) findArtifact(fs io.Medium, outputDir, outputName, format string) string {
|
|
||||||
// LinuxKit can create files with different suffixes
|
|
||||||
extensions := []string{
|
|
||||||
b.getFormatExtension(format),
|
|
||||||
"-bios" + b.getFormatExtension(format),
|
|
||||||
"-efi" + b.getFormatExtension(format),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, ext := range extensions {
|
|
||||||
path := filepath.Join(outputDir, outputName+ext)
|
|
||||||
if fs.Exists(path) {
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find any file matching the output name
|
|
||||||
entries, err := fs.List(outputDir)
|
|
||||||
if err == nil {
|
|
||||||
for _, entry := range entries {
|
|
||||||
if strings.HasPrefix(entry.Name(), outputName) {
|
|
||||||
match := filepath.Join(outputDir, entry.Name())
|
|
||||||
// Return first match that looks like an image
|
|
||||||
ext := filepath.Ext(match)
|
|
||||||
if ext == ".iso" || ext == ".qcow2" || ext == ".raw" || ext == ".vmdk" || ext == ".vhd" {
|
|
||||||
return match
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFormatExtension returns the file extension for a LinuxKit output format.
|
|
||||||
func (b *LinuxKitBuilder) getFormatExtension(format string) string {
|
|
||||||
switch format {
|
|
||||||
case "iso", "iso-bios", "iso-efi":
|
|
||||||
return ".iso"
|
|
||||||
case "raw", "raw-bios", "raw-efi":
|
|
||||||
return ".raw"
|
|
||||||
case "qcow2", "qcow2-bios", "qcow2-efi":
|
|
||||||
return ".qcow2"
|
|
||||||
case "vmdk":
|
|
||||||
return ".vmdk"
|
|
||||||
case "vhd":
|
|
||||||
return ".vhd"
|
|
||||||
case "gcp":
|
|
||||||
return ".img.tar.gz"
|
|
||||||
case "aws":
|
|
||||||
return ".raw"
|
|
||||||
default:
|
|
||||||
return "." + strings.TrimSuffix(format, "-bios")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateLinuxKitCli checks that the linuxkit CLI appears to be installed,
// either on PATH or at a couple of common install locations.
//
// NOTE(review): Build later invokes the binary as plain "linuxkit", which
// resolves via PATH only — so finding it at one of the fallback paths below
// lets validation pass even though the subsequent exec could still fail.
// Consider returning the resolved path for the caller to use; verify
// against Build before changing.
func (b *LinuxKitBuilder) validateLinuxKitCli() error {
	// Check PATH first
	if _, err := exec.LookPath("linuxkit"); err == nil {
		return nil
	}

	// Check common locations
	paths := []string{
		"/usr/local/bin/linuxkit",
		"/opt/homebrew/bin/linuxkit",
	}

	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return nil
		}
	}

	return fmt.Errorf("linuxkit: linuxkit CLI not found. Install with: brew install linuxkit (macOS) or see https://github.com/linuxkit/linuxkit")
}
|
|
||||||
|
|
@ -1,275 +0,0 @@
|
||||||
// Package builders provides build implementations for different project types.
|
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TaskfileBuilder builds projects using Taskfile (https://taskfile.dev/).
// This is a generic builder that can handle any project type that has a
// Taskfile; it is stateless and shells out to the external `task` CLI.
type TaskfileBuilder struct{}
|
|
||||||
|
|
||||||
// NewTaskfileBuilder creates a new Taskfile builder.
|
|
||||||
func NewTaskfileBuilder() *TaskfileBuilder {
|
|
||||||
return &TaskfileBuilder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the builder's identifier.
|
|
||||||
func (b *TaskfileBuilder) Name() string {
|
|
||||||
return "taskfile"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect checks if a Taskfile exists in the directory.
|
|
||||||
func (b *TaskfileBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
|
||||||
// Check for Taskfile.yml, Taskfile.yaml, or Taskfile
|
|
||||||
taskfiles := []string{
|
|
||||||
"Taskfile.yml",
|
|
||||||
"Taskfile.yaml",
|
|
||||||
"Taskfile",
|
|
||||||
"taskfile.yml",
|
|
||||||
"taskfile.yaml",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tf := range taskfiles {
|
|
||||||
if fs.IsFile(filepath.Join(dir, tf)) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build runs the Taskfile build task for each target platform.
|
|
||||||
func (b *TaskfileBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
|
||||||
// Validate task CLI is available
|
|
||||||
if err := b.validateTaskCli(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create output directory
|
|
||||||
outputDir := cfg.OutputDir
|
|
||||||
if outputDir == "" {
|
|
||||||
outputDir = filepath.Join(cfg.ProjectDir, "dist")
|
|
||||||
}
|
|
||||||
if err := cfg.FS.EnsureDir(outputDir); err != nil {
|
|
||||||
return nil, fmt.Errorf("taskfile.Build: failed to create output directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
|
|
||||||
// If no targets specified, just run the build task once
|
|
||||||
if len(targets) == 0 {
|
|
||||||
if err := b.runTask(ctx, cfg, "", ""); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find artifacts in output directory
|
|
||||||
found := b.findArtifacts(cfg.FS, outputDir)
|
|
||||||
artifacts = append(artifacts, found...)
|
|
||||||
} else {
|
|
||||||
// Run build task for each target
|
|
||||||
for _, target := range targets {
|
|
||||||
if err := b.runTask(ctx, cfg, target.OS, target.Arch); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to find artifacts for this target
|
|
||||||
found := b.findArtifactsForTarget(cfg.FS, outputDir, target)
|
|
||||||
artifacts = append(artifacts, found...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// runTask executes the Taskfile build task.
|
|
||||||
func (b *TaskfileBuilder) runTask(ctx context.Context, cfg *build.Config, goos, goarch string) error {
|
|
||||||
// Build task command
|
|
||||||
args := []string{"build"}
|
|
||||||
|
|
||||||
// Pass variables if targets are specified
|
|
||||||
if goos != "" {
|
|
||||||
args = append(args, fmt.Sprintf("GOOS=%s", goos))
|
|
||||||
}
|
|
||||||
if goarch != "" {
|
|
||||||
args = append(args, fmt.Sprintf("GOARCH=%s", goarch))
|
|
||||||
}
|
|
||||||
if cfg.OutputDir != "" {
|
|
||||||
args = append(args, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir))
|
|
||||||
}
|
|
||||||
if cfg.Name != "" {
|
|
||||||
args = append(args, fmt.Sprintf("NAME=%s", cfg.Name))
|
|
||||||
}
|
|
||||||
if cfg.Version != "" {
|
|
||||||
args = append(args, fmt.Sprintf("VERSION=%s", cfg.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "task", args...)
|
|
||||||
cmd.Dir = cfg.ProjectDir
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
|
|
||||||
// Set environment variables
|
|
||||||
cmd.Env = os.Environ()
|
|
||||||
if goos != "" {
|
|
||||||
cmd.Env = append(cmd.Env, fmt.Sprintf("GOOS=%s", goos))
|
|
||||||
}
|
|
||||||
if goarch != "" {
|
|
||||||
cmd.Env = append(cmd.Env, fmt.Sprintf("GOARCH=%s", goarch))
|
|
||||||
}
|
|
||||||
if cfg.OutputDir != "" {
|
|
||||||
cmd.Env = append(cmd.Env, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir))
|
|
||||||
}
|
|
||||||
if cfg.Name != "" {
|
|
||||||
cmd.Env = append(cmd.Env, fmt.Sprintf("NAME=%s", cfg.Name))
|
|
||||||
}
|
|
||||||
if cfg.Version != "" {
|
|
||||||
cmd.Env = append(cmd.Env, fmt.Sprintf("VERSION=%s", cfg.Version))
|
|
||||||
}
|
|
||||||
|
|
||||||
if goos != "" && goarch != "" {
|
|
||||||
fmt.Printf("Running task build for %s/%s\n", goos, goarch)
|
|
||||||
} else {
|
|
||||||
fmt.Println("Running task build")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return fmt.Errorf("taskfile.Build: task build failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findArtifacts searches for built artifacts in the output directory.
|
|
||||||
func (b *TaskfileBuilder) findArtifacts(fs io.Medium, outputDir string) []build.Artifact {
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
|
|
||||||
entries, err := fs.List(outputDir)
|
|
||||||
if err != nil {
|
|
||||||
return artifacts
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip common non-artifact files
|
|
||||||
name := entry.Name()
|
|
||||||
if strings.HasPrefix(name, ".") || name == "CHECKSUMS.txt" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: filepath.Join(outputDir, name),
|
|
||||||
OS: "",
|
|
||||||
Arch: "",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts
|
|
||||||
}
|
|
||||||
|
|
||||||
// findArtifactsForTarget searches for built artifacts for a specific target.
|
|
||||||
func (b *TaskfileBuilder) findArtifactsForTarget(fs io.Medium, outputDir string, target build.Target) []build.Artifact {
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
|
|
||||||
// 1. Look for platform-specific subdirectory: output/os_arch/
|
|
||||||
platformSubdir := filepath.Join(outputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
|
|
||||||
if fs.IsDir(platformSubdir) {
|
|
||||||
entries, _ := fs.List(platformSubdir)
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
// Handle .app bundles on macOS
|
|
||||||
if target.OS == "darwin" && strings.HasSuffix(entry.Name(), ".app") {
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: filepath.Join(platformSubdir, entry.Name()),
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Skip hidden files
|
|
||||||
if strings.HasPrefix(entry.Name(), ".") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: filepath.Join(platformSubdir, entry.Name()),
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if len(artifacts) > 0 {
|
|
||||||
return artifacts
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Look for files matching the target pattern in the root output dir
|
|
||||||
patterns := []string{
|
|
||||||
fmt.Sprintf("*-%s-%s*", target.OS, target.Arch),
|
|
||||||
fmt.Sprintf("*_%s_%s*", target.OS, target.Arch),
|
|
||||||
fmt.Sprintf("*-%s*", target.Arch),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, pattern := range patterns {
|
|
||||||
entries, _ := fs.List(outputDir)
|
|
||||||
for _, entry := range entries {
|
|
||||||
match := entry.Name()
|
|
||||||
// Simple glob matching
|
|
||||||
if b.matchPattern(match, pattern) {
|
|
||||||
fullPath := filepath.Join(outputDir, match)
|
|
||||||
if fs.IsDir(fullPath) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts = append(artifacts, build.Artifact{
|
|
||||||
Path: fullPath,
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(artifacts) > 0 {
|
|
||||||
break // Found matches, stop looking
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts
|
|
||||||
}
|
|
||||||
|
|
||||||
// matchPattern implements glob matching for Taskfile artifacts.
|
|
||||||
func (b *TaskfileBuilder) matchPattern(name, pattern string) bool {
|
|
||||||
matched, _ := filepath.Match(pattern, name)
|
|
||||||
return matched
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTaskCli checks if the task CLI is available.
|
|
||||||
func (b *TaskfileBuilder) validateTaskCli() error {
|
|
||||||
// Check PATH first
|
|
||||||
if _, err := exec.LookPath("task"); err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check common locations
|
|
||||||
paths := []string{
|
|
||||||
"/usr/local/bin/task",
|
|
||||||
"/opt/homebrew/bin/task",
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range paths {
|
|
||||||
if _, err := os.Stat(p); err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("taskfile: task CLI not found. Install with: brew install go-task (macOS), go install github.com/go-task/task/v3/cmd/task@latest, or see https://taskfile.dev/installation/")
|
|
||||||
}
|
|
||||||
|
|
@ -1,247 +0,0 @@
|
||||||
// Package builders provides build implementations for different project types.
|
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WailsBuilder implements the Builder interface for Wails v3 projects.
|
|
||||||
type WailsBuilder struct{}
|
|
||||||
|
|
||||||
// NewWailsBuilder creates a new WailsBuilder instance.
|
|
||||||
func NewWailsBuilder() *WailsBuilder {
|
|
||||||
return &WailsBuilder{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the builder's identifier.
|
|
||||||
func (b *WailsBuilder) Name() string {
|
|
||||||
return "wails"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect checks if this builder can handle the project in the given directory.
|
|
||||||
// Uses IsWailsProject from the build package which checks for wails.json.
|
|
||||||
func (b *WailsBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
|
||||||
return build.IsWailsProject(fs, dir), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build compiles the Wails project for the specified targets.
|
|
||||||
// It detects the Wails version and chooses the appropriate build strategy:
|
|
||||||
// - Wails v3: Delegates to Taskfile (error if missing)
|
|
||||||
// - Wails v2: Uses 'wails build' command
|
|
||||||
func (b *WailsBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
|
||||||
if cfg == nil {
|
|
||||||
return nil, fmt.Errorf("builders.WailsBuilder.Build: config is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(targets) == 0 {
|
|
||||||
return nil, fmt.Errorf("builders.WailsBuilder.Build: no targets specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Detect Wails version
|
|
||||||
isV3 := b.isWailsV3(cfg.FS, cfg.ProjectDir)
|
|
||||||
|
|
||||||
if isV3 {
|
|
||||||
// Wails v3 strategy: Delegate to Taskfile
|
|
||||||
taskBuilder := NewTaskfileBuilder()
|
|
||||||
if detected, _ := taskBuilder.Detect(cfg.FS, cfg.ProjectDir); detected {
|
|
||||||
return taskBuilder.Build(ctx, cfg, targets)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("wails v3 projects require a Taskfile for building")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wails v2 strategy: Use 'wails build'
|
|
||||||
// Ensure output directory exists
|
|
||||||
if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
|
|
||||||
return nil, fmt.Errorf("builders.WailsBuilder.Build: failed to create output directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note: Wails v2 handles frontend installation/building automatically via wails.json config
|
|
||||||
|
|
||||||
var artifacts []build.Artifact
|
|
||||||
|
|
||||||
for _, target := range targets {
|
|
||||||
artifact, err := b.buildV2Target(ctx, cfg, target)
|
|
||||||
if err != nil {
|
|
||||||
return artifacts, fmt.Errorf("builders.WailsBuilder.Build: failed to build %s: %w", target.String(), err)
|
|
||||||
}
|
|
||||||
artifacts = append(artifacts, artifact)
|
|
||||||
}
|
|
||||||
|
|
||||||
return artifacts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// isWailsV3 checks if the project uses Wails v3 by inspecting go.mod.
|
|
||||||
func (b *WailsBuilder) isWailsV3(fs io.Medium, dir string) bool {
|
|
||||||
goModPath := filepath.Join(dir, "go.mod")
|
|
||||||
content, err := fs.Read(goModPath)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return strings.Contains(content, "github.com/wailsapp/wails/v3")
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildV2Target compiles for a single target platform using wails (v2).
|
|
||||||
func (b *WailsBuilder) buildV2Target(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) {
|
|
||||||
// Determine output binary name
|
|
||||||
binaryName := cfg.Name
|
|
||||||
if binaryName == "" {
|
|
||||||
binaryName = filepath.Base(cfg.ProjectDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build the wails build arguments
|
|
||||||
args := []string{"build"}
|
|
||||||
|
|
||||||
// Platform
|
|
||||||
args = append(args, "-platform", fmt.Sprintf("%s/%s", target.OS, target.Arch))
|
|
||||||
|
|
||||||
// Output (Wails v2 uses -o for the binary name, relative to build/bin usually, but we want to control it)
|
|
||||||
// Actually, Wails v2 is opinionated about output dir (build/bin).
|
|
||||||
// We might need to copy artifacts after build if we want them in cfg.OutputDir.
|
|
||||||
// For now, let's try to let Wails do its thing and find the artifact.
|
|
||||||
|
|
||||||
// Create the command
|
|
||||||
cmd := exec.CommandContext(ctx, "wails", args...)
|
|
||||||
cmd.Dir = cfg.ProjectDir
|
|
||||||
|
|
||||||
// Capture output for error messages
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return build.Artifact{}, fmt.Errorf("wails build failed: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wails v2 typically outputs to build/bin
|
|
||||||
// We need to move/copy it to our desired output dir
|
|
||||||
|
|
||||||
// Construct the source path where Wails v2 puts the binary
|
|
||||||
wailsOutputDir := filepath.Join(cfg.ProjectDir, "build", "bin")
|
|
||||||
|
|
||||||
// Find the artifact in Wails output dir
|
|
||||||
sourcePath, err := b.findArtifact(cfg.FS, wailsOutputDir, binaryName, target)
|
|
||||||
if err != nil {
|
|
||||||
return build.Artifact{}, fmt.Errorf("failed to find Wails v2 build artifact: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move/Copy to our output dir
|
|
||||||
// Create platform specific dir in our output
|
|
||||||
platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
|
|
||||||
if err := cfg.FS.EnsureDir(platformDir); err != nil {
|
|
||||||
return build.Artifact{}, fmt.Errorf("failed to create output dir: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
destPath := filepath.Join(platformDir, filepath.Base(sourcePath))
|
|
||||||
|
|
||||||
// Simple copy using the medium
|
|
||||||
content, err := cfg.FS.Read(sourcePath)
|
|
||||||
if err != nil {
|
|
||||||
return build.Artifact{}, err
|
|
||||||
}
|
|
||||||
if err := cfg.FS.Write(destPath, content); err != nil {
|
|
||||||
return build.Artifact{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return build.Artifact{
|
|
||||||
Path: destPath,
|
|
||||||
OS: target.OS,
|
|
||||||
Arch: target.Arch,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findArtifact locates the built artifact based on the target platform.
|
|
||||||
func (b *WailsBuilder) findArtifact(fs io.Medium, platformDir, binaryName string, target build.Target) (string, error) {
|
|
||||||
var candidates []string
|
|
||||||
|
|
||||||
switch target.OS {
|
|
||||||
case "windows":
|
|
||||||
// Look for NSIS installer first, then plain exe
|
|
||||||
candidates = []string{
|
|
||||||
filepath.Join(platformDir, binaryName+"-installer.exe"),
|
|
||||||
filepath.Join(platformDir, binaryName+".exe"),
|
|
||||||
filepath.Join(platformDir, binaryName+"-amd64-installer.exe"),
|
|
||||||
}
|
|
||||||
case "darwin":
|
|
||||||
// Look for .dmg, then .app bundle, then plain binary
|
|
||||||
candidates = []string{
|
|
||||||
filepath.Join(platformDir, binaryName+".dmg"),
|
|
||||||
filepath.Join(platformDir, binaryName+".app"),
|
|
||||||
filepath.Join(platformDir, binaryName),
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// Linux and others: look for plain binary
|
|
||||||
candidates = []string{
|
|
||||||
filepath.Join(platformDir, binaryName),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try each candidate
|
|
||||||
for _, candidate := range candidates {
|
|
||||||
if fs.Exists(candidate) {
|
|
||||||
return candidate, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no specific candidate found, try to find any executable or package in the directory
|
|
||||||
entries, err := fs.List(platformDir)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("failed to read platform directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
name := entry.Name()
|
|
||||||
// Skip common non-artifact files
|
|
||||||
if strings.HasSuffix(name, ".go") || strings.HasSuffix(name, ".json") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
path := filepath.Join(platformDir, name)
|
|
||||||
info, err := entry.Info()
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// On Unix, check if it's executable; on Windows, check for .exe
|
|
||||||
if target.OS == "windows" {
|
|
||||||
if strings.HasSuffix(name, ".exe") {
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
} else if info.Mode()&0111 != 0 || entry.IsDir() {
|
|
||||||
// Executable file or directory (.app bundle)
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", fmt.Errorf("no artifact found in %s", platformDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// detectPackageManager detects the frontend package manager based on lock files.
|
|
||||||
// Returns "bun", "pnpm", "yarn", or "npm" (default).
|
|
||||||
func detectPackageManager(fs io.Medium, dir string) string {
|
|
||||||
// Check in priority order: bun, pnpm, yarn, npm
|
|
||||||
lockFiles := []struct {
|
|
||||||
file string
|
|
||||||
manager string
|
|
||||||
}{
|
|
||||||
{"bun.lockb", "bun"},
|
|
||||||
{"pnpm-lock.yaml", "pnpm"},
|
|
||||||
{"yarn.lock", "yarn"},
|
|
||||||
{"package-lock.json", "npm"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, lf := range lockFiles {
|
|
||||||
if fs.IsFile(filepath.Join(dir, lf.file)) {
|
|
||||||
return lf.manager
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default to npm if no lock file found
|
|
||||||
return "npm"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure WailsBuilder implements the Builder interface.
|
|
||||||
var _ build.Builder = (*WailsBuilder)(nil)
|
|
||||||
|
|
@ -1,416 +0,0 @@
|
||||||
package builders
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/build"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupWailsTestProject creates a minimal Wails project structure for testing.
|
|
||||||
func setupWailsTestProject(t *testing.T) string {
|
|
||||||
t.Helper()
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
// Create wails.json
|
|
||||||
wailsJSON := `{
|
|
||||||
"name": "testapp",
|
|
||||||
"outputfilename": "testapp"
|
|
||||||
}`
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte(wailsJSON), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create a minimal go.mod
|
|
||||||
goMod := `module testapp
|
|
||||||
|
|
||||||
go 1.21
|
|
||||||
|
|
||||||
require github.com/wailsapp/wails/v3 v3.0.0
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create a minimal main.go
|
|
||||||
mainGo := `package main
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
println("hello wails")
|
|
||||||
}
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create a minimal Taskfile.yml
|
|
||||||
taskfile := `version: '3'
|
|
||||||
tasks:
|
|
||||||
build:
|
|
||||||
cmds:
|
|
||||||
- mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}
|
|
||||||
- touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(dir, "Taskfile.yml"), []byte(taskfile), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
// setupWailsV2TestProject creates a Wails v2 project structure.
|
|
||||||
func setupWailsV2TestProject(t *testing.T) string {
|
|
||||||
t.Helper()
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
// wails.json
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// go.mod with v2
|
|
||||||
goMod := `module testapp
|
|
||||||
go 1.21
|
|
||||||
require github.com/wailsapp/wails/v2 v2.8.0
|
|
||||||
`
|
|
||||||
err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Build_Taskfile_Good(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping integration test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if task is available
|
|
||||||
if _, err := exec.LookPath("task"); err != nil {
|
|
||||||
t.Skip("task not installed, skipping test")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("delegates to Taskfile if present", func(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
projectDir := setupWailsTestProject(t)
|
|
||||||
outputDir := t.TempDir()
|
|
||||||
|
|
||||||
// Create a Taskfile that just touches a file
|
|
||||||
taskfile := `version: '3'
|
|
||||||
tasks:
|
|
||||||
build:
|
|
||||||
cmds:
|
|
||||||
- mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}
|
|
||||||
- touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp
|
|
||||||
`
|
|
||||||
err := os.WriteFile(filepath.Join(projectDir, "Taskfile.yml"), []byte(taskfile), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
cfg := &build.Config{
|
|
||||||
FS: fs,
|
|
||||||
ProjectDir: projectDir,
|
|
||||||
OutputDir: outputDir,
|
|
||||||
Name: "testapp",
|
|
||||||
}
|
|
||||||
targets := []build.Target{
|
|
||||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts, err := builder.Build(context.Background(), cfg, targets)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.NotEmpty(t, artifacts)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Name_Good(t *testing.T) {
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
assert.Equal(t, "wails", builder.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Build_V2_Good(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping integration test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := exec.LookPath("wails"); err != nil {
|
|
||||||
t.Skip("wails not installed, skipping integration test")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("builds v2 project", func(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
projectDir := setupWailsV2TestProject(t)
|
|
||||||
outputDir := t.TempDir()
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
cfg := &build.Config{
|
|
||||||
FS: fs,
|
|
||||||
ProjectDir: projectDir,
|
|
||||||
OutputDir: outputDir,
|
|
||||||
Name: "testapp",
|
|
||||||
}
|
|
||||||
targets := []build.Target{
|
|
||||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
|
||||||
}
|
|
||||||
|
|
||||||
// This will likely fail in a real run because we can't easily mock the full wails v2 build process
|
|
||||||
// (which needs a valid project with main.go etc).
|
|
||||||
// But it validates we are trying to run the command.
|
|
||||||
// For now, we just verify it attempts the build - error is expected
|
|
||||||
_, _ = builder.Build(context.Background(), cfg, targets)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Detect_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("detects Wails project with wails.json", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for Go-only project", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for Node.js project", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
detected, err := builder.Detect(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, detected)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDetectPackageManager_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("detects bun from bun.lockb", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "bun", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects pnpm from pnpm-lock.yaml", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "pnpm", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects yarn from yarn.lock", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "yarn", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects npm from package-lock.json", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "npm", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("defaults to npm when no lock file", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "npm", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("prefers bun over other lock files", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
// Create multiple lock files
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644))
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "bun", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("prefers pnpm over yarn and npm", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
// Create multiple lock files (no bun)
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644))
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "pnpm", result)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("prefers yarn over npm", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
// Create multiple lock files (no bun or pnpm)
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
|
|
||||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))
|
|
||||||
|
|
||||||
result := detectPackageManager(fs, dir)
|
|
||||||
assert.Equal(t, "yarn", result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Build_Bad(t *testing.T) {
|
|
||||||
t.Run("returns error for nil config", func(t *testing.T) {
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
|
|
||||||
artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}})
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, artifacts)
|
|
||||||
assert.Contains(t, err.Error(), "config is nil")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns error for empty targets", func(t *testing.T) {
|
|
||||||
projectDir := setupWailsTestProject(t)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
cfg := &build.Config{
|
|
||||||
FS: io.Local,
|
|
||||||
ProjectDir: projectDir,
|
|
||||||
OutputDir: t.TempDir(),
|
|
||||||
Name: "test",
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts, err := builder.Build(context.Background(), cfg, []build.Target{})
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, artifacts)
|
|
||||||
assert.Contains(t, err.Error(), "no targets specified")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Build_Good(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping integration test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if wails3 is available in PATH
|
|
||||||
if _, err := exec.LookPath("wails3"); err != nil {
|
|
||||||
t.Skip("wails3 not installed, skipping integration test")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("builds for current platform", func(t *testing.T) {
|
|
||||||
projectDir := setupWailsTestProject(t)
|
|
||||||
outputDir := t.TempDir()
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
cfg := &build.Config{
|
|
||||||
FS: io.Local,
|
|
||||||
ProjectDir: projectDir,
|
|
||||||
OutputDir: outputDir,
|
|
||||||
Name: "testapp",
|
|
||||||
}
|
|
||||||
targets := []build.Target{
|
|
||||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts, err := builder.Build(context.Background(), cfg, targets)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, artifacts, 1)
|
|
||||||
|
|
||||||
// Verify artifact properties
|
|
||||||
artifact := artifacts[0]
|
|
||||||
assert.Equal(t, runtime.GOOS, artifact.OS)
|
|
||||||
assert.Equal(t, runtime.GOARCH, artifact.Arch)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Interface_Good(t *testing.T) {
|
|
||||||
// Verify WailsBuilder implements Builder interface
|
|
||||||
var _ build.Builder = (*WailsBuilder)(nil)
|
|
||||||
var _ build.Builder = NewWailsBuilder()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWailsBuilder_Ugly(t *testing.T) {
|
|
||||||
t.Run("handles nonexistent frontend directory gracefully", func(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping integration test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a Wails project without a frontend directory
|
|
||||||
dir := t.TempDir()
|
|
||||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
cfg := &build.Config{
|
|
||||||
FS: io.Local,
|
|
||||||
ProjectDir: dir,
|
|
||||||
OutputDir: t.TempDir(),
|
|
||||||
Name: "test",
|
|
||||||
}
|
|
||||||
targets := []build.Target{
|
|
||||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
|
||||||
}
|
|
||||||
|
|
||||||
// This will fail because wails3 isn't set up, but it shouldn't panic
|
|
||||||
// due to missing frontend directory
|
|
||||||
_, err = builder.Build(context.Background(), cfg, targets)
|
|
||||||
// We expect an error (wails3 build will fail), but not a panic
|
|
||||||
// The error should be about wails3 build, not about frontend
|
|
||||||
if err != nil {
|
|
||||||
assert.NotContains(t, err.Error(), "frontend dependencies")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("handles context cancellation", func(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("skipping integration test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
projectDir := setupWailsTestProject(t)
|
|
||||||
|
|
||||||
builder := NewWailsBuilder()
|
|
||||||
cfg := &build.Config{
|
|
||||||
FS: io.Local,
|
|
||||||
ProjectDir: projectDir,
|
|
||||||
OutputDir: t.TempDir(),
|
|
||||||
Name: "canceltest",
|
|
||||||
}
|
|
||||||
targets := []build.Target{
|
|
||||||
{OS: runtime.GOOS, Arch: runtime.GOARCH},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create an already cancelled context
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
cancel()
|
|
||||||
|
|
||||||
artifacts, err := builder.Build(ctx, cfg, targets)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Empty(t, artifacts)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,97 +0,0 @@
|
||||||
// Package build provides project type detection and cross-compilation for the Core build system.
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
io_interface "forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Checksum computes SHA256 for an artifact and returns the artifact with the Checksum field filled.
|
|
||||||
func Checksum(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
|
|
||||||
if artifact.Path == "" {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Checksum: artifact path is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open the file
|
|
||||||
file, err := fs.Open(artifact.Path)
|
|
||||||
if err != nil {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Checksum: failed to open file: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = file.Close() }()
|
|
||||||
|
|
||||||
// Compute SHA256 hash
|
|
||||||
hasher := sha256.New()
|
|
||||||
if _, err := io.Copy(hasher, file); err != nil {
|
|
||||||
return Artifact{}, fmt.Errorf("build.Checksum: failed to hash file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
checksum := hex.EncodeToString(hasher.Sum(nil))
|
|
||||||
|
|
||||||
return Artifact{
|
|
||||||
Path: artifact.Path,
|
|
||||||
OS: artifact.OS,
|
|
||||||
Arch: artifact.Arch,
|
|
||||||
Checksum: checksum,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChecksumAll computes checksums for all artifacts.
|
|
||||||
// Returns a slice of artifacts with their Checksum fields filled.
|
|
||||||
func ChecksumAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
|
|
||||||
if len(artifacts) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var checksummed []Artifact
|
|
||||||
for _, artifact := range artifacts {
|
|
||||||
cs, err := Checksum(fs, artifact)
|
|
||||||
if err != nil {
|
|
||||||
return checksummed, fmt.Errorf("build.ChecksumAll: failed to checksum %s: %w", artifact.Path, err)
|
|
||||||
}
|
|
||||||
checksummed = append(checksummed, cs)
|
|
||||||
}
|
|
||||||
|
|
||||||
return checksummed, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteChecksumFile writes a CHECKSUMS.txt file with the format:
|
|
||||||
//
|
|
||||||
// sha256hash filename1
|
|
||||||
// sha256hash filename2
|
|
||||||
//
|
|
||||||
// The artifacts should have their Checksum fields filled (call ChecksumAll first).
|
|
||||||
// Filenames are relative to the output directory (just the basename).
|
|
||||||
func WriteChecksumFile(fs io_interface.Medium, artifacts []Artifact, path string) error {
|
|
||||||
if len(artifacts) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build the content
|
|
||||||
var lines []string
|
|
||||||
for _, artifact := range artifacts {
|
|
||||||
if artifact.Checksum == "" {
|
|
||||||
return fmt.Errorf("build.WriteChecksumFile: artifact %s has no checksum", artifact.Path)
|
|
||||||
}
|
|
||||||
filename := filepath.Base(artifact.Path)
|
|
||||||
lines = append(lines, fmt.Sprintf("%s %s", artifact.Checksum, filename))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort lines for consistent output
|
|
||||||
sort.Strings(lines)
|
|
||||||
|
|
||||||
content := strings.Join(lines, "\n") + "\n"
|
|
||||||
|
|
||||||
// Write the file using the medium (which handles directory creation in Write)
|
|
||||||
if err := fs.Write(path, content); err != nil {
|
|
||||||
return fmt.Errorf("build.WriteChecksumFile: failed to write file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,282 +0,0 @@
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupChecksumTestFile creates a test file with known content.
|
|
||||||
func setupChecksumTestFile(t *testing.T, content string) string {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
|
||||||
path := filepath.Join(dir, "testfile")
|
|
||||||
err := os.WriteFile(path, []byte(content), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChecksum_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("computes SHA256 checksum", func(t *testing.T) {
|
|
||||||
// Known SHA256 of "Hello, World!\n"
|
|
||||||
path := setupChecksumTestFile(t, "Hello, World!\n")
|
|
||||||
expectedChecksum := "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31"
|
|
||||||
|
|
||||||
artifact := Artifact{
|
|
||||||
Path: path,
|
|
||||||
OS: "linux",
|
|
||||||
Arch: "amd64",
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := Checksum(fs, artifact)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedChecksum, result.Checksum)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("preserves artifact fields", func(t *testing.T) {
|
|
||||||
path := setupChecksumTestFile(t, "test content")
|
|
||||||
|
|
||||||
artifact := Artifact{
|
|
||||||
Path: path,
|
|
||||||
OS: "darwin",
|
|
||||||
Arch: "arm64",
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := Checksum(fs, artifact)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, path, result.Path)
|
|
||||||
assert.Equal(t, "darwin", result.OS)
|
|
||||||
assert.Equal(t, "arm64", result.Arch)
|
|
||||||
assert.NotEmpty(t, result.Checksum)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("produces 64 character hex string", func(t *testing.T) {
|
|
||||||
path := setupChecksumTestFile(t, "any content")
|
|
||||||
|
|
||||||
artifact := Artifact{Path: path, OS: "linux", Arch: "amd64"}
|
|
||||||
|
|
||||||
result, err := Checksum(fs, artifact)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// SHA256 produces 32 bytes = 64 hex characters
|
|
||||||
assert.Len(t, result.Checksum, 64)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("different content produces different checksums", func(t *testing.T) {
|
|
||||||
path1 := setupChecksumTestFile(t, "content one")
|
|
||||||
path2 := setupChecksumTestFile(t, "content two")
|
|
||||||
|
|
||||||
result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.NotEqual(t, result1.Checksum, result2.Checksum)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("same content produces same checksum", func(t *testing.T) {
|
|
||||||
content := "identical content"
|
|
||||||
path1 := setupChecksumTestFile(t, content)
|
|
||||||
path2 := setupChecksumTestFile(t, content)
|
|
||||||
|
|
||||||
result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, result1.Checksum, result2.Checksum)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChecksum_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns error for empty path", func(t *testing.T) {
|
|
||||||
artifact := Artifact{
|
|
||||||
Path: "",
|
|
||||||
OS: "linux",
|
|
||||||
Arch: "amd64",
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := Checksum(fs, artifact)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "artifact path is empty")
|
|
||||||
assert.Empty(t, result.Checksum)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns error for non-existent file", func(t *testing.T) {
|
|
||||||
artifact := Artifact{
|
|
||||||
Path: "/nonexistent/path/file",
|
|
||||||
OS: "linux",
|
|
||||||
Arch: "amd64",
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := Checksum(fs, artifact)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "failed to open file")
|
|
||||||
assert.Empty(t, result.Checksum)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChecksumAll_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("checksums multiple artifacts", func(t *testing.T) {
|
|
||||||
paths := []string{
|
|
||||||
setupChecksumTestFile(t, "content one"),
|
|
||||||
setupChecksumTestFile(t, "content two"),
|
|
||||||
setupChecksumTestFile(t, "content three"),
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: paths[0], OS: "linux", Arch: "amd64"},
|
|
||||||
{Path: paths[1], OS: "darwin", Arch: "arm64"},
|
|
||||||
{Path: paths[2], OS: "windows", Arch: "amd64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := ChecksumAll(fs, artifacts)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, results, 3)
|
|
||||||
|
|
||||||
for i, result := range results {
|
|
||||||
assert.Equal(t, artifacts[i].Path, result.Path)
|
|
||||||
assert.Equal(t, artifacts[i].OS, result.OS)
|
|
||||||
assert.Equal(t, artifacts[i].Arch, result.Arch)
|
|
||||||
assert.NotEmpty(t, result.Checksum)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns nil for empty slice", func(t *testing.T) {
|
|
||||||
results, err := ChecksumAll(fs, []Artifact{})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Nil(t, results)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns nil for nil slice", func(t *testing.T) {
|
|
||||||
results, err := ChecksumAll(fs, nil)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Nil(t, results)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestChecksumAll_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns partial results on error", func(t *testing.T) {
|
|
||||||
path := setupChecksumTestFile(t, "valid content")
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: path, OS: "linux", Arch: "amd64"},
|
|
||||||
{Path: "/nonexistent/file", OS: "linux", Arch: "arm64"}, // This will fail
|
|
||||||
}
|
|
||||||
|
|
||||||
results, err := ChecksumAll(fs, artifacts)
|
|
||||||
assert.Error(t, err)
|
|
||||||
// Should have the first successful result
|
|
||||||
assert.Len(t, results, 1)
|
|
||||||
assert.NotEmpty(t, results[0].Checksum)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteChecksumFile_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("writes checksum file with correct format", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
checksumPath := filepath.Join(dir, "CHECKSUMS.txt")
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/output/app_linux_amd64.tar.gz", Checksum: "abc123def456", OS: "linux", Arch: "amd64"},
|
|
||||||
{Path: "/output/app_darwin_arm64.tar.gz", Checksum: "789xyz000111", OS: "darwin", Arch: "arm64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := WriteChecksumFile(fs, artifacts, checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Read and verify content
|
|
||||||
content, err := os.ReadFile(checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
lines := strings.Split(strings.TrimSpace(string(content)), "\n")
|
|
||||||
require.Len(t, lines, 2)
|
|
||||||
|
|
||||||
// Lines should be sorted alphabetically
|
|
||||||
assert.Equal(t, "789xyz000111 app_darwin_arm64.tar.gz", lines[0])
|
|
||||||
assert.Equal(t, "abc123def456 app_linux_amd64.tar.gz", lines[1])
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("creates parent directories", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
checksumPath := filepath.Join(dir, "nested", "deep", "CHECKSUMS.txt")
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/output/app.tar.gz", Checksum: "abc123", OS: "linux", Arch: "amd64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := WriteChecksumFile(fs, artifacts, checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.FileExists(t, checksumPath)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("does nothing for empty artifacts", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
checksumPath := filepath.Join(dir, "CHECKSUMS.txt")
|
|
||||||
|
|
||||||
err := WriteChecksumFile(fs, []Artifact{}, checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// File should not exist
|
|
||||||
_, err = os.Stat(checksumPath)
|
|
||||||
assert.True(t, os.IsNotExist(err))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("does nothing for nil artifacts", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
checksumPath := filepath.Join(dir, "CHECKSUMS.txt")
|
|
||||||
|
|
||||||
err := WriteChecksumFile(fs, nil, checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("uses only basename for filenames", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
checksumPath := filepath.Join(dir, "CHECKSUMS.txt")
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/some/deep/nested/path/myapp_linux_amd64.tar.gz", Checksum: "checksum123", OS: "linux", Arch: "amd64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := WriteChecksumFile(fs, artifacts, checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
content, err := os.ReadFile(checksumPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should only contain the basename
|
|
||||||
assert.Contains(t, string(content), "myapp_linux_amd64.tar.gz")
|
|
||||||
assert.NotContains(t, string(content), "/some/deep/nested/path/")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWriteChecksumFile_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns error for artifact without checksum", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
checksumPath := filepath.Join(dir, "CHECKSUMS.txt")
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/output/app.tar.gz", Checksum: "", OS: "linux", Arch: "amd64"}, // No checksum
|
|
||||||
}
|
|
||||||
|
|
||||||
err := WriteChecksumFile(fs, artifacts, checksumPath)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "has no checksum")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,169 +0,0 @@
|
||||||
// Package build provides project type detection and cross-compilation for the Core build system.
|
|
||||||
// This file handles configuration loading from .core/build.yaml files.
|
|
||||||
package build
|
|
||||||
|
|
||||||
import (
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go/pkg/build/signing"
	"forge.lthn.ai/core/go/pkg/io"
	"gopkg.in/yaml.v3"
)
|
|
||||||
|
|
||||||
// ConfigFileName is the name of the build configuration file.
|
|
||||||
const ConfigFileName = "build.yaml"
|
|
||||||
|
|
||||||
// ConfigDir is the directory where build configuration is stored.
|
|
||||||
const ConfigDir = ".core"
|
|
||||||
|
|
||||||
// BuildConfig holds the complete build configuration loaded from .core/build.yaml.
|
|
||||||
// This is distinct from Config which holds runtime build parameters.
|
|
||||||
type BuildConfig struct {
|
|
||||||
// Version is the config file format version.
|
|
||||||
Version int `yaml:"version"`
|
|
||||||
// Project contains project metadata.
|
|
||||||
Project Project `yaml:"project"`
|
|
||||||
// Build contains build settings.
|
|
||||||
Build Build `yaml:"build"`
|
|
||||||
// Targets defines the build targets.
|
|
||||||
Targets []TargetConfig `yaml:"targets"`
|
|
||||||
// Sign contains code signing configuration.
|
|
||||||
Sign signing.SignConfig `yaml:"sign,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Project holds project metadata.
|
|
||||||
type Project struct {
|
|
||||||
// Name is the project name.
|
|
||||||
Name string `yaml:"name"`
|
|
||||||
// Description is a brief description of the project.
|
|
||||||
Description string `yaml:"description"`
|
|
||||||
// Main is the path to the main package (e.g., ./cmd/core).
|
|
||||||
Main string `yaml:"main"`
|
|
||||||
// Binary is the output binary name.
|
|
||||||
Binary string `yaml:"binary"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build holds build-time settings.
|
|
||||||
type Build struct {
|
|
||||||
// CGO enables CGO for the build.
|
|
||||||
CGO bool `yaml:"cgo"`
|
|
||||||
// Flags are additional build flags (e.g., ["-trimpath"]).
|
|
||||||
Flags []string `yaml:"flags"`
|
|
||||||
// LDFlags are linker flags (e.g., ["-s", "-w"]).
|
|
||||||
LDFlags []string `yaml:"ldflags"`
|
|
||||||
// Env are additional environment variables.
|
|
||||||
Env []string `yaml:"env"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// TargetConfig defines a build target in the config file.
|
|
||||||
// This is separate from Target to allow for additional config-specific fields.
|
|
||||||
type TargetConfig struct {
|
|
||||||
// OS is the target operating system (e.g., "linux", "darwin", "windows").
|
|
||||||
OS string `yaml:"os"`
|
|
||||||
// Arch is the target architecture (e.g., "amd64", "arm64").
|
|
||||||
Arch string `yaml:"arch"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadConfig loads build configuration from the .core/build.yaml file in the given directory.
|
|
||||||
// If the config file does not exist, it returns DefaultConfig().
|
|
||||||
// Returns an error if the file exists but cannot be parsed.
|
|
||||||
func LoadConfig(fs io.Medium, dir string) (*BuildConfig, error) {
|
|
||||||
configPath := filepath.Join(dir, ConfigDir, ConfigFileName)
|
|
||||||
|
|
||||||
content, err := fs.Read(configPath)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return DefaultConfig(), nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("build.LoadConfig: failed to read config file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cfg BuildConfig
|
|
||||||
data := []byte(content)
|
|
||||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
|
||||||
return nil, fmt.Errorf("build.LoadConfig: failed to parse config file: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply defaults for any missing fields
|
|
||||||
applyDefaults(&cfg)
|
|
||||||
|
|
||||||
return &cfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultConfig returns sensible defaults for Go projects.
|
|
||||||
func DefaultConfig() *BuildConfig {
|
|
||||||
return &BuildConfig{
|
|
||||||
Version: 1,
|
|
||||||
Project: Project{
|
|
||||||
Name: "",
|
|
||||||
Main: ".",
|
|
||||||
Binary: "",
|
|
||||||
},
|
|
||||||
Build: Build{
|
|
||||||
CGO: false,
|
|
||||||
Flags: []string{"-trimpath"},
|
|
||||||
LDFlags: []string{"-s", "-w"},
|
|
||||||
Env: []string{},
|
|
||||||
},
|
|
||||||
Targets: []TargetConfig{
|
|
||||||
{OS: "linux", Arch: "amd64"},
|
|
||||||
{OS: "linux", Arch: "arm64"},
|
|
||||||
{OS: "darwin", Arch: "arm64"},
|
|
||||||
{OS: "windows", Arch: "amd64"},
|
|
||||||
},
|
|
||||||
Sign: signing.DefaultSignConfig(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// applyDefaults fills in default values for any empty fields in the config.
|
|
||||||
func applyDefaults(cfg *BuildConfig) {
|
|
||||||
defaults := DefaultConfig()
|
|
||||||
|
|
||||||
if cfg.Version == 0 {
|
|
||||||
cfg.Version = defaults.Version
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Project.Main == "" {
|
|
||||||
cfg.Project.Main = defaults.Project.Main
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Build.Flags == nil {
|
|
||||||
cfg.Build.Flags = defaults.Build.Flags
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Build.LDFlags == nil {
|
|
||||||
cfg.Build.LDFlags = defaults.Build.LDFlags
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Build.Env == nil {
|
|
||||||
cfg.Build.Env = defaults.Build.Env
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cfg.Targets) == 0 {
|
|
||||||
cfg.Targets = defaults.Targets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expand environment variables in sign config
|
|
||||||
cfg.Sign.ExpandEnv()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigPath returns the path to the build config file for a given directory.
|
|
||||||
func ConfigPath(dir string) string {
|
|
||||||
return filepath.Join(dir, ConfigDir, ConfigFileName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigExists checks if a build config file exists in the given directory.
|
|
||||||
func ConfigExists(fs io.Medium, dir string) bool {
|
|
||||||
return fileExists(fs, ConfigPath(dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToTargets converts TargetConfig slice to Target slice for use with builders.
|
|
||||||
func (cfg *BuildConfig) ToTargets() []Target {
|
|
||||||
targets := make([]Target, len(cfg.Targets))
|
|
||||||
for i, t := range cfg.Targets {
|
|
||||||
targets[i] = Target(t)
|
|
||||||
}
|
|
||||||
return targets
|
|
||||||
}
|
|
||||||
|
|
@ -1,324 +0,0 @@
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupConfigTestDir creates a temp directory with optional .core/build.yaml content.
|
|
||||||
func setupConfigTestDir(t *testing.T, configContent string) string {
|
|
||||||
t.Helper()
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
if configContent != "" {
|
|
||||||
coreDir := filepath.Join(dir, ConfigDir)
|
|
||||||
err := os.MkdirAll(coreDir, 0755)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
configPath := filepath.Join(coreDir, ConfigFileName)
|
|
||||||
err = os.WriteFile(configPath, []byte(configContent), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("loads valid config", func(t *testing.T) {
|
|
||||||
content := `
|
|
||||||
version: 1
|
|
||||||
project:
|
|
||||||
name: myapp
|
|
||||||
description: A test application
|
|
||||||
main: ./cmd/myapp
|
|
||||||
binary: myapp
|
|
||||||
build:
|
|
||||||
cgo: true
|
|
||||||
flags:
|
|
||||||
- -trimpath
|
|
||||||
- -race
|
|
||||||
ldflags:
|
|
||||||
- -s
|
|
||||||
- -w
|
|
||||||
env:
|
|
||||||
- FOO=bar
|
|
||||||
targets:
|
|
||||||
- os: linux
|
|
||||||
arch: amd64
|
|
||||||
- os: darwin
|
|
||||||
arch: arm64
|
|
||||||
`
|
|
||||||
dir := setupConfigTestDir(t, content)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(fs, dir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, cfg)
|
|
||||||
|
|
||||||
assert.Equal(t, 1, cfg.Version)
|
|
||||||
assert.Equal(t, "myapp", cfg.Project.Name)
|
|
||||||
assert.Equal(t, "A test application", cfg.Project.Description)
|
|
||||||
assert.Equal(t, "./cmd/myapp", cfg.Project.Main)
|
|
||||||
assert.Equal(t, "myapp", cfg.Project.Binary)
|
|
||||||
assert.True(t, cfg.Build.CGO)
|
|
||||||
assert.Equal(t, []string{"-trimpath", "-race"}, cfg.Build.Flags)
|
|
||||||
assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags)
|
|
||||||
assert.Equal(t, []string{"FOO=bar"}, cfg.Build.Env)
|
|
||||||
assert.Len(t, cfg.Targets, 2)
|
|
||||||
assert.Equal(t, "linux", cfg.Targets[0].OS)
|
|
||||||
assert.Equal(t, "amd64", cfg.Targets[0].Arch)
|
|
||||||
assert.Equal(t, "darwin", cfg.Targets[1].OS)
|
|
||||||
assert.Equal(t, "arm64", cfg.Targets[1].Arch)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns defaults when config file missing", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(fs, dir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, cfg)
|
|
||||||
|
|
||||||
defaults := DefaultConfig()
|
|
||||||
assert.Equal(t, defaults.Version, cfg.Version)
|
|
||||||
assert.Equal(t, defaults.Project.Main, cfg.Project.Main)
|
|
||||||
assert.Equal(t, defaults.Build.CGO, cfg.Build.CGO)
|
|
||||||
assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags)
|
|
||||||
assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags)
|
|
||||||
assert.Equal(t, defaults.Targets, cfg.Targets)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("applies defaults for missing fields", func(t *testing.T) {
|
|
||||||
content := `
|
|
||||||
version: 2
|
|
||||||
project:
|
|
||||||
name: partial
|
|
||||||
`
|
|
||||||
dir := setupConfigTestDir(t, content)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(fs, dir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, cfg)
|
|
||||||
|
|
||||||
// Explicit values preserved
|
|
||||||
assert.Equal(t, 2, cfg.Version)
|
|
||||||
assert.Equal(t, "partial", cfg.Project.Name)
|
|
||||||
|
|
||||||
// Defaults applied
|
|
||||||
defaults := DefaultConfig()
|
|
||||||
assert.Equal(t, defaults.Project.Main, cfg.Project.Main)
|
|
||||||
assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags)
|
|
||||||
assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags)
|
|
||||||
assert.Equal(t, defaults.Targets, cfg.Targets)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("preserves empty arrays when explicitly set", func(t *testing.T) {
|
|
||||||
content := `
|
|
||||||
version: 1
|
|
||||||
project:
|
|
||||||
name: noflags
|
|
||||||
build:
|
|
||||||
flags: []
|
|
||||||
ldflags: []
|
|
||||||
targets:
|
|
||||||
- os: linux
|
|
||||||
arch: amd64
|
|
||||||
`
|
|
||||||
dir := setupConfigTestDir(t, content)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(fs, dir)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, cfg)
|
|
||||||
|
|
||||||
// Empty arrays are preserved (not replaced with defaults)
|
|
||||||
assert.Empty(t, cfg.Build.Flags)
|
|
||||||
assert.Empty(t, cfg.Build.LDFlags)
|
|
||||||
// Targets explicitly set
|
|
||||||
assert.Len(t, cfg.Targets, 1)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns error for invalid YAML", func(t *testing.T) {
|
|
||||||
content := `
|
|
||||||
version: 1
|
|
||||||
project:
|
|
||||||
name: [invalid yaml
|
|
||||||
`
|
|
||||||
dir := setupConfigTestDir(t, content)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(fs, dir)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, cfg)
|
|
||||||
assert.Contains(t, err.Error(), "failed to parse config file")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns error for unreadable file", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
coreDir := filepath.Join(dir, ConfigDir)
|
|
||||||
err := os.MkdirAll(coreDir, 0755)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create config as a directory instead of file
|
|
||||||
configPath := filepath.Join(coreDir, ConfigFileName)
|
|
||||||
err = os.Mkdir(configPath, 0755)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(fs, dir)
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Nil(t, cfg)
|
|
||||||
assert.Contains(t, err.Error(), "failed to read config file")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDefaultConfig_Good(t *testing.T) {
|
|
||||||
t.Run("returns sensible defaults", func(t *testing.T) {
|
|
||||||
cfg := DefaultConfig()
|
|
||||||
|
|
||||||
assert.Equal(t, 1, cfg.Version)
|
|
||||||
assert.Equal(t, ".", cfg.Project.Main)
|
|
||||||
assert.Empty(t, cfg.Project.Name)
|
|
||||||
assert.Empty(t, cfg.Project.Binary)
|
|
||||||
assert.False(t, cfg.Build.CGO)
|
|
||||||
assert.Contains(t, cfg.Build.Flags, "-trimpath")
|
|
||||||
assert.Contains(t, cfg.Build.LDFlags, "-s")
|
|
||||||
assert.Contains(t, cfg.Build.LDFlags, "-w")
|
|
||||||
assert.Empty(t, cfg.Build.Env)
|
|
||||||
|
|
||||||
// Default targets cover common platforms
|
|
||||||
assert.Len(t, cfg.Targets, 4)
|
|
||||||
hasLinuxAmd64 := false
|
|
||||||
hasDarwinArm64 := false
|
|
||||||
hasWindowsAmd64 := false
|
|
||||||
for _, t := range cfg.Targets {
|
|
||||||
if t.OS == "linux" && t.Arch == "amd64" {
|
|
||||||
hasLinuxAmd64 = true
|
|
||||||
}
|
|
||||||
if t.OS == "darwin" && t.Arch == "arm64" {
|
|
||||||
hasDarwinArm64 = true
|
|
||||||
}
|
|
||||||
if t.OS == "windows" && t.Arch == "amd64" {
|
|
||||||
hasWindowsAmd64 = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert.True(t, hasLinuxAmd64)
|
|
||||||
assert.True(t, hasDarwinArm64)
|
|
||||||
assert.True(t, hasWindowsAmd64)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigPath_Good(t *testing.T) {
|
|
||||||
t.Run("returns correct path", func(t *testing.T) {
|
|
||||||
path := ConfigPath("/project/root")
|
|
||||||
assert.Equal(t, "/project/root/.core/build.yaml", path)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigExists_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns true when config exists", func(t *testing.T) {
|
|
||||||
dir := setupConfigTestDir(t, "version: 1")
|
|
||||||
assert.True(t, ConfigExists(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false when config missing", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
assert.False(t, ConfigExists(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false when .core dir missing", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
assert.False(t, ConfigExists(fs, dir))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLoadConfig_Good_SignConfig(t *testing.T) {
|
|
||||||
tmpDir := t.TempDir()
|
|
||||||
coreDir := filepath.Join(tmpDir, ".core")
|
|
||||||
_ = os.MkdirAll(coreDir, 0755)
|
|
||||||
|
|
||||||
configContent := `version: 1
|
|
||||||
sign:
|
|
||||||
enabled: true
|
|
||||||
gpg:
|
|
||||||
key: "ABCD1234"
|
|
||||||
macos:
|
|
||||||
identity: "Developer ID Application: Test"
|
|
||||||
notarize: true
|
|
||||||
`
|
|
||||||
_ = os.WriteFile(filepath.Join(coreDir, "build.yaml"), []byte(configContent), 0644)
|
|
||||||
|
|
||||||
cfg, err := LoadConfig(io.Local, tmpDir)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !cfg.Sign.Enabled {
|
|
||||||
t.Error("expected Sign.Enabled to be true")
|
|
||||||
}
|
|
||||||
if cfg.Sign.GPG.Key != "ABCD1234" {
|
|
||||||
t.Errorf("expected GPG.Key 'ABCD1234', got %q", cfg.Sign.GPG.Key)
|
|
||||||
}
|
|
||||||
if cfg.Sign.MacOS.Identity != "Developer ID Application: Test" {
|
|
||||||
t.Errorf("expected MacOS.Identity, got %q", cfg.Sign.MacOS.Identity)
|
|
||||||
}
|
|
||||||
if !cfg.Sign.MacOS.Notarize {
|
|
||||||
t.Error("expected MacOS.Notarize to be true")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuildConfig_ToTargets_Good(t *testing.T) {
|
|
||||||
t.Run("converts TargetConfig to Target", func(t *testing.T) {
|
|
||||||
cfg := &BuildConfig{
|
|
||||||
Targets: []TargetConfig{
|
|
||||||
{OS: "linux", Arch: "amd64"},
|
|
||||||
{OS: "darwin", Arch: "arm64"},
|
|
||||||
{OS: "windows", Arch: "386"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
targets := cfg.ToTargets()
|
|
||||||
require.Len(t, targets, 3)
|
|
||||||
|
|
||||||
assert.Equal(t, Target{OS: "linux", Arch: "amd64"}, targets[0])
|
|
||||||
assert.Equal(t, Target{OS: "darwin", Arch: "arm64"}, targets[1])
|
|
||||||
assert.Equal(t, Target{OS: "windows", Arch: "386"}, targets[2])
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns empty slice for no targets", func(t *testing.T) {
|
|
||||||
cfg := &BuildConfig{
|
|
||||||
Targets: []TargetConfig{},
|
|
||||||
}
|
|
||||||
|
|
||||||
targets := cfg.ToTargets()
|
|
||||||
assert.Empty(t, targets)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestLoadConfig_Testdata tests loading from the testdata fixture.
|
|
||||||
func TestLoadConfig_Testdata(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
abs, err := filepath.Abs("testdata/config-project")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t.Run("loads config-project fixture", func(t *testing.T) {
|
|
||||||
cfg, err := LoadConfig(fs, abs)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, cfg)
|
|
||||||
|
|
||||||
assert.Equal(t, 1, cfg.Version)
|
|
||||||
assert.Equal(t, "example-cli", cfg.Project.Name)
|
|
||||||
assert.Equal(t, "An example CLI application", cfg.Project.Description)
|
|
||||||
assert.Equal(t, "./cmd/example", cfg.Project.Main)
|
|
||||||
assert.Equal(t, "example", cfg.Project.Binary)
|
|
||||||
assert.False(t, cfg.Build.CGO)
|
|
||||||
assert.Equal(t, []string{"-trimpath"}, cfg.Build.Flags)
|
|
||||||
assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags)
|
|
||||||
assert.Len(t, cfg.Targets, 3)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,94 +0,0 @@
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"slices"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Marker files for project type detection.
|
|
||||||
const (
|
|
||||||
markerGoMod = "go.mod"
|
|
||||||
markerWails = "wails.json"
|
|
||||||
markerNodePackage = "package.json"
|
|
||||||
markerComposer = "composer.json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// projectMarker maps a marker file to its project type.
|
|
||||||
type projectMarker struct {
|
|
||||||
file string
|
|
||||||
projectType ProjectType
|
|
||||||
}
|
|
||||||
|
|
||||||
// markers defines the detection order. More specific types come first.
|
|
||||||
// Wails projects have both wails.json and go.mod, so wails is checked first.
|
|
||||||
var markers = []projectMarker{
|
|
||||||
{markerWails, ProjectTypeWails},
|
|
||||||
{markerGoMod, ProjectTypeGo},
|
|
||||||
{markerNodePackage, ProjectTypeNode},
|
|
||||||
{markerComposer, ProjectTypePHP},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Discover detects project types in the given directory by checking for marker files.
|
|
||||||
// Returns a slice of detected project types, ordered by priority (most specific first).
|
|
||||||
// For example, a Wails project returns [wails, go] since it has both wails.json and go.mod.
|
|
||||||
func Discover(fs io.Medium, dir string) ([]ProjectType, error) {
|
|
||||||
var detected []ProjectType
|
|
||||||
|
|
||||||
for _, m := range markers {
|
|
||||||
path := filepath.Join(dir, m.file)
|
|
||||||
if fileExists(fs, path) {
|
|
||||||
// Avoid duplicates (shouldn't happen with current markers, but defensive)
|
|
||||||
if !slices.Contains(detected, m.projectType) {
|
|
||||||
detected = append(detected, m.projectType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return detected, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrimaryType returns the most specific project type detected in the directory.
|
|
||||||
// Returns empty string if no project type is detected.
|
|
||||||
func PrimaryType(fs io.Medium, dir string) (ProjectType, error) {
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if len(types) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
return types[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsGoProject checks if the directory contains a Go project (go.mod or wails.json).
|
|
||||||
func IsGoProject(fs io.Medium, dir string) bool {
|
|
||||||
return fileExists(fs, filepath.Join(dir, markerGoMod)) ||
|
|
||||||
fileExists(fs, filepath.Join(dir, markerWails))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsWailsProject checks if the directory contains a Wails project.
|
|
||||||
func IsWailsProject(fs io.Medium, dir string) bool {
|
|
||||||
return fileExists(fs, filepath.Join(dir, markerWails))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNodeProject checks if the directory contains a Node.js project.
|
|
||||||
func IsNodeProject(fs io.Medium, dir string) bool {
|
|
||||||
return fileExists(fs, filepath.Join(dir, markerNodePackage))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPHPProject checks if the directory contains a PHP project.
|
|
||||||
func IsPHPProject(fs io.Medium, dir string) bool {
|
|
||||||
return fileExists(fs, filepath.Join(dir, markerComposer))
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsCPPProject checks if the directory contains a C++ project (CMakeLists.txt).
|
|
||||||
func IsCPPProject(fs io.Medium, dir string) bool {
|
|
||||||
return fileExists(fs, filepath.Join(dir, "CMakeLists.txt"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// fileExists checks if a file exists and is not a directory.
|
|
||||||
func fileExists(fs io.Medium, path string) bool {
|
|
||||||
return fs.IsFile(path)
|
|
||||||
}
|
|
||||||
|
|
@ -1,228 +0,0 @@
|
||||||
package build
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// setupTestDir creates a temporary directory with the specified marker files.
|
|
||||||
func setupTestDir(t *testing.T, markers ...string) string {
|
|
||||||
t.Helper()
|
|
||||||
dir := t.TempDir()
|
|
||||||
for _, m := range markers {
|
|
||||||
path := filepath.Join(dir, m)
|
|
||||||
err := os.WriteFile(path, []byte("{}"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
return dir
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDiscover_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("detects Go project", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "go.mod")
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, []ProjectType{ProjectTypeGo}, types)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects Wails project with priority over Go", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "wails.json", "go.mod")
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, []ProjectType{ProjectTypeWails, ProjectTypeGo}, types)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects Node.js project", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "package.json")
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, []ProjectType{ProjectTypeNode}, types)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects PHP project", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "composer.json")
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, []ProjectType{ProjectTypePHP}, types)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("detects multiple project types", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "go.mod", "package.json")
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, []ProjectType{ProjectTypeGo, ProjectTypeNode}, types)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("empty directory returns empty slice", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Empty(t, types)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDiscover_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("non-existent directory returns empty slice", func(t *testing.T) {
|
|
||||||
types, err := Discover(fs, "/non/existent/path")
|
|
||||||
assert.NoError(t, err) // os.Stat fails silently in fileExists
|
|
||||||
assert.Empty(t, types)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("directory marker is ignored", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
// Create go.mod as a directory instead of a file
|
|
||||||
err := os.Mkdir(filepath.Join(dir, "go.mod"), 0755)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Empty(t, types)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPrimaryType_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns wails for wails project", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "wails.json", "go.mod")
|
|
||||||
primary, err := PrimaryType(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, ProjectTypeWails, primary)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns go for go-only project", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "go.mod")
|
|
||||||
primary, err := PrimaryType(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, ProjectTypeGo, primary)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns empty string for empty directory", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
primary, err := PrimaryType(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Empty(t, primary)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsGoProject_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("true with go.mod", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "go.mod")
|
|
||||||
assert.True(t, IsGoProject(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("true with wails.json", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "wails.json")
|
|
||||||
assert.True(t, IsGoProject(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("false without markers", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
assert.False(t, IsGoProject(fs, dir))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsWailsProject_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("true with wails.json", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "wails.json")
|
|
||||||
assert.True(t, IsWailsProject(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("false with only go.mod", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "go.mod")
|
|
||||||
assert.False(t, IsWailsProject(fs, dir))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsNodeProject_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("true with package.json", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "package.json")
|
|
||||||
assert.True(t, IsNodeProject(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("false without package.json", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
assert.False(t, IsNodeProject(fs, dir))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestIsPHPProject_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("true with composer.json", func(t *testing.T) {
|
|
||||||
dir := setupTestDir(t, "composer.json")
|
|
||||||
assert.True(t, IsPHPProject(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("false without composer.json", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
assert.False(t, IsPHPProject(fs, dir))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTarget_Good(t *testing.T) {
|
|
||||||
target := Target{OS: "linux", Arch: "amd64"}
|
|
||||||
assert.Equal(t, "linux/amd64", target.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFileExists_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("returns true for existing file", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
path := filepath.Join(dir, "test.txt")
|
|
||||||
err := os.WriteFile(path, []byte("content"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, fileExists(fs, path))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for directory", func(t *testing.T) {
|
|
||||||
dir := t.TempDir()
|
|
||||||
assert.False(t, fileExists(fs, dir))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("returns false for non-existent path", func(t *testing.T) {
|
|
||||||
assert.False(t, fileExists(fs, "/non/existent/file"))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDiscover_Testdata tests discovery using the testdata fixtures.
|
|
||||||
// These serve as integration tests with realistic project structures.
|
|
||||||
func TestDiscover_Testdata(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
testdataDir, err := filepath.Abs("testdata")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
dir string
|
|
||||||
expected []ProjectType
|
|
||||||
}{
|
|
||||||
{"go-project", "go-project", []ProjectType{ProjectTypeGo}},
|
|
||||||
{"wails-project", "wails-project", []ProjectType{ProjectTypeWails, ProjectTypeGo}},
|
|
||||||
{"node-project", "node-project", []ProjectType{ProjectTypeNode}},
|
|
||||||
{"php-project", "php-project", []ProjectType{ProjectTypePHP}},
|
|
||||||
{"multi-project", "multi-project", []ProjectType{ProjectTypeGo, ProjectTypeNode}},
|
|
||||||
{"empty-project", "empty-project", []ProjectType{}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
dir := filepath.Join(testdataDir, tt.dir)
|
|
||||||
types, err := Discover(fs, dir)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
if len(tt.expected) == 0 {
|
|
||||||
assert.Empty(t, types)
|
|
||||||
} else {
|
|
||||||
assert.Equal(t, tt.expected, types)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,103 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MacOSSigner signs binaries using macOS codesign.
|
|
||||||
type MacOSSigner struct {
|
|
||||||
config MacOSConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile-time interface check.
|
|
||||||
var _ Signer = (*MacOSSigner)(nil)
|
|
||||||
|
|
||||||
// NewMacOSSigner creates a new macOS signer.
|
|
||||||
func NewMacOSSigner(cfg MacOSConfig) *MacOSSigner {
|
|
||||||
return &MacOSSigner{config: cfg}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns "codesign".
|
|
||||||
func (s *MacOSSigner) Name() string {
|
|
||||||
return "codesign"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available checks if running on macOS with codesign and identity configured.
|
|
||||||
func (s *MacOSSigner) Available() bool {
|
|
||||||
if runtime.GOOS != "darwin" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if s.config.Identity == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
_, err := exec.LookPath("codesign")
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign codesigns a binary with hardened runtime.
|
|
||||||
func (s *MacOSSigner) Sign(ctx context.Context, fs io.Medium, binary string) error {
|
|
||||||
if !s.Available() {
|
|
||||||
return fmt.Errorf("codesign.Sign: codesign not available")
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "codesign",
|
|
||||||
"--sign", s.config.Identity,
|
|
||||||
"--timestamp",
|
|
||||||
"--options", "runtime", // Hardened runtime for notarization
|
|
||||||
"--force",
|
|
||||||
binary,
|
|
||||||
)
|
|
||||||
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("codesign.Sign: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notarize submits binary to Apple for notarization and staples the ticket.
|
|
||||||
// This blocks until Apple responds (typically 1-5 minutes).
|
|
||||||
func (s *MacOSSigner) Notarize(ctx context.Context, fs io.Medium, binary string) error {
|
|
||||||
if s.config.AppleID == "" || s.config.TeamID == "" || s.config.AppPassword == "" {
|
|
||||||
return fmt.Errorf("codesign.Notarize: missing Apple credentials (apple_id, team_id, app_password)")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create ZIP for submission
|
|
||||||
zipPath := binary + ".zip"
|
|
||||||
zipCmd := exec.CommandContext(ctx, "zip", "-j", zipPath, binary)
|
|
||||||
if output, err := zipCmd.CombinedOutput(); err != nil {
|
|
||||||
return fmt.Errorf("codesign.Notarize: failed to create zip: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
defer func() { _ = fs.Delete(zipPath) }()
|
|
||||||
|
|
||||||
// Submit to Apple and wait
|
|
||||||
submitCmd := exec.CommandContext(ctx, "xcrun", "notarytool", "submit",
|
|
||||||
zipPath,
|
|
||||||
"--apple-id", s.config.AppleID,
|
|
||||||
"--team-id", s.config.TeamID,
|
|
||||||
"--password", s.config.AppPassword,
|
|
||||||
"--wait",
|
|
||||||
)
|
|
||||||
if output, err := submitCmd.CombinedOutput(); err != nil {
|
|
||||||
return fmt.Errorf("codesign.Notarize: notarization failed: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Staple the ticket
|
|
||||||
stapleCmd := exec.CommandContext(ctx, "xcrun", "stapler", "staple", binary)
|
|
||||||
if output, err := stapleCmd.CombinedOutput(); err != nil {
|
|
||||||
return fmt.Errorf("codesign.Notarize: failed to staple: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldNotarize returns true if notarization is enabled.
|
|
||||||
func (s *MacOSSigner) ShouldNotarize() bool {
|
|
||||||
return s.config.Notarize
|
|
||||||
}
|
|
||||||
|
|
@ -1,62 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMacOSSigner_Good_Name(t *testing.T) {
|
|
||||||
s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})
|
|
||||||
assert.Equal(t, "codesign", s.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMacOSSigner_Good_Available(t *testing.T) {
|
|
||||||
s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})
|
|
||||||
|
|
||||||
if runtime.GOOS == "darwin" {
|
|
||||||
// Just verify it doesn't panic
|
|
||||||
_ = s.Available()
|
|
||||||
} else {
|
|
||||||
assert.False(t, s.Available())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMacOSSigner_Bad_NoIdentity(t *testing.T) {
|
|
||||||
s := NewMacOSSigner(MacOSConfig{})
|
|
||||||
assert.False(t, s.Available())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMacOSSigner_Sign_Bad(t *testing.T) {
|
|
||||||
t.Run("fails when not available", func(t *testing.T) {
|
|
||||||
if runtime.GOOS == "darwin" {
|
|
||||||
t.Skip("skipping on macOS")
|
|
||||||
}
|
|
||||||
fs := io.Local
|
|
||||||
s := NewMacOSSigner(MacOSConfig{Identity: "test"})
|
|
||||||
err := s.Sign(context.Background(), fs, "test")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "not available")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMacOSSigner_Notarize_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("fails with missing credentials", func(t *testing.T) {
|
|
||||||
s := NewMacOSSigner(MacOSConfig{})
|
|
||||||
err := s.Notarize(context.Background(), fs, "test")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "missing Apple credentials")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMacOSSigner_ShouldNotarize(t *testing.T) {
|
|
||||||
s := NewMacOSSigner(MacOSConfig{Notarize: true})
|
|
||||||
assert.True(t, s.ShouldNotarize())
|
|
||||||
|
|
||||||
s2 := NewMacOSSigner(MacOSConfig{Notarize: false})
|
|
||||||
assert.False(t, s2.ShouldNotarize())
|
|
||||||
}
|
|
||||||
|
|
@ -1,59 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GPGSigner signs files using GPG.
|
|
||||||
type GPGSigner struct {
|
|
||||||
KeyID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile-time interface check.
|
|
||||||
var _ Signer = (*GPGSigner)(nil)
|
|
||||||
|
|
||||||
// NewGPGSigner creates a new GPG signer.
|
|
||||||
func NewGPGSigner(keyID string) *GPGSigner {
|
|
||||||
return &GPGSigner{KeyID: keyID}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns "gpg".
|
|
||||||
func (s *GPGSigner) Name() string {
|
|
||||||
return "gpg"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available checks if gpg is installed and key is configured.
|
|
||||||
func (s *GPGSigner) Available() bool {
|
|
||||||
if s.KeyID == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
_, err := exec.LookPath("gpg")
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign creates a detached ASCII-armored signature.
|
|
||||||
// For file.txt, creates file.txt.asc
|
|
||||||
func (s *GPGSigner) Sign(ctx context.Context, fs io.Medium, file string) error {
|
|
||||||
if !s.Available() {
|
|
||||||
return fmt.Errorf("gpg.Sign: gpg not available or key not configured")
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "gpg",
|
|
||||||
"--detach-sign",
|
|
||||||
"--armor",
|
|
||||||
"--local-user", s.KeyID,
|
|
||||||
"--output", file+".asc",
|
|
||||||
file,
|
|
||||||
)
|
|
||||||
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("gpg.Sign: %w\nOutput: %s", err, string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,34 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGPGSigner_Good_Name(t *testing.T) {
|
|
||||||
s := NewGPGSigner("ABCD1234")
|
|
||||||
assert.Equal(t, "gpg", s.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGSigner_Good_Available(t *testing.T) {
|
|
||||||
s := NewGPGSigner("ABCD1234")
|
|
||||||
_ = s.Available()
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGSigner_Bad_NoKey(t *testing.T) {
|
|
||||||
s := NewGPGSigner("")
|
|
||||||
assert.False(t, s.Available())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGPGSigner_Sign_Bad(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
t.Run("fails when no key", func(t *testing.T) {
|
|
||||||
s := NewGPGSigner("")
|
|
||||||
err := s.Sign(context.Background(), fs, "test.txt")
|
|
||||||
assert.Error(t, err)
|
|
||||||
assert.Contains(t, err.Error(), "not available or key not configured")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,96 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Artifact represents a build output that can be signed.
|
|
||||||
// This mirrors build.Artifact to avoid import cycles.
|
|
||||||
type Artifact struct {
|
|
||||||
Path string
|
|
||||||
OS string
|
|
||||||
Arch string
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignBinaries signs macOS binaries in the artifacts list.
|
|
||||||
// Only signs darwin binaries when running on macOS with a configured identity.
|
|
||||||
func SignBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error {
|
|
||||||
if !cfg.Enabled {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only sign on macOS
|
|
||||||
if runtime.GOOS != "darwin" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
signer := NewMacOSSigner(cfg.MacOS)
|
|
||||||
if !signer.Available() {
|
|
||||||
return nil // Silently skip if not configured
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, artifact := range artifacts {
|
|
||||||
if artifact.OS != "darwin" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf(" Signing %s...\n", artifact.Path)
|
|
||||||
if err := signer.Sign(ctx, fs, artifact.Path); err != nil {
|
|
||||||
return fmt.Errorf("failed to sign %s: %w", artifact.Path, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotarizeBinaries notarizes macOS binaries if enabled.
|
|
||||||
func NotarizeBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error {
|
|
||||||
if !cfg.Enabled || !cfg.MacOS.Notarize {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if runtime.GOOS != "darwin" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
signer := NewMacOSSigner(cfg.MacOS)
|
|
||||||
if !signer.Available() {
|
|
||||||
return fmt.Errorf("notarization requested but codesign not available")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, artifact := range artifacts {
|
|
||||||
if artifact.OS != "darwin" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf(" Notarizing %s (this may take a few minutes)...\n", artifact.Path)
|
|
||||||
if err := signer.Notarize(ctx, fs, artifact.Path); err != nil {
|
|
||||||
return fmt.Errorf("failed to notarize %s: %w", artifact.Path, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignChecksums signs the checksums file with GPG.
|
|
||||||
func SignChecksums(ctx context.Context, fs io.Medium, cfg SignConfig, checksumFile string) error {
|
|
||||||
if !cfg.Enabled {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
signer := NewGPGSigner(cfg.GPG.Key)
|
|
||||||
if !signer.Available() {
|
|
||||||
return nil // Silently skip if not configured
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf(" Signing %s with GPG...\n", checksumFile)
|
|
||||||
if err := signer.Sign(ctx, fs, checksumFile); err != nil {
|
|
||||||
return fmt.Errorf("failed to sign checksums: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,83 +0,0 @@
|
||||||
// Package signing provides code signing for build artifacts.
|
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Signer defines the interface for code signing implementations.
|
|
||||||
type Signer interface {
|
|
||||||
// Name returns the signer's identifier.
|
|
||||||
Name() string
|
|
||||||
// Available checks if this signer can be used.
|
|
||||||
Available() bool
|
|
||||||
// Sign signs the artifact at the given path.
|
|
||||||
Sign(ctx context.Context, fs io.Medium, path string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignConfig holds signing configuration from .core/build.yaml.
|
|
||||||
type SignConfig struct {
|
|
||||||
Enabled bool `yaml:"enabled"`
|
|
||||||
GPG GPGConfig `yaml:"gpg,omitempty"`
|
|
||||||
MacOS MacOSConfig `yaml:"macos,omitempty"`
|
|
||||||
Windows WindowsConfig `yaml:"windows,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GPGConfig holds GPG signing configuration.
|
|
||||||
type GPGConfig struct {
|
|
||||||
Key string `yaml:"key"` // Key ID or fingerprint, supports $ENV
|
|
||||||
}
|
|
||||||
|
|
||||||
// MacOSConfig holds macOS codesign configuration.
|
|
||||||
type MacOSConfig struct {
|
|
||||||
Identity string `yaml:"identity"` // Developer ID Application: ...
|
|
||||||
Notarize bool `yaml:"notarize"` // Submit to Apple for notarization
|
|
||||||
AppleID string `yaml:"apple_id"` // Apple account email
|
|
||||||
TeamID string `yaml:"team_id"` // Team ID
|
|
||||||
AppPassword string `yaml:"app_password"` // App-specific password
|
|
||||||
}
|
|
||||||
|
|
||||||
// WindowsConfig holds Windows signtool configuration (placeholder).
|
|
||||||
type WindowsConfig struct {
|
|
||||||
Certificate string `yaml:"certificate"` // Path to .pfx
|
|
||||||
Password string `yaml:"password"` // Certificate password
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultSignConfig returns sensible defaults.
|
|
||||||
func DefaultSignConfig() SignConfig {
|
|
||||||
return SignConfig{
|
|
||||||
Enabled: true,
|
|
||||||
GPG: GPGConfig{
|
|
||||||
Key: os.Getenv("GPG_KEY_ID"),
|
|
||||||
},
|
|
||||||
MacOS: MacOSConfig{
|
|
||||||
Identity: os.Getenv("CODESIGN_IDENTITY"),
|
|
||||||
AppleID: os.Getenv("APPLE_ID"),
|
|
||||||
TeamID: os.Getenv("APPLE_TEAM_ID"),
|
|
||||||
AppPassword: os.Getenv("APPLE_APP_PASSWORD"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExpandEnv expands environment variables in config values.
|
|
||||||
func (c *SignConfig) ExpandEnv() {
|
|
||||||
c.GPG.Key = expandEnv(c.GPG.Key)
|
|
||||||
c.MacOS.Identity = expandEnv(c.MacOS.Identity)
|
|
||||||
c.MacOS.AppleID = expandEnv(c.MacOS.AppleID)
|
|
||||||
c.MacOS.TeamID = expandEnv(c.MacOS.TeamID)
|
|
||||||
c.MacOS.AppPassword = expandEnv(c.MacOS.AppPassword)
|
|
||||||
c.Windows.Certificate = expandEnv(c.Windows.Certificate)
|
|
||||||
c.Windows.Password = expandEnv(c.Windows.Password)
|
|
||||||
}
|
|
||||||
|
|
||||||
// expandEnv expands $VAR or ${VAR} references in s using the process
// environment; unset variables expand to the empty string (os.ExpandEnv
// semantics). Strings without a '$' pass through unchanged.
//
// Fix: the previous guard used strings.HasPrefix(s, "$"), so references
// embedded mid-string (e.g. "id-${KEY}") were silently left literal even
// though the documented contract promises expansion. Checking for '$'
// anywhere in the value honors that contract while still leaving plain
// values untouched.
func expandEnv(s string) string {
	if strings.Contains(s, "$") {
		return os.ExpandEnv(s)
	}
	return s
}
|
|
||||||
|
|
@ -1,162 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"runtime"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSignBinaries_Good_SkipsNonDarwin(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: true,
|
|
||||||
MacOS: MacOSConfig{
|
|
||||||
Identity: "Developer ID Application: Test",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create fake artifact for linux
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/tmp/test-binary", OS: "linux", Arch: "amd64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should not error even though binary doesn't exist (skips non-darwin)
|
|
||||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignBinaries_Good_DisabledConfig(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignBinaries_Good_SkipsOnNonMacOS(t *testing.T) {
|
|
||||||
if runtime.GOOS == "darwin" {
|
|
||||||
t.Skip("Skipping on macOS - this tests non-macOS behavior")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: true,
|
|
||||||
MacOS: MacOSConfig{
|
|
||||||
Identity: "Developer ID Application: Test",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNotarizeBinaries_Good_DisabledConfig(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := NotarizeBinaries(ctx, fs, cfg, artifacts)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNotarizeBinaries_Good_NotarizeDisabled(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: true,
|
|
||||||
MacOS: MacOSConfig{
|
|
||||||
Notarize: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
artifacts := []Artifact{
|
|
||||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
|
||||||
}
|
|
||||||
|
|
||||||
err := NotarizeBinaries(ctx, fs, cfg, artifacts)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignChecksums_Good_SkipsNoKey(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: true,
|
|
||||||
GPG: GPGConfig{
|
|
||||||
Key: "", // No key configured
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should silently skip when no key
|
|
||||||
err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignChecksums_Good_Disabled(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fs := io.Local
|
|
||||||
cfg := SignConfig{
|
|
||||||
Enabled: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDefaultSignConfig(t *testing.T) {
|
|
||||||
cfg := DefaultSignConfig()
|
|
||||||
assert.True(t, cfg.Enabled)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSignConfig_ExpandEnv(t *testing.T) {
|
|
||||||
t.Setenv("TEST_KEY", "ABC")
|
|
||||||
cfg := SignConfig{
|
|
||||||
GPG: GPGConfig{Key: "$TEST_KEY"},
|
|
||||||
}
|
|
||||||
cfg.ExpandEnv()
|
|
||||||
assert.Equal(t, "ABC", cfg.GPG.Key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWindowsSigner_Good(t *testing.T) {
|
|
||||||
fs := io.Local
|
|
||||||
s := NewWindowsSigner(WindowsConfig{})
|
|
||||||
assert.Equal(t, "signtool", s.Name())
|
|
||||||
assert.False(t, s.Available())
|
|
||||||
assert.NoError(t, s.Sign(context.Background(), fs, "test.exe"))
|
|
||||||
}
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
package signing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WindowsSigner signs binaries using Windows signtool (placeholder).
|
|
||||||
type WindowsSigner struct {
|
|
||||||
config WindowsConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile-time interface check.
|
|
||||||
var _ Signer = (*WindowsSigner)(nil)
|
|
||||||
|
|
||||||
// NewWindowsSigner creates a new Windows signer.
|
|
||||||
func NewWindowsSigner(cfg WindowsConfig) *WindowsSigner {
|
|
||||||
return &WindowsSigner{config: cfg}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns "signtool".
|
|
||||||
func (s *WindowsSigner) Name() string {
|
|
||||||
return "signtool"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Available returns false (not yet implemented).
|
|
||||||
func (s *WindowsSigner) Available() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign is a placeholder that does nothing.
|
|
||||||
func (s *WindowsSigner) Sign(ctx context.Context, fs io.Medium, binary string) error {
|
|
||||||
// TODO: Implement Windows signing
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
@ -1,25 +0,0 @@
|
||||||
# Example build configuration for Core build system
|
|
||||||
version: 1
|
|
||||||
|
|
||||||
project:
|
|
||||||
name: example-cli
|
|
||||||
description: An example CLI application
|
|
||||||
main: ./cmd/example
|
|
||||||
binary: example
|
|
||||||
|
|
||||||
build:
|
|
||||||
cgo: false
|
|
||||||
flags:
|
|
||||||
- -trimpath
|
|
||||||
ldflags:
|
|
||||||
- -s
|
|
||||||
- -w
|
|
||||||
env: []
|
|
||||||
|
|
||||||
targets:
|
|
||||||
- os: linux
|
|
||||||
arch: amd64
|
|
||||||
- os: darwin
|
|
||||||
arch: arm64
|
|
||||||
- os: windows
|
|
||||||
arch: amd64
|
|
||||||
|
|
@ -1,2 +0,0 @@
|
||||||
cmake_minimum_required(VERSION 3.16)
|
|
||||||
project(TestCPP)
|
|
||||||
0
pkg/build/testdata/empty-project/.gitkeep
vendored
0
pkg/build/testdata/empty-project/.gitkeep
vendored
3
pkg/build/testdata/go-project/go.mod
vendored
3
pkg/build/testdata/go-project/go.mod
vendored
|
|
@ -1,3 +0,0 @@
|
||||||
module example.com/go-project
|
|
||||||
|
|
||||||
go 1.21
|
|
||||||
3
pkg/build/testdata/multi-project/go.mod
vendored
3
pkg/build/testdata/multi-project/go.mod
vendored
|
|
@ -1,3 +0,0 @@
|
||||||
module example.com/multi-project
|
|
||||||
|
|
||||||
go 1.21
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
{
|
|
||||||
"name": "multi-project",
|
|
||||||
"version": "1.0.0"
|
|
||||||
}
|
|
||||||
4
pkg/build/testdata/node-project/package.json
vendored
4
pkg/build/testdata/node-project/package.json
vendored
|
|
@ -1,4 +0,0 @@
|
||||||
{
|
|
||||||
"name": "node-project",
|
|
||||||
"version": "1.0.0"
|
|
||||||
}
|
|
||||||
4
pkg/build/testdata/php-project/composer.json
vendored
4
pkg/build/testdata/php-project/composer.json
vendored
|
|
@ -1,4 +0,0 @@
|
||||||
{
|
|
||||||
"name": "vendor/php-project",
|
|
||||||
"type": "library"
|
|
||||||
}
|
|
||||||
3
pkg/build/testdata/wails-project/go.mod
vendored
3
pkg/build/testdata/wails-project/go.mod
vendored
|
|
@ -1,3 +0,0 @@
|
||||||
module example.com/wails-project
|
|
||||||
|
|
||||||
go 1.21
|
|
||||||
4
pkg/build/testdata/wails-project/wails.json
vendored
4
pkg/build/testdata/wails-project/wails.json
vendored
|
|
@ -1,4 +0,0 @@
|
||||||
{
|
|
||||||
"name": "wails-project",
|
|
||||||
"outputfilename": "wails-project"
|
|
||||||
}
|
|
||||||
|
|
@ -5,7 +5,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/crypt/openpgp"
|
|
||||||
"forge.lthn.ai/core/go/pkg/framework"
|
"forge.lthn.ai/core/go/pkg/framework"
|
||||||
"forge.lthn.ai/core/go/pkg/log"
|
"forge.lthn.ai/core/go/pkg/log"
|
||||||
"forge.lthn.ai/core/go/pkg/workspace"
|
"forge.lthn.ai/core/go/pkg/workspace"
|
||||||
|
|
@ -70,7 +69,6 @@ func Main() {
|
||||||
framework.WithName("log", NewLogService(log.Options{
|
framework.WithName("log", NewLogService(log.Options{
|
||||||
Level: log.LevelInfo,
|
Level: log.LevelInfo,
|
||||||
})),
|
})),
|
||||||
framework.WithName("crypt", openpgp.New),
|
|
||||||
framework.WithName("workspace", workspace.New),
|
framework.WithName("workspace", workspace.New),
|
||||||
},
|
},
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
|
|
|
||||||
|
|
@ -1,297 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// httpClient is the HTTP client used for all collection requests.
|
|
||||||
// Use SetHTTPClient to override for testing.
|
|
||||||
var httpClient = &http.Client{
|
|
||||||
Timeout: 30 * time.Second,
|
|
||||||
}
|
|
||||||
|
|
||||||
// BitcoinTalkCollector collects forum posts from BitcoinTalk.
|
|
||||||
type BitcoinTalkCollector struct {
|
|
||||||
// TopicID is the numeric topic identifier.
|
|
||||||
TopicID string
|
|
||||||
|
|
||||||
// URL is a full URL to a BitcoinTalk topic page. If set, TopicID is
|
|
||||||
// extracted from it.
|
|
||||||
URL string
|
|
||||||
|
|
||||||
// Pages limits collection to this many pages. 0 means all pages.
|
|
||||||
Pages int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the collector name.
|
|
||||||
func (b *BitcoinTalkCollector) Name() string {
|
|
||||||
id := b.TopicID
|
|
||||||
if id == "" && b.URL != "" {
|
|
||||||
id = "url"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("bitcointalk:%s", id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect gathers posts from a BitcoinTalk topic.
|
|
||||||
func (b *BitcoinTalkCollector) Collect(ctx context.Context, cfg *Config) (*Result, error) {
|
|
||||||
result := &Result{Source: b.Name()}
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitStart(b.Name(), "Starting BitcoinTalk collection")
|
|
||||||
}
|
|
||||||
|
|
||||||
topicID := b.TopicID
|
|
||||||
if topicID == "" {
|
|
||||||
return result, core.E("collect.BitcoinTalk.Collect", "topic ID is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.DryRun {
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitProgress(b.Name(), fmt.Sprintf("[dry-run] Would collect topic %s", topicID), nil)
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
baseDir := filepath.Join(cfg.OutputDir, "bitcointalk", topicID, "posts")
|
|
||||||
if err := cfg.Output.EnsureDir(baseDir); err != nil {
|
|
||||||
return result, core.E("collect.BitcoinTalk.Collect", "failed to create output directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
postNum := 0
|
|
||||||
offset := 0
|
|
||||||
pageCount := 0
|
|
||||||
postsPerPage := 20
|
|
||||||
|
|
||||||
for {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return result, core.E("collect.BitcoinTalk.Collect", "context cancelled", ctx.Err())
|
|
||||||
}
|
|
||||||
|
|
||||||
if b.Pages > 0 && pageCount >= b.Pages {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Limiter != nil {
|
|
||||||
if err := cfg.Limiter.Wait(ctx, "bitcointalk"); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pageURL := fmt.Sprintf("https://bitcointalk.org/index.php?topic=%s.%d", topicID, offset)
|
|
||||||
|
|
||||||
posts, err := b.fetchPage(ctx, pageURL)
|
|
||||||
if err != nil {
|
|
||||||
result.Errors++
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitError(b.Name(), fmt.Sprintf("Failed to fetch page at offset %d: %v", offset, err), nil)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(posts) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, post := range posts {
|
|
||||||
postNum++
|
|
||||||
filePath := filepath.Join(baseDir, fmt.Sprintf("%d.md", postNum))
|
|
||||||
content := formatPostMarkdown(postNum, post)
|
|
||||||
|
|
||||||
if err := cfg.Output.Write(filePath, content); err != nil {
|
|
||||||
result.Errors++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, filePath)
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitItem(b.Name(), fmt.Sprintf("Post %d by %s", postNum, post.Author), nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pageCount++
|
|
||||||
offset += postsPerPage
|
|
||||||
|
|
||||||
// If we got fewer posts than expected, we've reached the end
|
|
||||||
if len(posts) < postsPerPage {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitComplete(b.Name(), fmt.Sprintf("Collected %d posts", result.Items), result)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// btPost represents a parsed BitcoinTalk forum post.
|
|
||||||
type btPost struct {
|
|
||||||
Author string
|
|
||||||
Date string
|
|
||||||
Content string
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchPage fetches and parses a single BitcoinTalk topic page.
|
|
||||||
func (b *BitcoinTalkCollector) fetchPage(ctx context.Context, pageURL string) ([]btPost, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, pageURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.BitcoinTalk.fetchPage", "failed to create request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("User-Agent", "Mozilla/5.0 (compatible; CoreCollector/1.0)")
|
|
||||||
|
|
||||||
resp, err := httpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.BitcoinTalk.fetchPage", "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, core.E("collect.BitcoinTalk.fetchPage",
|
|
||||||
fmt.Sprintf("unexpected status code: %d", resp.StatusCode), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
doc, err := html.Parse(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.BitcoinTalk.fetchPage", "failed to parse HTML", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return extractPosts(doc), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractPosts extracts post data from a parsed HTML document.
|
|
||||||
// It looks for the common BitcoinTalk post structure using div.post elements.
|
|
||||||
func extractPosts(doc *html.Node) []btPost {
|
|
||||||
var posts []btPost
|
|
||||||
var walk func(*html.Node)
|
|
||||||
|
|
||||||
walk = func(n *html.Node) {
|
|
||||||
if n.Type == html.ElementNode && n.Data == "div" {
|
|
||||||
for _, attr := range n.Attr {
|
|
||||||
if attr.Key == "class" && strings.Contains(attr.Val, "post") {
|
|
||||||
post := parsePost(n)
|
|
||||||
if post.Content != "" {
|
|
||||||
posts = append(posts, post)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
walk(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
walk(doc)
|
|
||||||
return posts
|
|
||||||
}
|
|
||||||
|
|
||||||
// parsePost extracts author, date, and content from a post div.
|
|
||||||
func parsePost(node *html.Node) btPost {
|
|
||||||
post := btPost{}
|
|
||||||
var walk func(*html.Node)
|
|
||||||
|
|
||||||
walk = func(n *html.Node) {
|
|
||||||
if n.Type == html.ElementNode {
|
|
||||||
for _, attr := range n.Attr {
|
|
||||||
if attr.Key == "class" {
|
|
||||||
switch {
|
|
||||||
case strings.Contains(attr.Val, "poster_info"):
|
|
||||||
post.Author = extractText(n)
|
|
||||||
case strings.Contains(attr.Val, "headerandpost"):
|
|
||||||
// Look for date in smalltext
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
if c.Type == html.ElementNode && c.Data == "div" {
|
|
||||||
for _, a := range c.Attr {
|
|
||||||
if a.Key == "class" && strings.Contains(a.Val, "smalltext") {
|
|
||||||
post.Date = strings.TrimSpace(extractText(c))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case strings.Contains(attr.Val, "inner"):
|
|
||||||
post.Content = strings.TrimSpace(extractText(n))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
walk(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
walk(node)
|
|
||||||
return post
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractText recursively extracts text content from an HTML node.
|
|
||||||
func extractText(n *html.Node) string {
|
|
||||||
if n.Type == html.TextNode {
|
|
||||||
return n.Data
|
|
||||||
}
|
|
||||||
|
|
||||||
var b strings.Builder
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
text := extractText(c)
|
|
||||||
if text != "" {
|
|
||||||
if b.Len() > 0 && c.Type == html.ElementNode && (c.Data == "br" || c.Data == "p" || c.Data == "div") {
|
|
||||||
b.WriteString("\n")
|
|
||||||
}
|
|
||||||
b.WriteString(text)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatPostMarkdown formats a BitcoinTalk post as markdown.
|
|
||||||
func formatPostMarkdown(num int, post btPost) string {
|
|
||||||
var b strings.Builder
|
|
||||||
fmt.Fprintf(&b, "# Post %d by %s\n\n", num, post.Author)
|
|
||||||
|
|
||||||
if post.Date != "" {
|
|
||||||
fmt.Fprintf(&b, "**Date:** %s\n\n", post.Date)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.WriteString(post.Content)
|
|
||||||
b.WriteString("\n")
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParsePostsFromHTML parses BitcoinTalk posts from raw HTML content.
|
|
||||||
// This is exported for testing purposes.
|
|
||||||
func ParsePostsFromHTML(htmlContent string) ([]btPost, error) {
|
|
||||||
doc, err := html.Parse(strings.NewReader(htmlContent))
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.ParsePostsFromHTML", "failed to parse HTML", err)
|
|
||||||
}
|
|
||||||
return extractPosts(doc), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatPostMarkdown is exported for testing purposes.
|
|
||||||
func FormatPostMarkdown(num int, author, date, content string) string {
|
|
||||||
return formatPostMarkdown(num, btPost{Author: author, Date: date, Content: content})
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchPageFunc is an injectable function type for fetching pages, used in testing.
|
|
||||||
type FetchPageFunc func(ctx context.Context, url string) ([]btPost, error)
|
|
||||||
|
|
||||||
// BitcoinTalkCollectorWithFetcher wraps BitcoinTalkCollector with a custom fetcher for testing.
|
|
||||||
type BitcoinTalkCollectorWithFetcher struct {
|
|
||||||
BitcoinTalkCollector
|
|
||||||
Fetcher FetchPageFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHTTPClient replaces the package-level HTTP client.
|
|
||||||
// Use this in tests to inject a custom transport or timeout.
|
|
||||||
func SetHTTPClient(c *http.Client) {
|
|
||||||
httpClient = c
|
|
||||||
}
|
|
||||||
|
|
@ -1,93 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBitcoinTalkCollector_Name_Good(t *testing.T) {
|
|
||||||
b := &BitcoinTalkCollector{TopicID: "12345"}
|
|
||||||
assert.Equal(t, "bitcointalk:12345", b.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBitcoinTalkCollector_Name_Good_URL(t *testing.T) {
|
|
||||||
b := &BitcoinTalkCollector{URL: "https://bitcointalk.org/index.php?topic=12345.0"}
|
|
||||||
assert.Equal(t, "bitcointalk:url", b.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBitcoinTalkCollector_Collect_Bad_NoTopicID(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
b := &BitcoinTalkCollector{}
|
|
||||||
_, err := b.Collect(context.Background(), cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBitcoinTalkCollector_Collect_Good_DryRun(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
b := &BitcoinTalkCollector{TopicID: "12345"}
|
|
||||||
result, err := b.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParsePostsFromHTML_Good(t *testing.T) {
|
|
||||||
sampleHTML := `
|
|
||||||
<html><body>
|
|
||||||
<div class="post">
|
|
||||||
<div class="poster_info">satoshi</div>
|
|
||||||
<div class="headerandpost">
|
|
||||||
<div class="smalltext">January 03, 2009</div>
|
|
||||||
</div>
|
|
||||||
<div class="inner">This is the first post content.</div>
|
|
||||||
</div>
|
|
||||||
<div class="post">
|
|
||||||
<div class="poster_info">hal</div>
|
|
||||||
<div class="headerandpost">
|
|
||||||
<div class="smalltext">January 10, 2009</div>
|
|
||||||
</div>
|
|
||||||
<div class="inner">Running bitcoin!</div>
|
|
||||||
</div>
|
|
||||||
</body></html>`
|
|
||||||
|
|
||||||
posts, err := ParsePostsFromHTML(sampleHTML)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Len(t, posts, 2)
|
|
||||||
|
|
||||||
assert.Contains(t, posts[0].Author, "satoshi")
|
|
||||||
assert.Contains(t, posts[0].Content, "This is the first post content.")
|
|
||||||
assert.Contains(t, posts[0].Date, "January 03, 2009")
|
|
||||||
|
|
||||||
assert.Contains(t, posts[1].Author, "hal")
|
|
||||||
assert.Contains(t, posts[1].Content, "Running bitcoin!")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestParsePostsFromHTML_Good_Empty(t *testing.T) {
|
|
||||||
posts, err := ParsePostsFromHTML("<html><body></body></html>")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Empty(t, posts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatPostMarkdown_Good(t *testing.T) {
|
|
||||||
md := FormatPostMarkdown(1, "satoshi", "January 03, 2009", "Hello, world!")
|
|
||||||
|
|
||||||
assert.Contains(t, md, "# Post 1 by satoshi")
|
|
||||||
assert.Contains(t, md, "**Date:** January 03, 2009")
|
|
||||||
assert.Contains(t, md, "Hello, world!")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatPostMarkdown_Good_NoDate(t *testing.T) {
|
|
||||||
md := FormatPostMarkdown(5, "user", "", "Content here")
|
|
||||||
|
|
||||||
assert.Contains(t, md, "# Post 5 by user")
|
|
||||||
assert.NotContains(t, md, "**Date:**")
|
|
||||||
assert.Contains(t, md, "Content here")
|
|
||||||
}
|
|
||||||
|
|
@ -1,103 +0,0 @@
|
||||||
// Package collect provides a data collection subsystem for gathering information
|
|
||||||
// from multiple sources including GitHub, BitcoinTalk, CoinGecko, and academic
|
|
||||||
// paper repositories. It supports rate limiting, incremental state tracking,
|
|
||||||
// and event-driven progress reporting.
|
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Collector is the interface all collection sources implement.
|
|
||||||
type Collector interface {
|
|
||||||
// Name returns a human-readable name for this collector.
|
|
||||||
Name() string
|
|
||||||
|
|
||||||
// Collect gathers data from the source and writes it to the configured output.
|
|
||||||
Collect(ctx context.Context, cfg *Config) (*Result, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config holds shared configuration for all collectors.
|
|
||||||
type Config struct {
|
|
||||||
// Output is the storage medium for writing collected data.
|
|
||||||
Output io.Medium
|
|
||||||
|
|
||||||
// OutputDir is the base directory for all collected data.
|
|
||||||
OutputDir string
|
|
||||||
|
|
||||||
// Limiter provides per-source rate limiting.
|
|
||||||
Limiter *RateLimiter
|
|
||||||
|
|
||||||
// State tracks collection progress for incremental runs.
|
|
||||||
State *State
|
|
||||||
|
|
||||||
// Dispatcher manages event dispatch for progress reporting.
|
|
||||||
Dispatcher *Dispatcher
|
|
||||||
|
|
||||||
// Verbose enables detailed logging output.
|
|
||||||
Verbose bool
|
|
||||||
|
|
||||||
// DryRun simulates collection without writing files.
|
|
||||||
DryRun bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Result holds the output of a collection run.
|
|
||||||
type Result struct {
|
|
||||||
// Source identifies which collector produced this result.
|
|
||||||
Source string
|
|
||||||
|
|
||||||
// Items is the number of items successfully collected.
|
|
||||||
Items int
|
|
||||||
|
|
||||||
// Errors is the number of errors encountered during collection.
|
|
||||||
Errors int
|
|
||||||
|
|
||||||
// Skipped is the number of items skipped (e.g. already collected).
|
|
||||||
Skipped int
|
|
||||||
|
|
||||||
// Files lists the paths of all files written.
|
|
||||||
Files []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfig creates a Config with sensible defaults.
|
|
||||||
// It initialises a MockMedium for output if none is provided,
|
|
||||||
// sets up a rate limiter, state tracker, and event dispatcher.
|
|
||||||
func NewConfig(outputDir string) *Config {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
return &Config{
|
|
||||||
Output: m,
|
|
||||||
OutputDir: outputDir,
|
|
||||||
Limiter: NewRateLimiter(),
|
|
||||||
State: NewState(m, filepath.Join(outputDir, ".collect-state.json")),
|
|
||||||
Dispatcher: NewDispatcher(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConfigWithMedium creates a Config using the specified storage medium.
|
|
||||||
func NewConfigWithMedium(m io.Medium, outputDir string) *Config {
|
|
||||||
return &Config{
|
|
||||||
Output: m,
|
|
||||||
OutputDir: outputDir,
|
|
||||||
Limiter: NewRateLimiter(),
|
|
||||||
State: NewState(m, filepath.Join(outputDir, ".collect-state.json")),
|
|
||||||
Dispatcher: NewDispatcher(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MergeResults combines multiple results into a single aggregated result.
|
|
||||||
func MergeResults(source string, results ...*Result) *Result {
|
|
||||||
merged := &Result{Source: source}
|
|
||||||
for _, r := range results {
|
|
||||||
if r == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
merged.Items += r.Items
|
|
||||||
merged.Errors += r.Errors
|
|
||||||
merged.Skipped += r.Skipped
|
|
||||||
merged.Files = append(merged.Files, r.Files...)
|
|
||||||
}
|
|
||||||
return merged
|
|
||||||
}
|
|
||||||
|
|
@ -1,68 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestNewConfig_Good(t *testing.T) {
|
|
||||||
cfg := NewConfig("/tmp/output")
|
|
||||||
|
|
||||||
assert.NotNil(t, cfg)
|
|
||||||
assert.Equal(t, "/tmp/output", cfg.OutputDir)
|
|
||||||
assert.NotNil(t, cfg.Output)
|
|
||||||
assert.NotNil(t, cfg.Limiter)
|
|
||||||
assert.NotNil(t, cfg.State)
|
|
||||||
assert.NotNil(t, cfg.Dispatcher)
|
|
||||||
assert.False(t, cfg.Verbose)
|
|
||||||
assert.False(t, cfg.DryRun)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewConfigWithMedium_Good(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/data")
|
|
||||||
|
|
||||||
assert.NotNil(t, cfg)
|
|
||||||
assert.Equal(t, m, cfg.Output)
|
|
||||||
assert.Equal(t, "/data", cfg.OutputDir)
|
|
||||||
assert.NotNil(t, cfg.Limiter)
|
|
||||||
assert.NotNil(t, cfg.State)
|
|
||||||
assert.NotNil(t, cfg.Dispatcher)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMergeResults_Good(t *testing.T) {
|
|
||||||
r1 := &Result{
|
|
||||||
Source: "a",
|
|
||||||
Items: 5,
|
|
||||||
Errors: 1,
|
|
||||||
Files: []string{"a.md", "b.md"},
|
|
||||||
}
|
|
||||||
r2 := &Result{
|
|
||||||
Source: "b",
|
|
||||||
Items: 3,
|
|
||||||
Skipped: 2,
|
|
||||||
Files: []string{"c.md"},
|
|
||||||
}
|
|
||||||
|
|
||||||
merged := MergeResults("combined", r1, r2)
|
|
||||||
assert.Equal(t, "combined", merged.Source)
|
|
||||||
assert.Equal(t, 8, merged.Items)
|
|
||||||
assert.Equal(t, 1, merged.Errors)
|
|
||||||
assert.Equal(t, 2, merged.Skipped)
|
|
||||||
assert.Len(t, merged.Files, 3)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMergeResults_Good_NilResults(t *testing.T) {
|
|
||||||
r1 := &Result{Items: 3}
|
|
||||||
merged := MergeResults("test", r1, nil, nil)
|
|
||||||
assert.Equal(t, 3, merged.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMergeResults_Good_Empty(t *testing.T) {
|
|
||||||
merged := MergeResults("empty")
|
|
||||||
assert.Equal(t, 0, merged.Items)
|
|
||||||
assert.Equal(t, 0, merged.Errors)
|
|
||||||
assert.Nil(t, merged.Files)
|
|
||||||
}
|
|
||||||
|
|
@ -1,133 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Event types used by the collection subsystem.
|
|
||||||
const (
|
|
||||||
// EventStart is emitted when a collector begins its run.
|
|
||||||
EventStart = "start"
|
|
||||||
|
|
||||||
// EventProgress is emitted to report incremental progress.
|
|
||||||
EventProgress = "progress"
|
|
||||||
|
|
||||||
// EventItem is emitted when a single item is collected.
|
|
||||||
EventItem = "item"
|
|
||||||
|
|
||||||
// EventError is emitted when an error occurs during collection.
|
|
||||||
EventError = "error"
|
|
||||||
|
|
||||||
// EventComplete is emitted when a collector finishes its run.
|
|
||||||
EventComplete = "complete"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Event represents a collection event.
|
|
||||||
type Event struct {
|
|
||||||
// Type is one of the Event* constants.
|
|
||||||
Type string `json:"type"`
|
|
||||||
|
|
||||||
// Source identifies the collector that emitted the event.
|
|
||||||
Source string `json:"source"`
|
|
||||||
|
|
||||||
// Message is a human-readable description of the event.
|
|
||||||
Message string `json:"message"`
|
|
||||||
|
|
||||||
// Data carries optional event-specific payload.
|
|
||||||
Data any `json:"data,omitempty"`
|
|
||||||
|
|
||||||
// Time is when the event occurred.
|
|
||||||
Time time.Time `json:"time"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// EventHandler handles collection events.
|
|
||||||
type EventHandler func(Event)
|
|
||||||
|
|
||||||
// Dispatcher manages event dispatch. Handlers are registered per event type
|
|
||||||
// and are called synchronously when an event is emitted.
|
|
||||||
type Dispatcher struct {
|
|
||||||
mu sync.RWMutex
|
|
||||||
handlers map[string][]EventHandler
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDispatcher creates a new event dispatcher.
|
|
||||||
func NewDispatcher() *Dispatcher {
|
|
||||||
return &Dispatcher{
|
|
||||||
handlers: make(map[string][]EventHandler),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// On registers a handler for an event type. Multiple handlers can be
|
|
||||||
// registered for the same event type and will be called in order.
|
|
||||||
func (d *Dispatcher) On(eventType string, handler EventHandler) {
|
|
||||||
d.mu.Lock()
|
|
||||||
defer d.mu.Unlock()
|
|
||||||
d.handlers[eventType] = append(d.handlers[eventType], handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emit dispatches an event to all registered handlers for that event type.
|
|
||||||
// If no handlers are registered for the event type, the event is silently dropped.
|
|
||||||
// The event's Time field is set to now if it is zero.
|
|
||||||
func (d *Dispatcher) Emit(event Event) {
|
|
||||||
if event.Time.IsZero() {
|
|
||||||
event.Time = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
d.mu.RLock()
|
|
||||||
handlers := d.handlers[event.Type]
|
|
||||||
d.mu.RUnlock()
|
|
||||||
|
|
||||||
for _, h := range handlers {
|
|
||||||
h(event)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitStart emits a start event for the given source.
|
|
||||||
func (d *Dispatcher) EmitStart(source, message string) {
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: EventStart,
|
|
||||||
Source: source,
|
|
||||||
Message: message,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitProgress emits a progress event.
|
|
||||||
func (d *Dispatcher) EmitProgress(source, message string, data any) {
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: EventProgress,
|
|
||||||
Source: source,
|
|
||||||
Message: message,
|
|
||||||
Data: data,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitItem emits an item event.
|
|
||||||
func (d *Dispatcher) EmitItem(source, message string, data any) {
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: EventItem,
|
|
||||||
Source: source,
|
|
||||||
Message: message,
|
|
||||||
Data: data,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitError emits an error event.
|
|
||||||
func (d *Dispatcher) EmitError(source, message string, data any) {
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: EventError,
|
|
||||||
Source: source,
|
|
||||||
Message: message,
|
|
||||||
Data: data,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// EmitComplete emits a complete event.
|
|
||||||
func (d *Dispatcher) EmitComplete(source, message string, data any) {
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: EventComplete,
|
|
||||||
Source: source,
|
|
||||||
Message: message,
|
|
||||||
Data: data,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,133 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestDispatcher_Emit_Good(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
var received Event
|
|
||||||
d.On(EventStart, func(e Event) {
|
|
||||||
received = e
|
|
||||||
})
|
|
||||||
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: EventStart,
|
|
||||||
Source: "test",
|
|
||||||
Message: "hello",
|
|
||||||
})
|
|
||||||
|
|
||||||
assert.Equal(t, EventStart, received.Type)
|
|
||||||
assert.Equal(t, "test", received.Source)
|
|
||||||
assert.Equal(t, "hello", received.Message)
|
|
||||||
assert.False(t, received.Time.IsZero(), "Time should be set automatically")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatcher_On_Good(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
var count int
|
|
||||||
handler := func(e Event) { count++ }
|
|
||||||
|
|
||||||
d.On(EventProgress, handler)
|
|
||||||
d.On(EventProgress, handler)
|
|
||||||
d.On(EventProgress, handler)
|
|
||||||
|
|
||||||
d.Emit(Event{Type: EventProgress, Source: "test"})
|
|
||||||
assert.Equal(t, 3, count, "All three handlers should be called")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatcher_Emit_Good_NoHandlers(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
// Should not panic when emitting an event with no handlers
|
|
||||||
assert.NotPanics(t, func() {
|
|
||||||
d.Emit(Event{
|
|
||||||
Type: "unknown-event",
|
|
||||||
Source: "test",
|
|
||||||
Message: "this should be silently dropped",
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatcher_Emit_Good_MultipleEventTypes(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
var starts, errors int
|
|
||||||
d.On(EventStart, func(e Event) { starts++ })
|
|
||||||
d.On(EventError, func(e Event) { errors++ })
|
|
||||||
|
|
||||||
d.Emit(Event{Type: EventStart, Source: "test"})
|
|
||||||
d.Emit(Event{Type: EventStart, Source: "test"})
|
|
||||||
d.Emit(Event{Type: EventError, Source: "test"})
|
|
||||||
|
|
||||||
assert.Equal(t, 2, starts)
|
|
||||||
assert.Equal(t, 1, errors)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatcher_Emit_Good_SetsTime(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
var received Event
|
|
||||||
d.On(EventItem, func(e Event) {
|
|
||||||
received = e
|
|
||||||
})
|
|
||||||
|
|
||||||
before := time.Now()
|
|
||||||
d.Emit(Event{Type: EventItem, Source: "test"})
|
|
||||||
after := time.Now()
|
|
||||||
|
|
||||||
assert.True(t, received.Time.After(before) || received.Time.Equal(before))
|
|
||||||
assert.True(t, received.Time.Before(after) || received.Time.Equal(after))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatcher_Emit_Good_PreservesExistingTime(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
customTime := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC)
|
|
||||||
var received Event
|
|
||||||
d.On(EventItem, func(e Event) {
|
|
||||||
received = e
|
|
||||||
})
|
|
||||||
|
|
||||||
d.Emit(Event{Type: EventItem, Source: "test", Time: customTime})
|
|
||||||
assert.True(t, customTime.Equal(received.Time))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDispatcher_EmitHelpers_Good(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
|
|
||||||
events := make(map[string]Event)
|
|
||||||
for _, eventType := range []string{EventStart, EventProgress, EventItem, EventError, EventComplete} {
|
|
||||||
et := eventType
|
|
||||||
d.On(et, func(e Event) {
|
|
||||||
events[et] = e
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
d.EmitStart("s1", "started")
|
|
||||||
d.EmitProgress("s2", "progressing", map[string]int{"count": 5})
|
|
||||||
d.EmitItem("s3", "got item", nil)
|
|
||||||
d.EmitError("s4", "something failed", nil)
|
|
||||||
d.EmitComplete("s5", "done", nil)
|
|
||||||
|
|
||||||
assert.Equal(t, "s1", events[EventStart].Source)
|
|
||||||
assert.Equal(t, "started", events[EventStart].Message)
|
|
||||||
|
|
||||||
assert.Equal(t, "s2", events[EventProgress].Source)
|
|
||||||
assert.NotNil(t, events[EventProgress].Data)
|
|
||||||
|
|
||||||
assert.Equal(t, "s3", events[EventItem].Source)
|
|
||||||
assert.Equal(t, "s4", events[EventError].Source)
|
|
||||||
assert.Equal(t, "s5", events[EventComplete].Source)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewDispatcher_Good(t *testing.T) {
|
|
||||||
d := NewDispatcher()
|
|
||||||
assert.NotNil(t, d)
|
|
||||||
assert.NotNil(t, d.handlers)
|
|
||||||
}
|
|
||||||
|
|
@ -1,128 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Excavator runs multiple collectors as a coordinated operation.
// It provides sequential execution with rate limit respect, state tracking
// for resume support, and aggregated results.
type Excavator struct {
	// Collectors is the ordered list of collectors to run; they execute
	// sequentially in slice order.
	Collectors []Collector

	// ScanOnly reports what would be collected without performing collection.
	// When set, Run only emits a progress event per collector and returns.
	ScanOnly bool

	// Resume enables incremental collection using saved state: collectors
	// that already recorded items in a previous run are skipped.
	Resume bool
}
|
|
||||||
|
|
||||||
// Name returns the orchestrator name.
|
|
||||||
func (e *Excavator) Name() string {
|
|
||||||
return "excavator"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run executes all collectors sequentially, respecting rate limits and
// using state for resume support. Results are aggregated from all collectors.
//
// Behaviour visible in this function:
//   - An empty Collectors slice is a no-op returning an empty Result.
//   - In ScanOnly mode nothing is collected; one progress event per
//     collector is emitted instead.
//   - A failing collector increments result.Errors and the run continues
//     with the next collector; only context cancellation or a state-load
//     failure aborts the whole run.
//   - A state-save failure is reported via EmitError but does not fail Run.
func (e *Excavator) Run(ctx context.Context, cfg *Config) (*Result, error) {
	result := &Result{Source: e.Name()}

	if len(e.Collectors) == 0 {
		return result, nil
	}

	if cfg.Dispatcher != nil {
		cfg.Dispatcher.EmitStart(e.Name(), fmt.Sprintf("Starting excavation with %d collectors", len(e.Collectors)))
	}

	// Load state if resuming; a load failure aborts the run because skip
	// decisions below depend on accurate state.
	if e.Resume && cfg.State != nil {
		if err := cfg.State.Load(); err != nil {
			return result, core.E("collect.Excavator.Run", "failed to load state", err)
		}
	}

	// If scan-only, just report what would be collected and stop.
	if e.ScanOnly {
		for _, c := range e.Collectors {
			if cfg.Dispatcher != nil {
				cfg.Dispatcher.EmitProgress(e.Name(), fmt.Sprintf("[scan] Would run collector: %s", c.Name()), nil)
			}
		}
		return result, nil
	}

	for i, c := range e.Collectors {
		// Check cancellation between collectors, not mid-collect; each
		// collector is expected to honour ctx itself.
		if ctx.Err() != nil {
			return result, core.E("collect.Excavator.Run", "context cancelled", ctx.Err())
		}

		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitProgress(e.Name(),
				fmt.Sprintf("Running collector %d/%d: %s", i+1, len(e.Collectors), c.Name()), nil)
		}

		// Check if we should skip (already completed in a previous run).
		// "Completed" here means: state has an entry with at least one item
		// and a non-zero LastRun timestamp.
		if e.Resume && cfg.State != nil {
			if entry, ok := cfg.State.Get(c.Name()); ok {
				if entry.Items > 0 && !entry.LastRun.IsZero() {
					if cfg.Dispatcher != nil {
						cfg.Dispatcher.EmitProgress(e.Name(),
							fmt.Sprintf("Skipping %s (already collected %d items on %s)",
								c.Name(), entry.Items, entry.LastRun.Format(time.RFC3339)), nil)
					}
					result.Skipped++
					continue
				}
			}
		}

		collectorResult, err := c.Collect(ctx, cfg)
		if err != nil {
			// Collector failure is non-fatal: count it, report it, move on.
			result.Errors++
			if cfg.Dispatcher != nil {
				cfg.Dispatcher.EmitError(e.Name(),
					fmt.Sprintf("Collector %s failed: %v", c.Name(), err), nil)
			}
			continue
		}

		if collectorResult != nil {
			// Aggregate per-collector counters and file lists.
			result.Items += collectorResult.Items
			result.Errors += collectorResult.Errors
			result.Skipped += collectorResult.Skipped
			result.Files = append(result.Files, collectorResult.Files...)

			// Update state so a later Resume run can skip this collector.
			if cfg.State != nil {
				cfg.State.Set(c.Name(), &StateEntry{
					Source:  c.Name(),
					LastRun: time.Now(),
					Items:   collectorResult.Items,
				})
			}
		}
	}

	// Save state; failure here is reported but deliberately non-fatal —
	// the collected data itself is already on disk.
	if cfg.State != nil {
		if err := cfg.State.Save(); err != nil {
			if cfg.Dispatcher != nil {
				cfg.Dispatcher.EmitError(e.Name(), fmt.Sprintf("Failed to save state: %v", err), nil)
			}
		}
	}

	if cfg.Dispatcher != nil {
		cfg.Dispatcher.EmitComplete(e.Name(),
			fmt.Sprintf("Excavation complete: %d items, %d errors, %d skipped",
				result.Items, result.Errors, result.Skipped), result)
	}

	return result, nil
}
|
|
||||||
|
|
@ -1,202 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
// mockCollector is a simple collector for testing the Excavator.
type mockCollector struct {
	name   string // collector name reported via Name()
	items  int    // number of fake items (and files) to report on success
	err    error  // when non-nil, Collect fails with this error
	called bool   // set to true whenever Collect is invoked
}
|
|
||||||
|
|
||||||
// Name returns the mock collector's configured name.
func (m *mockCollector) Name() string { return m.name }
|
|
||||||
|
|
||||||
func (m *mockCollector) Collect(ctx context.Context, cfg *Config) (*Result, error) {
|
|
||||||
m.called = true
|
|
||||||
if m.err != nil {
|
|
||||||
return &Result{Source: m.name, Errors: 1}, m.err
|
|
||||||
}
|
|
||||||
|
|
||||||
result := &Result{Source: m.name, Items: m.items}
|
|
||||||
for i := 0; i < m.items; i++ {
|
|
||||||
result.Files = append(result.Files, fmt.Sprintf("/output/%s/%d.md", m.name, i))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.DryRun {
|
|
||||||
return &Result{Source: m.name}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Name_Good(t *testing.T) {
|
|
||||||
e := &Excavator{}
|
|
||||||
assert.Equal(t, "excavator", e.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "source-a", items: 3}
|
|
||||||
c2 := &mockCollector{name: "source-b", items: 5}
|
|
||||||
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1, c2},
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := e.Run(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, c1.called)
|
|
||||||
assert.True(t, c2.called)
|
|
||||||
assert.Equal(t, 8, result.Items)
|
|
||||||
assert.Len(t, result.Files, 8)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_Empty(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
e := &Excavator{}
|
|
||||||
result, err := e.Run(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_DryRun(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "source-a", items: 10}
|
|
||||||
c2 := &mockCollector{name: "source-b", items: 20}
|
|
||||||
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1, c2},
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := e.Run(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, c1.called)
|
|
||||||
assert.True(t, c2.called)
|
|
||||||
// In dry run, mockCollector returns 0 items
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_ScanOnly(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "source-a", items: 10}
|
|
||||||
|
|
||||||
var progressMessages []string
|
|
||||||
cfg.Dispatcher.On(EventProgress, func(e Event) {
|
|
||||||
progressMessages = append(progressMessages, e.Message)
|
|
||||||
})
|
|
||||||
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1},
|
|
||||||
ScanOnly: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := e.Run(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, c1.called, "Collector should not be called in scan-only mode")
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
assert.NotEmpty(t, progressMessages)
|
|
||||||
assert.Contains(t, progressMessages[0], "source-a")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_WithErrors(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "good", items: 5}
|
|
||||||
c2 := &mockCollector{name: "bad", err: fmt.Errorf("network error")}
|
|
||||||
c3 := &mockCollector{name: "also-good", items: 3}
|
|
||||||
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1, c2, c3},
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := e.Run(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 8, result.Items)
|
|
||||||
assert.Equal(t, 1, result.Errors) // c2 failed
|
|
||||||
assert.True(t, c1.called)
|
|
||||||
assert.True(t, c2.called)
|
|
||||||
assert.True(t, c3.called)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_CancelledContext(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
cancel() // Cancel immediately
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "source-a", items: 5}
|
|
||||||
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := e.Run(ctx, cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_SavesState(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "source-a", items: 5}
|
|
||||||
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := e.Run(context.Background(), cfg)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify state was saved
|
|
||||||
entry, ok := cfg.State.Get("source-a")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, 5, entry.Items)
|
|
||||||
assert.Equal(t, "source-a", entry.Source)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExcavator_Run_Good_Events(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
var startCount, completeCount int
|
|
||||||
cfg.Dispatcher.On(EventStart, func(e Event) { startCount++ })
|
|
||||||
cfg.Dispatcher.On(EventComplete, func(e Event) { completeCount++ })
|
|
||||||
|
|
||||||
c1 := &mockCollector{name: "source-a", items: 1}
|
|
||||||
e := &Excavator{
|
|
||||||
Collectors: []Collector{c1},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := e.Run(context.Background(), cfg)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, startCount)
|
|
||||||
assert.Equal(t, 1, completeCount)
|
|
||||||
}
|
|
||||||
|
|
@ -1,289 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	core "forge.lthn.ai/core/go/pkg/framework/core"
)
|
|
||||||
|
|
||||||
// ghIssue represents a GitHub issue or pull request as returned by the gh CLI.
// The same shape is used for both because `gh issue list` and `gh pr list`
// are queried with an identical --json field set.
type ghIssue struct {
	Number    int       `json:"number"`
	Title     string    `json:"title"`
	State     string    `json:"state"`
	Author    ghAuthor  `json:"author"`
	Body      string    `json:"body"`
	CreatedAt time.Time `json:"createdAt"`
	Labels    []ghLabel `json:"labels"`
	URL       string    `json:"url"`
}
|
|
||||||
|
|
||||||
// ghAuthor is the author object embedded in gh CLI issue/PR JSON.
type ghAuthor struct {
	Login string `json:"login"`
}

// ghLabel is a single label entry in gh CLI issue/PR JSON.
type ghLabel struct {
	Name string `json:"name"`
}

// ghRepo represents a GitHub repository as returned by the gh CLI.
type ghRepo struct {
	Name string `json:"name"`
}
|
|
||||||
|
|
||||||
// GitHubCollector collects issues and PRs from GitHub repositories using the
// gh CLI (which must be installed and authenticated on the host).
type GitHubCollector struct {
	// Org is the GitHub organisation.
	Org string

	// Repo is the repository name. If empty and Org is set, all repos are collected.
	Repo string

	// IssuesOnly limits collection to issues (excludes PRs).
	IssuesOnly bool

	// PRsOnly limits collection to PRs (excludes issues).
	// Note: setting both IssuesOnly and PRsOnly collects nothing.
	PRsOnly bool
}
|
|
||||||
|
|
||||||
// Name returns the collector name.
|
|
||||||
func (g *GitHubCollector) Name() string {
|
|
||||||
if g.Repo != "" {
|
|
||||||
return fmt.Sprintf("github:%s/%s", g.Org, g.Repo)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("github:%s", g.Org)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect gathers issues and/or PRs from GitHub repositories.
//
// When Repo is empty every repository in the org is enumerated first.
// Per-repository failures increment result.Errors and collection continues;
// only context cancellation or a failed repo listing aborts the whole run.
func (g *GitHubCollector) Collect(ctx context.Context, cfg *Config) (*Result, error) {
	result := &Result{Source: g.Name()}

	if cfg.Dispatcher != nil {
		cfg.Dispatcher.EmitStart(g.Name(), "Starting GitHub collection")
	}

	// If no specific repo, list all repos in the org.
	repos := []string{g.Repo}
	if g.Repo == "" {
		var err error
		repos, err = g.listOrgRepos(ctx)
		if err != nil {
			return result, err
		}
	}

	for _, repo := range repos {
		// Check cancellation between repositories.
		if ctx.Err() != nil {
			return result, core.E("collect.GitHub.Collect", "context cancelled", ctx.Err())
		}

		// Issues, unless restricted to PRs only.
		if !g.PRsOnly {
			issueResult, err := g.collectIssues(ctx, cfg, repo)
			if err != nil {
				result.Errors++
				if cfg.Dispatcher != nil {
					cfg.Dispatcher.EmitError(g.Name(), fmt.Sprintf("Error collecting issues for %s: %v", repo, err), nil)
				}
			} else {
				result.Items += issueResult.Items
				result.Skipped += issueResult.Skipped
				result.Files = append(result.Files, issueResult.Files...)
			}
		}

		// PRs, unless restricted to issues only.
		if !g.IssuesOnly {
			prResult, err := g.collectPRs(ctx, cfg, repo)
			if err != nil {
				result.Errors++
				if cfg.Dispatcher != nil {
					cfg.Dispatcher.EmitError(g.Name(), fmt.Sprintf("Error collecting PRs for %s: %v", repo, err), nil)
				}
			} else {
				result.Items += prResult.Items
				result.Skipped += prResult.Skipped
				result.Files = append(result.Files, prResult.Files...)
			}
		}
	}

	if cfg.Dispatcher != nil {
		cfg.Dispatcher.EmitComplete(g.Name(), fmt.Sprintf("Collected %d items", result.Items), result)
	}

	return result, nil
}
|
|
||||||
|
|
||||||
// listOrgRepos returns all repository names for the configured org.
|
|
||||||
func (g *GitHubCollector) listOrgRepos(ctx context.Context) ([]string, error) {
|
|
||||||
cmd := exec.CommandContext(ctx, "gh", "repo", "list", g.Org,
|
|
||||||
"--json", "name",
|
|
||||||
"--limit", "1000",
|
|
||||||
)
|
|
||||||
out, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.GitHub.listOrgRepos", "failed to list repos", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var repos []ghRepo
|
|
||||||
if err := json.Unmarshal(out, &repos); err != nil {
|
|
||||||
return nil, core.E("collect.GitHub.listOrgRepos", "failed to parse repo list", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
names := make([]string, len(repos))
|
|
||||||
for i, r := range repos {
|
|
||||||
names[i] = r.Name
|
|
||||||
}
|
|
||||||
return names, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// collectIssues collects issues for a single repository via `gh issue list`
// and writes one markdown file per issue under
// <OutputDir>/github/<org>/<repo>/issues/<number>.md.
//
// In dry-run mode nothing is fetched or written. A per-file write failure
// increments result.Errors and the loop continues. Note: --limit 100 caps
// the number of issues fetched per repository.
func (g *GitHubCollector) collectIssues(ctx context.Context, cfg *Config, repo string) (*Result, error) {
	result := &Result{Source: fmt.Sprintf("github:%s/%s/issues", g.Org, repo)}

	if cfg.DryRun {
		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitProgress(g.Name(), fmt.Sprintf("[dry-run] Would collect issues for %s/%s", g.Org, repo), nil)
		}
		return result, nil
	}

	// Respect the shared "github" rate-limit bucket before shelling out.
	if cfg.Limiter != nil {
		if err := cfg.Limiter.Wait(ctx, "github"); err != nil {
			return result, err
		}
	}

	repoRef := fmt.Sprintf("%s/%s", g.Org, repo)
	cmd := exec.CommandContext(ctx, "gh", "issue", "list",
		"--repo", repoRef,
		"--json", "number,title,state,author,body,createdAt,labels,url",
		"--limit", "100",
		"--state", "all",
	)
	out, err := cmd.Output()
	if err != nil {
		return result, core.E("collect.GitHub.collectIssues", "gh issue list failed for "+repoRef, err)
	}

	var issues []ghIssue
	if err := json.Unmarshal(out, &issues); err != nil {
		return result, core.E("collect.GitHub.collectIssues", "failed to parse issues", err)
	}

	baseDir := filepath.Join(cfg.OutputDir, "github", g.Org, repo, "issues")
	if err := cfg.Output.EnsureDir(baseDir); err != nil {
		return result, core.E("collect.GitHub.collectIssues", "failed to create output directory", err)
	}

	for _, issue := range issues {
		filePath := filepath.Join(baseDir, fmt.Sprintf("%d.md", issue.Number))
		content := formatIssueMarkdown(issue)

		// A single failed write is counted but does not abort the repo.
		if err := cfg.Output.Write(filePath, content); err != nil {
			result.Errors++
			continue
		}

		result.Items++
		result.Files = append(result.Files, filePath)

		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitItem(g.Name(), fmt.Sprintf("Issue #%d: %s", issue.Number, issue.Title), nil)
		}
	}

	return result, nil
}
|
|
||||||
|
|
||||||
// collectPRs collects pull requests for a single repository via `gh pr list`
// and writes one markdown file per PR under
// <OutputDir>/github/<org>/<repo>/pulls/<number>.md.
//
// Mirrors collectIssues: dry-run skips all work, the "github" rate-limit
// bucket is honoured, per-file write failures are counted but non-fatal,
// and --limit 100 caps the number of PRs fetched.
func (g *GitHubCollector) collectPRs(ctx context.Context, cfg *Config, repo string) (*Result, error) {
	result := &Result{Source: fmt.Sprintf("github:%s/%s/pulls", g.Org, repo)}

	if cfg.DryRun {
		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitProgress(g.Name(), fmt.Sprintf("[dry-run] Would collect PRs for %s/%s", g.Org, repo), nil)
		}
		return result, nil
	}

	if cfg.Limiter != nil {
		if err := cfg.Limiter.Wait(ctx, "github"); err != nil {
			return result, err
		}
	}

	repoRef := fmt.Sprintf("%s/%s", g.Org, repo)
	cmd := exec.CommandContext(ctx, "gh", "pr", "list",
		"--repo", repoRef,
		"--json", "number,title,state,author,body,createdAt,labels,url",
		"--limit", "100",
		"--state", "all",
	)
	out, err := cmd.Output()
	if err != nil {
		return result, core.E("collect.GitHub.collectPRs", "gh pr list failed for "+repoRef, err)
	}

	// PRs share ghIssue's shape because the --json field set is identical.
	var prs []ghIssue
	if err := json.Unmarshal(out, &prs); err != nil {
		return result, core.E("collect.GitHub.collectPRs", "failed to parse pull requests", err)
	}

	baseDir := filepath.Join(cfg.OutputDir, "github", g.Org, repo, "pulls")
	if err := cfg.Output.EnsureDir(baseDir); err != nil {
		return result, core.E("collect.GitHub.collectPRs", "failed to create output directory", err)
	}

	for _, pr := range prs {
		filePath := filepath.Join(baseDir, fmt.Sprintf("%d.md", pr.Number))
		content := formatIssueMarkdown(pr)

		// A single failed write is counted but does not abort the repo.
		if err := cfg.Output.Write(filePath, content); err != nil {
			result.Errors++
			continue
		}

		result.Items++
		result.Files = append(result.Files, filePath)

		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitItem(g.Name(), fmt.Sprintf("PR #%d: %s", pr.Number, pr.Title), nil)
		}
	}

	return result, nil
}
|
|
||||||
|
|
||||||
// formatIssueMarkdown formats a GitHub issue or PR as markdown.
|
|
||||||
func formatIssueMarkdown(issue ghIssue) string {
|
|
||||||
var b strings.Builder
|
|
||||||
fmt.Fprintf(&b, "# %s\n\n", issue.Title)
|
|
||||||
fmt.Fprintf(&b, "- **Number:** #%d\n", issue.Number)
|
|
||||||
fmt.Fprintf(&b, "- **State:** %s\n", issue.State)
|
|
||||||
fmt.Fprintf(&b, "- **Author:** %s\n", issue.Author.Login)
|
|
||||||
fmt.Fprintf(&b, "- **Created:** %s\n", issue.CreatedAt.Format(time.RFC3339))
|
|
||||||
|
|
||||||
if len(issue.Labels) > 0 {
|
|
||||||
labels := make([]string, len(issue.Labels))
|
|
||||||
for i, l := range issue.Labels {
|
|
||||||
labels[i] = l.Name
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&b, "- **Labels:** %s\n", strings.Join(labels, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
if issue.URL != "" {
|
|
||||||
fmt.Fprintf(&b, "- **URL:** %s\n", issue.URL)
|
|
||||||
}
|
|
||||||
|
|
||||||
if issue.Body != "" {
|
|
||||||
fmt.Fprintf(&b, "\n%s\n", issue.Body)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
@ -1,103 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGitHubCollector_Name_Good(t *testing.T) {
|
|
||||||
g := &GitHubCollector{Org: "host-uk", Repo: "core"}
|
|
||||||
assert.Equal(t, "github:host-uk/core", g.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGitHubCollector_Name_Good_OrgOnly(t *testing.T) {
|
|
||||||
g := &GitHubCollector{Org: "host-uk"}
|
|
||||||
assert.Equal(t, "github:host-uk", g.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGitHubCollector_Collect_Good_DryRun(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
var progressEmitted bool
|
|
||||||
cfg.Dispatcher.On(EventProgress, func(e Event) {
|
|
||||||
progressEmitted = true
|
|
||||||
})
|
|
||||||
|
|
||||||
g := &GitHubCollector{Org: "host-uk", Repo: "core"}
|
|
||||||
result, err := g.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, result)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
assert.True(t, progressEmitted, "Should emit progress event in dry-run mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGitHubCollector_Collect_Good_DryRun_IssuesOnly(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
g := &GitHubCollector{Org: "test-org", Repo: "test-repo", IssuesOnly: true}
|
|
||||||
result, err := g.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGitHubCollector_Collect_Good_DryRun_PRsOnly(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
g := &GitHubCollector{Org: "test-org", Repo: "test-repo", PRsOnly: true}
|
|
||||||
result, err := g.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatIssueMarkdown_Good(t *testing.T) {
|
|
||||||
issue := ghIssue{
|
|
||||||
Number: 42,
|
|
||||||
Title: "Test Issue",
|
|
||||||
State: "open",
|
|
||||||
Author: ghAuthor{Login: "testuser"},
|
|
||||||
Body: "This is the body.",
|
|
||||||
CreatedAt: time.Date(2025, 1, 15, 10, 0, 0, 0, time.UTC),
|
|
||||||
Labels: []ghLabel{
|
|
||||||
{Name: "bug"},
|
|
||||||
{Name: "priority"},
|
|
||||||
},
|
|
||||||
URL: "https://github.com/test/repo/issues/42",
|
|
||||||
}
|
|
||||||
|
|
||||||
md := formatIssueMarkdown(issue)
|
|
||||||
|
|
||||||
assert.Contains(t, md, "# Test Issue")
|
|
||||||
assert.Contains(t, md, "**Number:** #42")
|
|
||||||
assert.Contains(t, md, "**State:** open")
|
|
||||||
assert.Contains(t, md, "**Author:** testuser")
|
|
||||||
assert.Contains(t, md, "**Labels:** bug, priority")
|
|
||||||
assert.Contains(t, md, "This is the body.")
|
|
||||||
assert.Contains(t, md, "**URL:** https://github.com/test/repo/issues/42")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatIssueMarkdown_Good_NoLabels(t *testing.T) {
|
|
||||||
issue := ghIssue{
|
|
||||||
Number: 1,
|
|
||||||
Title: "Simple",
|
|
||||||
State: "closed",
|
|
||||||
Author: ghAuthor{Login: "user"},
|
|
||||||
}
|
|
||||||
|
|
||||||
md := formatIssueMarkdown(issue)
|
|
||||||
|
|
||||||
assert.Contains(t, md, "# Simple")
|
|
||||||
assert.NotContains(t, md, "**Labels:**")
|
|
||||||
}
|
|
||||||
|
|
@ -1,277 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
)
|
|
||||||
|
|
||||||
// coinGeckoBaseURL is the base URL for the CoinGecko API.
// It is a variable (not a const) so tests can point it at a local
// httptest server instead of the live API.
var coinGeckoBaseURL = "https://api.coingecko.com/api/v3"
|
|
||||||
|
|
||||||
// MarketCollector collects market data from CoinGecko.
type MarketCollector struct {
	// CoinID is the CoinGecko coin identifier (e.g. "bitcoin", "ethereum").
	// Required; Collect fails when empty.
	CoinID string

	// Historical enables collection of historical market chart data in
	// addition to the current snapshot.
	Historical bool

	// FromDate is the start date for historical data in YYYY-MM-DD format.
	FromDate string
}
|
|
||||||
|
|
||||||
// Name returns the collector name.
|
|
||||||
func (m *MarketCollector) Name() string {
|
|
||||||
return fmt.Sprintf("market:%s", m.CoinID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// coinData represents the current coin data from CoinGecko
// (GET /coins/{id}).
type coinData struct {
	ID         string     `json:"id"`
	Symbol     string     `json:"symbol"`
	Name       string     `json:"name"`
	MarketData marketData `json:"market_data"`
}

// marketData is the market_data object of a CoinGecko coin response.
// The map-valued fields are keyed by fiat/crypto currency code (e.g. "usd").
type marketData struct {
	CurrentPrice      map[string]float64 `json:"current_price"`
	MarketCap         map[string]float64 `json:"market_cap"`
	TotalVolume       map[string]float64 `json:"total_volume"`
	High24h           map[string]float64 `json:"high_24h"`
	Low24h            map[string]float64 `json:"low_24h"`
	PriceChange24h    float64            `json:"price_change_24h"`
	PriceChangePct24h float64            `json:"price_change_percentage_24h"`
	MarketCapRank     int                `json:"market_cap_rank"`
	TotalSupply       float64            `json:"total_supply"`
	CirculatingSupply float64            `json:"circulating_supply"`
	LastUpdated       string             `json:"last_updated"`
}

// historicalData represents historical market chart data from CoinGecko.
// Each inner slice is a [timestamp, value] pair as returned by the API.
type historicalData struct {
	Prices       [][]float64 `json:"prices"`
	MarketCaps   [][]float64 `json:"market_caps"`
	TotalVolumes [][]float64 `json:"total_volumes"`
}
|
|
||||||
|
|
||||||
// Collect gathers market data from CoinGecko.
//
// It always fetches the current snapshot; historical chart data is fetched
// only when m.Historical is set. Failures in either phase increment
// result.Errors and are reported via the dispatcher, but do not fail the
// overall run — Collect returns a nil error unless CoinID is missing or the
// output directory cannot be created.
func (m *MarketCollector) Collect(ctx context.Context, cfg *Config) (*Result, error) {
	result := &Result{Source: m.Name()}

	if m.CoinID == "" {
		return result, core.E("collect.Market.Collect", "coin ID is required", nil)
	}

	if cfg.Dispatcher != nil {
		cfg.Dispatcher.EmitStart(m.Name(), fmt.Sprintf("Starting market data collection for %s", m.CoinID))
	}

	// Dry-run: report intent and do no network or filesystem work.
	if cfg.DryRun {
		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitProgress(m.Name(), fmt.Sprintf("[dry-run] Would collect market data for %s", m.CoinID), nil)
		}
		return result, nil
	}

	baseDir := filepath.Join(cfg.OutputDir, "market", m.CoinID)
	if err := cfg.Output.EnsureDir(baseDir); err != nil {
		return result, core.E("collect.Market.Collect", "failed to create output directory", err)
	}

	// Collect current data; a failure here is counted but non-fatal.
	currentResult, err := m.collectCurrent(ctx, cfg, baseDir)
	if err != nil {
		result.Errors++
		if cfg.Dispatcher != nil {
			cfg.Dispatcher.EmitError(m.Name(), fmt.Sprintf("Failed to collect current data: %v", err), nil)
		}
	} else {
		result.Items += currentResult.Items
		result.Files = append(result.Files, currentResult.Files...)
	}

	// Collect historical data if requested; also counted-but-non-fatal.
	if m.Historical {
		histResult, err := m.collectHistorical(ctx, cfg, baseDir)
		if err != nil {
			result.Errors++
			if cfg.Dispatcher != nil {
				cfg.Dispatcher.EmitError(m.Name(), fmt.Sprintf("Failed to collect historical data: %v", err), nil)
			}
		} else {
			result.Items += histResult.Items
			result.Files = append(result.Files, histResult.Files...)
		}
	}

	if cfg.Dispatcher != nil {
		cfg.Dispatcher.EmitComplete(m.Name(), fmt.Sprintf("Collected market data for %s", m.CoinID), result)
	}

	return result, nil
}
|
|
||||||
|
|
||||||
// collectCurrent fetches current coin data from CoinGecko.
|
|
||||||
func (m *MarketCollector) collectCurrent(ctx context.Context, cfg *Config, baseDir string) (*Result, error) {
|
|
||||||
result := &Result{Source: m.Name()}
|
|
||||||
|
|
||||||
if cfg.Limiter != nil {
|
|
||||||
if err := cfg.Limiter.Wait(ctx, "coingecko"); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/coins/%s", coinGeckoBaseURL, m.CoinID)
|
|
||||||
data, err := fetchJSON[coinData](ctx, url)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Market.collectCurrent", "failed to fetch coin data", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write raw JSON
|
|
||||||
jsonBytes, err := json.MarshalIndent(data, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Market.collectCurrent", "failed to marshal data", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonPath := filepath.Join(baseDir, "current.json")
|
|
||||||
if err := cfg.Output.Write(jsonPath, string(jsonBytes)); err != nil {
|
|
||||||
return result, core.E("collect.Market.collectCurrent", "failed to write JSON", err)
|
|
||||||
}
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, jsonPath)
|
|
||||||
|
|
||||||
// Write summary markdown
|
|
||||||
summary := formatMarketSummary(data)
|
|
||||||
summaryPath := filepath.Join(baseDir, "summary.md")
|
|
||||||
if err := cfg.Output.Write(summaryPath, summary); err != nil {
|
|
||||||
return result, core.E("collect.Market.collectCurrent", "failed to write summary", err)
|
|
||||||
}
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, summaryPath)
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// collectHistorical fetches historical market chart data from CoinGecko.
|
|
||||||
func (m *MarketCollector) collectHistorical(ctx context.Context, cfg *Config, baseDir string) (*Result, error) {
|
|
||||||
result := &Result{Source: m.Name()}
|
|
||||||
|
|
||||||
if cfg.Limiter != nil {
|
|
||||||
if err := cfg.Limiter.Wait(ctx, "coingecko"); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
days := "365"
|
|
||||||
if m.FromDate != "" {
|
|
||||||
fromTime, err := time.Parse("2006-01-02", m.FromDate)
|
|
||||||
if err == nil {
|
|
||||||
dayCount := int(time.Since(fromTime).Hours() / 24)
|
|
||||||
if dayCount > 0 {
|
|
||||||
days = fmt.Sprintf("%d", dayCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
url := fmt.Sprintf("%s/coins/%s/market_chart?vs_currency=usd&days=%s", coinGeckoBaseURL, m.CoinID, days)
|
|
||||||
data, err := fetchJSON[historicalData](ctx, url)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Market.collectHistorical", "failed to fetch historical data", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonBytes, err := json.MarshalIndent(data, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Market.collectHistorical", "failed to marshal data", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
jsonPath := filepath.Join(baseDir, "historical.json")
|
|
||||||
if err := cfg.Output.Write(jsonPath, string(jsonBytes)); err != nil {
|
|
||||||
return result, core.E("collect.Market.collectHistorical", "failed to write JSON", err)
|
|
||||||
}
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, jsonPath)
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchJSON fetches JSON from a URL and unmarshals it into the given type.
|
|
||||||
func fetchJSON[T any](ctx context.Context, url string) (*T, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.fetchJSON", "failed to create request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("User-Agent", "CoreCollector/1.0")
|
|
||||||
req.Header.Set("Accept", "application/json")
|
|
||||||
|
|
||||||
resp, err := httpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, core.E("collect.fetchJSON", "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, core.E("collect.fetchJSON",
|
|
||||||
fmt.Sprintf("unexpected status code: %d for %s", resp.StatusCode, url), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var data T
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
|
||||||
return nil, core.E("collect.fetchJSON", "failed to decode response", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatMarketSummary formats coin data as a markdown summary.
|
|
||||||
func formatMarketSummary(data *coinData) string {
|
|
||||||
var b strings.Builder
|
|
||||||
fmt.Fprintf(&b, "# %s (%s)\n\n", data.Name, strings.ToUpper(data.Symbol))
|
|
||||||
|
|
||||||
md := data.MarketData
|
|
||||||
|
|
||||||
if price, ok := md.CurrentPrice["usd"]; ok {
|
|
||||||
fmt.Fprintf(&b, "- **Current Price (USD):** $%.2f\n", price)
|
|
||||||
}
|
|
||||||
if cap, ok := md.MarketCap["usd"]; ok {
|
|
||||||
fmt.Fprintf(&b, "- **Market Cap (USD):** $%.0f\n", cap)
|
|
||||||
}
|
|
||||||
if vol, ok := md.TotalVolume["usd"]; ok {
|
|
||||||
fmt.Fprintf(&b, "- **24h Volume (USD):** $%.0f\n", vol)
|
|
||||||
}
|
|
||||||
if high, ok := md.High24h["usd"]; ok {
|
|
||||||
fmt.Fprintf(&b, "- **24h High (USD):** $%.2f\n", high)
|
|
||||||
}
|
|
||||||
if low, ok := md.Low24h["usd"]; ok {
|
|
||||||
fmt.Fprintf(&b, "- **24h Low (USD):** $%.2f\n", low)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(&b, "- **24h Price Change:** $%.2f (%.2f%%)\n", md.PriceChange24h, md.PriceChangePct24h)
|
|
||||||
|
|
||||||
if md.MarketCapRank > 0 {
|
|
||||||
fmt.Fprintf(&b, "- **Market Cap Rank:** #%d\n", md.MarketCapRank)
|
|
||||||
}
|
|
||||||
if md.CirculatingSupply > 0 {
|
|
||||||
fmt.Fprintf(&b, "- **Circulating Supply:** %.0f\n", md.CirculatingSupply)
|
|
||||||
}
|
|
||||||
if md.TotalSupply > 0 {
|
|
||||||
fmt.Fprintf(&b, "- **Total Supply:** %.0f\n", md.TotalSupply)
|
|
||||||
}
|
|
||||||
if md.LastUpdated != "" {
|
|
||||||
fmt.Fprintf(&b, "\n*Last updated: %s*\n", md.LastUpdated)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatMarketSummary is exported for testing.
|
|
||||||
func FormatMarketSummary(data *coinData) string {
|
|
||||||
return formatMarketSummary(data)
|
|
||||||
}
|
|
||||||
|
|
@ -1,187 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMarketCollector_Name_Good(t *testing.T) {
|
|
||||||
m := &MarketCollector{CoinID: "bitcoin"}
|
|
||||||
assert.Equal(t, "market:bitcoin", m.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarketCollector_Collect_Bad_NoCoinID(t *testing.T) {
|
|
||||||
mock := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(mock, "/output")
|
|
||||||
|
|
||||||
m := &MarketCollector{}
|
|
||||||
_, err := m.Collect(context.Background(), cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarketCollector_Collect_Good_DryRun(t *testing.T) {
|
|
||||||
mock := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(mock, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
m := &MarketCollector{CoinID: "bitcoin"}
|
|
||||||
result, err := m.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarketCollector_Collect_Good_CurrentData(t *testing.T) {
|
|
||||||
// Set up a mock CoinGecko server
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
data := coinData{
|
|
||||||
ID: "bitcoin",
|
|
||||||
Symbol: "btc",
|
|
||||||
Name: "Bitcoin",
|
|
||||||
MarketData: marketData{
|
|
||||||
CurrentPrice: map[string]float64{"usd": 42000.50},
|
|
||||||
MarketCap: map[string]float64{"usd": 800000000000},
|
|
||||||
TotalVolume: map[string]float64{"usd": 25000000000},
|
|
||||||
High24h: map[string]float64{"usd": 43000},
|
|
||||||
Low24h: map[string]float64{"usd": 41000},
|
|
||||||
PriceChange24h: 500.25,
|
|
||||||
PriceChangePct24h: 1.2,
|
|
||||||
MarketCapRank: 1,
|
|
||||||
CirculatingSupply: 19500000,
|
|
||||||
TotalSupply: 21000000,
|
|
||||||
LastUpdated: "2025-01-15T10:00:00Z",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
_ = json.NewEncoder(w).Encode(data)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
// Override base URL
|
|
||||||
oldURL := coinGeckoBaseURL
|
|
||||||
coinGeckoBaseURL = server.URL
|
|
||||||
defer func() { coinGeckoBaseURL = oldURL }()
|
|
||||||
|
|
||||||
mock := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(mock, "/output")
|
|
||||||
// Disable rate limiter to avoid delays in tests
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
m := &MarketCollector{CoinID: "bitcoin"}
|
|
||||||
result, err := m.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 2, result.Items) // current.json + summary.md
|
|
||||||
assert.Len(t, result.Files, 2)
|
|
||||||
|
|
||||||
// Verify current.json was written
|
|
||||||
content, err := mock.Read("/output/market/bitcoin/current.json")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, content, "bitcoin")
|
|
||||||
|
|
||||||
// Verify summary.md was written
|
|
||||||
summary, err := mock.Read("/output/market/bitcoin/summary.md")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, summary, "Bitcoin")
|
|
||||||
assert.Contains(t, summary, "42000.50")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarketCollector_Collect_Good_Historical(t *testing.T) {
|
|
||||||
callCount := 0
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
callCount++
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
if callCount == 1 {
|
|
||||||
// Current data response
|
|
||||||
data := coinData{
|
|
||||||
ID: "ethereum",
|
|
||||||
Symbol: "eth",
|
|
||||||
Name: "Ethereum",
|
|
||||||
MarketData: marketData{
|
|
||||||
CurrentPrice: map[string]float64{"usd": 3000},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
_ = json.NewEncoder(w).Encode(data)
|
|
||||||
} else {
|
|
||||||
// Historical data response
|
|
||||||
data := historicalData{
|
|
||||||
Prices: [][]float64{{1705305600000, 3000.0}, {1705392000000, 3100.0}},
|
|
||||||
MarketCaps: [][]float64{{1705305600000, 360000000000}},
|
|
||||||
TotalVolumes: [][]float64{{1705305600000, 15000000000}},
|
|
||||||
}
|
|
||||||
_ = json.NewEncoder(w).Encode(data)
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
oldURL := coinGeckoBaseURL
|
|
||||||
coinGeckoBaseURL = server.URL
|
|
||||||
defer func() { coinGeckoBaseURL = oldURL }()
|
|
||||||
|
|
||||||
mock := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(mock, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
m := &MarketCollector{CoinID: "ethereum", Historical: true}
|
|
||||||
result, err := m.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 3, result.Items) // current.json + summary.md + historical.json
|
|
||||||
assert.Len(t, result.Files, 3)
|
|
||||||
|
|
||||||
// Verify historical.json was written
|
|
||||||
content, err := mock.Read("/output/market/ethereum/historical.json")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, content, "3000")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatMarketSummary_Good(t *testing.T) {
|
|
||||||
data := &coinData{
|
|
||||||
Name: "Bitcoin",
|
|
||||||
Symbol: "btc",
|
|
||||||
MarketData: marketData{
|
|
||||||
CurrentPrice: map[string]float64{"usd": 50000},
|
|
||||||
MarketCap: map[string]float64{"usd": 1000000000000},
|
|
||||||
MarketCapRank: 1,
|
|
||||||
CirculatingSupply: 19500000,
|
|
||||||
TotalSupply: 21000000,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
summary := FormatMarketSummary(data)
|
|
||||||
|
|
||||||
assert.Contains(t, summary, "# Bitcoin (BTC)")
|
|
||||||
assert.Contains(t, summary, "$50000.00")
|
|
||||||
assert.Contains(t, summary, "Market Cap Rank:** #1")
|
|
||||||
assert.Contains(t, summary, "Circulating Supply")
|
|
||||||
assert.Contains(t, summary, "Total Supply")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMarketCollector_Collect_Bad_ServerError(t *testing.T) {
|
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
}))
|
|
||||||
defer server.Close()
|
|
||||||
|
|
||||||
oldURL := coinGeckoBaseURL
|
|
||||||
coinGeckoBaseURL = server.URL
|
|
||||||
defer func() { coinGeckoBaseURL = oldURL }()
|
|
||||||
|
|
||||||
mock := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(mock, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
m := &MarketCollector{CoinID: "bitcoin"}
|
|
||||||
result, err := m.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
// Should have errors but not fail entirely
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, result.Errors)
|
|
||||||
}
|
|
||||||
|
|
@ -1,402 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/xml"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Paper source identifiers accepted by PapersCollector.Source.
const (
	// PaperSourceIACR selects the IACR ePrint archive.
	PaperSourceIACR = "iacr"
	// PaperSourceArXiv selects the arXiv API.
	PaperSourceArXiv = "arxiv"
	// PaperSourceAll queries both IACR and arXiv.
	PaperSourceAll = "all"
)
|
|
||||||
|
|
||||||
// PapersCollector collects papers from IACR and arXiv.
type PapersCollector struct {
	// Source is one of PaperSourceIACR, PaperSourceArXiv, or PaperSourceAll.
	Source string

	// Category is the arXiv category (e.g. "cs.CR" for cryptography).
	// It is only consulted for arXiv queries.
	Category string

	// Query is the search query string. Collect fails when it is empty.
	Query string
}
|
|
||||||
|
|
||||||
// Name returns the collector name.
|
|
||||||
func (p *PapersCollector) Name() string {
|
|
||||||
return fmt.Sprintf("papers:%s", p.Source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// paper represents a parsed academic paper.
type paper struct {
	ID       string   // identifier sanitised for use in file names
	Title    string   // paper title
	Authors  []string // author display names
	Abstract string   // abstract text
	Date     string   // publication date as provided by the source
	URL      string   // link to the paper's landing page
	Source   string   // originating archive: "iacr" or "arxiv"
}
|
|
||||||
|
|
||||||
// Collect gathers papers from the configured sources.
|
|
||||||
func (p *PapersCollector) Collect(ctx context.Context, cfg *Config) (*Result, error) {
|
|
||||||
result := &Result{Source: p.Name()}
|
|
||||||
|
|
||||||
if p.Query == "" {
|
|
||||||
return result, core.E("collect.Papers.Collect", "query is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitStart(p.Name(), fmt.Sprintf("Starting paper collection for %q", p.Query))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.DryRun {
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitProgress(p.Name(), fmt.Sprintf("[dry-run] Would search papers for %q", p.Query), nil)
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch p.Source {
|
|
||||||
case PaperSourceIACR:
|
|
||||||
return p.collectIACR(ctx, cfg)
|
|
||||||
case PaperSourceArXiv:
|
|
||||||
return p.collectArXiv(ctx, cfg)
|
|
||||||
case PaperSourceAll:
|
|
||||||
iacrResult, iacrErr := p.collectIACR(ctx, cfg)
|
|
||||||
arxivResult, arxivErr := p.collectArXiv(ctx, cfg)
|
|
||||||
|
|
||||||
if iacrErr != nil && arxivErr != nil {
|
|
||||||
return result, core.E("collect.Papers.Collect", "all sources failed", iacrErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
merged := MergeResults(p.Name(), iacrResult, arxivResult)
|
|
||||||
if iacrErr != nil {
|
|
||||||
merged.Errors++
|
|
||||||
}
|
|
||||||
if arxivErr != nil {
|
|
||||||
merged.Errors++
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitComplete(p.Name(), fmt.Sprintf("Collected %d papers", merged.Items), merged)
|
|
||||||
}
|
|
||||||
|
|
||||||
return merged, nil
|
|
||||||
default:
|
|
||||||
return result, core.E("collect.Papers.Collect",
|
|
||||||
fmt.Sprintf("unknown source: %s (use iacr, arxiv, or all)", p.Source), nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// collectIACR fetches papers from the IACR ePrint archive.
|
|
||||||
func (p *PapersCollector) collectIACR(ctx context.Context, cfg *Config) (*Result, error) {
|
|
||||||
result := &Result{Source: "papers:iacr"}
|
|
||||||
|
|
||||||
if cfg.Limiter != nil {
|
|
||||||
if err := cfg.Limiter.Wait(ctx, "iacr"); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
searchURL := fmt.Sprintf("https://eprint.iacr.org/search?q=%s", url.QueryEscape(p.Query))
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, searchURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectIACR", "failed to create request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("User-Agent", "CoreCollector/1.0")
|
|
||||||
|
|
||||||
resp, err := httpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectIACR", "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return result, core.E("collect.Papers.collectIACR",
|
|
||||||
fmt.Sprintf("unexpected status code: %d", resp.StatusCode), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
doc, err := html.Parse(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectIACR", "failed to parse HTML", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
papers := extractIACRPapers(doc)
|
|
||||||
|
|
||||||
baseDir := filepath.Join(cfg.OutputDir, "papers", "iacr")
|
|
||||||
if err := cfg.Output.EnsureDir(baseDir); err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectIACR", "failed to create output directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, ppr := range papers {
|
|
||||||
filePath := filepath.Join(baseDir, ppr.ID+".md")
|
|
||||||
content := formatPaperMarkdown(ppr)
|
|
||||||
|
|
||||||
if err := cfg.Output.Write(filePath, content); err != nil {
|
|
||||||
result.Errors++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, filePath)
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitItem(p.Name(), fmt.Sprintf("Paper: %s", ppr.Title), nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// arxivFeed represents the Atom feed returned by the arXiv API.
type arxivFeed struct {
	XMLName xml.Name     `xml:"feed"`
	Entries []arxivEntry `xml:"entry"`
}

// arxivEntry is a single Atom entry (one paper) in an arXiv feed.
type arxivEntry struct {
	ID        string        `xml:"id"`        // abs URL, e.g. http://arxiv.org/abs/2501.12345v1
	Title     string        `xml:"title"`     // may carry surrounding whitespace
	Summary   string        `xml:"summary"`   // abstract text
	Published string        `xml:"published"` // publication timestamp
	Authors   []arxivAuthor `xml:"author"`
	Links     []arxivLink   `xml:"link"`
}

// arxivAuthor is an author element within an arXiv Atom entry.
type arxivAuthor struct {
	Name string `xml:"name"`
}

// arxivLink is a link element within an arXiv Atom entry; the entry's
// rel="alternate" link points at the paper's landing page.
type arxivLink struct {
	Href string `xml:"href,attr"`
	Rel  string `xml:"rel,attr"`
	Type string `xml:"type,attr"`
}
|
|
||||||
|
|
||||||
// collectArXiv fetches papers from the arXiv API.
|
|
||||||
func (p *PapersCollector) collectArXiv(ctx context.Context, cfg *Config) (*Result, error) {
|
|
||||||
result := &Result{Source: "papers:arxiv"}
|
|
||||||
|
|
||||||
if cfg.Limiter != nil {
|
|
||||||
if err := cfg.Limiter.Wait(ctx, "arxiv"); err != nil {
|
|
||||||
return result, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
query := url.QueryEscape(p.Query)
|
|
||||||
if p.Category != "" {
|
|
||||||
query = fmt.Sprintf("cat:%s+AND+%s", url.QueryEscape(p.Category), query)
|
|
||||||
}
|
|
||||||
|
|
||||||
searchURL := fmt.Sprintf("https://export.arxiv.org/api/query?search_query=%s&max_results=50", query)
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, searchURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectArXiv", "failed to create request", err)
|
|
||||||
}
|
|
||||||
req.Header.Set("User-Agent", "CoreCollector/1.0")
|
|
||||||
|
|
||||||
resp, err := httpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectArXiv", "request failed", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return result, core.E("collect.Papers.collectArXiv",
|
|
||||||
fmt.Sprintf("unexpected status code: %d", resp.StatusCode), nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
var feed arxivFeed
|
|
||||||
if err := xml.NewDecoder(resp.Body).Decode(&feed); err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectArXiv", "failed to parse XML", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
baseDir := filepath.Join(cfg.OutputDir, "papers", "arxiv")
|
|
||||||
if err := cfg.Output.EnsureDir(baseDir); err != nil {
|
|
||||||
return result, core.E("collect.Papers.collectArXiv", "failed to create output directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range feed.Entries {
|
|
||||||
ppr := arxivEntryToPaper(entry)
|
|
||||||
|
|
||||||
filePath := filepath.Join(baseDir, ppr.ID+".md")
|
|
||||||
content := formatPaperMarkdown(ppr)
|
|
||||||
|
|
||||||
if err := cfg.Output.Write(filePath, content); err != nil {
|
|
||||||
result.Errors++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, filePath)
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitItem(p.Name(), fmt.Sprintf("Paper: %s", ppr.Title), nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// arxivEntryToPaper converts an arXiv Atom entry to a paper.
|
|
||||||
func arxivEntryToPaper(entry arxivEntry) paper {
|
|
||||||
authors := make([]string, len(entry.Authors))
|
|
||||||
for i, a := range entry.Authors {
|
|
||||||
authors[i] = a.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract the arXiv ID from the URL
|
|
||||||
id := entry.ID
|
|
||||||
if idx := strings.LastIndex(id, "/abs/"); idx != -1 {
|
|
||||||
id = id[idx+5:]
|
|
||||||
}
|
|
||||||
// Replace characters that are not valid in file names
|
|
||||||
id = strings.ReplaceAll(id, "/", "-")
|
|
||||||
id = strings.ReplaceAll(id, ":", "-")
|
|
||||||
|
|
||||||
paperURL := entry.ID
|
|
||||||
for _, link := range entry.Links {
|
|
||||||
if link.Rel == "alternate" {
|
|
||||||
paperURL = link.Href
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return paper{
|
|
||||||
ID: id,
|
|
||||||
Title: strings.TrimSpace(entry.Title),
|
|
||||||
Authors: authors,
|
|
||||||
Abstract: strings.TrimSpace(entry.Summary),
|
|
||||||
Date: entry.Published,
|
|
||||||
URL: paperURL,
|
|
||||||
Source: "arxiv",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractIACRPapers extracts paper metadata from an IACR search results page.
|
|
||||||
func extractIACRPapers(doc *html.Node) []paper {
|
|
||||||
var papers []paper
|
|
||||||
var walk func(*html.Node)
|
|
||||||
|
|
||||||
walk = func(n *html.Node) {
|
|
||||||
if n.Type == html.ElementNode && n.Data == "div" {
|
|
||||||
for _, attr := range n.Attr {
|
|
||||||
if attr.Key == "class" && strings.Contains(attr.Val, "paperentry") {
|
|
||||||
ppr := parseIACREntry(n)
|
|
||||||
if ppr.Title != "" {
|
|
||||||
papers = append(papers, ppr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
walk(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
walk(doc)
|
|
||||||
return papers
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseIACREntry extracts paper data from an IACR paper entry div.
//
// It walks the subtree in document order and fills fields opportunistically:
//   - an <a> whose href contains "/eprint/" supplies URL, and ID is built
//     from the last two path segments (e.g. "2025-001");
//   - Title is taken from the first <a> encountered (it is only set once);
//   - each <span class="...author..."> appends one author;
//   - <span class="...date..."> sets Date;
//   - <p class="...abstract..."> sets Abstract.
//
// NOTE(review): taking Title from the first <a> in document order assumes
// the title link precedes any other links in the entry markup — confirm
// against the live IACR template if it changes.
func parseIACREntry(node *html.Node) paper {
	ppr := paper{Source: "iacr"}
	var walk func(*html.Node)

	walk = func(n *html.Node) {
		if n.Type == html.ElementNode {
			switch n.Data {
			case "a":
				for _, attr := range n.Attr {
					if attr.Key == "href" && strings.Contains(attr.Val, "/eprint/") {
						ppr.URL = "https://eprint.iacr.org" + attr.Val
						// Extract ID from URL
						parts := strings.Split(attr.Val, "/")
						if len(parts) >= 2 {
							ppr.ID = parts[len(parts)-2] + "-" + parts[len(parts)-1]
						}
					}
				}
				// Only the first anchor's text becomes the title.
				if ppr.Title == "" {
					ppr.Title = strings.TrimSpace(extractText(n))
				}
			case "span":
				for _, attr := range n.Attr {
					if attr.Key == "class" {
						switch {
						case strings.Contains(attr.Val, "author"):
							author := strings.TrimSpace(extractText(n))
							if author != "" {
								ppr.Authors = append(ppr.Authors, author)
							}
						case strings.Contains(attr.Val, "date"):
							ppr.Date = strings.TrimSpace(extractText(n))
						}
					}
				}
			case "p":
				for _, attr := range n.Attr {
					if attr.Key == "class" && strings.Contains(attr.Val, "abstract") {
						ppr.Abstract = strings.TrimSpace(extractText(n))
					}
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}

	walk(node)
	return ppr
}
|
|
||||||
|
|
||||||
// formatPaperMarkdown formats a paper as markdown.
|
|
||||||
func formatPaperMarkdown(ppr paper) string {
|
|
||||||
var b strings.Builder
|
|
||||||
fmt.Fprintf(&b, "# %s\n\n", ppr.Title)
|
|
||||||
|
|
||||||
if len(ppr.Authors) > 0 {
|
|
||||||
fmt.Fprintf(&b, "- **Authors:** %s\n", strings.Join(ppr.Authors, ", "))
|
|
||||||
}
|
|
||||||
if ppr.Date != "" {
|
|
||||||
fmt.Fprintf(&b, "- **Published:** %s\n", ppr.Date)
|
|
||||||
}
|
|
||||||
if ppr.URL != "" {
|
|
||||||
fmt.Fprintf(&b, "- **URL:** %s\n", ppr.URL)
|
|
||||||
}
|
|
||||||
if ppr.Source != "" {
|
|
||||||
fmt.Fprintf(&b, "- **Source:** %s\n", ppr.Source)
|
|
||||||
}
|
|
||||||
|
|
||||||
if ppr.Abstract != "" {
|
|
||||||
fmt.Fprintf(&b, "\n## Abstract\n\n%s\n", ppr.Abstract)
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// FormatPaperMarkdown is exported for testing.
|
|
||||||
func FormatPaperMarkdown(title string, authors []string, date, paperURL, source, abstract string) string {
|
|
||||||
return formatPaperMarkdown(paper{
|
|
||||||
Title: title,
|
|
||||||
Authors: authors,
|
|
||||||
Date: date,
|
|
||||||
URL: paperURL,
|
|
||||||
Source: source,
|
|
||||||
Abstract: abstract,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
@ -1,108 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestPapersCollector_Name_Good(t *testing.T) {
|
|
||||||
p := &PapersCollector{Source: PaperSourceIACR}
|
|
||||||
assert.Equal(t, "papers:iacr", p.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPapersCollector_Name_Good_ArXiv(t *testing.T) {
|
|
||||||
p := &PapersCollector{Source: PaperSourceArXiv}
|
|
||||||
assert.Equal(t, "papers:arxiv", p.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPapersCollector_Name_Good_All(t *testing.T) {
|
|
||||||
p := &PapersCollector{Source: PaperSourceAll}
|
|
||||||
assert.Equal(t, "papers:all", p.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPapersCollector_Collect_Bad_NoQuery(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
p := &PapersCollector{Source: PaperSourceIACR}
|
|
||||||
_, err := p.Collect(context.Background(), cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPapersCollector_Collect_Bad_UnknownSource(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
p := &PapersCollector{Source: "unknown", Query: "test"}
|
|
||||||
_, err := p.Collect(context.Background(), cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPapersCollector_Collect_Good_DryRun(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
p := &PapersCollector{Source: PaperSourceAll, Query: "cryptography"}
|
|
||||||
result, err := p.Collect(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatPaperMarkdown_Good(t *testing.T) {
|
|
||||||
md := FormatPaperMarkdown(
|
|
||||||
"Zero-Knowledge Proofs Revisited",
|
|
||||||
[]string{"Alice", "Bob"},
|
|
||||||
"2025-01-15",
|
|
||||||
"https://eprint.iacr.org/2025/001",
|
|
||||||
"iacr",
|
|
||||||
"We present a new construction for zero-knowledge proofs.",
|
|
||||||
)
|
|
||||||
|
|
||||||
assert.Contains(t, md, "# Zero-Knowledge Proofs Revisited")
|
|
||||||
assert.Contains(t, md, "**Authors:** Alice, Bob")
|
|
||||||
assert.Contains(t, md, "**Published:** 2025-01-15")
|
|
||||||
assert.Contains(t, md, "**URL:** https://eprint.iacr.org/2025/001")
|
|
||||||
assert.Contains(t, md, "**Source:** iacr")
|
|
||||||
assert.Contains(t, md, "## Abstract")
|
|
||||||
assert.Contains(t, md, "zero-knowledge proofs")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatPaperMarkdown_Good_Minimal(t *testing.T) {
|
|
||||||
md := FormatPaperMarkdown("Title Only", nil, "", "", "", "")
|
|
||||||
|
|
||||||
assert.Contains(t, md, "# Title Only")
|
|
||||||
assert.NotContains(t, md, "**Authors:**")
|
|
||||||
assert.NotContains(t, md, "## Abstract")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestArxivEntryToPaper_Good(t *testing.T) {
|
|
||||||
entry := arxivEntry{
|
|
||||||
ID: "http://arxiv.org/abs/2501.12345v1",
|
|
||||||
Title: " A Great Paper ",
|
|
||||||
Summary: " This paper presents... ",
|
|
||||||
Published: "2025-01-15T00:00:00Z",
|
|
||||||
Authors: []arxivAuthor{
|
|
||||||
{Name: "Alice"},
|
|
||||||
{Name: "Bob"},
|
|
||||||
},
|
|
||||||
Links: []arxivLink{
|
|
||||||
{Href: "http://arxiv.org/abs/2501.12345v1", Rel: "alternate"},
|
|
||||||
{Href: "http://arxiv.org/pdf/2501.12345v1", Rel: "related", Type: "application/pdf"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
ppr := arxivEntryToPaper(entry)
|
|
||||||
|
|
||||||
assert.Equal(t, "2501.12345v1", ppr.ID)
|
|
||||||
assert.Equal(t, "A Great Paper", ppr.Title)
|
|
||||||
assert.Equal(t, "This paper presents...", ppr.Abstract)
|
|
||||||
assert.Equal(t, "2025-01-15T00:00:00Z", ppr.Date)
|
|
||||||
assert.Equal(t, []string{"Alice", "Bob"}, ppr.Authors)
|
|
||||||
assert.Equal(t, "http://arxiv.org/abs/2501.12345v1", ppr.URL)
|
|
||||||
assert.Equal(t, "arxiv", ppr.Source)
|
|
||||||
}
|
|
||||||
|
|
@ -1,345 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Processor converts collected data to clean markdown.
//
// It reads files from Dir, converts HTML and JSON to markdown (markdown
// files pass through), and writes results under the configured output
// directory. See Process for details.
type Processor struct {
	// Source identifies the data source directory to process.
	Source string

	// Dir is the directory containing files to process.
	Dir string
}
|
|
||||||
|
|
||||||
// Name returns the processor name.
|
|
||||||
func (p *Processor) Name() string {
|
|
||||||
return fmt.Sprintf("process:%s", p.Source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process reads files from the source directory, converts HTML or JSON
|
|
||||||
// to clean markdown, and writes the results to the output directory.
|
|
||||||
func (p *Processor) Process(ctx context.Context, cfg *Config) (*Result, error) {
|
|
||||||
result := &Result{Source: p.Name()}
|
|
||||||
|
|
||||||
if p.Dir == "" {
|
|
||||||
return result, core.E("collect.Processor.Process", "directory is required", nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitStart(p.Name(), fmt.Sprintf("Processing files in %s", p.Dir))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.DryRun {
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitProgress(p.Name(), fmt.Sprintf("[dry-run] Would process files in %s", p.Dir), nil)
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
entries, err := cfg.Output.List(p.Dir)
|
|
||||||
if err != nil {
|
|
||||||
return result, core.E("collect.Processor.Process", "failed to list directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
outputDir := filepath.Join(cfg.OutputDir, "processed", p.Source)
|
|
||||||
if err := cfg.Output.EnsureDir(outputDir); err != nil {
|
|
||||||
return result, core.E("collect.Processor.Process", "failed to create output directory", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return result, core.E("collect.Processor.Process", "context cancelled", ctx.Err())
|
|
||||||
}
|
|
||||||
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
name := entry.Name()
|
|
||||||
srcPath := filepath.Join(p.Dir, name)
|
|
||||||
|
|
||||||
content, err := cfg.Output.Read(srcPath)
|
|
||||||
if err != nil {
|
|
||||||
result.Errors++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var processed string
|
|
||||||
ext := strings.ToLower(filepath.Ext(name))
|
|
||||||
|
|
||||||
switch ext {
|
|
||||||
case ".html", ".htm":
|
|
||||||
processed, err = htmlToMarkdown(content)
|
|
||||||
if err != nil {
|
|
||||||
result.Errors++
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitError(p.Name(), fmt.Sprintf("Failed to convert %s: %v", name, err), nil)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case ".json":
|
|
||||||
processed, err = jsonToMarkdown(content)
|
|
||||||
if err != nil {
|
|
||||||
result.Errors++
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitError(p.Name(), fmt.Sprintf("Failed to convert %s: %v", name, err), nil)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case ".md":
|
|
||||||
// Already markdown, just clean up
|
|
||||||
processed = strings.TrimSpace(content)
|
|
||||||
default:
|
|
||||||
result.Skipped++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write with .md extension
|
|
||||||
outName := strings.TrimSuffix(name, ext) + ".md"
|
|
||||||
outPath := filepath.Join(outputDir, outName)
|
|
||||||
|
|
||||||
if err := cfg.Output.Write(outPath, processed); err != nil {
|
|
||||||
result.Errors++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
result.Items++
|
|
||||||
result.Files = append(result.Files, outPath)
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitItem(p.Name(), fmt.Sprintf("Processed: %s", name), nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Dispatcher != nil {
|
|
||||||
cfg.Dispatcher.EmitComplete(p.Name(), fmt.Sprintf("Processed %d files", result.Items), result)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// htmlToMarkdown converts HTML content to clean markdown.
|
|
||||||
func htmlToMarkdown(content string) (string, error) {
|
|
||||||
doc, err := html.Parse(strings.NewReader(content))
|
|
||||||
if err != nil {
|
|
||||||
return "", core.E("collect.htmlToMarkdown", "failed to parse HTML", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var b strings.Builder
|
|
||||||
nodeToMarkdown(&b, doc, 0)
|
|
||||||
return strings.TrimSpace(b.String()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nodeToMarkdown recursively converts an HTML node tree to markdown,
// appending the result to b.
//
// Element handlers that fully consume their subtree return early; any
// element without a dedicated case (and "ul") falls through to the
// generic child recursion at the bottom. Headings, emphasis, code, and
// pre flatten their children to plain text via writeChildrenText, so
// nested markup inside them is lost by design. depth tracks list
// nesting; it is threaded through and incremented only inside list items.
func nodeToMarkdown(b *strings.Builder, n *html.Node, depth int) {
	switch n.Type {
	case html.TextNode:
		text := n.Data
		// Drop whitespace-only text nodes; nodes with visible content
		// keep their original spacing.
		if strings.TrimSpace(text) != "" {
			b.WriteString(text)
		}
	case html.ElementNode:
		switch n.Data {
		case "h1":
			b.WriteString("\n# ")
			writeChildrenText(b, n)
			b.WriteString("\n\n")
			return
		case "h2":
			b.WriteString("\n## ")
			writeChildrenText(b, n)
			b.WriteString("\n\n")
			return
		case "h3":
			b.WriteString("\n### ")
			writeChildrenText(b, n)
			b.WriteString("\n\n")
			return
		case "h4":
			b.WriteString("\n#### ")
			writeChildrenText(b, n)
			b.WriteString("\n\n")
			return
		case "h5":
			b.WriteString("\n##### ")
			writeChildrenText(b, n)
			b.WriteString("\n\n")
			return
		case "h6":
			b.WriteString("\n###### ")
			writeChildrenText(b, n)
			b.WriteString("\n\n")
			return
		case "p":
			// Paragraphs recurse (rather than flattening) so inline
			// markup such as links and bold is preserved.
			b.WriteString("\n")
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				nodeToMarkdown(b, c, depth)
			}
			b.WriteString("\n")
			return
		case "br":
			b.WriteString("\n")
			return
		case "strong", "b":
			b.WriteString("**")
			writeChildrenText(b, n)
			b.WriteString("**")
			return
		case "em", "i":
			b.WriteString("*")
			writeChildrenText(b, n)
			b.WriteString("*")
			return
		case "code":
			b.WriteString("`")
			writeChildrenText(b, n)
			b.WriteString("`")
			return
		case "pre":
			// Fenced code block, no language tag.
			b.WriteString("\n```\n")
			writeChildrenText(b, n)
			b.WriteString("\n```\n")
			return
		case "a":
			// If multiple href attributes are present, the last one wins.
			var href string
			for _, attr := range n.Attr {
				if attr.Key == "href" {
					href = attr.Val
				}
			}
			text := getChildrenText(n)
			if href != "" {
				fmt.Fprintf(b, "[%s](%s)", text, href)
			} else {
				// Anchor without an href: emit its text only.
				b.WriteString(text)
			}
			return
		case "ul":
			// No return: the "li" children are rendered by the generic
			// recursion below (each hits the "li" case).
			b.WriteString("\n")
		case "ol":
			b.WriteString("\n")
			// Number only direct "li" children; any other child nodes
			// of an ordered list are ignored.
			counter := 1
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				if c.Type == html.ElementNode && c.Data == "li" {
					fmt.Fprintf(b, "%d. ", counter)
					for gc := c.FirstChild; gc != nil; gc = gc.NextSibling {
						nodeToMarkdown(b, gc, depth+1)
					}
					b.WriteString("\n")
					counter++
				}
			}
			return
		case "li":
			// Reached only for "ul" items; "ol" renders its items inline above.
			b.WriteString("- ")
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				nodeToMarkdown(b, c, depth+1)
			}
			b.WriteString("\n")
			return
		case "blockquote":
			b.WriteString("\n> ")
			text := getChildrenText(n)
			// Prefix continuation lines so multi-line quotes stay quoted.
			b.WriteString(strings.ReplaceAll(text, "\n", "\n> "))
			b.WriteString("\n")
			return
		case "hr":
			b.WriteString("\n---\n")
			return
		case "script", "style", "head":
			// Non-content subtrees are dropped entirely.
			return
		}
	}

	// Generic fallback: recurse into children. This handles the document
	// node, body, div/span wrappers, "ul", and any unhandled element.
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		nodeToMarkdown(b, c, depth)
	}
}
|
|
||||||
|
|
||||||
// writeChildrenText writes the text content of all children.
|
|
||||||
func writeChildrenText(b *strings.Builder, n *html.Node) {
|
|
||||||
b.WriteString(getChildrenText(n))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getChildrenText returns the concatenated text content of all children.
|
|
||||||
func getChildrenText(n *html.Node) string {
|
|
||||||
var b strings.Builder
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
if c.Type == html.TextNode {
|
|
||||||
b.WriteString(c.Data)
|
|
||||||
} else {
|
|
||||||
b.WriteString(getChildrenText(c))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// jsonToMarkdown converts JSON content to a formatted markdown document.
|
|
||||||
func jsonToMarkdown(content string) (string, error) {
|
|
||||||
var data any
|
|
||||||
if err := json.Unmarshal([]byte(content), &data); err != nil {
|
|
||||||
return "", core.E("collect.jsonToMarkdown", "failed to parse JSON", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var b strings.Builder
|
|
||||||
b.WriteString("# Data\n\n")
|
|
||||||
jsonValueToMarkdown(&b, data, 0)
|
|
||||||
return strings.TrimSpace(b.String()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// jsonValueToMarkdown recursively formats a JSON value as markdown.
|
|
||||||
func jsonValueToMarkdown(b *strings.Builder, data any, depth int) {
|
|
||||||
switch v := data.(type) {
|
|
||||||
case map[string]any:
|
|
||||||
keys := make([]string, 0, len(v))
|
|
||||||
for key := range v {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
for _, key := range keys {
|
|
||||||
val := v[key]
|
|
||||||
indent := strings.Repeat(" ", depth)
|
|
||||||
switch child := val.(type) {
|
|
||||||
case map[string]any, []any:
|
|
||||||
fmt.Fprintf(b, "%s- **%s:**\n", indent, key)
|
|
||||||
jsonValueToMarkdown(b, child, depth+1)
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(b, "%s- **%s:** %v\n", indent, key, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case []any:
|
|
||||||
for i, item := range v {
|
|
||||||
indent := strings.Repeat(" ", depth)
|
|
||||||
switch child := item.(type) {
|
|
||||||
case map[string]any, []any:
|
|
||||||
fmt.Fprintf(b, "%s- Item %d:\n", indent, i+1)
|
|
||||||
jsonValueToMarkdown(b, child, depth+1)
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(b, "%s- %v\n", indent, item)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
indent := strings.Repeat(" ", depth)
|
|
||||||
fmt.Fprintf(b, "%s%v\n", indent, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTMLToMarkdown is exported for testing.
// It delegates directly to htmlToMarkdown without altering behaviour.
func HTMLToMarkdown(content string) (string, error) {
	return htmlToMarkdown(content)
}
|
|
||||||
|
|
||||||
// JSONToMarkdown is exported for testing.
// It delegates directly to jsonToMarkdown without altering behaviour.
func JSONToMarkdown(content string) (string, error) {
	return jsonToMarkdown(content)
}
|
|
||||||
|
|
@ -1,201 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestProcessor_Name_Good(t *testing.T) {
|
|
||||||
p := &Processor{Source: "github"}
|
|
||||||
assert.Equal(t, "process:github", p.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessor_Process_Bad_NoDir(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
|
|
||||||
p := &Processor{Source: "test"}
|
|
||||||
_, err := p.Process(context.Background(), cfg)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessor_Process_Good_DryRun(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.DryRun = true
|
|
||||||
|
|
||||||
p := &Processor{Source: "test", Dir: "/input"}
|
|
||||||
result, err := p.Process(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 0, result.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessor_Process_Good_HTMLFiles(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
m.Dirs["/input"] = true
|
|
||||||
m.Files["/input/page.html"] = `<html><body><h1>Hello</h1><p>World</p></body></html>`
|
|
||||||
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
p := &Processor{Source: "test", Dir: "/input"}
|
|
||||||
result, err := p.Process(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, result.Items)
|
|
||||||
assert.Len(t, result.Files, 1)
|
|
||||||
|
|
||||||
content, err := m.Read("/output/processed/test/page.md")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, content, "# Hello")
|
|
||||||
assert.Contains(t, content, "World")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessor_Process_Good_JSONFiles(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
m.Dirs["/input"] = true
|
|
||||||
m.Files["/input/data.json"] = `{"name": "Bitcoin", "price": 42000}`
|
|
||||||
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
p := &Processor{Source: "market", Dir: "/input"}
|
|
||||||
result, err := p.Process(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, result.Items)
|
|
||||||
|
|
||||||
content, err := m.Read("/output/processed/market/data.md")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, content, "# Data")
|
|
||||||
assert.Contains(t, content, "Bitcoin")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessor_Process_Good_MarkdownPassthrough(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
m.Dirs["/input"] = true
|
|
||||||
m.Files["/input/readme.md"] = "# Already Markdown\n\nThis is already formatted."
|
|
||||||
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
p := &Processor{Source: "docs", Dir: "/input"}
|
|
||||||
result, err := p.Process(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, result.Items)
|
|
||||||
|
|
||||||
content, err := m.Read("/output/processed/docs/readme.md")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, content, "# Already Markdown")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProcessor_Process_Good_SkipUnknownTypes(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
m.Dirs["/input"] = true
|
|
||||||
m.Files["/input/image.png"] = "binary data"
|
|
||||||
m.Files["/input/doc.html"] = "<h1>Heading</h1>"
|
|
||||||
|
|
||||||
cfg := NewConfigWithMedium(m, "/output")
|
|
||||||
cfg.Limiter = nil
|
|
||||||
|
|
||||||
p := &Processor{Source: "mixed", Dir: "/input"}
|
|
||||||
result, err := p.Process(context.Background(), cfg)
|
|
||||||
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, 1, result.Items) // Only the HTML file
|
|
||||||
assert.Equal(t, 1, result.Skipped) // The PNG file
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHTMLToMarkdown_Good(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
input string
|
|
||||||
contains []string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "heading",
|
|
||||||
input: "<h1>Title</h1>",
|
|
||||||
contains: []string{"# Title"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "paragraph",
|
|
||||||
input: "<p>Hello world</p>",
|
|
||||||
contains: []string{"Hello world"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "bold",
|
|
||||||
input: "<p><strong>bold text</strong></p>",
|
|
||||||
contains: []string{"**bold text**"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "italic",
|
|
||||||
input: "<p><em>italic text</em></p>",
|
|
||||||
contains: []string{"*italic text*"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "code",
|
|
||||||
input: "<p><code>code</code></p>",
|
|
||||||
contains: []string{"`code`"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "link",
|
|
||||||
input: `<p><a href="https://example.com">Example</a></p>`,
|
|
||||||
contains: []string{"[Example](https://example.com)"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "nested headings",
|
|
||||||
input: "<h2>Section</h2><h3>Subsection</h3>",
|
|
||||||
contains: []string{"## Section", "### Subsection"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "pre block",
|
|
||||||
input: "<pre>func main() {}</pre>",
|
|
||||||
contains: []string{"```", "func main() {}"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result, err := HTMLToMarkdown(tt.input)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
for _, s := range tt.contains {
|
|
||||||
assert.Contains(t, result, s)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHTMLToMarkdown_Good_StripsScripts(t *testing.T) {
|
|
||||||
input := `<html><head><script>alert('xss')</script></head><body><p>Clean</p></body></html>`
|
|
||||||
result, err := HTMLToMarkdown(input)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, result, "Clean")
|
|
||||||
assert.NotContains(t, result, "alert")
|
|
||||||
assert.NotContains(t, result, "script")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestJSONToMarkdown_Good(t *testing.T) {
|
|
||||||
input := `{"name": "test", "count": 42}`
|
|
||||||
result, err := JSONToMarkdown(input)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, result, "# Data")
|
|
||||||
assert.Contains(t, result, "test")
|
|
||||||
assert.Contains(t, result, "42")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestJSONToMarkdown_Good_Array(t *testing.T) {
|
|
||||||
input := `[{"id": 1}, {"id": 2}]`
|
|
||||||
result, err := JSONToMarkdown(input)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Contains(t, result, "# Data")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestJSONToMarkdown_Bad_InvalidJSON(t *testing.T) {
|
|
||||||
_, err := JSONToMarkdown("not json")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
@ -1,130 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RateLimiter tracks per-source rate limiting to avoid overwhelming APIs.
// All methods lock mu, so a RateLimiter is safe for concurrent use.
// The zero value is not usable; construct with NewRateLimiter.
type RateLimiter struct {
	mu     sync.Mutex               // guards delays and last
	delays map[string]time.Duration // minimum gap between requests, per source
	last   map[string]time.Time     // when the last slot was claimed, per source
}
|
|
||||||
|
|
||||||
// Default rate limit delays per source. Sources not listed here fall
// back to 500ms (see Wait and GetDelay).
var defaultDelays = map[string]time.Duration{
	"github":      500 * time.Millisecond,
	"bitcointalk": 2 * time.Second,
	"coingecko":   1500 * time.Millisecond,
	"iacr":        1 * time.Second,
	"arxiv":       1 * time.Second,
}
|
|
||||||
|
|
||||||
// NewRateLimiter creates a limiter with default delays.
|
|
||||||
func NewRateLimiter() *RateLimiter {
|
|
||||||
delays := make(map[string]time.Duration, len(defaultDelays))
|
|
||||||
for k, v := range defaultDelays {
|
|
||||||
delays[k] = v
|
|
||||||
}
|
|
||||||
return &RateLimiter{
|
|
||||||
delays: delays,
|
|
||||||
last: make(map[string]time.Time),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait blocks until the rate limit allows the next request for the given source.
|
|
||||||
// It respects context cancellation.
|
|
||||||
func (r *RateLimiter) Wait(ctx context.Context, source string) error {
|
|
||||||
r.mu.Lock()
|
|
||||||
delay, ok := r.delays[source]
|
|
||||||
if !ok {
|
|
||||||
delay = 500 * time.Millisecond
|
|
||||||
}
|
|
||||||
lastTime := r.last[source]
|
|
||||||
|
|
||||||
elapsed := time.Since(lastTime)
|
|
||||||
if elapsed >= delay {
|
|
||||||
// Enough time has passed — claim the slot immediately.
|
|
||||||
r.last[source] = time.Now()
|
|
||||||
r.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
remaining := delay - elapsed
|
|
||||||
r.mu.Unlock()
|
|
||||||
|
|
||||||
// Wait outside the lock, then reclaim.
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return core.E("collect.RateLimiter.Wait", "context cancelled", ctx.Err())
|
|
||||||
case <-time.After(remaining):
|
|
||||||
}
|
|
||||||
|
|
||||||
r.mu.Lock()
|
|
||||||
r.last[source] = time.Now()
|
|
||||||
r.mu.Unlock()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetDelay sets the minimum gap between requests for a source,
// overriding any default. Safe for concurrent use.
func (r *RateLimiter) SetDelay(source string, d time.Duration) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.delays[source] = d
}
|
|
||||||
|
|
||||||
// GetDelay returns the delay configured for a source.
|
|
||||||
func (r *RateLimiter) GetDelay(source string) time.Duration {
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
if d, ok := r.delays[source]; ok {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
return 500 * time.Millisecond
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckGitHubRateLimit checks GitHub API rate limit status via `gh api`.
// Returns used and limit counts. Auto-pauses at 75% usage by increasing
// the GitHub rate limit delay to 5s (the delay is never lowered again by
// this method).
//
// NOTE(review): assumes the `gh` CLI is installed and authenticated —
// stderr is not captured, so failures surface only as the exec error.
func (r *RateLimiter) CheckGitHubRateLimit() (used, limit int, err error) {
	// The jq expression prints "used limit" on a single line.
	cmd := exec.Command("gh", "api", "rate_limit", "--jq", ".rate | \"\\(.used) \\(.limit)\"")
	out, err := cmd.Output()
	if err != nil {
		return 0, 0, core.E("collect.RateLimiter.CheckGitHubRateLimit", "failed to check rate limit", err)
	}

	parts := strings.Fields(strings.TrimSpace(string(out)))
	if len(parts) != 2 {
		return 0, 0, core.E("collect.RateLimiter.CheckGitHubRateLimit",
			fmt.Sprintf("unexpected output format: %q", string(out)), nil)
	}

	used, err = strconv.Atoi(parts[0])
	if err != nil {
		return 0, 0, core.E("collect.RateLimiter.CheckGitHubRateLimit", "failed to parse used count", err)
	}

	limit, err = strconv.Atoi(parts[1])
	if err != nil {
		return 0, 0, core.E("collect.RateLimiter.CheckGitHubRateLimit", "failed to parse limit count", err)
	}

	// Auto-pause at 75% usage.
	if limit > 0 {
		usage := float64(used) / float64(limit)
		if usage >= 0.75 {
			// SetDelay takes its own lock, so this is safe here.
			r.SetDelay("github", 5*time.Second)
		}
	}

	return used, limit, nil
}
|
|
||||||
|
|
@ -1,84 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestRateLimiter_Wait_Good(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
rl.SetDelay("test", 50*time.Millisecond)
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// First call should return immediately
|
|
||||||
start := time.Now()
|
|
||||||
err := rl.Wait(ctx, "test")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Less(t, time.Since(start), 50*time.Millisecond)
|
|
||||||
|
|
||||||
// Second call should wait at least the delay
|
|
||||||
start = time.Now()
|
|
||||||
err = rl.Wait(ctx, "test")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.GreaterOrEqual(t, time.Since(start), 40*time.Millisecond) // allow small timing variance
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRateLimiter_Wait_Bad_ContextCancelled(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
rl.SetDelay("test", 5*time.Second)
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// First call to set the last time
|
|
||||||
err := rl.Wait(ctx, "test")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Cancel context before second call
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
cancel()
|
|
||||||
|
|
||||||
err = rl.Wait(ctx, "test")
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRateLimiter_SetDelay_Good(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
rl.SetDelay("custom", 3*time.Second)
|
|
||||||
assert.Equal(t, 3*time.Second, rl.GetDelay("custom"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRateLimiter_GetDelay_Good_Defaults(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
|
|
||||||
assert.Equal(t, 500*time.Millisecond, rl.GetDelay("github"))
|
|
||||||
assert.Equal(t, 2*time.Second, rl.GetDelay("bitcointalk"))
|
|
||||||
assert.Equal(t, 1500*time.Millisecond, rl.GetDelay("coingecko"))
|
|
||||||
assert.Equal(t, 1*time.Second, rl.GetDelay("iacr"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRateLimiter_GetDelay_Good_UnknownSource(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
// Unknown sources should get the default 500ms delay
|
|
||||||
assert.Equal(t, 500*time.Millisecond, rl.GetDelay("unknown"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRateLimiter_Wait_Good_UnknownSource(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// Unknown source should use default delay of 500ms
|
|
||||||
err := rl.Wait(ctx, "unknown-source")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewRateLimiter_Good(t *testing.T) {
|
|
||||||
rl := NewRateLimiter()
|
|
||||||
assert.NotNil(t, rl)
|
|
||||||
assert.NotNil(t, rl.delays)
|
|
||||||
assert.NotNil(t, rl.last)
|
|
||||||
assert.Len(t, rl.delays, len(defaultDelays))
|
|
||||||
}
|
|
||||||
|
|
@ -1,113 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
core "forge.lthn.ai/core/go/pkg/framework/core"
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// State tracks collection progress for incremental runs.
// It persists entries to disk so that subsequent runs can resume
// where they left off. All methods lock mu, so a State is safe for
// concurrent use.
type State struct {
	mu      sync.Mutex             // guards entries
	medium  io.Medium              // storage backend used by Load and Save
	path    string                 // location of the persisted JSON state file
	entries map[string]*StateEntry // per-source progress, keyed by source name
}
|
|
||||||
|
|
||||||
// StateEntry tracks collection state for one source. It is serialised
// to JSON by State.Save and read back by State.Load.
type StateEntry struct {
	// Source identifies the collector.
	Source string `json:"source"`

	// LastRun is the timestamp of the last successful run.
	LastRun time.Time `json:"last_run"`

	// LastID is an opaque identifier for the last item processed.
	LastID string `json:"last_id,omitempty"`

	// Items is the total number of items collected so far.
	Items int `json:"items"`

	// Cursor is an opaque pagination cursor for resumption.
	Cursor string `json:"cursor,omitempty"`
}
|
|
||||||
|
|
||||||
// NewState creates a state tracker that persists to the given path
|
|
||||||
// using the provided storage medium.
|
|
||||||
func NewState(m io.Medium, path string) *State {
|
|
||||||
return &State{
|
|
||||||
medium: m,
|
|
||||||
path: path,
|
|
||||||
entries: make(map[string]*StateEntry),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load reads state from disk. If the file does not exist, the state
|
|
||||||
// is initialised as empty without error.
|
|
||||||
func (s *State) Load() error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
if !s.medium.IsFile(s.path) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := s.medium.Read(s.path)
|
|
||||||
if err != nil {
|
|
||||||
return core.E("collect.State.Load", "failed to read state file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var entries map[string]*StateEntry
|
|
||||||
if err := json.Unmarshal([]byte(data), &entries); err != nil {
|
|
||||||
return core.E("collect.State.Load", "failed to parse state file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if entries == nil {
|
|
||||||
entries = make(map[string]*StateEntry)
|
|
||||||
}
|
|
||||||
s.entries = entries
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save writes state to disk.
|
|
||||||
func (s *State) Save() error {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
|
|
||||||
data, err := json.MarshalIndent(s.entries, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return core.E("collect.State.Save", "failed to marshal state", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.medium.Write(s.path, string(data)); err != nil {
|
|
||||||
return core.E("collect.State.Save", "failed to write state file", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a copy of the state for a source. The second return value
|
|
||||||
// indicates whether the entry was found.
|
|
||||||
func (s *State) Get(source string) (*StateEntry, bool) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
entry, ok := s.entries[source]
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
// Return a copy to avoid callers mutating internal state.
|
|
||||||
cp := *entry
|
|
||||||
return &cp, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set updates state for a source.
|
|
||||||
func (s *State) Set(source string, entry *StateEntry) {
|
|
||||||
s.mu.Lock()
|
|
||||||
defer s.mu.Unlock()
|
|
||||||
s.entries[source] = entry
|
|
||||||
}
|
|
||||||
|
|
@ -1,144 +0,0 @@
|
||||||
package collect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"forge.lthn.ai/core/go/pkg/io"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestState_SetGet_Good(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/state.json")
|
|
||||||
|
|
||||||
entry := &StateEntry{
|
|
||||||
Source: "github:test",
|
|
||||||
LastRun: time.Now(),
|
|
||||||
Items: 42,
|
|
||||||
LastID: "abc123",
|
|
||||||
Cursor: "cursor-xyz",
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Set("github:test", entry)
|
|
||||||
|
|
||||||
got, ok := s.Get("github:test")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, entry.Source, got.Source)
|
|
||||||
assert.Equal(t, entry.Items, got.Items)
|
|
||||||
assert.Equal(t, entry.LastID, got.LastID)
|
|
||||||
assert.Equal(t, entry.Cursor, got.Cursor)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestState_Get_Bad(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/state.json")
|
|
||||||
|
|
||||||
got, ok := s.Get("nonexistent")
|
|
||||||
assert.False(t, ok)
|
|
||||||
assert.Nil(t, got)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestState_SaveLoad_Good(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/state.json")
|
|
||||||
|
|
||||||
now := time.Date(2025, 1, 15, 10, 30, 0, 0, time.UTC)
|
|
||||||
entry := &StateEntry{
|
|
||||||
Source: "market:bitcoin",
|
|
||||||
LastRun: now,
|
|
||||||
Items: 100,
|
|
||||||
LastID: "btc-100",
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Set("market:bitcoin", entry)
|
|
||||||
|
|
||||||
// Save state
|
|
||||||
err := s.Save()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify file was written
|
|
||||||
assert.True(t, m.IsFile("/state.json"))
|
|
||||||
|
|
||||||
// Load into a new state instance
|
|
||||||
s2 := NewState(m, "/state.json")
|
|
||||||
err = s2.Load()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
got, ok := s2.Get("market:bitcoin")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, "market:bitcoin", got.Source)
|
|
||||||
assert.Equal(t, 100, got.Items)
|
|
||||||
assert.Equal(t, "btc-100", got.LastID)
|
|
||||||
assert.True(t, now.Equal(got.LastRun))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestState_Load_Good_NoFile(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/nonexistent.json")
|
|
||||||
|
|
||||||
// Loading when no file exists should not error
|
|
||||||
err := s.Load()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// State should be empty
|
|
||||||
_, ok := s.Get("anything")
|
|
||||||
assert.False(t, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestState_Load_Bad_InvalidJSON(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
m.Files["/state.json"] = "not valid json"
|
|
||||||
|
|
||||||
s := NewState(m, "/state.json")
|
|
||||||
err := s.Load()
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestState_SaveLoad_Good_MultipleEntries(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/state.json")
|
|
||||||
|
|
||||||
s.Set("source-a", &StateEntry{Source: "source-a", Items: 10})
|
|
||||||
s.Set("source-b", &StateEntry{Source: "source-b", Items: 20})
|
|
||||||
s.Set("source-c", &StateEntry{Source: "source-c", Items: 30})
|
|
||||||
|
|
||||||
err := s.Save()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
s2 := NewState(m, "/state.json")
|
|
||||||
err = s2.Load()
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
a, ok := s2.Get("source-a")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, 10, a.Items)
|
|
||||||
|
|
||||||
b, ok := s2.Get("source-b")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, 20, b.Items)
|
|
||||||
|
|
||||||
c, ok := s2.Get("source-c")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, 30, c.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestState_Set_Good_Overwrite(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/state.json")
|
|
||||||
|
|
||||||
s.Set("source", &StateEntry{Source: "source", Items: 5})
|
|
||||||
s.Set("source", &StateEntry{Source: "source", Items: 15})
|
|
||||||
|
|
||||||
got, ok := s.Get("source")
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.Equal(t, 15, got.Items)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewState_Good(t *testing.T) {
|
|
||||||
m := io.NewMockMedium()
|
|
||||||
s := NewState(m, "/test/state.json")
|
|
||||||
|
|
||||||
assert.NotNil(t, s)
|
|
||||||
assert.NotNil(t, s.entries)
|
|
||||||
}
|
|
||||||
|
|
@ -1,106 +0,0 @@
|
||||||
// Package container provides a runtime for managing LinuxKit containers.
|
|
||||||
// It supports running LinuxKit images (ISO, qcow2, vmdk, raw) using
|
|
||||||
// available hypervisors (QEMU on Linux, Hyperkit on macOS).
|
|
||||||
package container
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Container represents a running LinuxKit container/VM instance.
//
// Values of this type are produced by a Manager's Run and describe the
// hypervisor process backing the container.
type Container struct {
	// ID is a unique identifier for the container (8 character hex string,
	// as produced by GenerateID).
	ID string `json:"id"`
	// Name is the optional human-readable name for the container.
	Name string `json:"name,omitempty"`
	// Image is the path to the LinuxKit image being run.
	Image string `json:"image"`
	// Status represents the current state of the container.
	Status Status `json:"status"`
	// PID is the process ID of the hypervisor running this container.
	PID int `json:"pid"`
	// StartedAt is when the container was started.
	StartedAt time.Time `json:"started_at"`
	// Ports maps host ports to container ports.
	Ports map[int]int `json:"ports,omitempty"`
	// Memory is the amount of memory allocated in MB.
	Memory int `json:"memory,omitempty"`
	// CPUs is the number of CPUs allocated.
	CPUs int `json:"cpus,omitempty"`
}
|
|
||||||
|
|
||||||
// Status represents the state of a container.
type Status string

// The set of container states reported in Container.Status.
const (
	// StatusRunning indicates the container is running.
	StatusRunning Status = "running"
	// StatusStopped indicates the container has stopped.
	StatusStopped Status = "stopped"
	// StatusError indicates the container encountered an error.
	StatusError Status = "error"
)
|
|
||||||
|
|
||||||
// RunOptions configures how a container should be run.
//
// Zero-value fields fall back to the documented defaults; the defaults are
// presumably applied by the Manager implementation — confirm there.
type RunOptions struct {
	// Name is an optional human-readable name for the container.
	Name string
	// Detach runs the container in the background.
	Detach bool
	// Memory is the amount of memory to allocate in MB (default: 1024).
	Memory int
	// CPUs is the number of CPUs to allocate (default: 1).
	CPUs int
	// Ports maps host ports to container ports.
	Ports map[int]int
	// Volumes maps host paths to container paths.
	Volumes map[string]string
	// SSHPort is the port to use for SSH access (default: 2222).
	SSHPort int
	// SSHKey is the path to the SSH private key for exec commands.
	SSHKey string
}
|
|
||||||
|
|
||||||
// Manager defines the interface for container lifecycle management.
//
// Containers are addressed by the ID assigned at Run time.
type Manager interface {
	// Run starts a new container from the given image.
	Run(ctx context.Context, image string, opts RunOptions) (*Container, error)
	// Stop stops a running container by ID.
	Stop(ctx context.Context, id string) error
	// List returns all known containers.
	List(ctx context.Context) ([]*Container, error)
	// Logs returns a reader for the container's log output.
	// If follow is true, the reader will continue to stream new log entries.
	// The caller is responsible for closing the returned ReadCloser.
	Logs(ctx context.Context, id string, follow bool) (io.ReadCloser, error)
	// Exec executes a command inside the container via SSH.
	Exec(ctx context.Context, id string, cmd []string) error
}
|
|
||||||
|
|
||||||
// GenerateID creates a new unique container ID (8 hex characters).
//
// The ID is derived from 4 cryptographically random bytes; an error is
// returned only if the system's entropy source fails.
func GenerateID() (string, error) {
	var raw [4]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return "", err
	}
	return hex.EncodeToString(raw[:]), nil
}
|
|
||||||
|
|
||||||
// ImageFormat represents the format of a LinuxKit image.
type ImageFormat string

// The LinuxKit image formats this package recognises.
const (
	// FormatISO is an ISO image format.
	FormatISO ImageFormat = "iso"
	// FormatQCOW2 is a QEMU Copy-On-Write image format.
	FormatQCOW2 ImageFormat = "qcow2"
	// FormatVMDK is a VMware disk image format.
	FormatVMDK ImageFormat = "vmdk"
	// FormatRaw is a raw disk image format.
	FormatRaw ImageFormat = "raw"
	// FormatUnknown indicates an unknown image format.
	FormatUnknown ImageFormat = "unknown"
)
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue