Merge pull request 'feat(bugseti): wire BugSETI into root build system and ship v0.1.0' (#38) from feat/bugseti-launch into new
Commit 03a9640f05
148 changed files with 61152 additions and 2130 deletions

.forgejo/workflows/deploy.yml (new file, 146 lines)

# Host UK Production Deployment Pipeline
# Runs on Forgejo Actions (gitea.snider.dev)
# Runner: build.de.host.uk.com
#
# Workflow:
#   1. composer install + test
#   2. npm ci + build
#   3. docker build + push
#   4. Coolify deploy webhook (rolling restart)

name: Deploy

on:
  push:
    branches: [main]
  workflow_dispatch:

env:
  REGISTRY: dappco.re/osi
  IMAGE_APP: host-uk/app
  IMAGE_WEB: host-uk/web
  IMAGE_CORE: host-uk/core

jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup PHP
        uses: shivammathur/setup-php@v2
        with:
          php-version: "8.3"
          extensions: bcmath, gd, intl, mbstring, pdo_mysql, redis, zip
          coverage: none

      - name: Install Composer dependencies
        run: composer install --no-interaction --prefer-dist

      - name: Run tests
        run: composer test

      - name: Check code style
        run: ./vendor/bin/pint --test

  build-app:
    name: Build App Image
    needs: test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "22"
          cache: "npm"

      - name: Login to registry
        run: echo "${{ secrets.REGISTRY_TOKEN }}" | docker login ${{ env.REGISTRY }} -u ${{ secrets.REGISTRY_USER }} --password-stdin

      - name: Build and push app image
        run: |
          SHA=$(git rev-parse --short HEAD)
          docker build \
            -f docker/Dockerfile.app \
            -t ${{ env.REGISTRY }}/${{ env.IMAGE_APP }}:${SHA} \
            -t ${{ env.REGISTRY }}/${{ env.IMAGE_APP }}:latest \
            .
          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_APP }}:${SHA}
          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_APP }}:latest

  build-web:
    name: Build Web Image
    needs: test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Login to registry
        run: echo "${{ secrets.REGISTRY_TOKEN }}" | docker login ${{ env.REGISTRY }} -u ${{ secrets.REGISTRY_USER }} --password-stdin

      - name: Build and push web image
        run: |
          SHA=$(git rev-parse --short HEAD)
          docker build \
            -f docker/Dockerfile.web \
            -t ${{ env.REGISTRY }}/${{ env.IMAGE_WEB }}:${SHA} \
            -t ${{ env.REGISTRY }}/${{ env.IMAGE_WEB }}:latest \
            .
          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_WEB }}:${SHA}
          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_WEB }}:latest

  build-core:
    name: Build Core Image
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.25"

      - name: Build core binary
        run: |
          go build -ldflags '-s -w' -o bin/core .

      - name: Login to registry
        run: echo "${{ secrets.REGISTRY_TOKEN }}" | docker login ${{ env.REGISTRY }} -u ${{ secrets.REGISTRY_USER }} --password-stdin

      - name: Build and push core image
        run: |
          SHA=$(git rev-parse --short HEAD)
          cat > Dockerfile.core <<'EOF'
          FROM alpine:3.20
          RUN apk add --no-cache ca-certificates
          COPY bin/core /usr/local/bin/core
          ENTRYPOINT ["core"]
          EOF
          docker build \
            -f Dockerfile.core \
            -t ${{ env.REGISTRY }}/${{ env.IMAGE_CORE }}:${SHA} \
            -t ${{ env.REGISTRY }}/${{ env.IMAGE_CORE }}:latest \
            .
          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_CORE }}:${SHA}
          docker push ${{ env.REGISTRY }}/${{ env.IMAGE_CORE }}:latest

  deploy:
    name: Deploy to Production
    needs: [build-app, build-web, build-core]
    runs-on: ubuntu-latest
    steps:
      - name: Trigger Coolify deploy
        run: |
          curl -s -X POST \
            -H "Authorization: Bearer ${{ secrets.COOLIFY_TOKEN }}" \
            "${{ secrets.COOLIFY_URL }}/api/v1/deploy" \
            -H "Content-Type: application/json" \
            -d '{"uuid": "${{ secrets.COOLIFY_APP_UUID }}", "force": false}'

      - name: Wait for deployment
        run: |
          echo "Deployment triggered. Coolify will perform rolling restart."
          echo "Monitor at: ${{ secrets.COOLIFY_URL }}"

.gh-actions/workflows/alpha-release-manual.yml (new file, 92 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
name: "Alpha Release: Manual"

on:
  workflow_dispatch:

permissions:
  contents: write
  id-token: write
  attestations: write

env:
  NEXT_VERSION: "0.0.4"

jobs:
  build:
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            platform: linux/amd64
          - os: ubuntu-latest
            platform: linux/arm64
          - os: macos-latest
            platform: darwin/universal
          - os: windows-latest
            platform: windows/amd64
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v6

      - name: Build
        uses: host-uk/build@v3
        with:
          build-name: core
          build-platform: ${{ matrix.platform }}
          build: true
          package: true
          sign: false

  release:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Download artifacts
        uses: actions/download-artifact@v7
        with:
          path: dist
          merge-multiple: true

      - name: Prepare release files
        run: |
          mkdir -p release
          cp dist/* release/ 2>/dev/null || true
          ls -la release/

      - name: Create alpha release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          VERSION="v${{ env.NEXT_VERSION }}-alpha.${{ github.run_number }}"

          gh release create "$VERSION" \
            --title "Alpha: $VERSION" \
            --notes "Canary build from dev branch.

          **Version:** $VERSION
          **Commit:** ${{ github.sha }}
          **Built:** $(date -u +'%Y-%m-%d %H:%M:%S UTC')
          **Run:** ${{ github.run_id }}

          ## Channel: Alpha (Canary)

          This is an automated pre-release for early testing.

          - Systems and early adopters can test breaking changes
          - Quality scoring determines promotion to beta
          - Use stable releases for production

          ## Installation

          \`\`\`bash
          # macOS/Linux
          curl -fsSL https://github.com/host-uk/core/releases/download/$VERSION/core-linux-amd64 -o core
          chmod +x core && sudo mv core /usr/local/bin/
          \`\`\`
          " \
            --prerelease \
            --target dev \
            release/*

.gh-actions/workflows/alpha-release-push.yml (new file, 93 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push
name: "Alpha Release: Push"

on:
  push:
    branches: [dev]

permissions:
  contents: write
  id-token: write
  attestations: write

env:
  NEXT_VERSION: "0.0.4"

jobs:
  build:
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            platform: linux/amd64
          - os: ubuntu-latest
            platform: linux/arm64
          - os: macos-latest
            platform: darwin/universal
          - os: windows-latest
            platform: windows/amd64
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v6

      - name: Build
        uses: host-uk/build@v3
        with:
          build-name: core
          build-platform: ${{ matrix.platform }}
          build: true
          package: true
          sign: false

  release:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Download artifacts
        uses: actions/download-artifact@v7
        with:
          path: dist
          merge-multiple: true

      - name: Prepare release files
        run: |
          mkdir -p release
          cp dist/* release/ 2>/dev/null || true
          ls -la release/

      - name: Create alpha release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          VERSION="v${{ env.NEXT_VERSION }}-alpha.${{ github.run_number }}"

          gh release create "$VERSION" \
            --title "Alpha: $VERSION" \
            --notes "Canary build from dev branch.

          **Version:** $VERSION
          **Commit:** ${{ github.sha }}
          **Built:** $(date -u +'%Y-%m-%d %H:%M:%S UTC')
          **Run:** ${{ github.run_id }}

          ## Channel: Alpha (Canary)

          This is an automated pre-release for early testing.

          - Systems and early adopters can test breaking changes
          - Quality scoring determines promotion to beta
          - Use stable releases for production

          ## Installation

          \`\`\`bash
          # macOS/Linux
          curl -fsSL https://github.com/host-uk/core/releases/download/$VERSION/core-linux-amd64 -o core
          chmod +x core && sudo mv core /usr/local/bin/
          \`\`\`
          " \
            --prerelease \
            --target dev \
            release/*

.gh-actions/workflows/ci-manual.yml (new file, 41 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
name: "CI: Manual"

on:
  workflow_dispatch:

env:
  CORE_VERSION: dev

jobs:
  qa:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev

      - name: Build core CLI
        run: |
          go build -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=${{ env.CORE_VERSION }}" -o /usr/local/bin/core .
          core --version

      - name: Generate code
        run: go generate ./internal/cmd/updater/...

      - name: Run QA
        # Skip lint until golangci-lint supports Go 1.25
        run: core go qa --skip=lint

      - name: Verify build
        run: |
          core build --targets=linux/amd64 --ci
          dist/linux_amd64/core --version

.gh-actions/workflows/ci-pull-request.yml (new file, 42 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
name: "CI: Pull Request"

on:
  pull_request:
    branches: [dev, main]

env:
  CORE_VERSION: dev

jobs:
  qa:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev

      - name: Build core CLI
        run: |
          go build -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=${{ env.CORE_VERSION }}" -o /usr/local/bin/core .
          core --version

      - name: Generate code
        run: go generate ./internal/cmd/updater/...

      - name: Run QA
        # Skip lint until golangci-lint supports Go 1.25
        run: core go qa --skip=lint

      - name: Verify build
        run: |
          core build --targets=linux/amd64 --ci
          dist/linux_amd64/core --version

.gh-actions/workflows/ci-push.yml (new file, 42 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push
name: "CI: Push"

on:
  push:
    branches: [dev, main]

env:
  CORE_VERSION: dev

jobs:
  qa:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev

      - name: Build core CLI
        run: |
          go build -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=${{ env.CORE_VERSION }}" -o /usr/local/bin/core .
          core --version

      - name: Generate code
        run: go generate ./internal/cmd/updater/...

      - name: Run QA
        # Skip lint until golangci-lint supports Go 1.25
        run: core go qa --skip=lint

      - name: Verify build
        run: |
          core build --targets=linux/amd64 --ci
          dist/linux_amd64/core --version

.gh-actions/workflows/codeql-pull-request.yml (new file, 32 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
name: "CodeQL: Pull Request"

on:
  pull_request:
    branches: [dev, main]

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: go

      - name: Autobuild
        uses: github/codeql-action/autobuild@v4

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:go"

.gh-actions/workflows/codeql-push.yml (new file, 32 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push
name: "CodeQL: Push"

on:
  push:
    branches: [dev, main]

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: go

      - name: Autobuild
        uses: github/codeql-action/autobuild@v4

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:go"

.gh-actions/workflows/codeql-schedule.yml (new file, 32 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#schedule
name: "CodeQL: Schedule"

on:
  schedule:
    - cron: "0 6 * * 1"

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: go

      - name: Autobuild
        uses: github/codeql-action/autobuild@v4

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:go"

.gh-actions/workflows/codescan-pull-request.yml (new file, 30 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
name: "Code Scanning: Pull Request"

on:
  pull_request:
    branches: ["dev"]

jobs:
  CodeQL:
    runs-on: ubuntu-latest

    permissions:
      security-events: write
      actions: read
      contents: read

    steps:
      - name: "Checkout Repository"
        uses: actions/checkout@v6

      - name: "Initialize CodeQL"
        uses: github/codeql-action/init@v4
        with:
          languages: go,javascript,typescript

      - name: "Autobuild"
        uses: github/codeql-action/autobuild@v4

      - name: "Perform CodeQL Analysis"
        uses: github/codeql-action/analyze@v4

.gh-actions/workflows/codescan-push.yml (new file, 30 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push
name: "Code Scanning: Push"

on:
  push:
    branches: ["dev"]

jobs:
  CodeQL:
    runs-on: ubuntu-latest

    permissions:
      security-events: write
      actions: read
      contents: read

    steps:
      - name: "Checkout Repository"
        uses: actions/checkout@v6

      - name: "Initialize CodeQL"
        uses: github/codeql-action/init@v4
        with:
          languages: go,javascript,typescript

      - name: "Autobuild"
        uses: github/codeql-action/autobuild@v4

      - name: "Perform CodeQL Analysis"
        uses: github/codeql-action/analyze@v4

.gh-actions/workflows/codescan-schedule.yml (new file, 30 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#schedule
name: "Code Scanning: Schedule"

on:
  schedule:
    - cron: "0 2 * * 1-5"

jobs:
  CodeQL:
    runs-on: ubuntu-latest

    permissions:
      security-events: write
      actions: read
      contents: read

    steps:
      - name: "Checkout Repository"
        uses: actions/checkout@v6

      - name: "Initialize CodeQL"
        uses: github/codeql-action/init@v4
        with:
          languages: go,javascript,typescript

      - name: "Autobuild"
        uses: github/codeql-action/autobuild@v4

      - name: "Perform CodeQL Analysis"
        uses: github/codeql-action/analyze@v4

.gh-actions/workflows/coverage-manual.yml (new file, 46 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
name: "Coverage: Manual"

on:
  workflow_dispatch:

env:
  CORE_VERSION: dev

jobs:
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev

      - name: Build core CLI
        run: |
          go build -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=${{ env.CORE_VERSION }}" -o /usr/local/bin/core .
          core --version

      - name: Generate code
        run: go generate ./internal/cmd/updater/...

      - name: Run coverage
        run: core go cov

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage report
        uses: actions/upload-artifact@v6
        with:
          name: coverage-report
          path: coverage.txt

.gh-actions/workflows/coverage-pull-request.yml (new file, 47 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
name: "Coverage: Pull Request"

on:
  pull_request:
    branches: [dev, main]

env:
  CORE_VERSION: dev

jobs:
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev

      - name: Build core CLI
        run: |
          go build -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=${{ env.CORE_VERSION }}" -o /usr/local/bin/core .
          core --version

      - name: Generate code
        run: go generate ./internal/cmd/updater/...

      - name: Run coverage
        run: core go cov

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage report
        uses: actions/upload-artifact@v6
        with:
          name: coverage-report
          path: coverage.txt

.gh-actions/workflows/coverage-push.yml (new file, 47 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#push
name: "Coverage: Push"

on:
  push:
    branches: [dev, main]

env:
  CORE_VERSION: dev

jobs:
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Set up Go
        uses: actions/setup-go@v6
        with:
          go-version-file: 'go.mod'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libgtk-3-dev libwebkit2gtk-4.1-dev

      - name: Build core CLI
        run: |
          go build -ldflags "-X github.com/host-uk/core/pkg/cli.AppVersion=${{ env.CORE_VERSION }}" -o /usr/local/bin/core .
          core --version

      - name: Generate code
        run: go generate ./internal/cmd/updater/...

      - name: Run coverage
        run: core go cov

      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}

      - name: Upload coverage report
        uses: actions/upload-artifact@v6
        with:
          name: coverage-report
          path: coverage.txt

.gh-actions/workflows/pr-build-manual.yml (new file, 89 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_dispatch
name: "PR Build: Manual"

on:
  workflow_dispatch:
    inputs:
      pr_number:
        description: 'PR number to build'
        required: true
        type: number

permissions:
  contents: write
  pull-requests: read

env:
  NEXT_VERSION: "0.0.4"

jobs:
  build:
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            platform: linux/amd64
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v6

      - name: Build
        uses: host-uk/build@v3
        with:
          build-name: core
          build-platform: ${{ matrix.platform }}
          build: true
          package: true
          sign: false

  draft-release:
    needs: build
    runs-on: ubuntu-latest
    env:
      PR_NUM: ${{ inputs.pr_number }}
      PR_SHA: ${{ github.sha }}
    steps:
      - uses: actions/checkout@v6

      - name: Download artifacts
        uses: actions/download-artifact@v7
        with:
          path: dist
          merge-multiple: true

      - name: Prepare release files
        run: |
          mkdir -p release
          cp dist/* release/ 2>/dev/null || true
          ls -la release/

      - name: Create draft release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          TAG="v${{ env.NEXT_VERSION }}.pr.${PR_NUM}.bid.${{ github.run_id }}"

          # Delete existing draft for this PR if it exists
          gh release delete "$TAG" -y 2>/dev/null || true
          git push origin ":refs/tags/$TAG" 2>/dev/null || true

          gh release create "$TAG" \
            --title "Draft: PR #${PR_NUM}" \
            --notes "Draft build for PR #${PR_NUM}.

          **Version:** $TAG
          **PR:** #${PR_NUM}
          **Commit:** ${PR_SHA}
          **Built:** $(date -u +'%Y-%m-%d %H:%M:%S UTC')
          **Run:** ${{ github.run_id }}

          ## Channel: Draft

          This is a draft build for testing PR changes before merge.
          Not intended for production use.

          Build artifacts available for download and testing.
          " \
            --draft \
            --prerelease \
            release/*

.gh-actions/workflows/pr-build-pull-request.yml (new file, 89 lines)

# https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
name: "PR Build: Pull Request"

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: write
  pull-requests: read

env:
  NEXT_VERSION: "0.0.4"

jobs:
  build:
    # Only build if PR is from the same repo (not forks)
    if: github.event.pull_request.head.repo.full_name == github.repository
    strategy:
      matrix:
        include:
          - os: ubuntu-latest
            platform: linux/amd64
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v6
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Build
        uses: host-uk/build@v3
        with:
          build-name: core
          build-platform: ${{ matrix.platform }}
          build: true
          package: true
          sign: false

  draft-release:
    needs: build
    runs-on: ubuntu-latest
    env:
      PR_NUM: ${{ github.event.pull_request.number }}
      PR_SHA: ${{ github.event.pull_request.head.sha }}
    steps:
      - uses: actions/checkout@v6

      - name: Download artifacts
        uses: actions/download-artifact@v7
        with:
          path: dist
          merge-multiple: true

      - name: Prepare release files
        run: |
          mkdir -p release
          cp dist/* release/ 2>/dev/null || true
          ls -la release/

      - name: Create draft release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          TAG="v${{ env.NEXT_VERSION }}.pr.${PR_NUM}.bid.${{ github.run_id }}"

          # Delete existing draft for this PR if it exists
          gh release delete "$TAG" -y 2>/dev/null || true
          git push origin ":refs/tags/$TAG" 2>/dev/null || true

          gh release create "$TAG" \
            --title "Draft: PR #${PR_NUM}" \
            --notes "Draft build for PR #${PR_NUM}.

          **Version:** $TAG
          **PR:** #${PR_NUM}
          **Commit:** ${PR_SHA}
          **Built:** $(date -u +'%Y-%m-%d %H:%M:%S UTC')
          **Run:** ${{ github.run_id }}

          ## Channel: Draft

          This is a draft build for testing PR changes before merge.
          Not intended for production use.

          Build artifacts available for download and testing.
          " \
            --draft \
            --prerelease \
            release/*

.gitignore (vendored, 2 additions)

@@ -18,6 +18,8 @@ tasks
 /core
 /i18n-validate
 cmd/bugseti/bugseti
+internal/core-ide/core-ide
+.angular/

 patch_cov.*
 go.work.sum

Taskfile.yaml (new file, 6 lines)

version: '3'

tasks:
  build:
    cmds:
      - go build -o build/bin/core cmd/app/main.go

Taskfile.yml (23 changes)

@@ -220,6 +220,29 @@ tasks:
       - go build -tags nowatcher -o ../../bin/core-app .
       - ../../bin/core-app
+
+  # --- BugSETI (Wails v3 System Tray) ---
+  bugseti:dev:
+    desc: "Build and run BugSETI (production binary with embedded frontend)"
+    dir: cmd/bugseti
+    cmds:
+      - cd frontend && npm install && npm run build
+      - go build -buildvcs=false -o ../../bin/bugseti .
+      - ../../bin/bugseti
+
+  bugseti:build:
+    desc: "Build BugSETI production binary"
+    dir: cmd/bugseti
+    cmds:
+      - cd frontend && npm install && npm run build
+      - go build -trimpath -buildvcs=false -ldflags="-w -s" -o ../../bin/bugseti .
+
+  bugseti:frontend:
+    desc: "Build BugSETI frontend only"
+    dir: cmd/bugseti/frontend
+    cmds:
+      - npm install
+      - npm run build
+
   # --- Multi-repo (when in workspace) ---
   dev:health:
     desc: "Check health of all repos"

@@ -1,30 +1,38 @@
 # BugSETI Wails v3 Build Configuration
-version: "3"
+version: '3'

-# Application information
-name: "BugSETI"
-outputfilename: "bugseti"
-description: "Distributed Bug Fixing - like SETI@home but for code"
-productidentifier: "io.lethean.bugseti"
-productname: "BugSETI"
-productcompany: "Lethean"
-copyright: "Copyright 2026 Lethean"
-
-# Development server
-devserver:
-  frontend: "http://localhost:9246"
-
-# Frontend configuration
-frontend:
-  dir: "frontend"
-  installcmd: "npm install"
-  buildcmd: "npm run build"
-  devcmd: "npm run dev"
-
-# Build information
+# Build metadata
 info:
-  companyname: "Lethean"
-  productversion: "0.1.0"
-  fileversion: "0.1.0"
+  companyName: "Lethean"
+  productName: "BugSETI"
+  productIdentifier: "io.lethean.bugseti"
+  description: "Distributed Bug Fixing - like SETI@home but for code"
+  copyright: "Copyright 2026 Lethean"
   comments: "Distributed OSS bug fixing application"
+  version: "0.1.0"
+
+# Dev mode configuration
+dev_mode:
+  root_path: .
+  log_level: warn
+  debounce: 1000
+  ignore:
+    dir:
+      - .git
+      - node_modules
+      - frontend
+      - bin
+    file:
+      - .DS_Store
+      - .gitignore
+      - .gitkeep
+    watched_extension:
+      - "*.go"
+    git_ignore: true
+  executes:
+    - cmd: go build -buildvcs=false -gcflags=all=-l -o bin/bugseti .
+      type: blocking
+    - cmd: cd frontend && npx ng serve --port ${WAILS_FRONTEND_PORT:-9246}
+      type: background
+    - cmd: bin/bugseti
+      type: primary

@@ -43,8 +43,8 @@
       },
       {
         "type": "anyComponentStyle",
-        "maximumWarning": "2kb",
-        "maximumError": "4kb"
+        "maximumWarning": "6kb",
+        "maximumError": "10kb"
       }
     ],
     "outputHashing": "all"

@@ -87,5 +87,8 @@
       }
     }
   }
+  },
+  "cli": {
+    "analytics": false
   }
 }

@@ -145,10 +145,12 @@ export class JellyfinComponent {
   apiKey = '';
   mediaSourceId = '';

-  safeWebUrl: SafeResourceUrl = this.sanitizer.bypassSecurityTrustResourceUrl('https://media.lthn.ai/web/index.html');
+  safeWebUrl!: SafeResourceUrl;
   streamUrl = '';

-  constructor(private sanitizer: DomSanitizer) {}
+  constructor(private sanitizer: DomSanitizer) {
+    this.safeWebUrl = this.sanitizer.bypassSecurityTrustResourceUrl('https://media.lthn.ai/web/index.html');
+  }

   load(): void {
     const base = this.normalizeBase(this.serverUrl);

@@ -12,12 +12,15 @@ import (
 	"embed"
 	"io/fs"
 	"log"
+	"net/http"
 	"runtime"
+	"strings"

 	"github.com/host-uk/core/cmd/bugseti/icons"
 	"github.com/host-uk/core/internal/bugseti"
 	"github.com/host-uk/core/internal/bugseti/updater"
 	"github.com/wailsapp/wails/v3/pkg/application"
+	"github.com/wailsapp/wails/v3/pkg/events"
 )

 //go:embed all:frontend/dist/bugseti/browser

@@ -80,7 +83,7 @@ func main() {
 		Description: "Distributed Bug Fixing - like SETI@home but for code",
 		Services:    services,
 		Assets: application.AssetOptions{
-			Handler: application.AssetFileServerFS(staticAssets),
+			Handler: spaHandler(staticAssets),
 		},
 		Mac: application.MacOptions{
 			ActivationPolicy: application.ActivationPolicyAccessory,

@@ -236,9 +239,31 @@ func setupSystemTray(app *application.App, fetcher *bugseti.FetcherService, queu

 	systray.SetMenu(trayMenu)

-	// Check if onboarding needed
-	if !config.IsOnboarded() {
-		onboardingWindow.Show()
-		onboardingWindow.Focus()
-	}
+	// Check if onboarding needed (deferred until app is running)
+	app.Event.RegisterApplicationEventHook(events.Common.ApplicationStarted, func(event *application.ApplicationEvent) {
+		if !config.IsOnboarded() {
+			onboardingWindow.Show()
+			onboardingWindow.Focus()
+		}
+	})
 }
+
+// spaHandler wraps an fs.FS to serve static files with SPA fallback.
+// If the requested path doesn't match a real file, it serves index.html
+// so Angular's client-side router can handle the route.
+func spaHandler(fsys fs.FS) http.Handler {
+	fileServer := http.FileServer(http.FS(fsys))
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		path := strings.TrimPrefix(r.URL.Path, "/")
+		if path == "" {
+			path = "index.html"
+		}
+
+		// Check if the file exists
+		if _, err := fs.Stat(fsys, path); err != nil {
+			// File doesn't exist — serve index.html for SPA routing
+			r.URL.Path = "/"
+		}
+		fileServer.ServeHTTP(w, r)
+	})
+}

cmd/core-ide/claude_bridge.go (new file, 171 lines)

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"sync"
	"time"

	"github.com/gorilla/websocket"
)

var wsUpgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}

// ClaudeBridge forwards messages between GUI clients and the MCP core WebSocket.
// This is the CLIENT bridge — it connects to the MCP core process on port 9876
// and relays messages bidirectionally with connected GUI WebSocket clients.
type ClaudeBridge struct {
	mcpConn     *websocket.Conn
	mcpURL      string
	clients     map[*websocket.Conn]bool
	clientsMu   sync.RWMutex
	broadcast   chan []byte
	reconnectMu sync.Mutex
	connected   bool
}

// NewClaudeBridge creates a new bridge to the MCP core WebSocket.
func NewClaudeBridge(mcpURL string) *ClaudeBridge {
	return &ClaudeBridge{
		mcpURL:    mcpURL,
		clients:   make(map[*websocket.Conn]bool),
		broadcast: make(chan []byte, 256),
	}
}

// Connected reports whether the bridge is connected to MCP core.
func (cb *ClaudeBridge) Connected() bool {
	cb.reconnectMu.Lock()
	defer cb.reconnectMu.Unlock()
	return cb.connected
}

// Start connects to the MCP WebSocket and starts the bridge.
func (cb *ClaudeBridge) Start() {
	go cb.connectToMCP()
	go cb.broadcastLoop()
}

// connectToMCP establishes connection to the MCP core WebSocket.
func (cb *ClaudeBridge) connectToMCP() {
	for {
		cb.reconnectMu.Lock()
		if cb.mcpConn != nil {
			cb.mcpConn.Close()
		}

		log.Printf("ide bridge: connect to MCP at %s", cb.mcpURL)
		conn, _, err := websocket.DefaultDialer.Dial(cb.mcpURL, nil)
		if err != nil {
			log.Printf("ide bridge: connect failed: %v", err)
			cb.connected = false
			cb.reconnectMu.Unlock()
			time.Sleep(5 * time.Second)
			continue
		}

		cb.mcpConn = conn
		cb.connected = true
		cb.reconnectMu.Unlock()
		log.Println("ide bridge: connected to MCP core")

		// Read messages from MCP and broadcast to GUI clients
		for {
			_, message, err := conn.ReadMessage()
			if err != nil {
				log.Printf("ide bridge: MCP read error: %v", err)
				break
			}
			cb.broadcast <- message
		}

		cb.reconnectMu.Lock()
		cb.connected = false
		cb.reconnectMu.Unlock()

		// Connection lost, retry after delay
		time.Sleep(2 * time.Second)
	}
}

// broadcastLoop sends messages from MCP core to all connected GUI clients.
func (cb *ClaudeBridge) broadcastLoop() {
	for message := range cb.broadcast {
		cb.clientsMu.RLock()
		for client := range cb.clients {
			if err := client.WriteMessage(websocket.TextMessage, message); err != nil {
				log.Printf("ide bridge: client write error: %v", err)
			}
		}
		cb.clientsMu.RUnlock()
	}
}

// HandleWebSocket handles WebSocket connections from GUI clients.
func (cb *ClaudeBridge) HandleWebSocket(w http.ResponseWriter, r *http.Request) {
	conn, err := wsUpgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Printf("ide bridge: upgrade error: %v", err)
		return
	}

	cb.clientsMu.Lock()
	cb.clients[conn] = true
	cb.clientsMu.Unlock()

	// Send connected message
	connMsg, _ := json.Marshal(map[string]any{
		"type":      "system",
		"data":      "Connected to Claude bridge",
		"timestamp": time.Now(),
	})
	conn.WriteMessage(websocket.TextMessage, connMsg)

	defer func() {
		cb.clientsMu.Lock()
		delete(cb.clients, conn)
		cb.clientsMu.Unlock()
		conn.Close()
	}()

	// Read messages from GUI client and forward to MCP core
	for {
		_, message, err := conn.ReadMessage()
		if err != nil {
			break
		}

		// Parse the message to check type
		var msg map[string]any
		if err := json.Unmarshal(message, &msg); err != nil {
			continue
		}

		// Forward claude_message to MCP core
		if msgType, ok := msg["type"].(string); ok && msgType == "claude_message" {
			cb.sendToMCP(message)
		}
	}
}

// sendToMCP sends a message to the MCP WebSocket.
func (cb *ClaudeBridge) sendToMCP(message []byte) {
	cb.reconnectMu.Lock()
	defer cb.reconnectMu.Unlock()

	if cb.mcpConn == nil {
		log.Println("ide bridge: MCP not connected, dropping message")
		return
	}

	if err := cb.mcpConn.WriteMessage(websocket.TextMessage, message); err != nil {
		log.Printf("ide bridge: MCP write error: %v", err)
	}
}

cmd/core-ide/frontend/package-lock.json (generated, new file, 16159 lines)
File diff suppressed because it is too large.

@@ -1,9 +1,9 @@
 import { ApplicationConfig } from '@angular/core';
-import { provideRouter, withHashLocation } from '@angular/router';
+import { provideRouter } from '@angular/router';
 import { routes } from './app.routes';

 export const appConfig: ApplicationConfig = {
   providers: [
-    provideRouter(routes, withHashLocation())
+    provideRouter(routes)
   ]
 };

@@ -51,7 +51,7 @@ import { Subscription } from 'rxjs';
         <textarea
           class="form-textarea"
           [(ngModel)]="draft"
-          (keydown.enter)="sendMessage($event)"
+          (keydown.enter)="sendMessage($any($event))"
           placeholder="Type a message... (Enter to send)"
           rows="2"
         ></textarea>

@@ -133,10 +133,12 @@ export class JellyfinComponent {
   apiKey = '';
   mediaSourceId = '';

-  safeWebUrl: SafeResourceUrl = this.sanitizer.bypassSecurityTrustResourceUrl('https://media.lthn.ai/web/index.html');
+  safeWebUrl!: SafeResourceUrl;
   streamUrl = '';

-  constructor(private sanitizer: DomSanitizer) {}
+  constructor(private sanitizer: DomSanitizer) {
+    this.safeWebUrl = this.sanitizer.bypassSecurityTrustResourceUrl('https://media.lthn.ai/web/index.html');
+  }

   load(): void {
     const base = this.normalizeBase(this.serverUrl);

@@ -1,6 +1,5 @@
 import { Component } from '@angular/core';
 import { CommonModule } from '@angular/common';
-import { RouterLink, RouterLinkActive, RouterOutlet } from '@angular/router';
 import { ChatComponent } from '../chat/chat.component';
 import { BuildComponent } from '../build/build.component';
 import { DashboardComponent } from '../dashboard/dashboard.component';

@@ -11,7 +10,7 @@ type Panel = 'chat' | 'build' | 'dashboard' | 'jellyfin';
 @Component({
   selector: 'app-main',
   standalone: true,
-  imports: [CommonModule, RouterLink, RouterLinkActive, RouterOutlet, ChatComponent, BuildComponent, DashboardComponent, JellyfinComponent],
+  imports: [CommonModule, ChatComponent, BuildComponent, DashboardComponent, JellyfinComponent],
   template: `
     <div class="ide">
       <nav class="ide__sidebar">

@@ -3,6 +3,7 @@ module github.com/host-uk/core/cmd/core-ide
 go 1.25.5

 require (
+	github.com/gorilla/websocket v1.5.3
 	github.com/host-uk/core v0.0.0
 	github.com/wailsapp/wails/v3 v3.0.0-alpha.64
 )

@@ -26,7 +27,6 @@ require (
 	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/google/jsonschema-go v0.4.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1 // indirect
 	github.com/kevinburke/ssh_config v1.4.0 // indirect

@@ -3,7 +3,6 @@ package main
 import (
 	"context"
 	"log"
-	"net/http"

 	"github.com/host-uk/core/pkg/mcp/ide"
 	"github.com/host-uk/core/pkg/ws"

@@ -26,9 +25,7 @@ func NewIDEService(ideSub *ide.Subsystem, hub *ws.Hub) *IDEService {
 func (s *IDEService) ServiceName() string { return "IDEService" }

 // ServiceStartup is called when the Wails application starts.
-func (s *IDEService) ServiceStartup(ctx context.Context, options application.ServiceOptions) error {
-	// Start WebSocket HTTP server for the Angular frontend
-	go s.startWSServer()
+func (s *IDEService) ServiceStartup(_ context.Context, _ application.ServiceOptions) error {
 	log.Println("IDEService started")
 	return nil
 }

@@ -84,19 +81,3 @@ func (s *IDEService) ShowWindow(name string) {
 		w.Focus()
 	}
 }
-
-// startWSServer starts the WebSocket HTTP server for the Angular frontend.
-func (s *IDEService) startWSServer() {
-	mux := http.NewServeMux()
-	mux.HandleFunc("/ws", s.hub.HandleWebSocket)
-	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
-		w.WriteHeader(http.StatusOK)
-		w.Write([]byte(`{"status":"ok"}`))
-	})
-
-	addr := "127.0.0.1:9877"
-	log.Printf("IDE WebSocket server listening on %s", addr)
-	if err := http.ListenAndServe(addr, mux); err != nil {
-		log.Printf("IDE WebSocket server error: %v", err)
-	}
-}

@@ -9,7 +9,9 @@ import (
 	"embed"
 	"io/fs"
 	"log"
+	"net/http"
 	"runtime"
+	"strings"

 	"github.com/host-uk/core/cmd/core-ide/icons"
 	"github.com/host-uk/core/pkg/mcp/ide"

@@ -41,6 +43,9 @@ func main() {
 	chatService := NewChatService(ideSub)
 	buildService := NewBuildService(ideSub)

+	// Create MCP bridge (SERVER: HTTP tool server + CLIENT: WebSocket relay)
+	mcpBridge := NewMCPBridge(hub, 9877)
+
 	app := application.New(application.Options{
 		Name:        "Core IDE",
 		Description: "Host UK Platform IDE - AI Agent Sessions, Build Monitoring & Dashboard",

@@ -48,9 +53,10 @@ func main() {
 			application.NewService(ideService),
 			application.NewService(chatService),
 			application.NewService(buildService),
+			application.NewService(mcpBridge),
 		},
 		Assets: application.AssetOptions{
-			Handler: application.AssetFileServerFS(staticAssets),
+			Handler: spaHandler(staticAssets),
 		},
 		Mac: application.MacOptions{
 			ActivationPolicy: application.ActivationPolicyAccessory,

@@ -63,7 +69,8 @@ func main() {

 	log.Println("Starting Core IDE...")
 	log.Println(" - System tray active")
-	log.Println(" - Bridge connecting to Laravel core-agentic...")
+	log.Println(" - MCP bridge (SERVER) on :9877")
+	log.Println(" - Claude bridge (CLIENT) → MCP core on :9876")

 	if err := app.Run(); err != nil {
 		log.Fatal(err)

@@ -149,3 +156,18 @@ func setupSystemTray(app *application.App, ideService *IDEService) {

 	systray.SetMenu(trayMenu)
 }
+
+// spaHandler wraps an fs.FS to serve static files with SPA fallback.
+func spaHandler(fsys fs.FS) http.Handler {
+	fileServer := http.FileServer(http.FS(fsys))
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		path := strings.TrimPrefix(r.URL.Path, "/")
+		if path == "" {
+			path = "index.html"
+		}
+		if _, err := fs.Stat(fsys, path); err != nil {
+			r.URL.Path = "/"
+		}
+		fileServer.ServeHTTP(w, r)
+	})
+}

cmd/core-ide/mcp_bridge.go (new file, 504 lines; listing truncated here)

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"sync"

	"github.com/host-uk/core/pkg/ws"
	"github.com/wailsapp/wails/v3/pkg/application"
)

// MCPBridge is the SERVER bridge that exposes MCP tools via HTTP.
// AI agents call these endpoints to control windows, execute JS in webviews,
// access the clipboard, show notifications, and query the app state.
type MCPBridge struct {
	app          *application.App
	hub          *ws.Hub
	claudeBridge *ClaudeBridge
	port         int
	running      bool
	mu           sync.Mutex
}

// NewMCPBridge creates a new MCP bridge server.
func NewMCPBridge(hub *ws.Hub, port int) *MCPBridge {
	cb := NewClaudeBridge("ws://localhost:9876/ws")
	return &MCPBridge{
		hub:          hub,
		claudeBridge: cb,
		port:         port,
	}
}

// ServiceName returns the Wails service name.
func (b *MCPBridge) ServiceName() string { return "MCPBridge" }

// ServiceStartup is called by Wails when the app starts.
func (b *MCPBridge) ServiceStartup(_ context.Context, _ application.ServiceOptions) error {
	b.app = application.Get()
	go b.startHTTPServer()
	log.Printf("MCP Bridge started on port %d", b.port)
	return nil
}

// ServiceShutdown is called when the app shuts down.
func (b *MCPBridge) ServiceShutdown() error {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.running = false
	return nil
}

// startHTTPServer starts the HTTP server for MCP tools and WebSocket.
func (b *MCPBridge) startHTTPServer() {
	b.mu.Lock()
	b.running = true
	b.mu.Unlock()

	// Start the Claude bridge (CLIENT → MCP core on :9876)
	b.claudeBridge.Start()

	mux := http.NewServeMux()

	// WebSocket endpoint for Angular frontend
	mux.HandleFunc("/ws", b.hub.HandleWebSocket)

	// Claude bridge WebSocket relay (GUI clients ↔ MCP core)
	mux.HandleFunc("/claude", b.claudeBridge.HandleWebSocket)

	// MCP server endpoints
	mux.HandleFunc("/mcp", b.handleMCPInfo)
	mux.HandleFunc("/mcp/tools", b.handleMCPTools)
	mux.HandleFunc("/mcp/call", b.handleMCPCall)

	// Health check
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]any{
			"status":       "ok",
			"mcp":          true,
			"claudeBridge": b.claudeBridge.Connected(),
		})
	})

	addr := fmt.Sprintf("127.0.0.1:%d", b.port)
	log.Printf("MCP HTTP server listening on %s", addr)

	if err := http.ListenAndServe(addr, mux); err != nil {
		log.Printf("MCP HTTP server error: %v", err)
	}
}

// handleMCPInfo returns MCP server information.
func (b *MCPBridge) handleMCPInfo(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Access-Control-Allow-Origin", "*")

	json.NewEncoder(w).Encode(map[string]any{
		"name":    "core-ide",
		"version": "0.1.0",
		"capabilities": map[string]any{
|
||||||
|
"webview": true,
|
||||||
|
"windowControl": true,
|
||||||
|
"clipboard": true,
|
||||||
|
"notifications": true,
|
||||||
|
"websocket": fmt.Sprintf("ws://localhost:%d/ws", b.port),
|
||||||
|
"claude": fmt.Sprintf("ws://localhost:%d/claude", b.port),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleMCPTools returns the list of available tools.
|
||||||
|
func (b *MCPBridge) handleMCPTools(w http.ResponseWriter, _ *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
|
||||||
|
tools := []map[string]string{
|
||||||
|
// Window management
|
||||||
|
{"name": "window_list", "description": "List all windows with positions and sizes"},
|
||||||
|
{"name": "window_get", "description": "Get info about a specific window"},
|
||||||
|
{"name": "window_position", "description": "Move a window to specific coordinates"},
|
||||||
|
{"name": "window_size", "description": "Resize a window"},
|
||||||
|
{"name": "window_bounds", "description": "Set position and size in one call"},
|
||||||
|
{"name": "window_maximize", "description": "Maximize a window"},
|
||||||
|
{"name": "window_minimize", "description": "Minimize a window"},
|
||||||
|
{"name": "window_restore", "description": "Restore from maximized/minimized"},
|
||||||
|
{"name": "window_focus", "description": "Bring window to front"},
|
||||||
|
{"name": "window_visibility", "description": "Show or hide a window"},
|
||||||
|
{"name": "window_title", "description": "Change window title"},
|
||||||
|
{"name": "window_title_get", "description": "Get current window title"},
|
||||||
|
{"name": "window_fullscreen", "description": "Toggle fullscreen mode"},
|
||||||
|
{"name": "window_always_on_top", "description": "Pin window above others"},
|
||||||
|
{"name": "window_create", "description": "Create a new window at specific position"},
|
||||||
|
{"name": "window_close", "description": "Close a window by name"},
|
||||||
|
{"name": "window_background_colour", "description": "Set window background colour with alpha"},
|
||||||
|
// Webview interaction
|
||||||
|
{"name": "webview_eval", "description": "Execute JavaScript in a window's webview"},
|
||||||
|
{"name": "webview_navigate", "description": "Navigate window to a URL"},
|
||||||
|
{"name": "webview_list", "description": "List windows with webview info"},
|
||||||
|
// System integration
|
||||||
|
{"name": "clipboard_read", "description": "Read text from system clipboard"},
|
||||||
|
{"name": "clipboard_write", "description": "Write text to system clipboard"},
|
||||||
|
// System tray
|
||||||
|
{"name": "tray_set_tooltip", "description": "Set system tray tooltip"},
|
||||||
|
{"name": "tray_set_label", "description": "Set system tray label"},
|
||||||
|
}
|
||||||
|
json.NewEncoder(w).Encode(map[string]any{"tools": tools})
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleMCPCall handles tool calls via HTTP POST.
|
||||||
|
func (b *MCPBridge) handleMCPCall(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS")
|
||||||
|
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
|
||||||
|
|
||||||
|
if r.Method == "OPTIONS" {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if r.Method != "POST" {
|
||||||
|
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req struct {
|
||||||
|
Tool string `json:"tool"`
|
||||||
|
Params map[string]any `json:"params"`
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var result map[string]any
|
||||||
|
if len(req.Tool) > 8 && req.Tool[:8] == "webview_" {
|
||||||
|
result = b.executeWebviewTool(req.Tool, req.Params)
|
||||||
|
} else {
|
||||||
|
result = b.executeWindowTool(req.Tool, req.Params)
|
||||||
|
}
|
||||||
|
json.NewEncoder(w).Encode(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
// executeWindowTool handles window, clipboard, tray, and notification tools.
|
||||||
|
func (b *MCPBridge) executeWindowTool(tool string, params map[string]any) map[string]any {
|
||||||
|
if b.app == nil {
|
||||||
|
return map[string]any{"error": "app not available"}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tool {
|
||||||
|
case "window_list":
|
||||||
|
return b.windowList()
|
||||||
|
|
||||||
|
case "window_get":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
return b.windowGet(name)
|
||||||
|
|
||||||
|
case "window_position":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
x := intParam(params, "x")
|
||||||
|
y := intParam(params, "y")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.SetPosition(x, y)
|
||||||
|
return map[string]any{"success": true, "name": name, "x": x, "y": y}
|
||||||
|
|
||||||
|
case "window_size":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
width := intParam(params, "width")
|
||||||
|
height := intParam(params, "height")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.SetSize(width, height)
|
||||||
|
return map[string]any{"success": true, "name": name, "width": width, "height": height}
|
||||||
|
|
||||||
|
case "window_bounds":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
x := intParam(params, "x")
|
||||||
|
y := intParam(params, "y")
|
||||||
|
width := intParam(params, "width")
|
||||||
|
height := intParam(params, "height")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.SetPosition(x, y)
|
||||||
|
w.SetSize(width, height)
|
||||||
|
return map[string]any{"success": true, "name": name, "x": x, "y": y, "width": width, "height": height}
|
||||||
|
|
||||||
|
case "window_maximize":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.Maximise()
|
||||||
|
return map[string]any{"success": true, "action": "maximize"}
|
||||||
|
|
||||||
|
case "window_minimize":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.Minimise()
|
||||||
|
return map[string]any{"success": true, "action": "minimize"}
|
||||||
|
|
||||||
|
case "window_restore":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.Restore()
|
||||||
|
return map[string]any{"success": true, "action": "restore"}
|
||||||
|
|
||||||
|
case "window_focus":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.Show()
|
||||||
|
w.Focus()
|
||||||
|
return map[string]any{"success": true, "action": "focus"}
|
||||||
|
|
||||||
|
case "window_visibility":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
visible, _ := params["visible"].(bool)
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
if visible {
|
||||||
|
w.Show()
|
||||||
|
} else {
|
||||||
|
w.Hide()
|
||||||
|
}
|
||||||
|
return map[string]any{"success": true, "visible": visible}
|
||||||
|
|
||||||
|
case "window_title":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
title := strParam(params, "title")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.SetTitle(title)
|
||||||
|
return map[string]any{"success": true, "title": title}
|
||||||
|
|
||||||
|
case "window_title_get":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
_, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
// Wails v3 Window interface has SetTitle but no Title getter;
|
||||||
|
// return the window name as a fallback identifier.
|
||||||
|
return map[string]any{"name": name}
|
||||||
|
|
||||||
|
case "window_fullscreen":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
fullscreen, _ := params["fullscreen"].(bool)
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
if fullscreen {
|
||||||
|
w.Fullscreen()
|
||||||
|
} else {
|
||||||
|
w.UnFullscreen()
|
||||||
|
}
|
||||||
|
return map[string]any{"success": true, "fullscreen": fullscreen}
|
||||||
|
|
||||||
|
case "window_always_on_top":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
onTop, _ := params["onTop"].(bool)
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.SetAlwaysOnTop(onTop)
|
||||||
|
return map[string]any{"success": true, "alwaysOnTop": onTop}
|
||||||
|
|
||||||
|
case "window_create":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
title := strParam(params, "title")
|
||||||
|
url := strParam(params, "url")
|
||||||
|
x := intParam(params, "x")
|
||||||
|
y := intParam(params, "y")
|
||||||
|
width := intParam(params, "width")
|
||||||
|
height := intParam(params, "height")
|
||||||
|
if width == 0 {
|
||||||
|
width = 800
|
||||||
|
}
|
||||||
|
if height == 0 {
|
||||||
|
height = 600
|
||||||
|
}
|
||||||
|
opts := application.WebviewWindowOptions{
|
||||||
|
Name: name,
|
||||||
|
Title: title,
|
||||||
|
URL: url,
|
||||||
|
Width: width,
|
||||||
|
Height: height,
|
||||||
|
Hidden: false,
|
||||||
|
BackgroundColour: application.NewRGB(22, 27, 34),
|
||||||
|
}
|
||||||
|
w := b.app.Window.NewWithOptions(opts)
|
||||||
|
if x != 0 || y != 0 {
|
||||||
|
w.SetPosition(x, y)
|
||||||
|
}
|
||||||
|
return map[string]any{"success": true, "name": name}
|
||||||
|
|
||||||
|
case "window_close":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.Close()
|
||||||
|
return map[string]any{"success": true, "action": "close"}
|
||||||
|
|
||||||
|
case "window_background_colour":
|
||||||
|
name := strParam(params, "name")
|
||||||
|
r := uint8(intParam(params, "r"))
|
||||||
|
g := uint8(intParam(params, "g"))
|
||||||
|
bv := uint8(intParam(params, "b"))
|
||||||
|
a := uint8(intParam(params, "a"))
|
||||||
|
if a == 0 {
|
||||||
|
a = 255
|
||||||
|
}
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
w.SetBackgroundColour(application.NewRGBA(r, g, bv, a))
|
||||||
|
return map[string]any{"success": true}
|
||||||
|
|
||||||
|
case "clipboard_read":
|
||||||
|
text, ok := b.app.Clipboard.Text()
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "failed to read clipboard"}
|
||||||
|
}
|
||||||
|
return map[string]any{"text": text}
|
||||||
|
|
||||||
|
case "clipboard_write":
|
||||||
|
text, _ := params["text"].(string)
|
||||||
|
ok := b.app.Clipboard.SetText(text)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "failed to write clipboard"}
|
||||||
|
}
|
||||||
|
return map[string]any{"success": true}
|
||||||
|
|
||||||
|
case "tray_set_tooltip":
|
||||||
|
// System tray is managed at startup; this is informational
|
||||||
|
return map[string]any{"info": "tray tooltip can be set via system tray menu"}
|
||||||
|
|
||||||
|
case "tray_set_label":
|
||||||
|
return map[string]any{"info": "tray label can be set via system tray menu"}
|
||||||
|
|
||||||
|
default:
|
||||||
|
return map[string]any{"error": "unknown tool", "tool": tool}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// executeWebviewTool handles webview/JS tools.
|
||||||
|
func (b *MCPBridge) executeWebviewTool(tool string, params map[string]any) map[string]any {
|
||||||
|
if b.app == nil {
|
||||||
|
return map[string]any{"error": "app not available"}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tool {
|
||||||
|
case "webview_eval":
|
||||||
|
windowName := strParam(params, "window")
|
||||||
|
code := strParam(params, "code")
|
||||||
|
w, ok := b.app.Window.Get(windowName)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "window": windowName}
|
||||||
|
}
|
||||||
|
w.ExecJS(code)
|
||||||
|
return map[string]any{"success": true, "window": windowName}
|
||||||
|
|
||||||
|
case "webview_navigate":
|
||||||
|
windowName := strParam(params, "window")
|
||||||
|
url := strParam(params, "url")
|
||||||
|
w, ok := b.app.Window.Get(windowName)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "window": windowName}
|
||||||
|
}
|
||||||
|
w.SetURL(url)
|
||||||
|
return map[string]any{"success": true, "url": url}
|
||||||
|
|
||||||
|
case "webview_list":
|
||||||
|
return b.windowList()
|
||||||
|
|
||||||
|
default:
|
||||||
|
return map[string]any{"error": "unknown webview tool", "tool": tool}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// windowList returns info for all known windows.
|
||||||
|
func (b *MCPBridge) windowList() map[string]any {
|
||||||
|
knownNames := []string{"tray-panel", "main", "settings"}
|
||||||
|
var windows []map[string]any
|
||||||
|
for _, name := range knownNames {
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
x, y := w.Position()
|
||||||
|
width, height := w.Size()
|
||||||
|
windows = append(windows, map[string]any{
|
||||||
|
"name": name,
|
||||||
|
"title": w.Name(),
|
||||||
|
"x": x,
|
||||||
|
"y": y,
|
||||||
|
"width": width,
|
||||||
|
"height": height,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return map[string]any{"windows": windows}
|
||||||
|
}
|
||||||
|
|
||||||
|
// windowGet returns info for a specific window.
|
||||||
|
func (b *MCPBridge) windowGet(name string) map[string]any {
|
||||||
|
w, ok := b.app.Window.Get(name)
|
||||||
|
if !ok {
|
||||||
|
return map[string]any{"error": "window not found", "name": name}
|
||||||
|
}
|
||||||
|
x, y := w.Position()
|
||||||
|
width, height := w.Size()
|
||||||
|
return map[string]any{
|
||||||
|
"window": map[string]any{
|
||||||
|
"name": name,
|
||||||
|
"title": w.Name(),
|
||||||
|
"x": x,
|
||||||
|
"y": y,
|
||||||
|
"width": width,
|
||||||
|
"height": height,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parameter helpers
|
||||||
|
func strParam(params map[string]any, key string) string {
|
||||||
|
if v, ok := params[key].(string); ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func intParam(params map[string]any, key string) int {
|
||||||
|
if v, ok := params[key].(float64); ok {
|
||||||
|
return int(v)
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
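A sketch (not from the commit) of how an agent could drive this bridge over HTTP, assuming it is running locally on its default port 9877 as wired up in main.go. The tool names and params mirror handleMCPTools above; note that JSON numbers decode into float64, which is why intParam asserts float64 before converting to int.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Move the "main" window via the window_position tool.
	payload, _ := json.Marshal(map[string]any{
		"tool":   "window_position",
		"params": map[string]any{"name": "main", "x": 100, "y": 80},
	})
	resp, err := http.Post("http://127.0.0.1:9877/mcp/call", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var result map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println(result) // e.g. map[name:main success:true x:100 y:80]
}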
107
docker/Dockerfile.app
Normal file

@@ -0,0 +1,107 @@
# Host UK — Laravel Application Container
# PHP 8.3-FPM with all extensions required by the federated monorepo
#
# Build: docker build -f docker/Dockerfile.app -t host-uk/app:latest ..
# (run from host-uk/ workspace root, not core/)

FROM php:8.3-fpm-alpine AS base

# System dependencies
RUN apk add --no-cache \
    git \
    curl \
    libpng-dev \
    libjpeg-turbo-dev \
    freetype-dev \
    libwebp-dev \
    libzip-dev \
    icu-dev \
    oniguruma-dev \
    libxml2-dev \
    linux-headers \
    $PHPIZE_DEPS

# PHP extensions
RUN docker-php-ext-configure gd \
    --with-freetype \
    --with-jpeg \
    --with-webp \
    && docker-php-ext-install -j$(nproc) \
    bcmath \
    exif \
    gd \
    intl \
    mbstring \
    opcache \
    pcntl \
    pdo_mysql \
    soap \
    xml \
    zip

# Redis extension
RUN pecl install redis && docker-php-ext-enable redis

# Composer
COPY --from=composer:2 /usr/bin/composer /usr/bin/composer

# PHP configuration
RUN mv "$PHP_INI_DIR/php.ini-production" "$PHP_INI_DIR/php.ini"
COPY docker/php/opcache.ini $PHP_INI_DIR/conf.d/opcache.ini
COPY docker/php/php-fpm.conf /usr/local/etc/php-fpm.d/zz-host-uk.conf

# --- Build stage ---
FROM base AS build

WORKDIR /app

# Install dependencies first (cache layer)
COPY composer.json composer.lock ./
RUN composer install \
    --no-dev \
    --no-scripts \
    --no-autoloader \
    --prefer-dist \
    --no-interaction

# Copy application
COPY . .

# Generate autoloader and run post-install
RUN composer dump-autoload --optimize --no-dev \
    && php artisan package:discover --ansi

# Build frontend assets
RUN if [ -f package.json ]; then \
    apk add --no-cache nodejs npm && \
    npm ci --production=false && \
    npm run build && \
    rm -rf node_modules; \
    fi

# --- Production stage ---
FROM base AS production

WORKDIR /app

# Copy built application
COPY --from=build /app /app

# Create storage directories
RUN mkdir -p \
    storage/framework/cache/data \
    storage/framework/sessions \
    storage/framework/views \
    storage/logs \
    bootstrap/cache

# Permissions
RUN chown -R www-data:www-data storage bootstrap/cache

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
    CMD php-fpm-healthcheck || exit 1

USER www-data

EXPOSE 9000
19
docker/Dockerfile.web
Normal file

@@ -0,0 +1,19 @@
# Host UK — Nginx Web Server
# Serves static files and proxies PHP to FPM container
#
# Build: docker build -f docker/Dockerfile.web -t host-uk/web:latest .

FROM nginx:1.27-alpine

# Copy nginx configuration
COPY docker/nginx/default.conf /etc/nginx/conf.d/default.conf
COPY docker/nginx/security-headers.conf /etc/nginx/snippets/security-headers.conf

# Copy static assets from app build
# (In production, these are volume-mounted from the app container)
# COPY --from=host-uk/app:latest /app/public /app/public

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget -qO- http://localhost/health || exit 1

EXPOSE 80
200
docker/docker-compose.prod.yml
Normal file

@@ -0,0 +1,200 @@
# Host UK Production Docker Compose
# Deployed to de.host.uk.com and de2.host.uk.com via Coolify
#
# Container topology per app server:
#   app       - PHP 8.3-FPM (all Laravel modules)
#   web       - Nginx (static files + FastCGI proxy)
#   horizon   - Laravel Horizon (queue worker)
#   scheduler - Laravel scheduler
#   mcp       - Go MCP server
#   redis     - Redis 7 (local cache + sessions)
#   galera    - MariaDB 11 (Galera cluster node)

services:
  app:
    image: ${REGISTRY:-gitea.snider.dev}/host-uk/app:${TAG:-latest}
    restart: unless-stopped
    volumes:
      - app-storage:/app/storage
    environment:
      - APP_ENV=production
      - APP_DEBUG=false
      - APP_URL=${APP_URL:-https://host.uk.com}
      - DB_HOST=galera
      - DB_PORT=3306
      - DB_DATABASE=${DB_DATABASE:-hostuk}
      - DB_USERNAME=${DB_USERNAME:-hostuk}
      - DB_PASSWORD=${DB_PASSWORD}
      - REDIS_HOST=redis
      - REDIS_PORT=6379
      - CACHE_DRIVER=redis
      - SESSION_DRIVER=redis
      - QUEUE_CONNECTION=redis
    depends_on:
      redis:
        condition: service_healthy
      galera:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "php-fpm-healthcheck || exit 1"]
      interval: 30s
      timeout: 3s
      start_period: 10s
      retries: 3
    networks:
      - app-net

  web:
    image: ${REGISTRY:-gitea.snider.dev}/host-uk/web:${TAG:-latest}
    restart: unless-stopped
    ports:
      - "${WEB_PORT:-80}:80"
    volumes:
      - app-storage:/app/storage:ro
    depends_on:
      app:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost/health"]
      interval: 30s
      timeout: 3s
      start_period: 5s
      retries: 3
    networks:
      - app-net

  horizon:
    image: ${REGISTRY:-gitea.snider.dev}/host-uk/app:${TAG:-latest}
    restart: unless-stopped
    command: php artisan horizon
    volumes:
      - app-storage:/app/storage
    environment:
      - APP_ENV=production
      - DB_HOST=galera
      - DB_PORT=3306
      - DB_DATABASE=${DB_DATABASE:-hostuk}
      - DB_USERNAME=${DB_USERNAME:-hostuk}
      - DB_PASSWORD=${DB_PASSWORD}
      - REDIS_HOST=redis
      - REDIS_PORT=6379
    depends_on:
      app:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "php artisan horizon:status | grep -q running"]
      interval: 60s
      timeout: 5s
      start_period: 30s
      retries: 3
    networks:
      - app-net

  scheduler:
    image: ${REGISTRY:-gitea.snider.dev}/host-uk/app:${TAG:-latest}
    restart: unless-stopped
    command: php artisan schedule:work
    volumes:
      - app-storage:/app/storage
    environment:
      - APP_ENV=production
      - DB_HOST=galera
      - DB_PORT=3306
      - DB_DATABASE=${DB_DATABASE:-hostuk}
      - DB_USERNAME=${DB_USERNAME:-hostuk}
      - DB_PASSWORD=${DB_PASSWORD}
      - REDIS_HOST=redis
      - REDIS_PORT=6379
    depends_on:
      app:
        condition: service_healthy
    networks:
      - app-net

  mcp:
    image: ${REGISTRY:-gitea.snider.dev}/host-uk/core:${TAG:-latest}
    restart: unless-stopped
    command: core mcp serve
    ports:
      - "${MCP_PORT:-9001}:9000"
    environment:
      - MCP_ADDR=:9000
    healthcheck:
      test: ["CMD-SHELL", "nc -z localhost 9000 || exit 1"]
      interval: 30s
      timeout: 3s
      retries: 3
    networks:
      - app-net

  redis:
    image: redis:7-alpine
    restart: unless-stopped
    command: >
      redis-server
      --maxmemory 512mb
      --maxmemory-policy allkeys-lru
      --appendonly yes
      --appendfsync everysec
    volumes:
      - redis-data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
    networks:
      - app-net

  galera:
    image: mariadb:11
    restart: unless-stopped
    environment:
      - MARIADB_ROOT_PASSWORD=${DB_ROOT_PASSWORD}
      - MARIADB_DATABASE=${DB_DATABASE:-hostuk}
      - MARIADB_USER=${DB_USERNAME:-hostuk}
      - MARIADB_PASSWORD=${DB_PASSWORD}
      - WSREP_CLUSTER_NAME=hostuk-galera
      - WSREP_CLUSTER_ADDRESS=${GALERA_CLUSTER_ADDRESS:-gcomm://}
      - WSREP_NODE_ADDRESS=${GALERA_NODE_ADDRESS}
      - WSREP_NODE_NAME=${GALERA_NODE_NAME}
      - WSREP_SST_METHOD=mariabackup
    command: >
      --wsrep-on=ON
      --wsrep-provider=/usr/lib/galera/libgalera_smm.so
      --wsrep-cluster-name=hostuk-galera
      --wsrep-cluster-address=${GALERA_CLUSTER_ADDRESS:-gcomm://}
      --wsrep-node-address=${GALERA_NODE_ADDRESS}
      --wsrep-node-name=${GALERA_NODE_NAME}
      --wsrep-sst-method=mariabackup
      --binlog-format=ROW
      --default-storage-engine=InnoDB
      --innodb-autoinc-lock-mode=2
      --innodb-buffer-pool-size=1G
      --innodb-log-file-size=256M
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_unicode_ci
    volumes:
      - galera-data:/var/lib/mysql
    ports:
      - "${GALERA_PORT:-3306}:3306"
      - "4567:4567"
      - "4568:4568"
      - "4444:4444"
    healthcheck:
      test: ["CMD-SHELL", "mariadb -u root -p${DB_ROOT_PASSWORD} -e 'SHOW STATUS LIKE \"wsrep_ready\"' | grep -q ON"]
      interval: 30s
      timeout: 10s
      start_period: 60s
      retries: 5
    networks:
      - app-net

volumes:
  app-storage:
  redis-data:
  galera-data:

networks:
  app-net:
    driver: bridge
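The mcp service's healthcheck above only probes the TCP port. A rough Go equivalent of that `nc -z localhost 9000` check (a sketch, not part of the commit) would be:

package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	// Succeeds if something is listening on the MCP port, same idea as `nc -z localhost 9000`.
	conn, err := net.DialTimeout("tcp", "localhost:9000", 3*time.Second)
	if err != nil {
		fmt.Println("unhealthy:", err)
		os.Exit(1)
	}
	conn.Close()
	fmt.Println("healthy")
}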
59
docker/nginx/default.conf
Normal file

@@ -0,0 +1,59 @@
# Host UK Nginx Configuration
# Proxies PHP to the app (FPM) container, serves static files directly

server {
    listen 80;
    server_name _;

    root /app/public;
    index index.php;

    charset utf-8;

    # Security headers
    include /etc/nginx/snippets/security-headers.conf;

    # Health check endpoint (no logging)
    location = /health {
        access_log off;
        try_files $uri /index.php?$query_string;
    }

    # Static file caching
    location ~* \.(css|js|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot|webp|avif)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
        access_log off;
        try_files $uri =404;
    }

    # Laravel application
    location / {
        try_files $uri $uri/ /index.php?$query_string;
    }

    # PHP-FPM upstream
    location ~ \.php$ {
        fastcgi_pass app:9000;
        fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
        include fastcgi_params;

        fastcgi_hide_header X-Powered-By;
        fastcgi_buffer_size 32k;
        fastcgi_buffers 16 16k;
        fastcgi_read_timeout 300;

        # Pass real client IP from LB proxy protocol
        fastcgi_param REMOTE_ADDR $http_x_forwarded_for;
    }

    # Block dotfiles (except .well-known)
    location ~ /\.(?!well-known) {
        deny all;
    }

    # Block access to sensitive files
    location ~* \.(env|log|yaml|yml|toml|lock|bak|sql)$ {
        deny all;
    }
}
6
docker/nginx/security-headers.conf
Normal file

@@ -0,0 +1,6 @@
# Security headers for Host UK
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
add_header Permissions-Policy "camera=(), microphone=(), geolocation=(), payment=()" always;
10
docker/php/opcache.ini
Normal file

@@ -0,0 +1,10 @@
; OPcache configuration for production
opcache.enable=1
opcache.memory_consumption=256
opcache.interned_strings_buffer=16
opcache.max_accelerated_files=20000
opcache.validate_timestamps=0
opcache.save_comments=1
opcache.fast_shutdown=1
opcache.jit_buffer_size=128M
opcache.jit=1255
22
docker/php/php-fpm.conf
Normal file

@@ -0,0 +1,22 @@
; Host UK PHP-FPM pool configuration
[www]
pm = dynamic
pm.max_children = 50
pm.start_servers = 10
pm.min_spare_servers = 5
pm.max_spare_servers = 20
pm.max_requests = 1000
pm.process_idle_timeout = 10s

; Status page for health checks
pm.status_path = /fpm-status
ping.path = /fpm-ping
ping.response = pong

; Logging
access.log = /proc/self/fd/2
slowlog = /proc/self/fd/2
request_slowlog_timeout = 5s

; Security
security.limit_extensions = .php
0
diff_jules_dev.txt → docs/static/index.html
vendored
6
go.mod

@@ -36,6 +36,7 @@ require (
 	github.com/42wim/httpsig v1.2.3 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/ProtonMail/go-crypto v1.3.0 // indirect
+	github.com/Snider/Enchantrix v0.0.2 // indirect
 	github.com/TwiN/go-color v1.4.1 // indirect
 	github.com/adrg/xdg v0.5.3 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
@@ -74,6 +75,8 @@ require (
 	github.com/godbus/dbus/v5 v5.2.2 // indirect
 	github.com/gofrs/flock v0.12.1 // indirect
 	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
+	github.com/google/go-github/v39 v39.2.0 // indirect
+	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/jsonschema-go v0.4.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
@@ -90,6 +93,7 @@ require (
 	github.com/mailru/easyjson v0.9.1 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
 	github.com/ncruces/go-strftime v1.0.0 // indirect
 	github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
@@ -103,6 +107,7 @@ require (
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/sagikazarmark/locafero v0.11.0 // indirect
 	github.com/samber/lo v1.52.0 // indirect
+	github.com/schollz/progressbar/v3 v3.18.0 // indirect
 	github.com/sergi/go-diff v1.4.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/skeema/knownhosts v1.3.2 // indirect
@@ -117,6 +122,7 @@ require (
 	github.com/tidwall/sjson v1.2.5 // indirect
 	github.com/ugorji/go/codec v1.3.0 // indirect
 	github.com/ulikunitz/xz v0.5.15 // indirect
+	github.com/unpoller/unifi/v5 v5.17.0 // indirect
 	github.com/wI2L/jsondiff v0.7.0 // indirect
 	github.com/wailsapp/go-webview2 v1.0.23 // indirect
 	github.com/wailsapp/wails/v3 v3.0.0-alpha.64 // indirect
15
go.sum

@@ -123,10 +123,18 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD
 github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
+github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/jsonschema-go v0.4.2 h1:tmrUohrwoLZZS/P3x7ex0WAVknEkBZM46iALbcqoRA8=
 github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -182,6 +190,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU=
 github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
 github.com/modelcontextprotocol/go-sdk v1.2.0 h1:Y23co09300CEk8iZ/tMxIX1dVmKZkzoSBZOpJwUnc/s=
 github.com/modelcontextprotocol/go-sdk v1.2.0/go.mod h1:6fM3LCm3yV7pAs8isnKLn07oKtB0MP9LHd3DfAcKw10=
 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
@@ -225,6 +235,8 @@ github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDc
 github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
 github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
 github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
+github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
+github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
 github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
 github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -317,6 +329,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
 golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
 golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
@@ -347,8 +360,10 @@ golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
 golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
 gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba h1:UKgtfRM7Yh93Sya0Fo8ZzhDP4qBckrrxEr2oF5UIVb8=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
 google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
268
infra.yaml
Normal file

@@ -0,0 +1,268 @@
# Infrastructure Configuration — Host UK Production
# This file is the source of truth for production topology.
# Used by: core prod status, core prod setup, core deploy ansible

# --- Hosts ---
hosts:
  noc:
    fqdn: noc.host.uk.com
    ip: 77.42.42.205
    private_ip: 10.0.0.4
    type: hcloud
    role: bastion
    ssh:
      user: root
      key: ~/.ssh/hostuk
      port: 22
    services:
      - coolify

  de:
    fqdn: de.host.uk.com
    ip: 116.202.82.115
    type: hrobot
    role: app
    ssh:
      user: root
      key: ~/.ssh/hostuk
      port: 22
    services:
      - traefik
      - app
      - web
      - horizon
      - scheduler
      - mcp
      - redis
      - galera

  de2:
    fqdn: de2.host.uk.com
    ip: 88.99.195.41
    type: hrobot
    role: app
    ssh:
      user: root
      key: ~/.ssh/hostuk
      port: 22
    services:
      - traefik
      - app
      - web
      - horizon
      - scheduler
      - mcp
      - redis
      - galera

  build:
    fqdn: build.de.host.uk.com
    ip: 46.224.93.62
    private_ip: 10.0.0.5
    type: hcloud
    role: builder
    ssh:
      user: root
      key: ~/.ssh/hostuk
      port: 22
    services:
      - forgejo-runner

# --- Load Balancer ---
load_balancer:
  name: hermes
  fqdn: hermes.lb.host.uk.com
  provider: hetzner
  type: lb11
  location: fsn1
  algorithm: round_robin
  backends:
    - host: de
      port: 80
    - host: de2
      port: 80
  health_check:
    protocol: http
    path: /health
    interval: 15
  listeners:
    - frontend: 443
      backend: 80
      protocol: https
      proxy_protocol: true
  ssl:
    certificate: "*.host.uk.com"
    san:
      - host.uk.com

# --- Private Network ---
network:
  cidr: 10.0.0.0/16
  name: host-uk-internal

# --- DNS ---
dns:
  provider: cloudns
  nameservers:
    - ns1.lthn.io
    - ns2.lthn.io
    - ns3.lthn.io
    - ns4.lthn.io
  zones:
    host.uk.com:
      records:
        - name: "@"
          type: A
          value: "{{.lb_ip}}"
          ttl: 300
        - name: "*"
          type: CNAME
          value: hermes.lb.host.uk.com
          ttl: 300
        - name: hermes.lb
          type: A
          value: "{{.lb_ip}}"
          ttl: 300
        - name: noc
          type: A
          value: 77.42.42.205
          ttl: 300
        - name: de
          type: A
          value: 116.202.82.115
          ttl: 300
        - name: de2
          type: A
          value: 88.99.195.41
          ttl: 300
        - name: build.de
          type: A
          value: 46.224.93.62
          ttl: 300

# --- SSL ---
ssl:
  wildcard:
    domains:
      - "*.host.uk.com"
      - host.uk.com
    method: dns-01
    dns_provider: cloudns
  termination: load_balancer

# --- Database ---
database:
  engine: mariadb
  version: "11"
  cluster: galera
  nodes:
    - host: de
      port: 3306
    - host: de2
      port: 3306
  sst_method: mariabackup
  backup:
    schedule: "0 3 * * *"
    destination: s3
    bucket: hostuk
    prefix: backup/galera/

# --- Cache ---
cache:
  engine: redis
  version: "7"
  sentinel: true
  nodes:
    - host: de
      port: 6379
    - host: de2
      port: 6379

# --- Containers (per app server) ---
containers:
  app:
    image: host-uk/app:latest
    port: 9000
    runtime: php-fpm
    replicas: 1

  web:
    image: host-uk/web:latest
    port: 80
    runtime: nginx
    depends_on: [app]

  horizon:
    image: host-uk/app:latest
    command: php artisan horizon
    replicas: 1

  scheduler:
    image: host-uk/app:latest
    command: php artisan schedule:work
    replicas: 1

  mcp:
    image: host-uk/core:latest
    port: 9000
    command: core mcp serve
    replicas: 1

# --- Object Storage ---
s3:
  endpoint: fsn1.your-objectstorage.com
  buckets:
    hostuk:
      purpose: infra
      paths:
        - backup/galera/
        - backup/coolify/
        - backup/certs/
    host-uk:
      purpose: media
      paths:
        - uploads/
        - assets/

# --- CDN ---
cdn:
  provider: bunnycdn
  origin: hermes.lb.host.uk.com
  zones:
    - "*.host.uk.com"

# --- CI/CD ---
cicd:
  provider: forgejo
  url: https://gitea.snider.dev
  runner: build.de
  registry: gitea.snider.dev
  deploy_hook: coolify

# --- Monitoring ---
monitoring:
  health_endpoints:
    - url: https://host.uk.com/health
      interval: 60
    - url: https://bio.host.uk.com/health
      interval: 60
  alerts:
    galera_cluster_size: 2
    redis_sentinel_quorum: 2

# --- Backups ---
backups:
  daily:
    - name: galera
      type: mysqldump
      destination: s3://hostuk/backup/galera/
    - name: coolify
      type: tar
      destination: s3://hostuk/backup/coolify/
    - name: certs
      type: tar
      destination: s3://hostuk/backup/certs/
  weekly:
    - name: snapshot
      type: hcloud-snapshot
      hosts: [noc, build]
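A minimal sketch of how a consumer such as `core prod status` might read the hosts section of infra.yaml. This is not shown in the diff; gopkg.in/yaml.v3 and the struct names below are assumptions for illustration only.

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// Host mirrors one entry under `hosts:` in infra.yaml (illustrative subset of fields).
type Host struct {
	FQDN     string   `yaml:"fqdn"`
	IP       string   `yaml:"ip"`
	Role     string   `yaml:"role"`
	Services []string `yaml:"services"`
}

// Infra is a hypothetical top-level wrapper around the `hosts:` map.
type Infra struct {
	Hosts map[string]Host `yaml:"hosts"`
}

func main() {
	raw, err := os.ReadFile("infra.yaml")
	if err != nil {
		panic(err)
	}
	var infra Infra
	if err := yaml.Unmarshal(raw, &infra); err != nil {
		panic(err)
	}
	for name, h := range infra.Hosts {
		fmt.Printf("%s (%s) role=%s services=%d\n", name, h.FQDN, h.Role, len(h.Services))
	}
}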
@ -5,7 +5,6 @@ import (
|
||||||
|
|
||||||
"github.com/host-uk/core/pkg/cli"
|
"github.com/host-uk/core/pkg/cli"
|
||||||
"github.com/host-uk/core/pkg/i18n"
|
"github.com/host-uk/core/pkg/i18n"
|
||||||
"github.com/host-uk/core/pkg/io"
|
|
||||||
"github.com/host-uk/core/pkg/release"
|
"github.com/host-uk/core/pkg/release"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -18,14 +17,14 @@ func runCIReleaseInit() error {
|
||||||
cli.Print("%s %s\n\n", releaseDimStyle.Render(i18n.Label("init")), i18n.T("cmd.ci.init.initializing"))
|
cli.Print("%s %s\n\n", releaseDimStyle.Render(i18n.Label("init")), i18n.T("cmd.ci.init.initializing"))
|
||||||
|
|
||||||
// Check if already initialized
|
// Check if already initialized
|
||||||
if release.ConfigExists(io.Local, cwd) {
|
if release.ConfigExists(cwd) {
|
||||||
cli.Text(i18n.T("cmd.ci.init.already_initialized"))
|
cli.Text(i18n.T("cmd.ci.init.already_initialized"))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create release config
|
// Create release config
|
||||||
cfg := release.DefaultConfig()
|
cfg := release.DefaultConfig()
|
||||||
if err := release.WriteConfig(io.Local, cfg, cwd); err != nil {
|
if err := release.WriteConfig(cfg, cwd); err != nil {
|
||||||
return cli.Err("%s: %w", i18n.T("i18n.fail.create", "config"), err)
|
return cli.Err("%s: %w", i18n.T("i18n.fail.create", "config"), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,6 @@ import (
|
||||||
|
|
||||||
"github.com/host-uk/core/pkg/cli"
|
"github.com/host-uk/core/pkg/cli"
|
||||||
"github.com/host-uk/core/pkg/i18n"
|
"github.com/host-uk/core/pkg/i18n"
|
||||||
"github.com/host-uk/core/pkg/io"
|
|
||||||
"github.com/host-uk/core/pkg/release"
|
"github.com/host-uk/core/pkg/release"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -23,7 +22,7 @@ func runCIPublish(dryRun bool, version string, draft, prerelease bool) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load configuration
|
// Load configuration
|
||||||
cfg, err := release.LoadConfig(io.Local, projectDir)
|
cfg, err := release.LoadConfig(projectDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cli.WrapVerb(err, "load", "config")
|
return cli.WrapVerb(err, "load", "config")
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -8,7 +8,6 @@ import (
 	"path/filepath"

 	"github.com/host-uk/core/pkg/cli"
-	"github.com/host-uk/core/pkg/io"
 	"github.com/host-uk/core/pkg/log"
 	"github.com/host-uk/core/pkg/mcp"
 )

@@ -118,7 +117,6 @@ func runDaemon(cfg Config) error {

 	// Create daemon with health checks
 	daemon := cli.NewDaemon(cli.DaemonOptions{
-		Medium:          io.Local,
 		PIDFile:         cfg.PIDFile,
 		HealthAddr:      cfg.HealthAddr,
 		ShutdownTimeout: 30,
@@ -9,6 +9,7 @@ package dev

 import (
 	"context"
+	"os"
 	"os/exec"
 	"path/filepath"
 	"strings"

@@ -61,14 +62,25 @@ func runFileSync(source string) error {
 		return log.E("dev.sync", "path traversal not allowed", nil)
 	}

-	// Convert to absolute path for io.Local
-	absSource, err := filepath.Abs(source)
-	if err != nil {
-		return log.E("dev.sync", "failed to resolve source path", err)
-	}
-
-	// Validate source exists using io.Local.Stat
-	sourceInfo, err := coreio.Local.Stat(absSource)
+	// Validate the source exists. The source is a CLI argument resolved against
+	// the host working directory, so os.Stat is used here: coreio does not
+	// expose Stat, and coreio.Local.IsFile cannot distinguish a missing path
+	// from a directory.
+	sourceInfo, err := os.Stat(source)
 	if err != nil {
 		return log.E("dev.sync", i18n.T("cmd.dev.file_sync.error.source_not_found", map[string]interface{}{"Path": source}), err)
 	}
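Where the sync command later needs to branch on file versus directory sources, the os.Stat result above already carries that information. A minimal sketch of that check, using only the standard library:

```go
// classifySource reports whether a CLI-supplied path is a directory,
// matching the os.Stat-based approach taken in the hunk above.
func classifySource(source string) (isDir bool, err error) {
	info, err := os.Stat(source)
	if err != nil {
		return false, err
	}
	return info.IsDir(), nil
}
```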
@@ -2,7 +2,6 @@ package help

 import (
 	"fmt"
-	"strings"

 	"github.com/host-uk/core/pkg/cli"
 	"github.com/host-uk/core/pkg/help"

@@ -29,17 +28,7 @@ func AddHelpCommands(root *cli.Command) {
 			}
 			fmt.Println("Search Results:")
 			for _, res := range results {
-				title := res.Topic.Title
-				if res.Section != nil {
-					title = fmt.Sprintf("%s > %s", res.Topic.Title, res.Section.Title)
-				}
-				// Use bold for title
-				fmt.Printf("  \033[1m%s\033[0m (%s)\n", title, res.Topic.ID)
-				if res.Snippet != "" {
-					// Highlight markdown bold as ANSI bold for CLI output
-					fmt.Printf("    %s\n", replaceMarkdownBold(res.Snippet))
-				}
-				fmt.Println()
+				fmt.Printf("  %s - %s\n", res.Topic.ID, res.Topic.Title)
 			}
 			return
 		}

@@ -67,22 +56,6 @@ func AddHelpCommands(root *cli.Command) {
 	root.AddCommand(helpCmd)
 }

-func replaceMarkdownBold(s string) string {
-	parts := strings.Split(s, "**")
-	var result strings.Builder
-	for i, part := range parts {
-		result.WriteString(part)
-		if i < len(parts)-1 {
-			if i%2 == 0 {
-				result.WriteString("\033[1m")
-			} else {
-				result.WriteString("\033[0m")
-			}
-		}
-	}
-	return result.String()
-}
-
 func renderTopic(t *help.Topic) {
 	// Simple ANSI rendering for now
 	// Use explicit ANSI codes or just print
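For readers tracking what the simplification drops: the removed helper toggled ANSI bold on and off at each `**` marker, so a search snippet such as `see **core help search** for details` rendered with the middle phrase in bold. A standalone sketch of that behaviour, kept only as a reference point for the removed code:

```go
// markdownBoldToANSI alternates bold on/off at each "**" marker, as the
// removed replaceMarkdownBold helper did.
// markdownBoldToANSI("a **b** c") == "a \033[1mb\033[0m c"
func markdownBoldToANSI(s string) string {
	parts := strings.Split(s, "**")
	var b strings.Builder
	for i, part := range parts {
		b.WriteString(part)
		if i < len(parts)-1 {
			if i%2 == 0 {
				b.WriteString("\033[1m")
			} else {
				b.WriteString("\033[0m")
			}
		}
	}
	return b.String()
}
```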
@@ -74,7 +74,7 @@ func runPkgSearch(org, pattern, repoType string, limit int, refresh bool) error {
 		cacheDir = filepath.Join(filepath.Dir(regPath), ".core", "cache")
 	}

-	c, err := cache.New(nil, cacheDir, 0)
+	c, err := cache.New(cacheDir, 0)
 	if err != nil {
 		c = nil
 	}
15	internal/cmd/prod/cmd_commands.go	Normal file
@@ -0,0 +1,15 @@
package prod

import (
	"github.com/host-uk/core/pkg/cli"
	"github.com/spf13/cobra"
)

func init() {
	cli.RegisterCommands(AddProdCommands)
}

// AddProdCommands registers the 'prod' command and all subcommands.
func AddProdCommands(root *cobra.Command) {
	root.AddCommand(Cmd)
}
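The init here relies on the CLI's self-registration pattern, so a build variant only needs a blank import to pull the prod commands in. A minimal sketch of that wiring (the blank-import style matches the variants file changed later in this PR; the exact import line for prod is assumed, not shown in the diff):

```go
// Sketch: a build variant including the prod commands via side-effect import.
package variants

import (
	_ "github.com/host-uk/core/internal/cmd/prod" // registers 'prod' via init()
)
```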
129	internal/cmd/prod/cmd_dns.go	Normal file
@@ -0,0 +1,129 @@
package prod

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/host-uk/core/pkg/cli"
	"github.com/host-uk/core/pkg/infra"
	"github.com/spf13/cobra"
)

var dnsCmd = &cobra.Command{
	Use:   "dns",
	Short: "Manage DNS records via CloudNS",
	Long: `View and manage DNS records for host.uk.com via CloudNS API.

Requires:
  CLOUDNS_AUTH_ID        CloudNS auth ID
  CLOUDNS_AUTH_PASSWORD  CloudNS auth password`,
}

var dnsListCmd = &cobra.Command{
	Use:   "list [zone]",
	Short: "List DNS records",
	Args:  cobra.MaximumNArgs(1),
	RunE:  runDNSList,
}

var dnsSetCmd = &cobra.Command{
	Use:   "set <host> <type> <value>",
	Short: "Create or update a DNS record",
	Long: `Create or update a DNS record. Example:
  core prod dns set hermes.lb A 1.2.3.4
  core prod dns set "*.host.uk.com" CNAME hermes.lb.host.uk.com`,
	Args: cobra.ExactArgs(3),
	RunE: runDNSSet,
}

var (
	dnsZone string
	dnsTTL  int
)

func init() {
	dnsCmd.PersistentFlags().StringVar(&dnsZone, "zone", "host.uk.com", "DNS zone")

	dnsSetCmd.Flags().IntVar(&dnsTTL, "ttl", 300, "Record TTL in seconds")

	dnsCmd.AddCommand(dnsListCmd)
	dnsCmd.AddCommand(dnsSetCmd)
}

func getDNSClient() (*infra.CloudNSClient, error) {
	authID := os.Getenv("CLOUDNS_AUTH_ID")
	authPass := os.Getenv("CLOUDNS_AUTH_PASSWORD")
	if authID == "" || authPass == "" {
		return nil, fmt.Errorf("CLOUDNS_AUTH_ID and CLOUDNS_AUTH_PASSWORD required")
	}
	return infra.NewCloudNSClient(authID, authPass), nil
}

func runDNSList(cmd *cobra.Command, args []string) error {
	dns, err := getDNSClient()
	if err != nil {
		return err
	}

	zone := dnsZone
	if len(args) > 0 {
		zone = args[0]
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	records, err := dns.ListRecords(ctx, zone)
	if err != nil {
		return fmt.Errorf("list records: %w", err)
	}

	cli.Print("%s DNS records for %s\n\n", cli.BoldStyle.Render("▶"), cli.TitleStyle.Render(zone))

	if len(records) == 0 {
		cli.Print("  No records found\n")
		return nil
	}

	for id, r := range records {
		cli.Print("  %s  %-6s %-30s %s  TTL:%s\n",
			cli.DimStyle.Render(id),
			cli.BoldStyle.Render(r.Type),
			r.Host,
			r.Record,
			r.TTL)
	}

	return nil
}

func runDNSSet(cmd *cobra.Command, args []string) error {
	dns, err := getDNSClient()
	if err != nil {
		return err
	}

	host := args[0]
	recordType := args[1]
	value := args[2]

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	changed, err := dns.EnsureRecord(ctx, dnsZone, host, recordType, value, dnsTTL)
	if err != nil {
		return fmt.Errorf("set record: %w", err)
	}

	if changed {
		cli.Print("%s %s %s %s -> %s\n",
			cli.SuccessStyle.Render("✓"),
			recordType, host, dnsZone, value)
	} else {
		cli.Print("%s Record already correct\n", cli.DimStyle.Render("·"))
	}

	return nil
}
113	internal/cmd/prod/cmd_lb.go	Normal file
@@ -0,0 +1,113 @@
package prod

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/host-uk/core/pkg/cli"
	"github.com/host-uk/core/pkg/infra"
	"github.com/spf13/cobra"
)

var lbCmd = &cobra.Command{
	Use:   "lb",
	Short: "Manage Hetzner load balancer",
	Long: `View and manage the Hetzner Cloud managed load balancer.

Requires: HCLOUD_TOKEN`,
}

var lbStatusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show load balancer status and target health",
	RunE:  runLBStatus,
}

var lbCreateCmd = &cobra.Command{
	Use:   "create",
	Short: "Create load balancer from infra.yaml",
	RunE:  runLBCreate,
}

func init() {
	lbCmd.AddCommand(lbStatusCmd)
	lbCmd.AddCommand(lbCreateCmd)
}

func getHCloudClient() (*infra.HCloudClient, error) {
	token := os.Getenv("HCLOUD_TOKEN")
	if token == "" {
		return nil, fmt.Errorf("HCLOUD_TOKEN environment variable required")
	}
	return infra.NewHCloudClient(token), nil
}

func runLBStatus(cmd *cobra.Command, args []string) error {
	hc, err := getHCloudClient()
	if err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	lbs, err := hc.ListLoadBalancers(ctx)
	if err != nil {
		return fmt.Errorf("list load balancers: %w", err)
	}

	if len(lbs) == 0 {
		cli.Print("No load balancers found\n")
		return nil
	}

	for _, lb := range lbs {
		cli.Print("%s %s\n", cli.BoldStyle.Render("▶"), cli.TitleStyle.Render(lb.Name))
		cli.Print("  ID:        %d\n", lb.ID)
		cli.Print("  IP:        %s\n", lb.PublicNet.IPv4.IP)
		cli.Print("  Algorithm: %s\n", lb.Algorithm.Type)
		cli.Print("  Location:  %s\n", lb.Location.Name)

		if len(lb.Services) > 0 {
			cli.Print("\n  Services:\n")
			for _, s := range lb.Services {
				cli.Print("    %s :%d -> :%d  proxy_protocol=%v\n",
					s.Protocol, s.ListenPort, s.DestinationPort, s.Proxyprotocol)
			}
		}

		if len(lb.Targets) > 0 {
			cli.Print("\n  Targets:\n")
			for _, t := range lb.Targets {
				ip := ""
				if t.IP != nil {
					ip = t.IP.IP
				}
				for _, hs := range t.HealthStatus {
					icon := cli.SuccessStyle.Render("●")
					if hs.Status != "healthy" {
						icon = cli.ErrorStyle.Render("○")
					}
					cli.Print("    %s %s :%d %s\n", icon, ip, hs.ListenPort, hs.Status)
				}
			}
		}
		fmt.Println()
	}

	return nil
}

func runLBCreate(cmd *cobra.Command, args []string) error {
	cfg, _, err := loadConfig()
	if err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	return stepLoadBalancer(ctx, cfg)
}
35	internal/cmd/prod/cmd_prod.go	Normal file
@@ -0,0 +1,35 @@
package prod

import (
	"github.com/spf13/cobra"
)

var (
	infraFile string
)

// Cmd is the root prod command.
var Cmd = &cobra.Command{
	Use:   "prod",
	Short: "Production infrastructure management",
	Long: `Manage the Host UK production infrastructure.

Commands:
  status   Show infrastructure health and connectivity
  setup    Phase 1: discover topology, create LB, configure DNS
  dns      Manage DNS records via CloudNS
  lb       Manage Hetzner load balancer
  ssh      SSH into a production host

Configuration is read from infra.yaml in the project root.`,
}

func init() {
	Cmd.PersistentFlags().StringVar(&infraFile, "config", "", "Path to infra.yaml (auto-discovered if not set)")

	Cmd.AddCommand(statusCmd)
	Cmd.AddCommand(setupCmd)
	Cmd.AddCommand(dnsCmd)
	Cmd.AddCommand(lbCmd)
	Cmd.AddCommand(sshCmd)
}
284	internal/cmd/prod/cmd_setup.go	Normal file
@@ -0,0 +1,284 @@
package prod

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/host-uk/core/pkg/cli"
	"github.com/host-uk/core/pkg/infra"
	"github.com/spf13/cobra"
)

var setupCmd = &cobra.Command{
	Use:   "setup",
	Short: "Phase 1: discover topology, create LB, configure DNS",
	Long: `Run the Phase 1 foundation setup:

  1. Discover Hetzner topology (Cloud + Robot servers)
  2. Create Hetzner managed load balancer
  3. Configure DNS records via CloudNS
  4. Verify connectivity to all hosts

Required environment variables:
  HCLOUD_TOKEN           Hetzner Cloud API token
  HETZNER_ROBOT_USER     Hetzner Robot username
  HETZNER_ROBOT_PASS     Hetzner Robot password
  CLOUDNS_AUTH_ID        CloudNS auth ID
  CLOUDNS_AUTH_PASSWORD  CloudNS auth password`,
	RunE: runSetup,
}

var (
	setupDryRun bool
	setupStep   string
)

func init() {
	setupCmd.Flags().BoolVar(&setupDryRun, "dry-run", false, "Show what would be done without making changes")
	setupCmd.Flags().StringVar(&setupStep, "step", "", "Run a specific step only (discover, lb, dns)")
}

func runSetup(cmd *cobra.Command, args []string) error {
	cfg, cfgPath, err := loadConfig()
	if err != nil {
		return err
	}

	cli.Print("%s Production setup from %s\n\n",
		cli.BoldStyle.Render("▶"),
		cli.DimStyle.Render(cfgPath))

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	steps := []struct {
		name string
		fn   func(context.Context, *infra.Config) error
	}{
		{"discover", stepDiscover},
		{"lb", stepLoadBalancer},
		{"dns", stepDNS},
	}

	for _, step := range steps {
		if setupStep != "" && setupStep != step.name {
			continue
		}

		cli.Print("\n%s Step: %s\n", cli.BoldStyle.Render("━━"), cli.TitleStyle.Render(step.name))

		if err := step.fn(ctx, cfg); err != nil {
			cli.Print("  %s %s: %s\n", cli.ErrorStyle.Render("✗"), step.name, err)
			return fmt.Errorf("step %s failed: %w", step.name, err)
		}

		cli.Print("  %s %s complete\n", cli.SuccessStyle.Render("✓"), step.name)
	}

	cli.Print("\n%s Setup complete\n", cli.SuccessStyle.Render("✓"))
	return nil
}

func stepDiscover(ctx context.Context, cfg *infra.Config) error {
	// Discover HCloud servers
	hcloudToken := os.Getenv("HCLOUD_TOKEN")
	if hcloudToken != "" {
		cli.Print("  Discovering Hetzner Cloud servers...\n")

		hc := infra.NewHCloudClient(hcloudToken)
		servers, err := hc.ListServers(ctx)
		if err != nil {
			return fmt.Errorf("list HCloud servers: %w", err)
		}

		for _, s := range servers {
			cli.Print("    %s %s  %s  %s  %s\n",
				cli.SuccessStyle.Render("●"),
				cli.BoldStyle.Render(s.Name),
				s.PublicNet.IPv4.IP,
				s.ServerType.Name,
				cli.DimStyle.Render(s.Datacenter.Name))
		}
	} else {
		cli.Print("  %s HCLOUD_TOKEN not set — skipping Cloud discovery\n",
			cli.WarningStyle.Render("⚠"))
	}

	// Discover Robot servers
	robotUser := os.Getenv("HETZNER_ROBOT_USER")
	robotPass := os.Getenv("HETZNER_ROBOT_PASS")
	if robotUser != "" && robotPass != "" {
		cli.Print("  Discovering Hetzner Robot servers...\n")

		hr := infra.NewHRobotClient(robotUser, robotPass)
		servers, err := hr.ListServers(ctx)
		if err != nil {
			return fmt.Errorf("list Robot servers: %w", err)
		}

		for _, s := range servers {
			status := cli.SuccessStyle.Render("●")
			if s.Status != "ready" {
				status = cli.WarningStyle.Render("○")
			}
			cli.Print("    %s %s  %s  %s  %s\n",
				status,
				cli.BoldStyle.Render(s.ServerName),
				s.ServerIP,
				s.Product,
				cli.DimStyle.Render(s.Datacenter))
		}
	} else {
		cli.Print("  %s HETZNER_ROBOT_USER/PASS not set — skipping Robot discovery\n",
			cli.WarningStyle.Render("⚠"))
	}

	return nil
}

func stepLoadBalancer(ctx context.Context, cfg *infra.Config) error {
	hcloudToken := os.Getenv("HCLOUD_TOKEN")
	if hcloudToken == "" {
		return fmt.Errorf("HCLOUD_TOKEN required for load balancer management")
	}

	hc := infra.NewHCloudClient(hcloudToken)

	// Check if LB already exists
	lbs, err := hc.ListLoadBalancers(ctx)
	if err != nil {
		return fmt.Errorf("list load balancers: %w", err)
	}

	for _, lb := range lbs {
		if lb.Name == cfg.LoadBalancer.Name {
			cli.Print("  Load balancer '%s' already exists (ID: %d, IP: %s)\n",
				lb.Name, lb.ID, lb.PublicNet.IPv4.IP)
			return nil
		}
	}

	if setupDryRun {
		cli.Print("  [dry-run] Would create load balancer '%s' (%s) in %s\n",
			cfg.LoadBalancer.Name, cfg.LoadBalancer.Type, cfg.LoadBalancer.Location)
		for _, b := range cfg.LoadBalancer.Backends {
			if host, ok := cfg.Hosts[b.Host]; ok {
				cli.Print("  [dry-run] Backend: %s (%s:%d)\n", b.Host, host.IP, b.Port)
			}
		}
		return nil
	}

	// Build targets from config
	targets := make([]infra.HCloudLBCreateTarget, 0, len(cfg.LoadBalancer.Backends))
	for _, b := range cfg.LoadBalancer.Backends {
		host, ok := cfg.Hosts[b.Host]
		if !ok {
			return fmt.Errorf("backend host '%s' not found in config", b.Host)
		}
		targets = append(targets, infra.HCloudLBCreateTarget{
			Type: "ip",
			IP:   &infra.HCloudLBTargetIP{IP: host.IP},
		})
	}

	// Build services
	services := make([]infra.HCloudLBService, 0, len(cfg.LoadBalancer.Listeners))
	for _, l := range cfg.LoadBalancer.Listeners {
		svc := infra.HCloudLBService{
			Protocol:        l.Protocol,
			ListenPort:      l.Frontend,
			DestinationPort: l.Backend,
			Proxyprotocol:   l.ProxyProtocol,
			HealthCheck: &infra.HCloudLBHealthCheck{
				Protocol: cfg.LoadBalancer.Health.Protocol,
				Port:     l.Backend,
				Interval: cfg.LoadBalancer.Health.Interval,
				Timeout:  10,
				Retries:  3,
				HTTP: &infra.HCloudLBHCHTTP{
					Path:       cfg.LoadBalancer.Health.Path,
					StatusCode: "2??",
				},
			},
		}
		services = append(services, svc)
	}

	req := infra.HCloudLBCreateRequest{
		Name:             cfg.LoadBalancer.Name,
		LoadBalancerType: cfg.LoadBalancer.Type,
		Location:         cfg.LoadBalancer.Location,
		Algorithm:        infra.HCloudLBAlgorithm{Type: cfg.LoadBalancer.Algorithm},
		Services:         services,
		Targets:          targets,
		Labels: map[string]string{
			"project": "host-uk",
			"managed": "core-cli",
		},
	}

	cli.Print("  Creating load balancer '%s'...\n", cfg.LoadBalancer.Name)

	lb, err := hc.CreateLoadBalancer(ctx, req)
	if err != nil {
		return fmt.Errorf("create load balancer: %w", err)
	}

	cli.Print("  Created: %s (ID: %d, IP: %s)\n",
		cli.BoldStyle.Render(lb.Name), lb.ID, lb.PublicNet.IPv4.IP)

	return nil
}

func stepDNS(ctx context.Context, cfg *infra.Config) error {
	authID := os.Getenv("CLOUDNS_AUTH_ID")
	authPass := os.Getenv("CLOUDNS_AUTH_PASSWORD")
	if authID == "" || authPass == "" {
		return fmt.Errorf("CLOUDNS_AUTH_ID and CLOUDNS_AUTH_PASSWORD required")
	}

	dns := infra.NewCloudNSClient(authID, authPass)

	for zoneName, zone := range cfg.DNS.Zones {
		cli.Print("  Zone: %s\n", cli.BoldStyle.Render(zoneName))

		for _, rec := range zone.Records {
			value := rec.Value
			// Skip templated values (need LB IP first)
			if value == "{{.lb_ip}}" {
				cli.Print("    %s %s %s %s — %s\n",
					cli.WarningStyle.Render("⚠"),
					rec.Name, rec.Type, value,
					cli.DimStyle.Render("needs LB IP (run setup --step=lb first)"))
				continue
			}

			if setupDryRun {
				cli.Print("    [dry-run] %s %s -> %s (TTL: %d)\n",
					rec.Type, rec.Name, value, rec.TTL)
				continue
			}

			changed, err := dns.EnsureRecord(ctx, zoneName, rec.Name, rec.Type, value, rec.TTL)
			if err != nil {
				cli.Print("    %s %s %s: %s\n", cli.ErrorStyle.Render("✗"), rec.Type, rec.Name, err)
				continue
			}

			if changed {
				cli.Print("    %s %s %s -> %s\n",
					cli.SuccessStyle.Render("✓"),
					rec.Type, rec.Name, value)
			} else {
				cli.Print("    %s %s %s (no change)\n",
					cli.DimStyle.Render("·"),
					rec.Type, rec.Name)
			}
		}
	}

	return nil
}
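stepDNS skips records whose value is the literal `{{.lb_ip}}` placeholder and defers them until the load balancer exists. One way to resolve those placeholders would be Go's text/template; this is a hedged sketch, not code from this PR:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderRecordValue substitutes the load balancer IP into a templated DNS
// value such as "{{.lb_ip}}". Illustrative only; the PR leaves these records
// to a later setup pass.
func renderRecordValue(value, lbIP string) (string, error) {
	tmpl, err := template.New("record").Parse(value)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, map[string]string{"lb_ip": lbIP}); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	v, _ := renderRecordValue("{{.lb_ip}}", "10.0.0.5")
	fmt.Println(v) // 10.0.0.5
}
```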
64	internal/cmd/prod/cmd_ssh.go	Normal file
@@ -0,0 +1,64 @@
package prod

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"

	"github.com/host-uk/core/pkg/cli"
	"github.com/spf13/cobra"
)

var sshCmd = &cobra.Command{
	Use:   "ssh <host>",
	Short: "SSH into a production host",
	Long: `Open an SSH session to a production host defined in infra.yaml.

Examples:
  core prod ssh noc
  core prod ssh de
  core prod ssh de2
  core prod ssh build`,
	Args: cobra.ExactArgs(1),
	RunE: runSSH,
}

func runSSH(cmd *cobra.Command, args []string) error {
	cfg, _, err := loadConfig()
	if err != nil {
		return err
	}

	name := args[0]
	host, ok := cfg.Hosts[name]
	if !ok {
		// List available hosts
		cli.Print("Unknown host '%s'. Available:\n", name)
		for n, h := range cfg.Hosts {
			cli.Print("  %s  %s (%s)\n", cli.BoldStyle.Render(n), h.IP, h.Role)
		}
		return fmt.Errorf("host '%s' not found in infra.yaml", name)
	}

	sshArgs := []string{
		"ssh",
		"-i", host.SSH.Key,
		"-p", fmt.Sprintf("%d", host.SSH.Port),
		"-o", "StrictHostKeyChecking=accept-new",
		fmt.Sprintf("%s@%s", host.SSH.User, host.IP),
	}

	cli.Print("%s %s@%s (%s)\n",
		cli.BoldStyle.Render("▶"),
		host.SSH.User, host.FQDN,
		cli.DimStyle.Render(host.IP))

	sshPath, err := exec.LookPath("ssh")
	if err != nil {
		return fmt.Errorf("ssh not found: %w", err)
	}

	// Replace current process with SSH
	return syscall.Exec(sshPath, sshArgs, os.Environ())
}
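syscall.Exec replaces the CLI process with ssh, so signals and the TTY pass straight through, but it is Unix-only. A hedged fallback for platforms without Exec (not part of this PR) could keep the CLI alive and proxy stdio to a child process instead:

```go
// runSSHPortable is an illustrative alternative to syscall.Exec: it runs ssh
// as a child process and wires up the caller's stdio.
func runSSHPortable(sshPath string, sshArgs []string) error {
	cmd := exec.Command(sshPath, sshArgs[1:]...) // sshArgs[0] is "ssh" itself
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
```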
325	internal/cmd/prod/cmd_status.go	Normal file
@@ -0,0 +1,325 @@
package prod

import (
	"context"
	"fmt"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/host-uk/core/pkg/ansible"
	"github.com/host-uk/core/pkg/cli"
	"github.com/host-uk/core/pkg/infra"
	"github.com/spf13/cobra"
)

var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show production infrastructure health",
	Long: `Check connectivity, services, and cluster health across all production hosts.

Tests:
  - SSH connectivity to all hosts
  - Docker daemon status
  - Coolify controller (noc)
  - Galera cluster state (de, de2)
  - Redis Sentinel status (de, de2)
  - Load balancer health (if HCLOUD_TOKEN set)`,
	RunE: runStatus,
}

type hostStatus struct {
	Name      string
	Host      *infra.Host
	Connected bool
	ConnTime  time.Duration
	OS        string
	Docker    string
	Services  map[string]string
	Error     error
}

func runStatus(cmd *cobra.Command, args []string) error {
	cfg, cfgPath, err := loadConfig()
	if err != nil {
		return err
	}

	cli.Print("%s Infrastructure status from %s\n\n",
		cli.BoldStyle.Render("▶"),
		cli.DimStyle.Render(cfgPath))

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	// Check all hosts in parallel
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		statuses []hostStatus
	)

	for name, host := range cfg.Hosts {
		wg.Add(1)
		go func(name string, host *infra.Host) {
			defer wg.Done()
			s := checkHost(ctx, name, host)
			mu.Lock()
			statuses = append(statuses, s)
			mu.Unlock()
		}(name, host)
	}

	wg.Wait()

	// Print results in consistent order
	order := []string{"noc", "de", "de2", "build"}
	for _, name := range order {
		for _, s := range statuses {
			if s.Name == name {
				printHostStatus(s)
				break
			}
		}
	}

	// Check LB if token available
	if token := os.Getenv("HCLOUD_TOKEN"); token != "" {
		fmt.Println()
		checkLoadBalancer(ctx, token)
	} else {
		fmt.Println()
		cli.Print("%s Load balancer: %s\n",
			cli.DimStyle.Render("  ○"),
			cli.DimStyle.Render("HCLOUD_TOKEN not set (skipped)"))
	}

	return nil
}

func checkHost(ctx context.Context, name string, host *infra.Host) hostStatus {
	s := hostStatus{
		Name:     name,
		Host:     host,
		Services: make(map[string]string),
	}

	sshCfg := ansible.SSHConfig{
		Host:    host.IP,
		Port:    host.SSH.Port,
		User:    host.SSH.User,
		KeyFile: host.SSH.Key,
		Timeout: 15 * time.Second,
	}

	client, err := ansible.NewSSHClient(sshCfg)
	if err != nil {
		s.Error = fmt.Errorf("create SSH client: %w", err)
		return s
	}
	defer func() { _ = client.Close() }()

	start := time.Now()
	if err := client.Connect(ctx); err != nil {
		s.Error = fmt.Errorf("SSH connect: %w", err)
		return s
	}
	s.Connected = true
	s.ConnTime = time.Since(start)

	// OS info
	stdout, _, _, _ := client.Run(ctx, "cat /etc/os-release 2>/dev/null | grep PRETTY_NAME | cut -d'\"' -f2")
	s.OS = strings.TrimSpace(stdout)

	// Docker
	stdout, _, _, err = client.Run(ctx, "docker --version 2>/dev/null | head -1")
	if err == nil && stdout != "" {
		s.Docker = strings.TrimSpace(stdout)
	}

	// Check each expected service
	for _, svc := range host.Services {
		status := checkService(ctx, client, svc)
		s.Services[svc] = status
	}

	return s
}

func checkService(ctx context.Context, client *ansible.SSHClient, service string) string {
	switch service {
	case "coolify":
		stdout, _, _, _ := client.Run(ctx, "docker ps --format '{{.Names}}' 2>/dev/null | grep -c coolify")
		if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
			return "running"
		}
		return "not running"

	case "traefik":
		stdout, _, _, _ := client.Run(ctx, "docker ps --format '{{.Names}}' 2>/dev/null | grep -c traefik")
		if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
			return "running"
		}
		return "not running"

	case "galera":
		// Check Galera cluster state
		stdout, _, _, _ := client.Run(ctx,
			"docker exec $(docker ps -q --filter name=mariadb 2>/dev/null || echo none) "+
				"mariadb -u root -e \"SHOW STATUS LIKE 'wsrep_cluster_size'\" --skip-column-names 2>/dev/null | awk '{print $2}'")
		size := strings.TrimSpace(stdout)
		if size != "" && size != "0" {
			return fmt.Sprintf("cluster_size=%s", size)
		}
		// Try non-Docker
		stdout, _, _, _ = client.Run(ctx,
			"mariadb -u root -e \"SHOW STATUS LIKE 'wsrep_cluster_size'\" --skip-column-names 2>/dev/null | awk '{print $2}'")
		size = strings.TrimSpace(stdout)
		if size != "" && size != "0" {
			return fmt.Sprintf("cluster_size=%s", size)
		}
		return "not running"

	case "redis":
		stdout, _, _, _ := client.Run(ctx,
			"docker exec $(docker ps -q --filter name=redis 2>/dev/null || echo none) "+
				"redis-cli ping 2>/dev/null")
		if strings.TrimSpace(stdout) == "PONG" {
			return "running"
		}
		stdout, _, _, _ = client.Run(ctx, "redis-cli ping 2>/dev/null")
		if strings.TrimSpace(stdout) == "PONG" {
			return "running"
		}
		return "not running"

	case "forgejo-runner":
		stdout, _, _, _ := client.Run(ctx, "systemctl is-active forgejo-runner 2>/dev/null || docker ps --format '{{.Names}}' 2>/dev/null | grep -c runner")
		val := strings.TrimSpace(stdout)
		if val == "active" || (val != "0" && val != "") {
			return "running"
		}
		return "not running"

	default:
		// Generic docker container check
		stdout, _, _, _ := client.Run(ctx,
			fmt.Sprintf("docker ps --format '{{.Names}}' 2>/dev/null | grep -c %s", service))
		if strings.TrimSpace(stdout) != "0" && strings.TrimSpace(stdout) != "" {
			return "running"
		}
		return "not running"
	}
}

func printHostStatus(s hostStatus) {
	// Host header
	roleStyle := cli.DimStyle
	switch s.Host.Role {
	case "app":
		roleStyle = cli.SuccessStyle
	case "bastion":
		roleStyle = cli.WarningStyle
	case "builder":
		roleStyle = cli.InfoStyle
	}

	cli.Print("  %s %s %s %s\n",
		cli.BoldStyle.Render(s.Name),
		cli.DimStyle.Render(s.Host.IP),
		roleStyle.Render(s.Host.Role),
		cli.DimStyle.Render(s.Host.FQDN))

	if s.Error != nil {
		cli.Print("    %s %s\n", cli.ErrorStyle.Render("✗"), s.Error)
		return
	}

	if !s.Connected {
		cli.Print("    %s SSH unreachable\n", cli.ErrorStyle.Render("✗"))
		return
	}

	// Connection info
	cli.Print("    %s SSH %s",
		cli.SuccessStyle.Render("✓"),
		cli.DimStyle.Render(s.ConnTime.Round(time.Millisecond).String()))
	if s.OS != "" {
		cli.Print("  %s", cli.DimStyle.Render(s.OS))
	}
	fmt.Println()

	if s.Docker != "" {
		cli.Print("    %s %s\n", cli.SuccessStyle.Render("✓"), cli.DimStyle.Render(s.Docker))
	}

	// Services
	for _, svc := range s.Host.Services {
		status, ok := s.Services[svc]
		if !ok {
			continue
		}

		icon := cli.SuccessStyle.Render("●")
		style := cli.SuccessStyle
		if status == "not running" {
			icon = cli.ErrorStyle.Render("○")
			style = cli.ErrorStyle
		}

		cli.Print("    %s %s %s\n", icon, svc, style.Render(status))
	}

	fmt.Println()
}

func checkLoadBalancer(ctx context.Context, token string) {
	hc := infra.NewHCloudClient(token)
	lbs, err := hc.ListLoadBalancers(ctx)
	if err != nil {
		cli.Print("  %s Load balancer: %s\n", cli.ErrorStyle.Render("✗"), err)
		return
	}

	if len(lbs) == 0 {
		cli.Print("  %s No load balancers found\n", cli.DimStyle.Render("○"))
		return
	}

	for _, lb := range lbs {
		cli.Print("  %s LB: %s  IP: %s  Targets: %d\n",
			cli.SuccessStyle.Render("●"),
			cli.BoldStyle.Render(lb.Name),
			lb.PublicNet.IPv4.IP,
			len(lb.Targets))

		for _, t := range lb.Targets {
			for _, hs := range t.HealthStatus {
				icon := cli.SuccessStyle.Render("●")
				if hs.Status != "healthy" {
					icon = cli.ErrorStyle.Render("○")
				}
				ip := ""
				if t.IP != nil {
					ip = t.IP.IP
				}
				cli.Print("    %s :%d %s %s\n", icon, hs.ListenPort, hs.Status, cli.DimStyle.Render(ip))
			}
		}
	}
}

func loadConfig() (*infra.Config, string, error) {
	if infraFile != "" {
		cfg, err := infra.Load(infraFile)
		return cfg, infraFile, err
	}

	cwd, err := os.Getwd()
	if err != nil {
		return nil, "", err
	}

	return infra.Discover(cwd)
}
@@ -208,7 +208,11 @@ func shortID(id string) string {
 	return id
 }

-func formatDur(d interface{ Hours() float64; Minutes() float64; Seconds() float64 }) string {
+func formatDur(d interface {
+	Hours() float64
+	Minutes() float64
+	Seconds() float64
+}) string {
 	type dur interface {
 		Hours() float64
 		Minutes() float64
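The reformatted formatDur signature accepts any value with Hours, Minutes, and Seconds methods returning float64, which time.Duration satisfies. A minimal sketch of the usual call, assuming it sits in the same package as formatDur:

```go
// elapsed formats the time since start; time.Duration satisfies the inline
// interface used by formatDur above.
func elapsed(start time.Time) string {
	return formatDur(time.Since(start))
}
```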
@@ -25,7 +25,6 @@ import (
 	"github.com/host-uk/core/pkg/cli"
 	"github.com/host-uk/core/pkg/i18n"
 	coreio "github.com/host-uk/core/pkg/io"
-	"github.com/host-uk/core/pkg/log"
 	"github.com/host-uk/core/pkg/repos"
 	"github.com/spf13/cobra"
 )

@@ -76,7 +75,6 @@ func runGitHubSetup() error {

 	// Check gh is authenticated
 	if !cli.GhAuthenticated() {
-		cli.LogSecurity("GitHub setup failed: not authenticated", "action", "setup github", "user", log.Username())
 		return errors.New(i18n.T("cmd.setup.github.error.not_authenticated"))
 	}
@@ -16,8 +16,6 @@ import (
 	"github.com/spf13/cobra"
 )

-var templateManager = container.NewTemplateManager(io.Local)
-
 // addVMTemplatesCommand adds the 'templates' command under vm.
 func addVMTemplatesCommand(parent *cobra.Command) {
 	templatesCmd := &cobra.Command{

@@ -71,7 +69,7 @@ func addTemplatesVarsCommand(parent *cobra.Command) {
 }

 func listTemplates() error {
-	templates := templateManager.ListTemplates()
+	templates := container.ListTemplates()

 	if len(templates) == 0 {
 		fmt.Println(i18n.T("cmd.vm.templates.no_templates"))

@@ -102,7 +100,7 @@ func listTemplates() error {
 }

 func showTemplate(name string) error {
-	content, err := templateManager.GetTemplate(name)
+	content, err := container.GetTemplate(name)
 	if err != nil {
 		return err
 	}

@@ -114,7 +112,7 @@ func showTemplate(name string) error {
 }

 func showTemplateVars(name string) error {
-	content, err := templateManager.GetTemplate(name)
+	content, err := container.GetTemplate(name)
 	if err != nil {
 		return err
 	}

@@ -151,7 +149,7 @@ func showTemplateVars(name string) error {
 // RunFromTemplate builds and runs a LinuxKit image from a template.
 func RunFromTemplate(templateName string, vars map[string]string, runOpts container.RunOptions) error {
 	// Apply template with variables
-	content, err := templateManager.ApplyTemplate(templateName, vars)
+	content, err := container.ApplyTemplate(templateName, vars)
 	if err != nil {
 		return fmt.Errorf(i18n.T("common.error.failed", map[string]any{"Action": "apply template"})+": %w", err)
 	}
@@ -1,6 +1,7 @@
 // cmd_agent.go manages persistent agent context within task workspaces.
 //
 // Each agent gets a directory at:
+//
 //	.core/workspace/p{epic}/i{issue}/agents/{provider}/{agent-name}/
 //
 // This directory persists across invocations, allowing agents to build
@@ -20,9 +20,6 @@
 //   - test: Test runner with coverage
 //   - qa: Quality assurance workflows
 //   - monitor: Security monitoring aggregation
-//   - gitea: Gitea instance management (repos, issues, PRs, mirrors)
-//   - forge: Forgejo instance management (repos, issues, PRs, migration, orgs, labels)
-//   - unifi: UniFi network management (sites, devices, clients)

 package variants

@@ -37,12 +34,9 @@ import (
 	_ "github.com/host-uk/core/internal/cmd/dev"
 	_ "github.com/host-uk/core/internal/cmd/docs"
 	_ "github.com/host-uk/core/internal/cmd/doctor"
-	_ "github.com/host-uk/core/internal/cmd/forge"
 	_ "github.com/host-uk/core/internal/cmd/gitcmd"
-	_ "github.com/host-uk/core/internal/cmd/gitea"
 	_ "github.com/host-uk/core/internal/cmd/go"
 	_ "github.com/host-uk/core/internal/cmd/help"
-	_ "github.com/host-uk/core/internal/cmd/mcpcmd"
 	_ "github.com/host-uk/core/internal/cmd/monitor"
 	_ "github.com/host-uk/core/internal/cmd/php"
 	_ "github.com/host-uk/core/internal/cmd/pkgcmd"

@@ -52,7 +46,6 @@ import (
 	_ "github.com/host-uk/core/internal/cmd/security"
 	_ "github.com/host-uk/core/internal/cmd/setup"
 	_ "github.com/host-uk/core/internal/cmd/test"
-	_ "github.com/host-uk/core/internal/cmd/unifi"
 	_ "github.com/host-uk/core/internal/cmd/updater"
 	_ "github.com/host-uk/core/internal/cmd/vm"
 	_ "github.com/host-uk/core/internal/cmd/workspace"
13	main.go
@@ -1,13 +0,0 @@
-package main
-
-import (
-	"github.com/host-uk/core/pkg/cli"
-
-	// Build variants import commands via self-registration.
-	// See internal/variants/ for available variants: full, ci, php, minimal.
-	_ "github.com/host-uk/core/internal/variants"
-)
-
-func main() {
-	cli.Main()
-}
@@ -1,26 +1,25 @@
 package agentic

 import (
-	"bufio"
 	"os"
 	"path/filepath"
 	"strings"

-	"github.com/host-uk/core/pkg/config"
+	errors "github.com/host-uk/core/pkg/framework/core"
 	"github.com/host-uk/core/pkg/io"
-	"github.com/host-uk/core/pkg/log"
+	"gopkg.in/yaml.v3"
 )

 // Config holds the configuration for connecting to the core-agentic service.
 type Config struct {
 	// BaseURL is the URL of the core-agentic API server.
-	BaseURL string `yaml:"base_url" json:"base_url" mapstructure:"base_url"`
+	BaseURL string `yaml:"base_url" json:"base_url"`
 	// Token is the authentication token for API requests.
-	Token string `yaml:"token" json:"token" mapstructure:"token"`
+	Token string `yaml:"token" json:"token"`
 	// DefaultProject is the project to use when none is specified.
-	DefaultProject string `yaml:"default_project" json:"default_project" mapstructure:"default_project"`
+	DefaultProject string `yaml:"default_project" json:"default_project"`
 	// AgentID is the identifier for this agent (optional, used for claiming tasks).
-	AgentID string `yaml:"agent_id" json:"agent_id" mapstructure:"agent_id"`
+	AgentID string `yaml:"agent_id" json:"agent_id"`
 }

 // configFileName is the name of the YAML config file.

@@ -33,9 +32,10 @@ const envFileName = ".env"
 const DefaultBaseURL = "https://api.core-agentic.dev"

 // LoadConfig loads the agentic configuration from the specified directory.
-// It uses the centralized config service.
+// It first checks for a .env file, then falls back to ~/.core/agentic.yaml.
+// If dir is empty, it checks the current directory first.
 //
-// Environment variables take precedence (prefix: AGENTIC_):
+// Environment variables take precedence:
 //   - AGENTIC_BASE_URL: API base URL
 //   - AGENTIC_TOKEN: Authentication token
 //   - AGENTIC_PROJECT: Default project

@@ -58,6 +58,7 @@ func LoadConfig(dir string) (*Config, error) {
 	}

 	// Try loading from current directory .env
+	if dir == "" {
 		cwd, err := os.Getwd()
 		if err == nil {
 			envPath := filepath.Join(cwd, envFileName)

@@ -68,23 +69,17 @@ func LoadConfig(dir string) (*Config, error) {
 			}
 		}
 	}
+	}

 	// Try loading from ~/.core/agentic.yaml
 	homeDir, err := os.UserHomeDir()
 	if err != nil {
-		return nil, log.E("agentic.LoadConfig", "failed to get home directory", err)
+		return nil, errors.E("agentic.LoadConfig", "failed to get home directory", err)
 	}

 	configPath := filepath.Join(homeDir, ".core", configFileName)
-	if io.Local.IsFile(configPath) {
-		// Use centralized config service to load the YAML file
-		c, err := config.New(config.WithPath(configPath))
-		if err != nil {
-			return nil, log.E("agentic.LoadConfig", "failed to initialize config", err)
-		}
-		if err := c.Get("", cfg); err != nil {
-			return nil, log.E("agentic.LoadConfig", "failed to load config", err)
-		}
+	if err := loadYAMLConfig(configPath, cfg); err != nil && !os.IsNotExist(err) {
+		return nil, errors.E("agentic.LoadConfig", "failed to load config", err)
 	}

 	// Apply environment variable overrides

@@ -92,25 +87,21 @@ func LoadConfig(dir string) (*Config, error) {

 	// Validate configuration
 	if cfg.Token == "" {
-		log.Security("agentic authentication failed: no token configured", "user", log.Username())
-		return nil, log.E("agentic.LoadConfig", "no authentication token configured", nil)
+		return nil, errors.E("agentic.LoadConfig", "no authentication token configured", nil)
 	}

-	log.Security("agentic configuration loaded", "user", log.Username(), "baseURL", cfg.BaseURL)
 	return cfg, nil
 }

 // loadEnvFile reads a .env file and extracts agentic configuration.
 func loadEnvFile(path string, cfg *Config) error {
-	file, err := os.Open(path)
+	content, err := io.Local.Read(path)
 	if err != nil {
 		return err
 	}
-	defer func() { _ = file.Close() }()

-	scanner := bufio.NewScanner(file)
-	for scanner.Scan() {
-		line := strings.TrimSpace(scanner.Text())
+	for _, line := range strings.Split(content, "\n") {
+		line = strings.TrimSpace(line)

 		// Skip empty lines and comments
 		if line == "" || strings.HasPrefix(line, "#") {

@@ -141,7 +132,17 @@ func loadEnvFile(path string, cfg *Config) error {
 		}
 	}

-	return scanner.Err()
+	return nil
+}
+
+// loadYAMLConfig reads configuration from a YAML file.
+func loadYAMLConfig(path string, cfg *Config) error {
+	content, err := io.Local.Read(path)
+	if err != nil {
+		return err
+	}
+
+	return yaml.Unmarshal([]byte(content), cfg)
 }

 // applyEnvOverrides applies environment variable overrides to the config.

@@ -162,25 +163,35 @@ func applyEnvOverrides(cfg *Config) {

 // SaveConfig saves the configuration to ~/.core/agentic.yaml.
 func SaveConfig(cfg *Config) error {
-	path, err := ConfigPath()
+	homeDir, err := os.UserHomeDir()
 	if err != nil {
-		return err
+		return errors.E("agentic.SaveConfig", "failed to get home directory", err)
 	}

-	data := make(map[string]any)
-	data["base_url"] = cfg.BaseURL
-	data["token"] = cfg.Token
-	data["default_project"] = cfg.DefaultProject
-	data["agent_id"] = cfg.AgentID
+	configDir := filepath.Join(homeDir, ".core")
+	if err := io.Local.EnsureDir(configDir); err != nil {
+		return errors.E("agentic.SaveConfig", "failed to create config directory", err)
+	}

-	return config.Save(io.Local, path, data)
+	configPath := filepath.Join(configDir, configFileName)
+
+	data, err := yaml.Marshal(cfg)
+	if err != nil {
+		return errors.E("agentic.SaveConfig", "failed to marshal config", err)
+	}
+
+	if err := io.Local.Write(configPath, string(data)); err != nil {
+		return errors.E("agentic.SaveConfig", "failed to write config file", err)
+	}
+
+	return nil
 }

 // ConfigPath returns the path to the config file in the user's home directory.
 func ConfigPath() (string, error) {
 	homeDir, err := os.UserHomeDir()
 	if err != nil {
-		return "", log.E("agentic.ConfigPath", "failed to get home directory", err)
+		return "", errors.E("agentic.ConfigPath", "failed to get home directory", err)
 	}
 	return filepath.Join(homeDir, ".core", configFileName), nil
 }
|
@@ -3,20 +3,16 @@ package agentic
 import (
 	"bytes"
-	goio "io"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"regexp"
 	"strings"
 
-	"github.com/host-uk/core/pkg/ai"
+	errors "github.com/host-uk/core/pkg/framework/core"
 	"github.com/host-uk/core/pkg/io"
-	"github.com/host-uk/core/pkg/log"
 )
 
-const maxContextBytes = 5000
-
 // FileContent represents the content of a file for AI context.
 type FileContent struct {
 	// Path is the relative path to the file.
@@ -39,8 +35,6 @@ type TaskContext struct {
 	RecentCommits string `json:"recent_commits"`
 	// RelatedCode contains code snippets related to the task.
 	RelatedCode []FileContent `json:"related_code"`
-	// RAGContext contains relevant documentation from the vector database.
-	RAGContext string `json:"rag_context,omitempty"`
 }
 
 // BuildTaskContext gathers context for AI collaboration on a task.
@@ -48,13 +42,13 @@ func BuildTaskContext(task *Task, dir string) (*TaskContext, error) {
 	const op = "agentic.BuildTaskContext"
 
 	if task == nil {
-		return nil, log.E(op, "task is required", nil)
+		return nil, errors.E(op, "task is required", nil)
 	}
 
 	if dir == "" {
 		cwd, err := os.Getwd()
 		if err != nil {
-			return nil, log.E(op, "failed to get working directory", err)
+			return nil, errors.E(op, "failed to get working directory", err)
 		}
 		dir = cwd
 	}
@@ -86,13 +80,6 @@ func BuildTaskContext(task *Task, dir string) (*TaskContext, error) {
 	}
 	ctx.RelatedCode = relatedCode
 
-	// Query RAG for relevant documentation (graceful degradation)
-	ragCtx := ai.QueryRAGForTask(ai.TaskInfo{
-		Title:       task.Title,
-		Description: task.Description,
-	})
-	ctx.RAGContext = ragCtx
-
 	return ctx, nil
 }
@@ -101,31 +88,24 @@ func GatherRelatedFiles(task *Task, dir string) ([]FileContent, error) {
 	const op = "agentic.GatherRelatedFiles"
 
 	if task == nil {
-		return nil, log.E(op, "task is required", nil)
+		return nil, errors.E(op, "task is required", nil)
 	}
 
 	var files []FileContent
 
 	// Read files explicitly mentioned in the task
 	for _, relPath := range task.Files {
-		fullPath := relPath
-		if !filepath.IsAbs(relPath) {
-			fullPath = filepath.Join(dir, relPath)
-		}
-
-		content, truncated, err := readAndTruncate(fullPath)
+		fullPath := filepath.Join(dir, relPath)
+		content, err := io.Local.Read(fullPath)
 		if err != nil {
+			// Skip files that don't exist
 			continue
 		}
 
-		contentStr := string(content)
-		if truncated {
-			contentStr += "\n... (truncated)"
-		}
-
 		files = append(files, FileContent{
 			Path:     relPath,
-			Content:  contentStr,
+			Content:  content,
 			Language: detectLanguage(relPath),
 		})
 	}
@@ -138,7 +118,7 @@ func findRelatedCode(task *Task, dir string) ([]FileContent, error) {
 	const op = "agentic.findRelatedCode"
 
 	if task == nil {
-		return nil, log.E(op, "task is required", nil)
+		return nil, errors.E(op, "task is required", nil)
 	}
 
 	// Extract keywords from title and description
@@ -174,24 +154,20 @@ func findRelatedCode(task *Task, dir string) ([]FileContent, error) {
 			break
 		}
 
-		fullPath := line
-		if !filepath.IsAbs(line) {
-			fullPath = filepath.Join(dir, line)
-		}
-
-		content, truncated, err := readAndTruncate(fullPath)
+		fullPath := filepath.Join(dir, line)
+		content, err := io.Local.Read(fullPath)
 		if err != nil {
 			continue
 		}
 
-		contentStr := string(content)
-		if truncated {
-			contentStr += "\n... (truncated)"
-		}
+		// Truncate large files
+		if len(content) > 5000 {
+			content = content[:5000] + "\n... (truncated)"
+		}
 
 		files = append(files, FileContent{
 			Path:     line,
-			Content:  contentStr,
+			Content:  content,
 			Language: detectLanguage(line),
 		})
 	}
@@ -286,30 +262,6 @@ func detectLanguage(path string) string {
 	return "text"
 }
 
-// readAndTruncate reads up to maxContextBytes from a file.
-func readAndTruncate(path string) ([]byte, bool, error) {
-	f, err := io.Local.ReadStream(path)
-	if err != nil {
-		return nil, false, err
-	}
-	defer func() { _ = f.Close() }()
-
-	// Read up to maxContextBytes + 1 to detect truncation
-	reader := goio.LimitReader(f, maxContextBytes+1)
-	content, err := goio.ReadAll(reader)
-	if err != nil {
-		return nil, false, err
-	}
-
-	truncated := false
-	if len(content) > maxContextBytes {
-		content = content[:maxContextBytes]
-		truncated = true
-	}
-
-	return content, truncated, nil
-}
-
 // runGitCommand runs a git command and returns the output.
 func runGitCommand(dir string, args ...string) (string, error) {
 	cmd := exec.Command("git", args...)
@@ -379,12 +331,5 @@ func (tc *TaskContext) FormatContext() string {
 		}
 	}
 
-	// Relevant documentation from RAG
-	if tc.RAGContext != "" {
-		sb.WriteString("## Relevant Documentation\n")
-		sb.WriteString(tc.RAGContext)
-		sb.WriteString("\n\n")
-	}
-
 	return sb.String()
 }

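A minimal sketch of driving the trimmed-down context builder. BuildTaskContext, FormatContext, and the Task fields Title, Description, and Files all appear in the hunks above; the Task struct literal and the import path are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/host-uk/core/pkg/agentic" // import path assumed
)

func main() {
	// Title, Description, and Files are the only Task fields visible in
	// this diff; the rest of the struct is not shown, so this is a sketch.
	task := &agentic.Task{
		Title:       "Wire BugSETI into the root build",
		Description: "Add the module to the build graph",
		Files:       []string{"pkg/build/config.go"},
	}
	ctx, err := agentic.BuildTaskContext(task, ".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ctx.FormatContext())
}
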
@@ -9,7 +9,6 @@ import (
 	"github.com/host-uk/core/pkg/cli"
 	"github.com/host-uk/core/pkg/framework/core"
 	"github.com/host-uk/core/pkg/i18n"
-	"github.com/host-uk/core/pkg/io"
 	"github.com/host-uk/core/pkg/release"
 )
 
@@ -51,7 +50,7 @@ func runRelease(ctx context.Context, dryRun bool, version string, draft, prerele
 	}
 
 	// Check for release config
-	if !release.ConfigExists(io.Local, projectDir) {
+	if !release.ConfigExists(projectDir) {
 		cli.Print("%s %s\n",
 			buildErrorStyle.Render(i18n.Label("error")),
 			i18n.T("cmd.build.release.error.no_config"),
@@ -61,7 +60,7 @@ func runRelease(ctx context.Context, dryRun bool, version string, draft, prerele
 	}
 
 	// Load configuration
-	cfg, err := release.LoadConfig(io.Local, projectDir)
+	cfg, err := release.LoadConfig(projectDir)
 	if err != nil {
 		return core.E("release", "load config", err)
 	}

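A small sketch of the new call shapes now that the io.Medium argument has been dropped from the release package; the surrounding program is illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/host-uk/core/pkg/release"
)

func main() {
	projectDir := "."
	// New signatures per the hunks above: no io.Medium is passed in.
	if !release.ConfigExists(projectDir) {
		log.Fatal("no release config found")
	}
	cfg, err := release.LoadConfig(projectDir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded release config: %+v\n", cfg)
}
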
@@ -4,11 +4,12 @@ package build
 import (
 	"fmt"
+	"os"
 	"path/filepath"
 
 	"github.com/host-uk/core/pkg/build/signing"
-	"github.com/host-uk/core/pkg/config"
 	"github.com/host-uk/core/pkg/io"
+	"gopkg.in/yaml.v3"
 )
 
 // ConfigFileName is the name of the build configuration file.
@@ -21,48 +22,48 @@ const ConfigDir = ".core"
 // This is distinct from Config which holds runtime build parameters.
 type BuildConfig struct {
 	// Version is the config file format version.
-	Version int `yaml:"version" mapstructure:"version"`
+	Version int `yaml:"version"`
 	// Project contains project metadata.
-	Project Project `yaml:"project" mapstructure:"project"`
+	Project Project `yaml:"project"`
 	// Build contains build settings.
-	Build Build `yaml:"build" mapstructure:"build"`
+	Build Build `yaml:"build"`
 	// Targets defines the build targets.
-	Targets []TargetConfig `yaml:"targets" mapstructure:"targets"`
+	Targets []TargetConfig `yaml:"targets"`
 	// Sign contains code signing configuration.
-	Sign signing.SignConfig `yaml:"sign,omitempty" mapstructure:"sign,omitempty"`
+	Sign signing.SignConfig `yaml:"sign,omitempty"`
 }
 
 // Project holds project metadata.
 type Project struct {
 	// Name is the project name.
-	Name string `yaml:"name" mapstructure:"name"`
+	Name string `yaml:"name"`
 	// Description is a brief description of the project.
-	Description string `yaml:"description" mapstructure:"description"`
+	Description string `yaml:"description"`
 	// Main is the path to the main package (e.g., ./cmd/core).
-	Main string `yaml:"main" mapstructure:"main"`
+	Main string `yaml:"main"`
 	// Binary is the output binary name.
-	Binary string `yaml:"binary" mapstructure:"binary"`
+	Binary string `yaml:"binary"`
 }
 
 // Build holds build-time settings.
 type Build struct {
 	// CGO enables CGO for the build.
-	CGO bool `yaml:"cgo" mapstructure:"cgo"`
+	CGO bool `yaml:"cgo"`
 	// Flags are additional build flags (e.g., ["-trimpath"]).
-	Flags []string `yaml:"flags" mapstructure:"flags"`
+	Flags []string `yaml:"flags"`
 	// LDFlags are linker flags (e.g., ["-s", "-w"]).
-	LDFlags []string `yaml:"ldflags" mapstructure:"ldflags"`
+	LDFlags []string `yaml:"ldflags"`
 	// Env are additional environment variables.
-	Env []string `yaml:"env" mapstructure:"env"`
+	Env []string `yaml:"env"`
 }
 
 // TargetConfig defines a build target in the config file.
 // This is separate from Target to allow for additional config-specific fields.
 type TargetConfig struct {
 	// OS is the target operating system (e.g., "linux", "darwin", "windows").
-	OS string `yaml:"os" mapstructure:"os"`
+	OS string `yaml:"os"`
 	// Arch is the target architecture (e.g., "amd64", "arm64").
-	Arch string `yaml:"arch" mapstructure:"arch"`
+	Arch string `yaml:"arch"`
 }
 
 // LoadConfig loads build configuration from the .core/build.yaml file in the given directory.
@@ -71,25 +72,24 @@ type TargetConfig struct {
 func LoadConfig(fs io.Medium, dir string) (*BuildConfig, error) {
 	configPath := filepath.Join(dir, ConfigDir, ConfigFileName)
 
-	if !fs.Exists(configPath) {
-		return DefaultConfig(), nil
-	}
-
-	// Use centralized config service
-	c, err := config.New(config.WithMedium(fs), config.WithPath(configPath))
-	if err != nil {
-		return nil, fmt.Errorf("build.LoadConfig: %w", err)
-	}
-
-	cfg := DefaultConfig()
-	if err := c.Get("", cfg); err != nil {
-		return nil, fmt.Errorf("build.LoadConfig: %w", err)
-	}
-
-	// Apply defaults for any missing fields (centralized Get might not fill everything)
-	applyDefaults(cfg)
-
-	return cfg, nil
+	content, err := fs.Read(configPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return DefaultConfig(), nil
+		}
+		return nil, fmt.Errorf("build.LoadConfig: failed to read config file: %w", err)
+	}
+
+	var cfg BuildConfig
+	data := []byte(content)
+	if err := yaml.Unmarshal(data, &cfg); err != nil {
+		return nil, fmt.Errorf("build.LoadConfig: failed to parse config file: %w", err)
+	}
+
+	// Apply defaults for any missing fields
+	applyDefaults(&cfg)
+
+	return &cfg, nil
 }
 
 // DefaultConfig returns sensible defaults for Go projects.
@@ -156,7 +156,7 @@ func ConfigPath(dir string) string {
 
 // ConfigExists checks if a build config file exists in the given directory.
 func ConfigExists(fs io.Medium, dir string) bool {
-	return fs.IsFile(ConfigPath(dir))
+	return fileExists(fs, ConfigPath(dir))
 }
 
 // ToTargets converts TargetConfig slice to Target slice for use with builders.

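A hedged usage sketch of the YAML-based loader above; the build import path is assumed from the package name, while the signatures and struct fields come straight from this diff.

package main

import (
	"fmt"
	"log"

	"github.com/host-uk/core/pkg/build" // import path assumed
	"github.com/host-uk/core/pkg/io"
)

func main() {
	// Reads .core/build.yaml from the given directory, falling back to
	// DefaultConfig() when the file does not exist.
	cfg, err := build.LoadConfig(io.Local, ".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", cfg.Project.Name, "binary:", cfg.Project.Binary)
	for _, t := range cfg.Targets {
		fmt.Printf("target: %s/%s\n", t.OS, t.Arch)
	}
}
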
@@ -13,7 +13,6 @@ const (
 	markerWails       = "wails.json"
 	markerNodePackage = "package.json"
 	markerComposer    = "composer.json"
-	markerCMake       = "CMakeLists.txt"
 )
 
 // projectMarker maps a marker file to its project type.
@@ -29,7 +28,6 @@ var markers = []projectMarker{
 	{markerGoMod, ProjectTypeGo},
 	{markerNodePackage, ProjectTypeNode},
 	{markerComposer, ProjectTypePHP},
-	{markerCMake, ProjectTypeCPP},
 }
 
 // Discover detects project types in the given directory by checking for marker files.
@@ -85,9 +83,9 @@ func IsPHPProject(fs io.Medium, dir string) bool {
 	return fileExists(fs, filepath.Join(dir, markerComposer))
 }
 
-// IsCPPProject checks if the directory contains a C++ project.
+// IsCPPProject checks if the directory contains a C++ project (CMakeLists.txt).
 func IsCPPProject(fs io.Medium, dir string) bool {
-	return fileExists(fs, filepath.Join(dir, markerCMake))
+	return fileExists(fs, filepath.Join(dir, "CMakeLists.txt"))
}
 
 // fileExists checks if a file exists and is not a directory.

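A short sketch of project discovery after the CMake marker removal; Discover's signature is taken from the tests that follow, and the package import path is an assumption.

package main

import (
	"fmt"
	"log"

	"github.com/host-uk/core/pkg/build" // import path assumed
	"github.com/host-uk/core/pkg/io"
)

func main() {
	types, err := build.Discover(io.Local, ".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("detected project types:", types)
	// C++ detection now checks the literal file name rather than a marker const.
	fmt.Println("is C++ project:", build.IsCPPProject(io.Local, "."))
}
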
@@ -52,13 +52,6 @@ func TestDiscover_Good(t *testing.T) {
 		assert.Equal(t, []ProjectType{ProjectTypePHP}, types)
 	})
 
-	t.Run("detects C++ project", func(t *testing.T) {
-		dir := setupTestDir(t, "CMakeLists.txt")
-		types, err := Discover(fs, dir)
-		assert.NoError(t, err)
-		assert.Equal(t, []ProjectType{ProjectTypeCPP}, types)
-	})
-
 	t.Run("detects multiple project types", func(t *testing.T) {
 		dir := setupTestDir(t, "go.mod", "package.json")
 		types, err := Discover(fs, dir)
@@ -162,19 +155,6 @@ func TestIsNodeProject_Good(t *testing.T) {
 	})
 }
 
-func TestIsCPPProject_Good(t *testing.T) {
-	fs := io.Local
-	t.Run("true with CMakeLists.txt", func(t *testing.T) {
-		dir := setupTestDir(t, "CMakeLists.txt")
-		assert.True(t, IsCPPProject(fs, dir))
-	})
-
-	t.Run("false without CMakeLists.txt", func(t *testing.T) {
-		dir := t.TempDir()
-		assert.False(t, IsCPPProject(fs, dir))
-	})
-}
-
 func TestIsPHPProject_Good(t *testing.T) {
 	fs := io.Local
 	t.Run("true with composer.json", func(t *testing.T) {
@@ -229,7 +209,6 @@ func TestDiscover_Testdata(t *testing.T) {
 		{"wails-project", "wails-project", []ProjectType{ProjectTypeWails, ProjectTypeGo}},
 		{"node-project", "node-project", []ProjectType{ProjectTypeNode}},
 		{"php-project", "php-project", []ProjectType{ProjectTypePHP}},
-		{"cpp-project", "cpp-project", []ProjectType{ProjectTypeCPP}},
 		{"multi-project", "multi-project", []ProjectType{ProjectTypeGo, ProjectTypeNode}},
 		{"empty-project", "empty-project", []ProjectType{}},
 	}

49 pkg/cache/cache.go vendored
@@ -3,8 +3,6 @@ package cache
 
 import (
 	"encoding/json"
-	"errors"
-	"io/fs"
 	"os"
 	"path/filepath"
 	"time"
@@ -17,7 +15,6 @@ const DefaultTTL = 1 * time.Hour
 
 // Cache represents a file-based cache.
 type Cache struct {
-	medium  io.Medium
 	baseDir string
 	ttl     time.Duration
 }
@@ -30,13 +27,8 @@ type Entry struct {
 }
 
 // New creates a new cache instance.
-// If baseDir is empty, uses .core/cache in current directory.
-// If m is nil, uses io.Local.
-func New(m io.Medium, baseDir string, ttl time.Duration) (*Cache, error) {
-	if m == nil {
-		m = io.Local
-	}
-
+// If baseDir is empty, uses .core/cache in current directory
+func New(baseDir string, ttl time.Duration) (*Cache, error) {
 	if baseDir == "" {
 		// Use .core/cache in current working directory
 		cwd, err := os.Getwd()
@@ -50,21 +42,12 @@ func New(m io.Medium, baseDir string, ttl time.Duration) (*Cache, error) {
 		ttl = DefaultTTL
 	}
 
-	// Convert to absolute path for consistency
-	absBaseDir, err := filepath.Abs(baseDir)
-	if err != nil {
-		return nil, err
-	}
-
 	// Ensure cache directory exists
-	if err := m.EnsureDir(absBaseDir); err != nil {
+	if err := io.Local.EnsureDir(baseDir); err != nil {
 		return nil, err
 	}
 
-	baseDir = absBaseDir
-
 	return &Cache{
-		medium:  m,
 		baseDir: baseDir,
 		ttl:     ttl,
 	}, nil
@@ -79,17 +62,16 @@ func (c *Cache) Path(key string) string {
 func (c *Cache) Get(key string, dest interface{}) (bool, error) {
 	path := c.Path(key)
 
-	content, err := c.medium.Read(path)
+	dataStr, err := io.Local.Read(path)
 	if err != nil {
-		if errors.Is(err, fs.ErrNotExist) || os.IsNotExist(err) {
+		if os.IsNotExist(err) {
 			return false, nil
 		}
 		return false, err
 	}
-	data := []byte(content)
 
 	var entry Entry
-	if err := json.Unmarshal(data, &entry); err != nil {
+	if err := json.Unmarshal([]byte(dataStr), &entry); err != nil {
 		// Invalid cache file, treat as miss
 		return false, nil
 	}
@@ -111,6 +93,11 @@ func (c *Cache) Get(key string, dest interface{}) (bool, error) {
 func (c *Cache) Set(key string, data interface{}) error {
 	path := c.Path(key)
 
+	// Ensure parent directory exists
+	if err := io.Local.EnsureDir(filepath.Dir(path)); err != nil {
+		return err
+	}
+
 	// Marshal the data
 	dataBytes, err := json.Marshal(data)
 	if err != nil {
@@ -128,15 +115,14 @@ func (c *Cache) Set(key string, data interface{}) error {
 		return err
 	}
 
-	// medium.Write creates parent directories automatically
-	return c.medium.Write(path, string(entryBytes))
+	return io.Local.Write(path, string(entryBytes))
 }
 
 // Delete removes an item from the cache.
 func (c *Cache) Delete(key string) error {
 	path := c.Path(key)
-	err := c.medium.Delete(path)
-	if errors.Is(err, fs.ErrNotExist) || os.IsNotExist(err) {
+	err := io.Local.Delete(path)
+	if os.IsNotExist(err) {
 		return nil
 	}
 	return err
@@ -144,21 +130,20 @@ func (c *Cache) Delete(key string) error {
 
 // Clear removes all cached items.
 func (c *Cache) Clear() error {
-	return c.medium.DeleteAll(c.baseDir)
+	return io.Local.DeleteAll(c.baseDir)
 }
 
 // Age returns how old a cached item is, or -1 if not cached.
 func (c *Cache) Age(key string) time.Duration {
 	path := c.Path(key)
 
-	content, err := c.medium.Read(path)
+	dataStr, err := io.Local.Read(path)
 	if err != nil {
 		return -1
 	}
-	data := []byte(content)
 
 	var entry Entry
-	if err := json.Unmarshal(data, &entry); err != nil {
+	if err := json.Unmarshal([]byte(dataStr), &entry); err != nil {
 		return -1
 	}

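A hedged round-trip sketch of the simplified cache API, which now always goes through io.Local; only the import path is assumed.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/host-uk/core/pkg/cache" // import path assumed
)

func main() {
	// New no longer takes an io.Medium; an empty baseDir resolves to
	// .core/cache in the current working directory.
	c, err := cache.New("", 10*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	if err := c.Set("releases", []string{"v0.1.0"}); err != nil {
		log.Fatal(err)
	}
	var releases []string
	hit, err := c.Get("releases", &releases)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hit:", hit, "age:", c.Age("releases"), "value:", releases)
}
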
@@ -74,14 +74,13 @@ func IsStderrTTY() bool {
 
 // PIDFile manages a process ID file for single-instance enforcement.
 type PIDFile struct {
-	medium io.Medium
 	path   string
 	mu     sync.Mutex
 }
 
 // NewPIDFile creates a PID file manager.
-func NewPIDFile(m io.Medium, path string) *PIDFile {
-	return &PIDFile{medium: m, path: path}
+func NewPIDFile(path string) *PIDFile {
+	return &PIDFile{path: path}
 }
 
 // Acquire writes the current PID to the file.
@@ -91,7 +90,7 @@ func (p *PIDFile) Acquire() error {
 	defer p.mu.Unlock()
 
 	// Check if PID file exists
-	if data, err := p.medium.Read(p.path); err == nil {
+	if data, err := io.Local.Read(p.path); err == nil {
 		pid, err := strconv.Atoi(data)
 		if err == nil && pid > 0 {
 			// Check if process is still running
@@ -102,19 +101,19 @@ func (p *PIDFile) Acquire() error {
 			}
 		}
 		// Stale PID file, remove it
-		_ = p.medium.Delete(p.path)
+		_ = io.Local.Delete(p.path)
 	}
 
 	// Ensure directory exists
 	if dir := filepath.Dir(p.path); dir != "." {
-		if err := p.medium.EnsureDir(dir); err != nil {
+		if err := io.Local.EnsureDir(dir); err != nil {
 			return fmt.Errorf("failed to create PID directory: %w", err)
 		}
 	}
 
 	// Write current PID
 	pid := os.Getpid()
-	if err := p.medium.Write(p.path, strconv.Itoa(pid)); err != nil {
+	if err := io.Local.Write(p.path, strconv.Itoa(pid)); err != nil {
 		return fmt.Errorf("failed to write PID file: %w", err)
 	}
 
@@ -125,7 +124,7 @@ func (p *PIDFile) Acquire() error {
 func (p *PIDFile) Release() error {
 	p.mu.Lock()
 	defer p.mu.Unlock()
-	return p.medium.Delete(p.path)
+	return io.Local.Delete(p.path)
 }
 
 // Path returns the PID file path.
@@ -219,7 +218,7 @@ func (h *HealthServer) Start() error {
 
 	go func() {
 		if err := h.server.Serve(listener); err != http.ErrServerClosed {
-			LogError("health server error", "err", err)
+			LogError(fmt.Sprintf("health server error: %v", err))
 		}
 	}()
 
@@ -247,10 +246,6 @@ func (h *HealthServer) Addr() string {
 
 // DaemonOptions configures daemon mode execution.
 type DaemonOptions struct {
-	// Medium is the storage backend for PID files.
-	// Defaults to io.Local if not set.
-	Medium io.Medium
-
 	// PIDFile path for single-instance enforcement.
 	// Leave empty to skip PID file management.
 	PIDFile string
@@ -287,9 +282,6 @@ func NewDaemon(opts DaemonOptions) *Daemon {
 	if opts.ShutdownTimeout == 0 {
 		opts.ShutdownTimeout = 30 * time.Second
 	}
-	if opts.Medium == nil {
-		opts.Medium = io.Local
-	}
 
 	d := &Daemon{
 		opts: opts,
@@ -297,7 +289,7 @@ func NewDaemon(opts DaemonOptions) *Daemon {
 	}
 
 	if opts.PIDFile != "" {
-		d.pid = NewPIDFile(opts.Medium, opts.PIDFile)
+		d.pid = NewPIDFile(opts.PIDFile)
 	}
 
 	if opts.HealthAddr != "" {

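A sketch of the de-injected PID file and daemon options; the exported names are taken from the hunks above, while the package path, PID file path, and health address are illustrative assumptions.

package main

import (
	"log"

	"github.com/host-uk/core/pkg/cli"
)

func main() {
	// PIDFile now works directly against io.Local; no medium is injected.
	pid := cli.NewPIDFile(".core/run/core.pid") // path is illustrative
	if err := pid.Acquire(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = pid.Release() }()

	// NewDaemon wires the PID file and health server from the same options.
	_ = cli.NewDaemon(cli.DaemonOptions{
		PIDFile:    ".core/run/core.pid",
		HealthAddr: "127.0.0.1:8090", // illustrative
	})
}
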
@@ -27,7 +27,7 @@ func NewLinuxKitManager(m io.Medium) (*LinuxKitManager, error) {
 		return nil, fmt.Errorf("failed to determine state path: %w", err)
 	}
 
-	state, err := LoadState(m, statePath)
+	state, err := LoadState(statePath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to load state: %w", err)
 	}
@@ -90,7 +90,7 @@ func (m *LinuxKitManager) Run(ctx context.Context, image string, opts RunOptions
 	}
 
 	// Ensure logs directory exists
-	if err := EnsureLogsDir(m.medium); err != nil {
+	if err := EnsureLogsDir(); err != nil {
 		return nil, fmt.Errorf("failed to create logs directory: %w", err)
 	}

@@ -15,7 +15,6 @@ type State struct {
 	Containers map[string]*Container `json:"containers"`
 
 	mu       sync.RWMutex
-	medium   io.Medium
 	filePath string
 }
 
@@ -47,25 +46,19 @@ func DefaultLogsDir() (string, error) {
 }
 
 // NewState creates a new State instance.
-func NewState(m io.Medium, filePath string) *State {
+func NewState(filePath string) *State {
 	return &State{
 		Containers: make(map[string]*Container),
-		medium:     m,
 		filePath:   filePath,
 	}
 }
 
 // LoadState loads the state from the given file path.
 // If the file doesn't exist, returns an empty state.
-func LoadState(m io.Medium, filePath string) (*State, error) {
-	state := NewState(m, filePath)
+func LoadState(filePath string) (*State, error) {
+	state := NewState(filePath)
 
-	absPath, err := filepath.Abs(filePath)
-	if err != nil {
-		return nil, err
-	}
-
-	content, err := m.Read(absPath)
+	dataStr, err := io.Local.Read(filePath)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return state, nil
@@ -73,7 +66,7 @@ func LoadState(m io.Medium, filePath string) (*State, error) {
 		return nil, err
 	}
 
-	if err := json.Unmarshal([]byte(content), state); err != nil {
+	if err := json.Unmarshal([]byte(dataStr), state); err != nil {
 		return nil, err
 	}
 
@@ -85,8 +78,9 @@ func (s *State) SaveState() error {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 
-	absPath, err := filepath.Abs(s.filePath)
-	if err != nil {
+	// Ensure the directory exists
+	dir := filepath.Dir(s.filePath)
+	if err := io.Local.EnsureDir(dir); err != nil {
 		return err
 	}
 
@@ -95,8 +89,7 @@ func (s *State) SaveState() error {
 		return err
 	}
 
-	// s.medium.Write creates parent directories automatically
-	return s.medium.Write(absPath, string(data))
+	return io.Local.Write(s.filePath, string(data))
 }
 
 // Add adds a container to the state and persists it.
@@ -170,10 +163,10 @@ func LogPath(id string) (string, error) {
 }
 
 // EnsureLogsDir ensures the logs directory exists.
-func EnsureLogsDir(m io.Medium) error {
+func EnsureLogsDir() error {
 	logsDir, err := DefaultLogsDir()
 	if err != nil {
 		return err
 	}
-	return m.EnsureDir(logsDir)
+	return io.Local.EnsureDir(logsDir)
 }

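A sketch of the state round trip with the new signatures; the linuxkit import path and the state file location are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/host-uk/core/pkg/linuxkit" // import path assumed
)

func main() {
	// LoadState returns an empty state when the file does not exist yet.
	state, err := linuxkit.LoadState(".core/linuxkit/state.json") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("known containers:", len(state.Containers))

	// SaveState now ensures the parent directory exists before writing.
	if err := state.SaveState(); err != nil {
		log.Fatal(err)
	}
}
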
@@ -38,52 +38,17 @@ var builtinTemplates = []Template{
 	},
 }
 
-// TemplateManager manages LinuxKit templates using a storage medium.
-type TemplateManager struct {
-	medium     io.Medium
-	workingDir string
-	homeDir    string
-}
-
-// NewTemplateManager creates a new TemplateManager instance.
-func NewTemplateManager(m io.Medium) *TemplateManager {
-	tm := &TemplateManager{medium: m}
-
-	// Default working and home directories from local system
-	// These can be overridden if needed.
-	if wd, err := os.Getwd(); err == nil {
-		tm.workingDir = wd
-	}
-	if home, err := os.UserHomeDir(); err == nil {
-		tm.homeDir = home
-	}
-
-	return tm
-}
-
-// WithWorkingDir sets the working directory for user template discovery.
-func (tm *TemplateManager) WithWorkingDir(wd string) *TemplateManager {
-	tm.workingDir = wd
-	return tm
-}
-
-// WithHomeDir sets the home directory for user template discovery.
-func (tm *TemplateManager) WithHomeDir(home string) *TemplateManager {
-	tm.homeDir = home
-	return tm
-}
-
 // ListTemplates returns all available LinuxKit templates.
 // It combines embedded templates with any templates found in the user's
 // .core/linuxkit directory.
-func (tm *TemplateManager) ListTemplates() []Template {
+func ListTemplates() []Template {
 	templates := make([]Template, len(builtinTemplates))
 	copy(templates, builtinTemplates)
 
 	// Check for user templates in .core/linuxkit/
-	userTemplatesDir := tm.getUserTemplatesDir()
+	userTemplatesDir := getUserTemplatesDir()
 	if userTemplatesDir != "" {
-		userTemplates := tm.scanUserTemplates(userTemplatesDir)
+		userTemplates := scanUserTemplates(userTemplatesDir)
 		templates = append(templates, userTemplates...)
 	}
 
@@ -92,7 +57,7 @@ func (tm *TemplateManager) ListTemplates() []Template {
 
 // GetTemplate returns the content of a template by name.
 // It first checks embedded templates, then user templates.
-func (tm *TemplateManager) GetTemplate(name string) (string, error) {
+func GetTemplate(name string) (string, error) {
 	// Check embedded templates first
 	for _, t := range builtinTemplates {
 		if t.Name == name {
@@ -105,27 +70,27 @@ func (tm *TemplateManager) GetTemplate(name string) (string, error) {
 	}
 
 	// Check user templates
-	userTemplatesDir := tm.getUserTemplatesDir()
+	userTemplatesDir := getUserTemplatesDir()
 	if userTemplatesDir != "" {
-		// Check both .yml and .yaml extensions
-		for _, ext := range []string{".yml", ".yaml"} {
-			templatePath := filepath.Join(userTemplatesDir, name+ext)
-			if tm.medium.IsFile(templatePath) {
-				content, err := tm.medium.Read(templatePath)
-				if err != nil {
-					return "", fmt.Errorf("failed to read user template %s: %w", name, err)
-				}
-				return content, nil
-			}
-		}
+		templatePath := filepath.Join(userTemplatesDir, name+".yml")
+		if io.Local.IsFile(templatePath) {
+			content, err := io.Local.Read(templatePath)
+			if err != nil {
+				return "", fmt.Errorf("failed to read user template %s: %w", name, err)
+			}
+			return content, nil
+		}
 	}
 
 	return "", fmt.Errorf("template not found: %s", name)
 }
 
 // ApplyTemplate applies variable substitution to a template.
-func (tm *TemplateManager) ApplyTemplate(name string, vars map[string]string) (string, error) {
-	content, err := tm.GetTemplate(name)
+// It supports two syntaxes:
+// - ${VAR} - required variable, returns error if not provided
+// - ${VAR:-default} - variable with default value
+func ApplyTemplate(name string, vars map[string]string) (string, error) {
+	content, err := GetTemplate(name)
 	if err != nil {
 		return "", err
 	}
@@ -226,31 +191,35 @@ func ExtractVariables(content string) (required []string, optional map[string]st
 
 // getUserTemplatesDir returns the path to user templates directory.
 // Returns empty string if the directory doesn't exist.
-func (tm *TemplateManager) getUserTemplatesDir() string {
+func getUserTemplatesDir() string {
 	// Try workspace-relative .core/linuxkit first
-	if tm.workingDir != "" {
-		wsDir := filepath.Join(tm.workingDir, ".core", "linuxkit")
-		if tm.medium.IsDir(wsDir) {
+	cwd, err := os.Getwd()
+	if err == nil {
+		wsDir := filepath.Join(cwd, ".core", "linuxkit")
+		if io.Local.IsDir(wsDir) {
 			return wsDir
 		}
 	}
 
 	// Try home directory
-	if tm.homeDir != "" {
-		homeDir := filepath.Join(tm.homeDir, ".core", "linuxkit")
-		if tm.medium.IsDir(homeDir) {
-			return homeDir
-		}
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return ""
 	}
 
+	homeDir := filepath.Join(home, ".core", "linuxkit")
+	if io.Local.IsDir(homeDir) {
+		return homeDir
+	}
+
 	return ""
 }
 
 // scanUserTemplates scans a directory for .yml template files.
-func (tm *TemplateManager) scanUserTemplates(dir string) []Template {
+func scanUserTemplates(dir string) []Template {
 	var templates []Template
 
-	entries, err := tm.medium.List(dir)
+	entries, err := io.Local.List(dir)
 	if err != nil {
 		return templates
 	}
@@ -281,7 +250,7 @@ func (tm *TemplateManager) scanUserTemplates(dir string) []Template {
 	}
 
 	// Read file to extract description from comments
-	description := tm.extractTemplateDescription(filepath.Join(dir, name))
+	description := extractTemplateDescription(filepath.Join(dir, name))
 	if description == "" {
 		description = "User-defined template"
 	}
@@ -298,8 +267,8 @@ func (tm *TemplateManager) scanUserTemplates(dir string) []Template {
 
 // extractTemplateDescription reads the first comment block from a YAML file
 // to use as a description.
-func (tm *TemplateManager) extractTemplateDescription(path string) string {
-	content, err := tm.medium.Read(path)
+func extractTemplateDescription(path string) string {
+	content, err := io.Local.Read(path)
 	if err != nil {
 		return ""
 	}

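A sketch of listing and applying a template with the package-level functions above; the template name, the HOSTNAME variable, and the import path are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/host-uk/core/pkg/linuxkit" // import path assumed
)

func main() {
	for _, t := range linuxkit.ListTemplates() {
		fmt.Println("template:", t.Name)
	}
	// ${VAR} is required and ${VAR:-default} falls back when the key is
	// absent, per the comment added in the hunk above.
	out, err := linuxkit.ApplyTemplate("minimal", map[string]string{
		"HOSTNAME": "bugseti-runner",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
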
1422 pkg/i18n/locales/ar.json Normal file (diff suppressed because it is too large)
1422 pkg/i18n/locales/cs.json Normal file (diff suppressed because it is too large)
1422 pkg/i18n/locales/cy_GB.json Normal file (diff suppressed because it is too large)
1422 pkg/i18n/locales/da.json Normal file (diff suppressed because it is too large)
File diff suppressed because it is too large
1422 pkg/i18n/locales/el.json Normal file (diff suppressed because it is too large)
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.