refactor: move playbooks, plans, and RAG tools to proper repos

- playbooks/ → go-devops/playbooks/ (Ansible playbooks)
- tasks/plans/ → go-devops/docs/plans/ (design/impl docs)
- tools/rag/ → go-rag/tools/rag/ (Python RAG scripts)

Co-Authored-By: Virgil <virgil@lethean.io>
parent 260dca0999
commit 236c498e76
17 changed files with 0 additions and 7611 deletions
@@ -1,63 +0,0 @@
# Galera Database Backup
# Dumps the database and uploads to Hetzner S3
#
# Usage:
#   core deploy ansible playbooks/galera-backup.yml -i playbooks/inventory.yml -l de
---
- name: Backup Galera Database to S3
  hosts: app_servers
  become: true
  vars:
    db_root_password: "{{ lookup('env', 'DB_ROOT_PASSWORD') }}"
    s3_endpoint: "{{ lookup('env', 'HETZNER_S3_ENDPOINT') | default('fsn1.your-objectstorage.com', true) }}"
    s3_bucket: "{{ lookup('env', 'HETZNER_S3_BUCKET') | default('hostuk', true) }}"
    s3_access_key: "{{ lookup('env', 'HETZNER_S3_ACCESS_KEY') }}"
    s3_secret_key: "{{ lookup('env', 'HETZNER_S3_SECRET_KEY') }}"
    backup_prefix: backup/galera
    backup_retain_days: 30

  tasks:
    - name: Create backup directory
      file:
        path: /opt/backup
        state: directory
        mode: "0700"

    - name: Dump database
      shell: |
        TIMESTAMP=$(date +%Y%m%d-%H%M%S)
        DUMP_FILE="/opt/backup/hostuk-${TIMESTAMP}-{{ galera_node_name }}.sql.gz"
        docker exec galera mariadb-dump \
          -u root -p{{ db_root_password }} \
          --all-databases \
          --single-transaction \
          --routines \
          --triggers \
          --events \
          | gzip > "${DUMP_FILE}"
        echo "${DUMP_FILE}"
      register: dump_result

    - name: Install s3cmd if missing
      shell: |
        which s3cmd 2>/dev/null || pip3 install s3cmd
      changed_when: false

    - name: Upload to S3
      shell: |
        s3cmd put {{ dump_result.stdout | trim }} \
          s3://{{ s3_bucket }}/{{ backup_prefix }}/$(basename {{ dump_result.stdout | trim }}) \
          --host={{ s3_endpoint }} \
          --host-bucket='%(bucket)s.{{ s3_endpoint }}' \
          --access_key={{ s3_access_key }} \
          --secret_key={{ s3_secret_key }}
      when: s3_access_key != ""

    - name: Clean old local backups
      shell: |
        find /opt/backup -name "hostuk-*.sql.gz" -mtime +{{ backup_retain_days }} -delete
      changed_when: false

    - name: Show backup result
      debug:
        msg: "Backup completed: {{ dump_result.stdout | trim }}"
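A quick way to spot-check a run is to list the bucket prefix with the same s3cmd options the upload task uses — a sketch, assuming the playbook's default endpoint and bucket and the same environment variables:

```bash
s3cmd ls "s3://hostuk/backup/galera/" \
  --host="fsn1.your-objectstorage.com" \
  --host-bucket='%(bucket)s.fsn1.your-objectstorage.com' \
  --access_key="${HETZNER_S3_ACCESS_KEY}" \
  --secret_key="${HETZNER_S3_SECRET_KEY}"
```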
@@ -1,96 +0,0 @@
# MariaDB Galera Cluster Deployment
# Deploys a 2-node Galera cluster on de + de2
#
# Usage:
#   core deploy ansible playbooks/galera-deploy.yml -i playbooks/inventory.yml
#   core deploy ansible playbooks/galera-deploy.yml -i playbooks/inventory.yml -l de   # Single node
#
# First-time bootstrap:
#   Set galera_bootstrap=true for the first node:
#   core deploy ansible playbooks/galera-deploy.yml -i playbooks/inventory.yml -l de -e galera_bootstrap=true
---
- name: Deploy MariaDB Galera Cluster
  hosts: app_servers
  become: true
  vars:
    mariadb_version: "11"
    galera_cluster_address: "gcomm://116.202.82.115,88.99.195.41"
    galera_bootstrap: false
    db_root_password: "{{ lookup('env', 'DB_ROOT_PASSWORD') }}"
    db_password: "{{ lookup('env', 'DB_PASSWORD') }}"

  tasks:
    - name: Create MariaDB data directory
      file:
        path: /opt/galera/data
        state: directory
        mode: "0755"

    - name: Create MariaDB config directory
      file:
        path: /opt/galera/conf.d
        state: directory
        mode: "0755"

    - name: Write Galera configuration
      copy:
        dest: /opt/galera/conf.d/galera.cnf
        content: |
          [mysqld]
          wsrep_on=ON
          wsrep_provider=/usr/lib/galera/libgalera_smm.so
          wsrep_cluster_name={{ galera_cluster_name }}
          wsrep_cluster_address={{ 'gcomm://' if galera_bootstrap else galera_cluster_address }}
          wsrep_node_address={{ galera_node_address }}
          wsrep_node_name={{ galera_node_name }}
          wsrep_sst_method={{ galera_sst_method }}
          binlog_format=ROW
          default_storage_engine=InnoDB
          innodb_autoinc_lock_mode=2
          innodb_buffer_pool_size=1G
          innodb_log_file_size=256M
          character_set_server=utf8mb4
          collation_server=utf8mb4_unicode_ci

    - name: Stop existing MariaDB container
      shell: docker stop galera 2>/dev/null || true
      changed_when: false

    - name: Remove existing MariaDB container
      shell: docker rm galera 2>/dev/null || true
      changed_when: false

    - name: Start MariaDB Galera container
      shell: |
        docker run -d \
          --name galera \
          --restart unless-stopped \
          --network host \
          -v /opt/galera/data:/var/lib/mysql \
          -v /opt/galera/conf.d:/etc/mysql/conf.d \
          -e MARIADB_ROOT_PASSWORD={{ db_root_password }} \
          -e MARIADB_DATABASE={{ db_name }} \
          -e MARIADB_USER={{ db_user }} \
          -e MARIADB_PASSWORD={{ db_password }} \
          mariadb:{{ mariadb_version }}

    - name: Wait for MariaDB to be ready
      shell: |
        for i in $(seq 1 60); do
          docker exec galera mariadb -u root -p{{ db_root_password }} -e "SELECT 1" 2>/dev/null && exit 0
          sleep 2
        done
        exit 1
      changed_when: false

    - name: Check Galera cluster status
      shell: |
        docker exec galera mariadb -u root -p{{ db_root_password }} \
          -e "SHOW STATUS WHERE Variable_name IN ('wsrep_cluster_size','wsrep_ready','wsrep_cluster_status')" \
          --skip-column-names
      register: galera_status
      changed_when: false

    - name: Display cluster status
      debug:
        var: galera_status.stdout_lines
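Putting the header's bootstrap note together, a first-time cluster bring-up looks like this — a sketch based on the usage comments above; the second command joining de2 with the normal `galera_cluster_address` is the implied follow-up step:

```bash
# Bootstrap the first node (renders an empty gcomm:// address)
core deploy ansible playbooks/galera-deploy.yml -i playbooks/inventory.yml -l de -e galera_bootstrap=true
# Join the second node against the running cluster
core deploy ansible playbooks/galera-deploy.yml -i playbooks/inventory.yml -l de2
```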
@@ -1,36 +0,0 @@
# Ansible inventory for Host UK production
# Used by: core deploy ansible <playbook> -i playbooks/inventory.yml
all:
  vars:
    ansible_user: root
    ansible_ssh_private_key_file: ~/.ssh/hostuk

  children:
    bastion:
      hosts:
        noc:
          ansible_host: 77.42.42.205
          private_ip: 10.0.0.4

    app_servers:
      hosts:
        de:
          ansible_host: 116.202.82.115
          galera_node_name: de
          galera_node_address: 116.202.82.115
        de2:
          ansible_host: 88.99.195.41
          galera_node_name: de2
          galera_node_address: 88.99.195.41
      vars:
        galera_cluster_name: hostuk-galera
        galera_sst_method: mariabackup
        db_name: hostuk
        db_user: hostuk
        redis_maxmemory: 512mb

    builders:
      hosts:
        build:
          ansible_host: 46.224.93.62
          private_ip: 10.0.0.5
@@ -1,98 +0,0 @@
# Redis Sentinel Deployment
# Deploys Redis with Sentinel on de + de2
#
# Usage:
#   core deploy ansible playbooks/redis-deploy.yml -i playbooks/inventory.yml
---
- name: Deploy Redis with Sentinel
  hosts: app_servers
  become: true
  vars:
    redis_version: "7"
    redis_password: "{{ lookup('env', 'REDIS_PASSWORD') | default('', true) }}"

  tasks:
    - name: Create Redis data directory
      file:
        path: /opt/redis/data
        state: directory
        mode: "0755"

    - name: Create Redis config directory
      file:
        path: /opt/redis/conf
        state: directory
        mode: "0755"

    - name: Write Redis configuration
      copy:
        dest: /opt/redis/conf/redis.conf
        content: |
          maxmemory {{ redis_maxmemory }}
          maxmemory-policy allkeys-lru
          appendonly yes
          appendfsync everysec
          tcp-keepalive 300
          timeout 0
          {% if redis_password %}
          requirepass {{ redis_password }}
          masterauth {{ redis_password }}
          {% endif %}

    - name: Write Sentinel configuration
      copy:
        dest: /opt/redis/conf/sentinel.conf
        content: |
          port 26379
          sentinel monitor hostuk-redis 116.202.82.115 6379 2
          sentinel down-after-milliseconds hostuk-redis 5000
          sentinel failover-timeout hostuk-redis 60000
          sentinel parallel-syncs hostuk-redis 1
          {% if redis_password %}
          sentinel auth-pass hostuk-redis {{ redis_password }}
          {% endif %}

    - name: Stop existing Redis containers
      shell: |
        docker stop redis redis-sentinel 2>/dev/null || true
        docker rm redis redis-sentinel 2>/dev/null || true
      changed_when: false

    - name: Start Redis container
      shell: |
        docker run -d \
          --name redis \
          --restart unless-stopped \
          --network host \
          -v /opt/redis/data:/data \
          -v /opt/redis/conf/redis.conf:/usr/local/etc/redis/redis.conf \
          redis:{{ redis_version }}-alpine \
          redis-server /usr/local/etc/redis/redis.conf

    - name: Start Redis Sentinel container
      shell: |
        docker run -d \
          --name redis-sentinel \
          --restart unless-stopped \
          --network host \
          -v /opt/redis/conf/sentinel.conf:/usr/local/etc/redis/sentinel.conf \
          redis:{{ redis_version }}-alpine \
          redis-sentinel /usr/local/etc/redis/sentinel.conf

    - name: Wait for Redis to be ready
      shell: |
        for i in $(seq 1 30); do
          docker exec redis redis-cli ping 2>/dev/null | grep -q PONG && exit 0
          sleep 1
        done
        exit 1
      changed_when: false

    - name: Check Redis info
      shell: docker exec redis redis-cli info replication | head -10
      register: redis_info
      changed_when: false

    - name: Display Redis info
      debug:
        var: redis_info.stdout_lines
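To confirm Sentinel actually monitors the master after a deploy, a manual check from either node — a sketch using the container name and master name the playbook configures:

```bash
# Ask Sentinel for the monitored master's state (name from sentinel.conf)
docker exec redis-sentinel redis-cli -p 26379 sentinel master hostuk-redis
```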
@@ -1,236 +0,0 @@
# Code Signing Design (S3.3)

## Summary

Integrate standard code signing tools into the build pipeline: GPG signs checksums by default, macOS codesign + notarization covers Apple binaries, and Windows signtool is deferred.

## Design Decisions

- **Sign during build**: Signing happens in `pkg/build/signing/` after compilation, before archiving
- **Config location**: `.core/build.yaml` with environment variable fallbacks for secrets
- **GPG scope**: Signs `checksums.txt` only (the standard pattern used by Go, Terraform, and similar projects)
- **macOS flow**: Codesign always runs when an identity is configured; notarization is optional via flag or config
- **Windows**: Placeholder for later implementation

## Package Structure

```
pkg/build/signing/
├── signer.go     # Signer interface + SignConfig
├── gpg.go        # GPG checksums signing
├── codesign.go   # macOS codesign + notarize
└── signtool.go   # Windows placeholder
```

## Signer Interface

```go
// pkg/build/signing/signer.go
type Signer interface {
    Name() string
    Available() bool
    Sign(ctx context.Context, artifact string) error
}

type SignConfig struct {
    Enabled bool          `yaml:"enabled"`
    GPG     GPGConfig     `yaml:"gpg,omitempty"`
    MacOS   MacOSConfig   `yaml:"macos,omitempty"`
    Windows WindowsConfig `yaml:"windows,omitempty"`
}

type GPGConfig struct {
    Key string `yaml:"key"` // Key ID or fingerprint, supports $ENV
}

type MacOSConfig struct {
    Identity    string `yaml:"identity"`     // Developer ID Application: ...
    Notarize    bool   `yaml:"notarize"`     // Submit to Apple
    AppleID     string `yaml:"apple_id"`     // Apple account email
    TeamID      string `yaml:"team_id"`      // Team ID
    AppPassword string `yaml:"app_password"` // App-specific password
}

type WindowsConfig struct {
    Certificate string `yaml:"certificate"` // Path to .pfx
    Password    string `yaml:"password"`    // Certificate password
}
```

## Config Schema

In `.core/build.yaml`:

```yaml
sign:
  enabled: true

  gpg:
    key: $GPG_KEY_ID

  macos:
    identity: "Developer ID Application: Your Name (TEAM_ID)"
    notarize: false
    apple_id: $APPLE_ID
    team_id: $APPLE_TEAM_ID
    app_password: $APPLE_APP_PASSWORD

  # windows: (deferred)
  #   certificate: $WINDOWS_CERT_PATH
  #   password: $WINDOWS_CERT_PASSWORD
```

## Build Pipeline Integration

```
Build() in pkg/build/builders/go.go
  ↓
compile binaries
  ↓
Sign macOS binaries (codesign)   ← NEW
  ↓
Notarize if enabled (wait)       ← NEW
  ↓
Create archives (tar.gz, zip)
  ↓
Generate checksums.txt
  ↓
GPG sign checksums.txt           ← NEW
  ↓
Return artifacts
```

## GPG Signer

```go
// pkg/build/signing/gpg.go
type GPGSigner struct {
    KeyID string
}

func (s *GPGSigner) Name() string { return "gpg" }

func (s *GPGSigner) Available() bool {
    _, err := exec.LookPath("gpg")
    return err == nil && s.KeyID != ""
}

func (s *GPGSigner) Sign(ctx context.Context, file string) error {
    cmd := exec.CommandContext(ctx, "gpg",
        "--detach-sign",
        "--armor",
        "--local-user", s.KeyID,
        "--output", file+".asc",
        file,
    )
    return cmd.Run()
}
```

**Output:** `checksums.txt.asc` (ASCII-armored detached signature)

**User verification:**
```bash
gpg --verify checksums.txt.asc checksums.txt
sha256sum -c checksums.txt
```

## macOS Codesign

```go
// pkg/build/signing/codesign.go
type MacOSSigner struct {
    Identity    string
    Notarize    bool
    AppleID     string
    TeamID      string
    AppPassword string
}

func (s *MacOSSigner) Name() string { return "codesign" }

func (s *MacOSSigner) Available() bool {
    if runtime.GOOS != "darwin" {
        return false
    }
    _, err := exec.LookPath("codesign")
    return err == nil && s.Identity != ""
}

func (s *MacOSSigner) Sign(ctx context.Context, binary string) error {
    cmd := exec.CommandContext(ctx, "codesign",
        "--sign", s.Identity,
        "--timestamp",
        "--options", "runtime",
        "--force",
        binary,
    )
    return cmd.Run()
}

func (s *MacOSSigner) NotarizeAndStaple(ctx context.Context, binary string) error {
    // 1. Create ZIP for submission
    zipPath := binary + ".zip"
    exec.CommandContext(ctx, "zip", "-j", zipPath, binary).Run()
    defer os.Remove(zipPath)

    // 2. Submit and wait
    cmd := exec.CommandContext(ctx, "xcrun", "notarytool", "submit",
        zipPath,
        "--apple-id", s.AppleID,
        "--team-id", s.TeamID,
        "--password", s.AppPassword,
        "--wait",
    )
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("notarization failed: %w", err)
    }

    // 3. Staple ticket
    return exec.CommandContext(ctx, "xcrun", "stapler", "staple", binary).Run()
}
```
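For completeness, the macOS side can be verified with the standard Apple tooling — a sketch assuming a signed binary at `./bin/core`:

```bash
codesign --verify --strict --verbose=2 ./bin/core  # Signature intact?
spctl --assess --type execute --verbose ./bin/core # Gatekeeper assessment
```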

## CLI Flags

```bash
core build            # Sign with defaults (GPG + codesign if configured)
core build --no-sign  # Skip all signing
core build --notarize # Enable macOS notarization (overrides config)
```

## Environment Variables

| Variable | Purpose |
|----------|---------|
| `GPG_KEY_ID` | GPG key ID or fingerprint |
| `CODESIGN_IDENTITY` | macOS Developer ID (fallback) |
| `APPLE_ID` | Apple account email |
| `APPLE_TEAM_ID` | Apple Developer Team ID |
| `APPLE_APP_PASSWORD` | App-specific password for notarization |

## Deferred

- **Windows signtool**: Placeholder implementation returning nil
- **Sigstore/keyless signing**: Future consideration
- **Binary-level GPG signatures**: Only checksums.txt signed

## Implementation Steps

1. Create `pkg/build/signing/` package structure
2. Implement Signer interface and SignConfig
3. Implement GPGSigner
4. Implement MacOSSigner with codesign
5. Add notarization support to MacOSSigner
6. Add SignConfig to build.Config
7. Integrate signing into build pipeline
8. Add CLI flags (--no-sign, --notarize)
9. Add Windows placeholder
10. Tests with mocked exec

## Dependencies

- `gpg` CLI (system)
- `codesign` CLI (macOS Xcode Command Line Tools)
- `xcrun notarytool` (macOS Xcode Command Line Tools)
- `xcrun stapler` (macOS Xcode Command Line Tools)
@@ -1,967 +0,0 @@
# Code Signing Implementation Plan

> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.

**Goal:** Add GPG checksums signing and macOS codesign/notarization to the build pipeline.

**Architecture:** `pkg/build/signing/` package with a Signer interface. GPG signs CHECKSUMS.txt. macOS codesign runs after binary compilation, before archiving. Config in `.core/build.yaml` with env var fallbacks.

**Tech Stack:** Go, os/exec for the gpg/codesign/xcrun CLI tools

---

### Task 1: Create Signing Package Structure

**Files:**
- Create: `pkg/build/signing/signer.go`

**Step 1: Create signer.go with interface and config types**

```go
// Package signing provides code signing for build artifacts.
package signing

import (
	"context"
	"os"
	"strings"
)

// Signer defines the interface for code signing implementations.
type Signer interface {
	// Name returns the signer's identifier.
	Name() string
	// Available checks if this signer can be used.
	Available() bool
	// Sign signs the artifact at the given path.
	Sign(ctx context.Context, path string) error
}

// SignConfig holds signing configuration from .core/build.yaml.
type SignConfig struct {
	Enabled bool          `yaml:"enabled"`
	GPG     GPGConfig     `yaml:"gpg,omitempty"`
	MacOS   MacOSConfig   `yaml:"macos,omitempty"`
	Windows WindowsConfig `yaml:"windows,omitempty"`
}

// GPGConfig holds GPG signing configuration.
type GPGConfig struct {
	Key string `yaml:"key"` // Key ID or fingerprint, supports $ENV
}

// MacOSConfig holds macOS codesign configuration.
type MacOSConfig struct {
	Identity    string `yaml:"identity"`     // Developer ID Application: ...
	Notarize    bool   `yaml:"notarize"`     // Submit to Apple for notarization
	AppleID     string `yaml:"apple_id"`     // Apple account email
	TeamID      string `yaml:"team_id"`      // Team ID
	AppPassword string `yaml:"app_password"` // App-specific password
}

// WindowsConfig holds Windows signtool configuration (placeholder).
type WindowsConfig struct {
	Certificate string `yaml:"certificate"` // Path to .pfx
	Password    string `yaml:"password"`    // Certificate password
}

// DefaultSignConfig returns sensible defaults.
func DefaultSignConfig() SignConfig {
	return SignConfig{
		Enabled: true,
		GPG: GPGConfig{
			Key: os.Getenv("GPG_KEY_ID"),
		},
		MacOS: MacOSConfig{
			Identity:    os.Getenv("CODESIGN_IDENTITY"),
			AppleID:     os.Getenv("APPLE_ID"),
			TeamID:      os.Getenv("APPLE_TEAM_ID"),
			AppPassword: os.Getenv("APPLE_APP_PASSWORD"),
		},
	}
}

// ExpandEnv expands environment variables in config values.
func (c *SignConfig) ExpandEnv() {
	c.GPG.Key = expandEnv(c.GPG.Key)
	c.MacOS.Identity = expandEnv(c.MacOS.Identity)
	c.MacOS.AppleID = expandEnv(c.MacOS.AppleID)
	c.MacOS.TeamID = expandEnv(c.MacOS.TeamID)
	c.MacOS.AppPassword = expandEnv(c.MacOS.AppPassword)
	c.Windows.Certificate = expandEnv(c.Windows.Certificate)
	c.Windows.Password = expandEnv(c.Windows.Password)
}

// expandEnv expands $VAR or ${VAR} in a string.
func expandEnv(s string) string {
	if strings.HasPrefix(s, "$") {
		return os.ExpandEnv(s)
	}
	return s
}
```

**Step 2: Verify it compiles**

Run: `cd /Users/snider/Code/Core && go build ./pkg/build/signing/...`
Expected: No errors

**Step 3: Commit**

```bash
git add pkg/build/signing/signer.go
git commit -m "feat(signing): add Signer interface and config types

Defines interface for GPG, macOS, and Windows signing.
Config supports env var expansion for secrets.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 2: Implement GPG Signer

**Files:**
- Create: `pkg/build/signing/gpg.go`
- Create: `pkg/build/signing/gpg_test.go`

**Step 1: Write the failing test**

```go
package signing

import (
	"testing"
)

func TestGPGSigner_Good_Name(t *testing.T) {
	s := NewGPGSigner("ABCD1234")
	if s.Name() != "gpg" {
		t.Errorf("expected name 'gpg', got %q", s.Name())
	}
}

func TestGPGSigner_Good_Available(t *testing.T) {
	s := NewGPGSigner("ABCD1234")
	// Available depends on gpg being installed
	_ = s.Available()
}

func TestGPGSigner_Bad_NoKey(t *testing.T) {
	s := NewGPGSigner("")
	if s.Available() {
		t.Error("expected Available() to be false when key is empty")
	}
}
```

**Step 2: Run test to verify it fails**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/signing/... -run TestGPGSigner -v`
Expected: FAIL (NewGPGSigner not defined)

**Step 3: Write implementation**

```go
package signing

import (
	"context"
	"fmt"
	"os/exec"
)

// GPGSigner signs files using GPG.
type GPGSigner struct {
	KeyID string
}

// Compile-time interface check.
var _ Signer = (*GPGSigner)(nil)

// NewGPGSigner creates a new GPG signer.
func NewGPGSigner(keyID string) *GPGSigner {
	return &GPGSigner{KeyID: keyID}
}

// Name returns "gpg".
func (s *GPGSigner) Name() string {
	return "gpg"
}

// Available checks if gpg is installed and a key is configured.
func (s *GPGSigner) Available() bool {
	if s.KeyID == "" {
		return false
	}
	_, err := exec.LookPath("gpg")
	return err == nil
}

// Sign creates a detached ASCII-armored signature.
// For file.txt, creates file.txt.asc
func (s *GPGSigner) Sign(ctx context.Context, file string) error {
	if !s.Available() {
		return fmt.Errorf("gpg.Sign: gpg not available or key not configured")
	}

	cmd := exec.CommandContext(ctx, "gpg",
		"--detach-sign",
		"--armor",
		"--local-user", s.KeyID,
		"--output", file+".asc",
		file,
	)

	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("gpg.Sign: %w\nOutput: %s", err, string(output))
	}

	return nil
}
```

**Step 4: Run tests**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/signing/... -run TestGPGSigner -v`
Expected: PASS

**Step 5: Commit**

```bash
git add pkg/build/signing/gpg.go pkg/build/signing/gpg_test.go
git commit -m "feat(signing): add GPG signer

Signs files with detached ASCII-armored signatures (.asc).

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 3: Implement macOS Codesign

**Files:**
- Create: `pkg/build/signing/codesign.go`
- Create: `pkg/build/signing/codesign_test.go`

**Step 1: Write the failing test**

```go
package signing

import (
	"runtime"
	"testing"
)

func TestMacOSSigner_Good_Name(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})
	if s.Name() != "codesign" {
		t.Errorf("expected name 'codesign', got %q", s.Name())
	}
}

func TestMacOSSigner_Good_Available(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})

	// Only available on macOS with identity set
	if runtime.GOOS == "darwin" {
		// May or may not be available depending on Xcode
		_ = s.Available()
	} else {
		if s.Available() {
			t.Error("expected Available() to be false on non-macOS")
		}
	}
}

func TestMacOSSigner_Bad_NoIdentity(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{})
	if s.Available() {
		t.Error("expected Available() to be false when identity is empty")
	}
}
```

**Step 2: Run test to verify it fails**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/signing/... -run TestMacOSSigner -v`
Expected: FAIL (NewMacOSSigner not defined)

**Step 3: Write implementation**

```go
package signing

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"runtime"
)

// MacOSSigner signs binaries using macOS codesign.
type MacOSSigner struct {
	config MacOSConfig
}

// Compile-time interface check.
var _ Signer = (*MacOSSigner)(nil)

// NewMacOSSigner creates a new macOS signer.
func NewMacOSSigner(cfg MacOSConfig) *MacOSSigner {
	return &MacOSSigner{config: cfg}
}

// Name returns "codesign".
func (s *MacOSSigner) Name() string {
	return "codesign"
}

// Available checks if running on macOS with codesign and an identity configured.
func (s *MacOSSigner) Available() bool {
	if runtime.GOOS != "darwin" {
		return false
	}
	if s.config.Identity == "" {
		return false
	}
	_, err := exec.LookPath("codesign")
	return err == nil
}

// Sign codesigns a binary with hardened runtime.
func (s *MacOSSigner) Sign(ctx context.Context, binary string) error {
	if !s.Available() {
		return fmt.Errorf("codesign.Sign: codesign not available")
	}

	cmd := exec.CommandContext(ctx, "codesign",
		"--sign", s.config.Identity,
		"--timestamp",
		"--options", "runtime", // Hardened runtime for notarization
		"--force",
		binary,
	)

	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("codesign.Sign: %w\nOutput: %s", err, string(output))
	}

	return nil
}

// Notarize submits a binary to Apple for notarization and staples the ticket.
// This blocks until Apple responds (typically 1-5 minutes).
func (s *MacOSSigner) Notarize(ctx context.Context, binary string) error {
	if s.config.AppleID == "" || s.config.TeamID == "" || s.config.AppPassword == "" {
		return fmt.Errorf("codesign.Notarize: missing Apple credentials (apple_id, team_id, app_password)")
	}

	// Create ZIP for submission
	zipPath := binary + ".zip"
	zipCmd := exec.CommandContext(ctx, "zip", "-j", zipPath, binary)
	if output, err := zipCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("codesign.Notarize: failed to create zip: %w\nOutput: %s", err, string(output))
	}
	defer os.Remove(zipPath)

	// Submit to Apple and wait
	submitCmd := exec.CommandContext(ctx, "xcrun", "notarytool", "submit",
		zipPath,
		"--apple-id", s.config.AppleID,
		"--team-id", s.config.TeamID,
		"--password", s.config.AppPassword,
		"--wait",
	)
	if output, err := submitCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("codesign.Notarize: notarization failed: %w\nOutput: %s", err, string(output))
	}

	// Staple the ticket
	stapleCmd := exec.CommandContext(ctx, "xcrun", "stapler", "staple", binary)
	if output, err := stapleCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("codesign.Notarize: failed to staple: %w\nOutput: %s", err, string(output))
	}

	return nil
}

// ShouldNotarize returns true if notarization is enabled.
func (s *MacOSSigner) ShouldNotarize() bool {
	return s.config.Notarize
}
```

**Step 4: Run tests**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/signing/... -run TestMacOSSigner -v`
Expected: PASS

**Step 5: Commit**

```bash
git add pkg/build/signing/codesign.go pkg/build/signing/codesign_test.go
git commit -m "feat(signing): add macOS codesign + notarization

Signs binaries with Developer ID and hardened runtime.
Notarization submits to Apple and staples ticket.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 4: Add Windows Placeholder

**Files:**
- Create: `pkg/build/signing/signtool.go`

**Step 1: Create placeholder implementation**

```go
package signing

import (
	"context"
)

// WindowsSigner signs binaries using Windows signtool (placeholder).
type WindowsSigner struct {
	config WindowsConfig
}

// Compile-time interface check.
var _ Signer = (*WindowsSigner)(nil)

// NewWindowsSigner creates a new Windows signer.
func NewWindowsSigner(cfg WindowsConfig) *WindowsSigner {
	return &WindowsSigner{config: cfg}
}

// Name returns "signtool".
func (s *WindowsSigner) Name() string {
	return "signtool"
}

// Available returns false (not yet implemented).
func (s *WindowsSigner) Available() bool {
	return false
}

// Sign is a placeholder that does nothing.
func (s *WindowsSigner) Sign(ctx context.Context, binary string) error {
	// TODO: Implement Windows signing
	return nil
}
```

**Step 2: Verify it compiles**

Run: `cd /Users/snider/Code/Core && go build ./pkg/build/signing/...`
Expected: No errors

**Step 3: Commit**

```bash
git add pkg/build/signing/signtool.go
git commit -m "feat(signing): add Windows signtool placeholder

Placeholder for future Windows code signing support.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 5: Add SignConfig to BuildConfig

**Files:**
- Modify: `pkg/build/config.go`
- Modify: `pkg/build/config_test.go`

**Step 1: Add Sign field to BuildConfig**

In `pkg/build/config.go`, add to the `BuildConfig` struct:

```go
// Add import
import "forge.lthn.ai/core/cli/pkg/build/signing"

// Add to BuildConfig struct after Targets field:
// Sign contains code signing configuration.
Sign signing.SignConfig `yaml:"sign,omitempty"`
```

**Step 2: Update DefaultConfig**

In `DefaultConfig()`, add:

```go
Sign: signing.DefaultSignConfig(),
```

**Step 3: Update applyDefaults**

In `applyDefaults()`, add:

```go
// Expand environment variables in sign config
cfg.Sign.ExpandEnv()
```

**Step 4: Add test for sign config loading**

Add to `pkg/build/config_test.go`:

```go
func TestLoadConfig_Good_SignConfig(t *testing.T) {
	tmpDir := t.TempDir()
	coreDir := filepath.Join(tmpDir, ".core")
	os.MkdirAll(coreDir, 0755)

	configContent := `version: 1
sign:
  enabled: true
  gpg:
    key: "ABCD1234"
  macos:
    identity: "Developer ID Application: Test"
    notarize: true
`
	os.WriteFile(filepath.Join(coreDir, "build.yaml"), []byte(configContent), 0644)

	cfg, err := LoadConfig(tmpDir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if !cfg.Sign.Enabled {
		t.Error("expected Sign.Enabled to be true")
	}
	if cfg.Sign.GPG.Key != "ABCD1234" {
		t.Errorf("expected GPG.Key 'ABCD1234', got %q", cfg.Sign.GPG.Key)
	}
	if cfg.Sign.MacOS.Identity != "Developer ID Application: Test" {
		t.Errorf("expected MacOS.Identity, got %q", cfg.Sign.MacOS.Identity)
	}
	if !cfg.Sign.MacOS.Notarize {
		t.Error("expected MacOS.Notarize to be true")
	}
}
```

**Step 5: Run tests**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/... -run TestLoadConfig -v`
Expected: PASS

**Step 6: Commit**

```bash
git add pkg/build/config.go pkg/build/config_test.go
git commit -m "feat(build): add SignConfig to BuildConfig

Loads signing configuration from .core/build.yaml.
Expands environment variables for secrets.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 6: Create Sign Helper Functions

**Files:**
- Create: `pkg/build/signing/sign.go`

**Step 1: Create orchestration helpers**

```go
package signing

import (
	"context"
	"fmt"
	"runtime"

	"forge.lthn.ai/core/cli/pkg/build"
)

// SignBinaries signs macOS binaries in the artifacts list.
// Only signs darwin binaries when running on macOS with a configured identity.
func SignBinaries(ctx context.Context, cfg SignConfig, artifacts []build.Artifact) error {
	if !cfg.Enabled {
		return nil
	}

	// Only sign on macOS
	if runtime.GOOS != "darwin" {
		return nil
	}

	signer := NewMacOSSigner(cfg.MacOS)
	if !signer.Available() {
		return nil // Silently skip if not configured
	}

	for _, artifact := range artifacts {
		if artifact.OS != "darwin" {
			continue
		}

		fmt.Printf("  Signing %s...\n", artifact.Path)
		if err := signer.Sign(ctx, artifact.Path); err != nil {
			return fmt.Errorf("failed to sign %s: %w", artifact.Path, err)
		}
	}

	return nil
}

// NotarizeBinaries notarizes macOS binaries if enabled.
func NotarizeBinaries(ctx context.Context, cfg SignConfig, artifacts []build.Artifact) error {
	if !cfg.Enabled || !cfg.MacOS.Notarize {
		return nil
	}

	if runtime.GOOS != "darwin" {
		return nil
	}

	signer := NewMacOSSigner(cfg.MacOS)
	if !signer.Available() {
		return fmt.Errorf("notarization requested but codesign not available")
	}

	for _, artifact := range artifacts {
		if artifact.OS != "darwin" {
			continue
		}

		fmt.Printf("  Notarizing %s (this may take a few minutes)...\n", artifact.Path)
		if err := signer.Notarize(ctx, artifact.Path); err != nil {
			return fmt.Errorf("failed to notarize %s: %w", artifact.Path, err)
		}
	}

	return nil
}

// SignChecksums signs the checksums file with GPG.
func SignChecksums(ctx context.Context, cfg SignConfig, checksumFile string) error {
	if !cfg.Enabled {
		return nil
	}

	signer := NewGPGSigner(cfg.GPG.Key)
	if !signer.Available() {
		return nil // Silently skip if not configured
	}

	fmt.Printf("  Signing %s with GPG...\n", checksumFile)
	if err := signer.Sign(ctx, checksumFile); err != nil {
		return fmt.Errorf("failed to sign checksums: %w", err)
	}

	return nil
}
```

**Step 2: Verify it compiles**

Run: `cd /Users/snider/Code/Core && go build ./pkg/build/signing/...`
Expected: No errors

**Step 3: Commit**

```bash
git add pkg/build/signing/sign.go
git commit -m "feat(signing): add orchestration helpers

SignBinaries, NotarizeBinaries, SignChecksums for pipeline integration.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 7: Integrate Signing into CLI

**Files:**
- Modify: `cmd/core/cmd/build.go`

**Step 1: Add --no-sign and --notarize flags**

After the existing flag declarations (around line 74), add:

```go
var noSign bool
var notarize bool

buildCmd.BoolFlag("no-sign", "Skip all code signing", &noSign)
buildCmd.BoolFlag("notarize", "Enable macOS notarization (requires Apple credentials)", &notarize)
```

**Step 2: Update runProjectBuild signature**

Update the function signature and call:

```go
// Update function signature:
func runProjectBuild(buildType string, ciMode bool, targetsFlag string, outputDir string, doArchive bool, doChecksum bool, configPath string, format string, push bool, imageName string, noSign bool, notarize bool) error {

// Update the Action call:
buildCmd.Action(func() error {
	return runProjectBuild(buildType, ciMode, targets, outputDir, doArchive, doChecksum, configPath, format, push, imageName, noSign, notarize)
})
```

**Step 3: Add signing import**

Add to imports:

```go
"forge.lthn.ai/core/cli/pkg/build/signing"
```

**Step 4: Add signing after build, before archive**

After the build succeeds (around line 228), add:

```go
// Sign macOS binaries if enabled
signCfg := buildCfg.Sign
if notarize {
	signCfg.MacOS.Notarize = true
}
if noSign {
	signCfg.Enabled = false
}

if signCfg.Enabled && runtime.GOOS == "darwin" {
	if !ciMode {
		fmt.Println()
		fmt.Printf("%s Signing binaries...\n", buildHeaderStyle.Render("Sign:"))
	}

	if err := signing.SignBinaries(ctx, signCfg, artifacts); err != nil {
		if !ciMode {
			fmt.Printf("%s Signing failed: %v\n", buildErrorStyle.Render("Error:"), err)
		}
		return err
	}

	if signCfg.MacOS.Notarize {
		if err := signing.NotarizeBinaries(ctx, signCfg, artifacts); err != nil {
			if !ciMode {
				fmt.Printf("%s Notarization failed: %v\n", buildErrorStyle.Render("Error:"), err)
			}
			return err
		}
	}
}
```

**Step 5: Add GPG signing after checksums**

After WriteChecksumFile (around line 297), add:

```go
// Sign checksums with GPG
if signCfg.Enabled {
	if err := signing.SignChecksums(ctx, signCfg, checksumPath); err != nil {
		if !ciMode {
			fmt.Printf("%s GPG signing failed: %v\n", buildErrorStyle.Render("Error:"), err)
		}
		return err
	}
}
```

**Step 6: Verify it compiles**

Run: `cd /Users/snider/Code/Core && go build ./cmd/core/...`
Expected: No errors

**Step 7: Commit**

```bash
git add cmd/core/cmd/build.go
git commit -m "feat(cli): integrate signing into build command

Adds --no-sign and --notarize flags.
Signs macOS binaries after build, GPG signs checksums.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 8: Add Integration Test

**Files:**
- Create: `pkg/build/signing/signing_test.go`

**Step 1: Create integration test**

```go
package signing

import (
	"context"
	"testing"

	"forge.lthn.ai/core/cli/pkg/build"
)

func TestSignBinaries_Good_SkipsNonDarwin(t *testing.T) {
	ctx := context.Background()
	cfg := SignConfig{
		Enabled: true,
		MacOS: MacOSConfig{
			Identity: "Developer ID Application: Test",
		},
	}

	// Create fake artifact for linux
	artifacts := []build.Artifact{
		{Path: "/tmp/test-binary", OS: "linux", Arch: "amd64"},
	}

	// Should not error even though binary doesn't exist (skips non-darwin)
	err := SignBinaries(ctx, cfg, artifacts)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

func TestSignBinaries_Good_DisabledConfig(t *testing.T) {
	ctx := context.Background()
	cfg := SignConfig{
		Enabled: false,
	}

	artifacts := []build.Artifact{
		{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
	}

	err := SignBinaries(ctx, cfg, artifacts)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

func TestSignChecksums_Good_SkipsNoKey(t *testing.T) {
	ctx := context.Background()
	cfg := SignConfig{
		Enabled: true,
		GPG: GPGConfig{
			Key: "", // No key configured
		},
	}

	// Should silently skip when no key
	err := SignChecksums(ctx, cfg, "/tmp/CHECKSUMS.txt")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}

func TestSignChecksums_Good_Disabled(t *testing.T) {
	ctx := context.Background()
	cfg := SignConfig{
		Enabled: false,
	}

	err := SignChecksums(ctx, cfg, "/tmp/CHECKSUMS.txt")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
```

**Step 2: Run all signing tests**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/signing/... -v`
Expected: All tests pass

**Step 3: Commit**

```bash
git add pkg/build/signing/signing_test.go
git commit -m "test(signing): add integration tests

Tests for skip conditions and disabled configs.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

### Task 9: Update TODO.md and Final Verification

**Step 1: Build CLI**

Run: `cd /Users/snider/Code/Core && go build -o bin/core ./cmd/core`
Expected: No errors

**Step 2: Test help output**

Run: `./bin/core build --help`
Expected: Shows --no-sign and --notarize flags

**Step 3: Run all tests**

Run: `cd /Users/snider/Code/Core && go test ./pkg/build/... -v`
Expected: All tests pass

**Step 4: Update TODO.md**

Mark S3.3 tasks as complete in `tasks/TODO.md`:

```markdown
### S3.3 Code Signing (Standard) ✅
- [x] macOS codesign integration
- [x] macOS notarization
- [ ] Windows signtool integration (placeholder added)
- [x] GPG signing (standard tools)
```

**Step 5: Final commit**

```bash
git add tasks/TODO.md
git commit -m "chore(signing): finalize S3.3 code signing

Implemented:
- GPG signing of CHECKSUMS.txt
- macOS codesign with hardened runtime
- macOS notarization via notarytool
- Windows signtool placeholder

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>"
```

---

## Summary

9 tasks covering:
1. Signing package structure (Signer interface, SignConfig)
2. GPG signer implementation
3. macOS codesign + notarization
4. Windows signtool placeholder
5. Add SignConfig to BuildConfig
6. Orchestration helpers (SignBinaries, SignChecksums)
7. CLI integration (--no-sign, --notarize)
8. Integration tests
9. Final verification and TODO update
@@ -1,306 +0,0 @@
# Core DevOps CLI Design (S4.6)

## Summary

Portable development environment CLI commands for the core-devops LinuxKit image. Provides a sandboxed, immutable environment with 100+ embedded tools.

## Design Decisions

- **Image sources**: GitHub Releases + Container Registry + CDN (tried in order, configurable)
- **Local storage**: `~/.core/images/` with `CORE_IMAGES_DIR` env override
- **Shell connection**: SSH by default, `--console` for serial fallback
- **Serve**: Mount PWD into VM via 9P/SSHFS, run auto-detected dev server
- **Test**: Auto-detect framework + `.core/test.yaml` config + `--` override
- **Update**: Simple hash/version check, `--force` to always download
- **Claude sandbox**: SSH in with forwarded auth, safe experimentation in immutable image

## Package Structure

```
pkg/devops/
├── devops.go    # DevOps struct, Boot/Stop/Status
├── images.go    # ImageManager, manifest handling
├── mount.go     # Directory mounting (9P, SSHFS)
├── serve.go     # Project detection, serve command
├── test.go      # Test detection, .core/test.yaml parsing
├── config.go    # ~/.core/config.yaml handling
└── sources/
    ├── source.go   # ImageSource interface
    ├── github.go   # GitHub Releases
    ├── registry.go # Container registry
    └── cdn.go      # CDN/S3

cmd/core/cmd/dev.go  # CLI commands
```

## Image Storage

```
~/.core/
├── config.yaml  # Global config (image source preference, etc.)
└── images/
    ├── core-devops-darwin-arm64.qcow2
    ├── core-devops-darwin-amd64.qcow2
    ├── core-devops-linux-amd64.qcow2
    └── manifest.json  # Tracks versions, hashes, last-updated
```

## ImageSource Interface

```go
type ImageSource interface {
    Name() string
    Available() bool
    LatestVersion() (string, error)
    Download(ctx context.Context, dest string) error
}
```

Sources tried in order: GitHub → Registry → CDN, or respect user preference in config.
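A minimal sketch of that fallback loop — `d.sources()` is an assumed helper returning the configured order, and the error aggregation is illustrative:

```go
func (d *DevOps) download(ctx context.Context, dest string) error {
	var errs []error
	for _, src := range d.sources() { // GitHub → Registry → CDN, or user order
		if !src.Available() {
			continue
		}
		if err := src.Download(ctx, dest); err != nil {
			errs = append(errs, fmt.Errorf("%s: %w", src.Name(), err))
			continue // try the next source
		}
		return nil
	}
	return fmt.Errorf("all image sources failed: %v", errs)
}
```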

## CLI Commands

```go
// cmd/core/cmd/dev.go

func AddDevCommand(app *clir.Cli) {
    devCmd := app.NewSubCommand("dev", "Portable development environment")

    // core dev install [--source github|registry|cdn]
    //   Downloads core-devops image for current platform

    // core dev boot [--memory 4096] [--cpus 4] [--name mydev]
    //   Boots the dev environment (detached by default)

    // core dev shell [--console]
    //   SSH into running dev env (or serial console with --console)

    // core dev serve [--port 8000]
    //   Mount PWD → /app, run FrankenPHP, forward port

    // core dev test [-- custom command]
    //   Auto-detect tests or use .core/test.yaml or pass custom

    // core dev claude [--auth] [--model opus|sonnet]
    //   SSH in with forwarded auth, start Claude in sandbox

    // core dev update [--force]
    //   Check for newer image, download if available

    // core dev status
    //   Show if dev env is running, resource usage, ports

    // core dev stop
    //   Stop the running dev environment
}
```

## Command Flow

```
First time:
  core dev install  → Downloads ~/.core/images/core-devops-{os}-{arch}.qcow2
  core dev boot     → Starts VM in background
  core dev shell    → SSH in

Daily use:
  core dev boot     → Start (if not running)
  core dev serve    → Mount project, start server
  core dev test     → Run tests inside VM
  core dev shell    → Interactive work

AI sandbox:
  core dev claude   → SSH + forward auth + start Claude CLI

Maintenance:
  core dev update   → Get latest image
  core dev status   → Check what's running
```

## `core dev claude` - Sandboxed AI Session

```bash
core dev claude                     # Forward all auth by default
core dev claude --no-auth           # Clean session, no host credentials
core dev claude --auth=gh,anthropic # Selective forwarding
```

**What it does:**
1. Ensures dev VM is running (auto-boots if not)
2. Forwards auth credentials from host:
   - `~/.anthropic/` or `ANTHROPIC_API_KEY`
   - `~/.config/gh/` (GitHub CLI auth)
   - SSH agent forwarding
   - Git config (name, email)
3. SSHs into VM with agent forwarding (`ssh -A`)
4. Starts `claude` CLI inside with forwarded context
5. Current project mounted at `/app`

**Why this is powerful:**
- Immutable base = reset anytime with `core dev boot --fresh`
- Claude can experiment freely, install packages, make mistakes
- Host system untouched
- Still has real credentials to push code, create PRs
- Full 100+ tools available in core-devops image
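Under the hood this amounts to little more than an agent-forwarded SSH session. A hypothetical expansion — the `core-dev` host alias and env forwarding are assumptions, and `SendEnv` needs a matching `AcceptEnv` in the VM's sshd config:

```bash
ssh -A \
  -o SendEnv=ANTHROPIC_API_KEY \
  core-dev -t 'cd /app && claude'
```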

## Test Configuration

**`.core/test.yaml` format:**
```yaml
version: 1

# Commands to run (in order)
commands:
  - name: unit
    run: vendor/bin/pest --parallel
  - name: types
    run: vendor/bin/phpstan analyse
  - name: lint
    run: vendor/bin/pint --test

# Or simple single command
command: npm test

# Environment variables
env:
  APP_ENV: testing
  DB_CONNECTION: sqlite
```

**Auto-Detection Priority** (sketched after this list):
1. `.core/test.yaml`
2. `composer.json` scripts.test → `composer test`
3. `package.json` scripts.test → `npm test`
4. `go.mod` → `go test ./...`
5. `pytest.ini` or `pyproject.toml` → `pytest`
6. `Taskfile.yaml` → `task test`
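A sketch of that priority order, mirroring the `detectServeCommand` example further down — `exists`, `hasComposerScript`, and `hasScript` are assumed helpers:

```go
func detectTestCommand(projectDir string) string {
	switch {
	case exists(".core/test.yaml"):
		return "" // handled by the .core/test.yaml parser instead
	case exists("composer.json") && hasComposerScript("test"):
		return "composer test"
	case exists("package.json") && hasScript("test"):
		return "npm test"
	case exists("go.mod"):
		return "go test ./..."
	case exists("pytest.ini") || exists("pyproject.toml"):
		return "pytest"
	case exists("Taskfile.yaml"):
		return "task test"
	}
	return ""
}
```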

**CLI Usage:**
```bash
core dev test                          # Auto-detect and run
core dev test --unit                   # Run only "unit" from .core/test.yaml
core dev test -- go test -v ./pkg/...  # Override with custom
```

## `core dev serve` - Mount & Serve

**How it works:**
1. Ensure VM is running
2. Mount current directory into VM via 9P virtio-fs (or SSHFS fallback)
3. Start auto-detected dev server on /app inside VM
4. Forward port to host

**Mount Strategy:**
```go
type MountMethod int

const (
    Mount9P    MountMethod = iota // QEMU virtio-9p (faster)
    MountSSHFS                    // sshfs reverse mount
    MountRSync                    // Fallback: rsync on change
)
```
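How a method gets picked is not spelled out here; one plausible selection prefers the faster transport — `hasVirtio9P` and `hasSSHFS` are assumed capability checks:

```go
func pickMountMethod(vm *VM) MountMethod {
	switch {
	case hasVirtio9P(vm): // VM booted with a virtio-9p device
		return Mount9P
	case hasSSHFS(vm): // sshfs available inside the guest
		return MountSSHFS
	default:
		return MountRSync // last resort: sync on file change
	}
}
```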

**CLI Usage:**
```bash
core dev serve                  # Mount PWD, serve on :8000
core dev serve --port 3000      # Custom port
core dev serve --path ./backend # Serve subdirectory
```

**Project Detection:**
```go
func detectServeCommand(projectDir string) string {
    if exists("artisan") {
        return "php artisan octane:start --host=0.0.0.0 --port=8000"
    }
    if exists("package.json") && hasScript("dev") {
        return "npm run dev -- --host 0.0.0.0"
    }
    if exists("composer.json") {
        return "frankenphp php-server"
    }
    return "python -m http.server 8000" // Fallback
}
```

## Image Sources & Updates

**~/.core/config.yaml:**
```yaml
version: 1

images:
  source: auto # auto | github | registry | cdn

  cdn:
    url: https://images.example.com/core-devops

  github:
    repo: host-uk/core-images

  registry:
    image: ghcr.io/host-uk/core-devops
```

**Manifest for Update Checking:**
```json
// ~/.core/images/manifest.json
{
  "core-devops-darwin-arm64.qcow2": {
    "version": "v1.2.0",
    "sha256": "abc123...",
    "downloaded": "2026-01-29T10:00:00Z",
    "source": "github"
  }
}
```

**Update Flow:**
```go
func (d *DevOps) Update(force bool) error {
    local := d.manifest.Get(imageName)
    remote, _ := d.source.LatestVersion()

    if force || local.Version != remote {
        fmt.Printf("Updating %s → %s\n", local.Version, remote)
        return d.source.Download(ctx, imagePath)
    }
    fmt.Println("Already up to date")
    return nil
}
```

## Commands Summary

| Command | Description |
|---------|-------------|
| `core dev install` | Download image for platform |
| `core dev boot` | Start VM (auto-installs if needed) |
| `core dev shell` | SSH in (`--console` for serial) |
| `core dev serve` | Mount PWD, run dev server |
| `core dev test` | Run tests inside VM |
| `core dev claude` | Start Claude session in sandbox |
| `core dev update` | Check/download newer image |
| `core dev status` | Show VM state, ports, resources |
| `core dev stop` | Stop the VM |

## Dependencies

- Reuse existing `pkg/container` for VM management (LinuxKitManager)
- SSH client for shell/exec (golang.org/x/crypto/ssh)
- Progress bar for downloads (charmbracelet/bubbles or similar)

## Implementation Steps

1. Create `pkg/devops/` package structure
2. Implement ImageSource interface and sources (GitHub, Registry, CDN)
3. Implement image download with manifest tracking
4. Implement config loading (`~/.core/config.yaml`)
5. Add CLI commands to `cmd/core/cmd/dev.go`
6. Implement boot/stop using existing LinuxKitManager
7. Implement shell (SSH + serial console)
8. Implement serve (mount + project detection)
9. Implement test (detection + .core/test.yaml)
10. Implement claude (auth forwarding + sandbox)
11. Implement update (version check + download)
12. Implement status

(File diff suppressed because it is too large.)
@ -1,291 +0,0 @@
|
|||
# SDK Generation Design

## Summary

Generate typed API clients from OpenAPI specs for TypeScript, Python, Go, and PHP. Includes breaking change detection via semantic diff.

## Design Decisions

- **Generator approach**: Hybrid - native generators where available, openapi-generator fallback
- **Languages**: TypeScript, Python, Go, PHP (Core 4)
- **Detection**: Config → common paths → Laravel Scramble
- **Output**: Local `sdk/` + optional monorepo publish
- **Diff**: Semantic with oasdiff, CI-friendly exit codes
- **Priority**: DX (developer experience)

## Package Structure

```
pkg/sdk/
├── sdk.go               # Main SDK type, orchestration
├── detect.go            # OpenAPI spec detection
├── diff.go              # Breaking change detection (oasdiff)
├── generators/
│   ├── generator.go     # Generator interface
│   ├── typescript.go    # openapi-typescript-codegen
│   ├── python.go        # openapi-python-client
│   ├── go.go            # oapi-codegen
│   └── php.go           # openapi-generator (Docker)
└── templates/           # Package scaffolding templates
    ├── typescript/
    │   └── package.json.tmpl
    ├── python/
    │   └── setup.py.tmpl
    ├── go/
    │   └── go.mod.tmpl
    └── php/
        └── composer.json.tmpl
```

## OpenAPI Detection Flow

```
1. Check config: sdk.spec in .core/release.yaml
   ↓ not found
2. Check common paths:
   - api/openapi.yaml
   - api/openapi.json
   - openapi.yaml
   - openapi.json
   - docs/api.yaml
   - swagger.yaml
   ↓ not found
3. Laravel Scramble detection:
   - Check for scramble/scramble in composer.json
   - Run: php artisan scramble:export --path=api/openapi.json
   - Use generated spec
   ↓ not found
4. Error: No OpenAPI spec found
```
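
A minimal sketch of that chain in Go (function and helper names here are illustrative, not the actual `detect.go` contents):

```go
package sdk

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// DetectSpec resolves the OpenAPI spec path in the order above:
// explicit config, common paths, then a Laravel Scramble export.
func DetectSpec(projectDir, configured string) (string, error) {
	if configured != "" {
		return filepath.Join(projectDir, configured), nil
	}
	for _, p := range []string{
		"api/openapi.yaml", "api/openapi.json",
		"openapi.yaml", "openapi.json",
		"docs/api.yaml", "swagger.yaml",
	} {
		full := filepath.Join(projectDir, p)
		if _, err := os.Stat(full); err == nil {
			return full, nil
		}
	}
	if hasScramble(projectDir) {
		cmd := exec.Command("php", "artisan", "scramble:export", "--path=api/openapi.json")
		cmd.Dir = projectDir
		if err := cmd.Run(); err == nil {
			return filepath.Join(projectDir, "api/openapi.json"), nil
		}
	}
	return "", fmt.Errorf("no OpenAPI spec found")
}

// hasScramble naively checks composer.json for the Scramble package
// (illustrative; real detection may parse composer.json properly).
func hasScramble(projectDir string) bool {
	data, err := os.ReadFile(filepath.Join(projectDir, "composer.json"))
	return err == nil && strings.Contains(string(data), "scramble")
}
```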

## Generator Interface

```go
type Generator interface {
	// Language returns the generator's target language
	Language() string

	// Generate creates SDK from OpenAPI spec
	Generate(ctx context.Context, opts GenerateOptions) error

	// Available checks if generator dependencies are installed
	Available() bool

	// Install provides installation instructions
	Install() string
}

type GenerateOptions struct {
	SpecPath    string // OpenAPI spec file
	OutputDir   string // Where to write SDK
	PackageName string // Package/module name
	Version     string // SDK version
}
```

### Native Generators

| Language | Tool | Install |
|----------|------|---------|
| TypeScript | openapi-typescript-codegen | `npm i -g openapi-typescript-codegen` |
| Python | openapi-python-client | `pip install openapi-python-client` |
| Go | oapi-codegen | `go install github.com/deepmap/oapi-codegen/cmd/oapi-codegen@latest` |
| PHP | openapi-generator (Docker) | Requires Docker |

### Fallback Strategy

```go
func (g *TypeScriptGenerator) Generate(ctx context.Context, opts GenerateOptions) error {
	if g.Available() {
		return g.generateNative(ctx, opts)
	}
	return g.generateDocker(ctx, opts) // openapi-generator in Docker
}
```

## Breaking Change Detection

Using [oasdiff](https://github.com/Tufin/oasdiff) for semantic OpenAPI comparison:

```go
import (
	"github.com/tufin/oasdiff/checker"
	"github.com/tufin/oasdiff/diff"
)

func (s *SDK) Diff(base, revision string) (*DiffResult, error) {
	// Load specs (loader setup and error handling elided in this sketch)
	baseSpec, _ := load.From(loader, base)
	revSpec, _ := load.From(loader, revision)

	// Compute diff
	d, _ := diff.Get(diff.NewConfig(), baseSpec, revSpec)

	// Check for breaking changes
	breaks := checker.CheckBackwardCompatibility(
		checker.GetDefaultChecks(),
		d,
		baseSpec,
		revSpec,
	)

	return &DiffResult{
		Breaking: len(breaks) > 0,
		Changes:  breaks,
		Summary:  formatSummary(d),
	}, nil
}
```

### Exit Codes for CI

| Exit Code | Meaning |
|-----------|---------|
| 0 | No breaking changes |
| 1 | Breaking changes detected |
| 2 | Error (invalid spec, etc.) |
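
A sketch of how the CLI layer might map a `DiffResult` to those exit codes (illustrative; the real `cmd/core/sdk.go` wiring may differ):

```go
// exitCodeFor maps a diff outcome to the CI contract above.
func exitCodeFor(result *DiffResult, err error) int {
	if err != nil {
		return 2 // invalid spec or other error
	}
	if result.Breaking {
		return 1 // breaking changes detected
	}
	return 0 // no breaking changes
}
```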

### Breaking Change Categories

- Removed endpoints
- Changed required parameters
- Modified response schemas
- Changed authentication requirements

## CLI Commands

```bash
# Generate SDKs from OpenAPI spec
core sdk generate                     # Uses .core/release.yaml config
core sdk generate --spec api.yaml     # Explicit spec file
core sdk generate --lang typescript   # Single language

# Check for breaking changes
core sdk diff                         # Compare current vs last release
core sdk diff --spec api.yaml --base v1.0.0

# Validate spec before generation
core sdk validate
core sdk validate --spec api.yaml
```

## Config Schema

In `.core/release.yaml`:

```yaml
sdk:
  # OpenAPI spec source (auto-detected if omitted)
  spec: api/openapi.yaml

  # Languages to generate
  languages:
    - typescript
    - python
    - go
    - php

  # Output directory (default: sdk/)
  output: sdk/

  # Package naming
  package:
    name: myapi            # Base name
    version: "{{.Version}}"

  # Breaking change detection
  diff:
    enabled: true
    fail_on_breaking: true # CI fails on breaking changes

  # Optional: publish to monorepo
  publish:
    repo: myorg/sdks
    path: packages/myapi
```

## Output Structure

Each generator outputs to `sdk/{lang}/`:

```
sdk/
├── typescript/
│   ├── package.json
│   ├── src/
│   │   ├── index.ts
│   │   ├── client.ts
│   │   └── models/
│   └── tsconfig.json
├── python/
│   ├── setup.py
│   ├── myapi/
│   │   ├── __init__.py
│   │   ├── client.py
│   │   └── models/
│   └── requirements.txt
├── go/
│   ├── go.mod
│   ├── client.go
│   └── models.go
└── php/
    ├── composer.json
    ├── src/
    │   ├── Client.php
    │   └── Models/
    └── README.md
```

## Publishing Workflow

SDK publishing integrates with the existing release pipeline:

```
core release
  → build artifacts
  → generate SDKs (if sdk: configured)
  → run diff check (warns or fails on breaking)
  → publish to GitHub release
  → publish SDKs (optional)
```

### Monorepo Publishing

For projects using a shared SDK monorepo (sketched in Go after this list):

1. Clone target repo (shallow)
2. Update `packages/{name}/{lang}/`
3. Commit with version tag
4. Push (triggers downstream CI)

The SDK tarball is also attached to GitHub releases for direct download.
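
A rough Go sketch of those four steps (assumes a GitHub-hosted monorepo and Go 1.23+ for `os.CopyFS`; all names are illustrative):

```go
package release

import (
	"context"
	"os"
	"os/exec"
	"path/filepath"
)

// publishToMonorepo mirrors the four steps above using git CLI calls.
func publishToMonorepo(ctx context.Context, repo, pkgPath, sdkDir, version string) error {
	tmp, err := os.MkdirTemp("", "sdk-publish-*")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmp)

	// 1. Clone target repo (shallow). Assumes a GitHub-hosted monorepo.
	if err := run(ctx, "", "git", "clone", "--depth", "1", "https://github.com/"+repo+".git", tmp); err != nil {
		return err
	}
	// 2. Replace packages/{name}/{lang}/ with the freshly generated SDKs.
	dst := filepath.Join(tmp, pkgPath)
	if err := os.RemoveAll(dst); err != nil {
		return err
	}
	if err := os.CopyFS(dst, os.DirFS(sdkDir)); err != nil { // needs Go 1.23+
		return err
	}
	// 3 + 4. Commit with a version tag and push to trigger downstream CI.
	for _, args := range [][]string{
		{"git", "add", "-A"},
		{"git", "commit", "-m", "sdk: " + version},
		{"git", "tag", version},
		{"git", "push", "origin", "HEAD", "--tags"},
	} {
		if err := run(ctx, tmp, args...); err != nil {
			return err
		}
	}
	return nil
}

func run(ctx context.Context, dir string, name ...string) error {
	cmd := exec.CommandContext(ctx, name[0], name[1:]...)
	cmd.Dir = dir
	return cmd.Run()
}
```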

## Implementation Steps

1. Create `pkg/sdk/` package structure
2. Implement OpenAPI detection (`detect.go`)
3. Define Generator interface (`generators/generator.go`)
4. Implement TypeScript generator (native + fallback)
5. Implement Python generator (native + fallback)
6. Implement Go generator (native)
7. Implement PHP generator (Docker-based)
8. Add package templates (`templates/`)
9. Implement diff with oasdiff (`diff.go`)
10. Add CLI commands (`cmd/core/sdk.go`)
11. Integrate with release pipeline
12. Add monorepo publish support

## Dependencies

```go
// go.mod additions
require (
    github.com/tufin/oasdiff v1.x.x
    github.com/getkin/kin-openapi v0.x.x
)
```

## Testing

- Unit tests for each generator
- Integration tests with sample OpenAPI specs
- Diff tests with known breaking/non-breaking changes
- E2E test generating SDKs for a real API
@@ -1,210 +0,0 @@
# SDK Release Integration Design (S3.4)

## Summary

Add `core release --target sdk` to generate SDKs as a separate release target. Runs breaking change detection before generating, uses the release version for SDK versioning, and outputs locally for manual publishing.

## Design Decisions

- **Separate target**: `--target sdk` runs ONLY SDK generation (no binary builds)
- **Local output**: Generates to `sdk/` directory, user handles publishing
- **Diff first**: Run breaking change detection before generating
- **Match version**: SDK version matches release version from git tags

## CLI

```bash
core release --target sdk                   # Generate SDKs only
core release --target sdk --version v1.2.3  # Explicit version
core release --target sdk --dry-run         # Preview what would generate
core release                                # Normal release (unchanged)
```

## Config Schema

In `.core/release.yaml`:

```yaml
sdk:
  spec: openapi.yaml        # or auto-detect
  languages: [typescript, python, go, php]
  output: sdk               # output directory
  package:
    name: myapi-sdk
  diff:
    enabled: true
    fail_on_breaking: false # warn but continue
```

## Flow

```
core release --target sdk
  ↓
1. Load release config (.core/release.yaml)
  ↓
2. Check sdk config exists (error if not configured)
  ↓
3. Determine version (git tag or --version flag)
  ↓
4. If diff.enabled:
   - Get previous tag
   - Run oasdiff against current spec
   - If breaking && fail_on_breaking: abort
   - If breaking && !fail_on_breaking: warn, continue
  ↓
5. Generate SDKs for each language
   - Pass version to generators
   - Output to sdk/{language}/
  ↓
6. Print summary (languages generated, output paths)
```

## Package Structure

```
pkg/release/
├── sdk.go       # RunSDK() orchestration + diff helper ← NEW
├── release.go   # Existing Run() unchanged
└── config.go    # Existing SDKConfig unchanged

pkg/sdk/
└── sdk.go       # Add SetVersion() method ← MODIFY

cmd/core/cmd/
└── release.go   # Add --target flag ← MODIFY
```

## RunSDK Implementation

```go
// pkg/release/sdk.go

// RunSDK executes SDK-only release: diff check + generate.
func RunSDK(ctx context.Context, cfg *Config, dryRun bool) (*SDKRelease, error) {
	if cfg.SDK == nil {
		return nil, fmt.Errorf("sdk not configured in .core/release.yaml")
	}

	projectDir := cfg.projectDir
	if projectDir == "" {
		projectDir = "."
	}

	// Determine version
	version := cfg.version
	if version == "" {
		var err error
		version, err = DetermineVersion(projectDir)
		if err != nil {
			return nil, fmt.Errorf("failed to determine version: %w", err)
		}
	}

	// Run diff check if enabled
	if cfg.SDK.Diff.Enabled {
		breaking, err := checkBreakingChanges(projectDir, cfg.SDK)
		if err != nil {
			// Non-fatal: warn and continue
			fmt.Printf("Warning: diff check failed: %v\n", err)
		} else if breaking {
			if cfg.SDK.Diff.FailOnBreaking {
				return nil, fmt.Errorf("breaking API changes detected")
			}
			fmt.Printf("Warning: breaking API changes detected\n")
		}
	}

	if dryRun {
		return &SDKRelease{
			Version:   version,
			Languages: cfg.SDK.Languages,
			Output:    cfg.SDK.Output,
		}, nil
	}

	// Generate SDKs
	sdkCfg := toSDKConfig(cfg.SDK)
	s := sdk.New(projectDir, sdkCfg)
	s.SetVersion(version)

	if err := s.Generate(ctx); err != nil {
		return nil, fmt.Errorf("sdk generation failed: %w", err)
	}

	return &SDKRelease{
		Version:   version,
		Languages: cfg.SDK.Languages,
		Output:    cfg.SDK.Output,
	}, nil
}

// SDKRelease holds the result of an SDK release.
type SDKRelease struct {
	Version   string
	Languages []string
	Output    string
}
```

## CLI Integration

```go
// cmd/core/cmd/release.go

var target string
releaseCmd.StringFlag("target", "Release target (sdk)", &target)

releaseCmd.Action(func() error {
	if target == "sdk" {
		return runReleaseSDK(dryRun, version)
	}
	return runRelease(dryRun, version, draft, prerelease)
})

func runReleaseSDK(dryRun bool, version string) error {
	ctx := context.Background()
	projectDir, _ := os.Getwd()

	cfg, err := release.LoadConfig(projectDir)
	if err != nil {
		return err
	}

	if version != "" {
		cfg.SetVersion(version)
	}

	fmt.Printf("%s Generating SDKs\n", releaseHeaderStyle.Render("SDK Release:"))
	if dryRun {
		fmt.Printf("  %s\n", releaseDimStyle.Render("(dry-run mode)"))
	}

	result, err := release.RunSDK(ctx, cfg, dryRun)
	if err != nil {
		fmt.Printf("%s %v\n", releaseErrorStyle.Render("Error:"), err)
		return err
	}

	fmt.Printf("%s SDK generation complete\n", releaseSuccessStyle.Render("Success:"))
	fmt.Printf("  Version:   %s\n", result.Version)
	fmt.Printf("  Languages: %v\n", result.Languages)
	fmt.Printf("  Output:    %s/\n", result.Output)

	return nil
}
```

## Implementation Steps

1. Add `SetVersion()` method to `pkg/sdk/sdk.go`
2. Create `pkg/release/sdk.go` with `RunSDK()` and helpers
3. Add `--target` flag to `cmd/core/cmd/release.go`
4. Add `runReleaseSDK()` function to CLI
5. Add tests for `pkg/release/sdk_test.go`
6. Final verification and TODO update

## Dependencies

- `oasdiff` CLI (for breaking change detection)
- Existing SDK generators (openapi-generator, etc.)
@@ -1,576 +0,0 @@
# SDK Release Implementation Plan (S3.4)

> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.

**Goal:** Add `core release --target sdk` to generate SDKs with version and diff checking

**Architecture:** Separate release target that runs diff check then SDK generation, outputs locally

**Tech Stack:** Go, existing pkg/sdk generators, oasdiff for diff

---

## Task 1: Add SetVersion to SDK struct

**Files:**
- Modify: `pkg/sdk/sdk.go`
- Test: `pkg/sdk/sdk_test.go` (create if needed)

**Step 1: Write the failing test**

```go
// pkg/sdk/sdk_test.go
package sdk

import (
	"testing"
)

func TestSDK_Good_SetVersion(t *testing.T) {
	s := New("/tmp", nil)
	s.SetVersion("v1.2.3")

	if s.version != "v1.2.3" {
		t.Errorf("expected version v1.2.3, got %s", s.version)
	}
}

func TestSDK_Good_VersionPassedToGenerator(t *testing.T) {
	config := &Config{
		Languages: []string{"typescript"},
		Output:    "sdk",
		Package: PackageConfig{
			Name: "test-sdk",
		},
	}
	s := New("/tmp", config)
	s.SetVersion("v2.0.0")

	// Version should override config
	if s.config.Package.Version != "v2.0.0" {
		t.Errorf("expected config version v2.0.0, got %s", s.config.Package.Version)
	}
}
```

**Step 2: Run test to verify it fails**

Run: `go test ./pkg/sdk/... -run TestSDK_Good_SetVersion -v`
Expected: FAIL with "s.version undefined" or similar

**Step 3: Write minimal implementation**

Add to `pkg/sdk/sdk.go`:

```go
// SDK struct - add version field
type SDK struct {
	config     *Config
	projectDir string
	version    string // ADD THIS
}

// SetVersion sets the SDK version, overriding config.
func (s *SDK) SetVersion(version string) {
	s.version = version
	if s.config != nil {
		s.config.Package.Version = version
	}
}
```

**Step 4: Run test to verify it passes**

Run: `go test ./pkg/sdk/... -run TestSDK_Good -v`
Expected: PASS

**Step 5: Commit**

```bash
git add pkg/sdk/sdk.go pkg/sdk/sdk_test.go
git commit -m "feat(sdk): add SetVersion method for release integration"
```

---

## Task 2: Create pkg/release/sdk.go structure

**Files:**
- Create: `pkg/release/sdk.go`

**Step 1: Create file with types and helper**

```go
// pkg/release/sdk.go
package release

import (
	"forge.lthn.ai/core/cli/pkg/sdk"
)

// SDKRelease holds the result of an SDK release.
type SDKRelease struct {
	// Version is the SDK version.
	Version string
	// Languages that were generated.
	Languages []string
	// Output directory.
	Output string
}

// toSDKConfig converts release.SDKConfig to sdk.Config.
func toSDKConfig(cfg *SDKConfig) *sdk.Config {
	if cfg == nil {
		return nil
	}
	return &sdk.Config{
		Spec:      cfg.Spec,
		Languages: cfg.Languages,
		Output:    cfg.Output,
		Package: sdk.PackageConfig{
			Name:    cfg.Package.Name,
			Version: cfg.Package.Version,
		},
		Diff: sdk.DiffConfig{
			Enabled:        cfg.Diff.Enabled,
			FailOnBreaking: cfg.Diff.FailOnBreaking,
		},
	}
}
```

**Step 2: Verify it compiles**

Run: `go build ./pkg/release/...`
Expected: Success

**Step 3: Commit**

```bash
git add pkg/release/sdk.go
git commit -m "feat(release): add SDK release types and config converter"
```

---

## Task 3: Implement RunSDK function

**Files:**
- Modify: `pkg/release/sdk.go`
- Test: `pkg/release/sdk_test.go`

**Step 1: Write the failing test**

```go
// pkg/release/sdk_test.go
package release

import (
	"context"
	"testing"
)

func TestRunSDK_Bad_NoConfig(t *testing.T) {
	cfg := &Config{
		SDK: nil,
	}
	cfg.projectDir = "/tmp"

	_, err := RunSDK(context.Background(), cfg, true)
	if err == nil {
		t.Error("expected error when SDK config is nil")
	}
}

func TestRunSDK_Good_DryRun(t *testing.T) {
	cfg := &Config{
		SDK: &SDKConfig{
			Languages: []string{"typescript", "python"},
			Output:    "sdk",
		},
	}
	cfg.projectDir = "/tmp"
	cfg.version = "v1.0.0"

	result, err := RunSDK(context.Background(), cfg, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if result.Version != "v1.0.0" {
		t.Errorf("expected version v1.0.0, got %s", result.Version)
	}
	if len(result.Languages) != 2 {
		t.Errorf("expected 2 languages, got %d", len(result.Languages))
	}
}
```

**Step 2: Run test to verify it fails**

Run: `go test ./pkg/release/... -run TestRunSDK -v`
Expected: FAIL with "RunSDK undefined"

**Step 3: Write implementation**

Add to `pkg/release/sdk.go`:

```go
// RunSDK executes SDK-only release: diff check + generate.
// If dryRun is true, it shows what would be done without generating.
func RunSDK(ctx context.Context, cfg *Config, dryRun bool) (*SDKRelease, error) {
	if cfg == nil {
		return nil, fmt.Errorf("release.RunSDK: config is nil")
	}
	if cfg.SDK == nil {
		return nil, fmt.Errorf("release.RunSDK: sdk not configured in .core/release.yaml")
	}

	projectDir := cfg.projectDir
	if projectDir == "" {
		projectDir = "."
	}

	// Determine version
	version := cfg.version
	if version == "" {
		var err error
		version, err = DetermineVersion(projectDir)
		if err != nil {
			return nil, fmt.Errorf("release.RunSDK: failed to determine version: %w", err)
		}
	}

	// Run diff check if enabled
	if cfg.SDK.Diff.Enabled {
		breaking, err := checkBreakingChanges(projectDir, cfg.SDK)
		if err != nil {
			// Non-fatal: warn and continue
			fmt.Printf("Warning: diff check failed: %v\n", err)
		} else if breaking {
			if cfg.SDK.Diff.FailOnBreaking {
				return nil, fmt.Errorf("release.RunSDK: breaking API changes detected")
			}
			fmt.Printf("Warning: breaking API changes detected\n")
		}
	}

	// Prepare result
	output := cfg.SDK.Output
	if output == "" {
		output = "sdk"
	}

	result := &SDKRelease{
		Version:   version,
		Languages: cfg.SDK.Languages,
		Output:    output,
	}

	if dryRun {
		return result, nil
	}

	// Generate SDKs
	sdkCfg := toSDKConfig(cfg.SDK)
	s := sdk.New(projectDir, sdkCfg)
	s.SetVersion(version)

	if err := s.Generate(ctx); err != nil {
		return nil, fmt.Errorf("release.RunSDK: generation failed: %w", err)
	}

	return result, nil
}

// checkBreakingChanges runs oasdiff to detect breaking changes.
func checkBreakingChanges(projectDir string, cfg *SDKConfig) (bool, error) {
	// Get previous tag for comparison
	prevTag, err := getPreviousTag(projectDir)
	if err != nil {
		return false, fmt.Errorf("no previous tag found: %w", err)
	}

	// Detect spec path
	specPath := cfg.Spec
	if specPath == "" {
		s := sdk.New(projectDir, nil)
		specPath, err = s.DetectSpec()
		if err != nil {
			return false, err
		}
	}

	// Run diff
	result, err := sdk.Diff(prevTag, specPath)
	if err != nil {
		return false, err
	}

	return result.Breaking, nil
}

// getPreviousTag gets the most recent tag before HEAD.
func getPreviousTag(projectDir string) (string, error) {
	// Use git describe to get previous tag
	// This is a simplified version - may need refinement
	cmd := exec.Command("git", "describe", "--tags", "--abbrev=0", "HEAD^")
	cmd.Dir = projectDir
	out, err := cmd.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
```

Add imports for `context`, `fmt`, `os/exec`, and `strings`.

**Step 4: Run test to verify it passes**

Run: `go test ./pkg/release/... -run TestRunSDK -v`
Expected: PASS

**Step 5: Commit**

```bash
git add pkg/release/sdk.go pkg/release/sdk_test.go
git commit -m "feat(release): implement RunSDK for SDK-only releases"
```

---

## Task 4: Add --target flag to CLI

**Files:**
- Modify: `cmd/core/cmd/release.go`

**Step 1: Add target flag and routing**

In `AddReleaseCommand`, add:

```go
var target string
releaseCmd.StringFlag("target", "Release target (sdk)", &target)

// Update the action
releaseCmd.Action(func() error {
	if target == "sdk" {
		return runReleaseSDK(dryRun, version)
	}
	return runRelease(dryRun, version, draft, prerelease)
})
```

**Step 2: Check compilation (expected to fail until Task 5)**

Run: `go build ./cmd/core/...`
Expected: FAIL with "runReleaseSDK undefined"

**Step 3: Commit partial progress**

```bash
git add cmd/core/cmd/release.go
git commit -m "feat(cli): add --target flag to release command"
```

---

## Task 5: Implement runReleaseSDK CLI function

**Files:**
- Modify: `cmd/core/cmd/release.go`

**Step 1: Add the function**

```go
// runReleaseSDK executes SDK-only release.
func runReleaseSDK(dryRun bool, version string) error {
	ctx := context.Background()

	projectDir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("failed to get working directory: %w", err)
	}

	// Load configuration
	cfg, err := release.LoadConfig(projectDir)
	if err != nil {
		return fmt.Errorf("failed to load config: %w", err)
	}

	// Apply CLI overrides
	if version != "" {
		cfg.SetVersion(version)
	}

	// Print header
	fmt.Printf("%s Generating SDKs\n", releaseHeaderStyle.Render("SDK Release:"))
	if dryRun {
		fmt.Printf("  %s\n", releaseDimStyle.Render("(dry-run mode)"))
	}
	fmt.Println()

	// Run SDK release
	result, err := release.RunSDK(ctx, cfg, dryRun)
	if err != nil {
		fmt.Printf("%s %v\n", releaseErrorStyle.Render("Error:"), err)
		return err
	}

	// Print summary
	fmt.Println()
	fmt.Printf("%s SDK generation complete!\n", releaseSuccessStyle.Render("Success:"))
	fmt.Printf("  Version:   %s\n", releaseValueStyle.Render(result.Version))
	fmt.Printf("  Languages: %v\n", result.Languages)
	fmt.Printf("  Output:    %s/\n", releaseValueStyle.Render(result.Output))

	return nil
}
```

**Step 2: Verify it compiles and help shows flag**

Run: `go build -o bin/core ./cmd/core && ./bin/core release --help`
Expected: Shows `--target` flag in help output

**Step 3: Commit**

```bash
git add cmd/core/cmd/release.go
git commit -m "feat(cli): implement runReleaseSDK for SDK generation"
```

---

## Task 6: Add integration tests

**Files:**
- Modify: `pkg/release/sdk_test.go`

**Step 1: Add more test cases**

```go
func TestRunSDK_Good_WithDiffEnabled(t *testing.T) {
	cfg := &Config{
		SDK: &SDKConfig{
			Languages: []string{"typescript"},
			Output:    "sdk",
			Diff: SDKDiffConfig{
				Enabled:        true,
				FailOnBreaking: false,
			},
		},
	}
	cfg.projectDir = "/tmp"
	cfg.version = "v1.0.0"

	// Dry run should succeed even without git repo
	result, err := RunSDK(context.Background(), cfg, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.Version != "v1.0.0" {
		t.Errorf("expected v1.0.0, got %s", result.Version)
	}
}

func TestRunSDK_Good_DefaultOutput(t *testing.T) {
	cfg := &Config{
		SDK: &SDKConfig{
			Languages: []string{"go"},
			// Output not set - should default to "sdk"
		},
	}
	cfg.projectDir = "/tmp"
	cfg.version = "v1.0.0"

	result, err := RunSDK(context.Background(), cfg, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.Output != "sdk" {
		t.Errorf("expected default output 'sdk', got %s", result.Output)
	}
}

func TestToSDKConfig_Good_Conversion(t *testing.T) {
	relCfg := &SDKConfig{
		Spec:      "api.yaml",
		Languages: []string{"typescript", "python"},
		Output:    "generated",
		Package: SDKPackageConfig{
			Name:    "my-sdk",
			Version: "v2.0.0",
		},
		Diff: SDKDiffConfig{
			Enabled:        true,
			FailOnBreaking: true,
		},
	}

	sdkCfg := toSDKConfig(relCfg)

	if sdkCfg.Spec != "api.yaml" {
		t.Errorf("expected spec api.yaml, got %s", sdkCfg.Spec)
	}
	if len(sdkCfg.Languages) != 2 {
		t.Errorf("expected 2 languages, got %d", len(sdkCfg.Languages))
	}
	if sdkCfg.Package.Name != "my-sdk" {
		t.Errorf("expected package name my-sdk, got %s", sdkCfg.Package.Name)
	}
}

func TestToSDKConfig_Good_NilInput(t *testing.T) {
	result := toSDKConfig(nil)
	if result != nil {
		t.Error("expected nil for nil input")
	}
}
```

**Step 2: Run all tests**

Run: `go test ./pkg/release/... -v`
Expected: All tests PASS

**Step 3: Commit**

```bash
git add pkg/release/sdk_test.go
git commit -m "test(release): add SDK release integration tests"
```

---

## Task 7: Final verification and TODO update

**Step 1: Build CLI**

Run: `go build -o bin/core ./cmd/core`
Expected: Success

**Step 2: Test help output**

Run: `./bin/core release --help`
Expected: Shows `--target` flag

**Step 3: Run all tests**

Run: `go test ./pkg/release/... ./pkg/sdk/... -v`
Expected: All PASS

**Step 4: Update TODO.md**

Mark S3.4 `core release --target sdk` as complete in `tasks/TODO.md`.

**Step 5: Commit**

```bash
git add tasks/TODO.md
git commit -m "docs: mark S3.4 SDK release integration as complete"
```
@@ -1,43 +0,0 @@
# Docs Sync Setup - Next Steps

After moving repo to `~/Code/host-uk/core`:

## 1. Add to repos.yaml

Add this to `/Users/snider/Code/host-uk/repos.yaml` under `repos:`:

```yaml
# CLI (Go)
core:
  type: foundation
  description: Core CLI - build, release, deploy for Go/Wails/PHP/containers
  docs: true
  ci: github-actions
```

## 2. Test docs sync

```bash
cd ~/Code/host-uk
core docs list            # Should show "core" with docs
core docs sync --dry-run  # Preview what syncs
```

## 3. Add CLI section to VitePress (core-php)

Edit `core-php/docs/.vitepress/config.js`:
- Add `/cli/` to nav
- Add sidebar for CLI commands

## 4. Sync and verify

```bash
core docs sync --output ../core-php/docs/cli
```

---

Current state:
- CLI docs written in `docs/cmd/*.md` (12 files)
- `docs/index.md` updated with command table
- All committed to git
@@ -1,193 +0,0 @@
# RAG Pipeline for Host UK Documentation

Store documentation in a vector database so Claude (and local LLMs) can retrieve relevant context without being reminded every conversation.

## The Problem This Solves

> "The amount of times I've had to re-tell you how to make a Flux button is crazy"

Instead of wasting context window on "remember, Flux buttons work like this...", the RAG system:
1. Stores all documentation in Qdrant
2. Claude queries before answering
3. Relevant docs injected automatically
4. No more re-teaching

## Prerequisites

**Already running on your lab:**
- Qdrant: `linux.snider.dev:6333`
- Ollama: `linux.snider.dev:11434` (or local)

**Install Python deps:**
```bash
pip install -r requirements.txt
```

**Ensure embedding model is available:**
```bash
ollama pull nomic-embed-text
```

## Quick Start

### 1. Ingest Documentation

```bash
# Ingest recovered Host UK docs
python ingest.py /Users/snider/Code/host-uk/core/tasks/recovered-hostuk \
    --collection hostuk-docs \
    --recreate

# Ingest Flux UI docs separately (higher priority)
python ingest.py /path/to/flux-ui-docs \
    --collection flux-ui-docs \
    --recreate
```

### 2. Query the Database

```bash
# Search for Flux button docs
python query.py "how to create a Flux button component"

# Filter by category
python query.py "path sandboxing" --category architecture

# Get more results
python query.py "Vi personality" --top 10

# Output as JSON
python query.py "brand voice" --format json

# Output for LLM context injection
python query.py "Flux modal component" --format context
```

### 3. List Collections

```bash
python query.py --list-collections
python query.py --stats --collection flux-ui-docs
```

## Collections Strategy

| Collection | Content | Priority |
|------------|---------|----------|
| `flux-ui-docs` | Flux Pro component docs | High (UI questions) |
| `hostuk-docs` | Recovered implementation docs | Medium |
| `brand-docs` | Vi, brand voice, visual identity | For content generation |
| `lethean-docs` | SASE/dVPN technical docs | Product-specific |

## Integration with Claude Code

### Option 1: MCP Server (Best)

Create an MCP server that Claude can query:

```go
// In core CLI
func (s *RagServer) Query(query string) ([]Document, error) {
	// Query Qdrant
	// Return relevant docs
}
```

Then Claude can call `rag.query("Flux button")` and get docs automatically.

### Option 2: CLAUDE.md Instruction

Add to project CLAUDE.md:

````markdown
## Before Answering UI Questions

When asked about Flux UI components, query the RAG database first:

```bash
python /path/to/query.py "your question" --collection flux-ui-docs --format context
```

Include the retrieved context in your response.
````

### Option 3: Claude Code Hook

Create a hook that auto-injects context for certain queries.

## Category Taxonomy

The ingestion automatically categorizes files:

| Category | Matches |
|----------|---------|
| `ui-component` | flux, ui/component |
| `brand` | brand, mascot |
| `product-brief` | brief |
| `help-doc` | help, draft |
| `task` | task, plan |
| `architecture` | architecture, migration |
| `documentation` | default |

## Environment Variables

| Variable | Default | Description |
|----------|---------|-------------|
| `QDRANT_HOST` | linux.snider.dev | Qdrant server |
| `QDRANT_PORT` | 6333 | Qdrant port |
| `EMBEDDING_MODEL` | nomic-embed-text | Ollama model |
| `CHUNK_SIZE` | 500 | Characters per chunk |
| `CHUNK_OVERLAP` | 50 | Overlap between chunks |

## Training a Model vs RAG

**RAG** (what this does):
- Model weights unchanged
- Documents retrieved at query time
- Knowledge updates instantly (re-ingest)
- Good for: facts, API docs, current information

**Fine-tuning** (separate process):
- Model weights updated
- Knowledge baked into model
- Requires retraining to update
- Good for: style, patterns, conventions

**For Flux UI**: RAG is perfect. The docs change, the API changes, you want current info.

**For Vi's voice**: Fine-tuning is better. Style doesn't change often, should be "baked in".

## Vector Math (For Understanding)

```text
"How do I make a Flux button?"
        ↓ Embedding
[0.12, -0.45, 0.78, ...768 floats...]
        ↓ Cosine similarity search
Find chunks with similar vectors
        ↓ Results
1. doc/ui/flux/components/button.md (score: 0.89)
2. doc/ui/flux/forms.md (score: 0.76)
3. doc/ui/flux/components/input.md (score: 0.71)
```

The embedding model converts text to "meaning vectors". Similar meanings = similar vectors = found by search.
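
A tiny Python sketch of the similarity math (standalone; not part of the pipeline scripts):

```python
import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Cosine similarity: dot(a, b) / (|a| * |b|); 1.0 = same direction."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)

# Toy 3-dimensional "embeddings" (real ones have 768 dims):
query = [0.12, -0.45, 0.78]
doc = [0.10, -0.40, 0.80]
print(f"score: {cosine_similarity(query, doc):.2f}")  # close to 1.0
```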

## Troubleshooting

**"No results found"**
- Lower threshold: `--threshold 0.3`
- Check collection has data: `--stats`
- Verify Ollama is running: `ollama list`

**"Connection refused"**
- Check Qdrant is running: `curl http://linux.snider.dev:6333/collections`
- Check firewall/network

**"Embedding model not available"**
```bash
ollama pull nomic-embed-text
```

---

*Part of the Host UK Core CLI tooling*
@@ -1,254 +0,0 @@
#!/usr/bin/env python3
"""
RAG Ingestion Pipeline for Host UK Documentation

Chunks markdown files, generates embeddings via Ollama, stores in Qdrant.

Usage:
    python ingest.py /path/to/docs --collection hostuk-docs
    python ingest.py /path/to/flux-ui --collection flux-ui-docs

Requirements:
    pip install qdrant-client ollama
"""

import argparse
import hashlib
import os
import re
import sys
import uuid
from pathlib import Path
from typing import Generator

try:
    from qdrant_client import QdrantClient
    from qdrant_client.models import Distance, VectorParams, PointStruct
    import ollama
except ImportError:
    print("Install dependencies: pip install qdrant-client ollama")
    sys.exit(1)


# Configuration
QDRANT_HOST = os.getenv("QDRANT_HOST", "localhost")
QDRANT_PORT = int(os.getenv("QDRANT_PORT", "6333"))
EMBEDDING_MODEL = os.getenv("EMBEDDING_MODEL", "nomic-embed-text")
CHUNK_SIZE = int(os.getenv("CHUNK_SIZE", "500"))       # chars
CHUNK_OVERLAP = int(os.getenv("CHUNK_OVERLAP", "50"))  # chars
VECTOR_DIM = 768  # nomic-embed-text dimension


def chunk_markdown(text: str, chunk_size: int = CHUNK_SIZE, overlap: int = CHUNK_OVERLAP) -> Generator[dict, None, None]:
    """
    Chunk markdown by sections (## headers), then by paragraphs if too long.
    Preserves context with overlap.
    """
    # Split by ## headers first
    sections = re.split(r'\n(?=## )', text)

    for section in sections:
        if not section.strip():
            continue

        # Extract section title
        lines = section.strip().split('\n')
        title = lines[0].lstrip('#').strip() if lines[0].startswith('#') else ""

        # If section is small enough, yield as-is
        if len(section) <= chunk_size:
            yield {
                "text": section.strip(),
                "section": title,
            }
            continue

        # Otherwise, chunk by paragraphs
        paragraphs = re.split(r'\n\n+', section)
        current_chunk = ""

        for para in paragraphs:
            if len(current_chunk) + len(para) <= chunk_size:
                current_chunk += "\n\n" + para if current_chunk else para
            else:
                if current_chunk:
                    yield {
                        "text": current_chunk.strip(),
                        "section": title,
                    }
                # Start new chunk with overlap from previous
                if overlap and current_chunk:
                    overlap_text = current_chunk[-overlap:]
                    current_chunk = overlap_text + "\n\n" + para
                else:
                    current_chunk = para

        # Don't forget the last chunk
        if current_chunk.strip():
            yield {
                "text": current_chunk.strip(),
                "section": title,
            }


def generate_embedding(text: str) -> list[float]:
    """Generate embedding using Ollama."""
    response = ollama.embeddings(model=EMBEDDING_MODEL, prompt=text)
    return response["embedding"]


def get_file_category(path: str) -> str:
    """Determine category from file path."""
    path_lower = path.lower()

    if "flux" in path_lower or "ui/component" in path_lower:
        return "ui-component"
    elif "brand" in path_lower or "mascot" in path_lower:
        return "brand"
    elif "brief" in path_lower:
        return "product-brief"
    elif "help" in path_lower or "draft" in path_lower:
        return "help-doc"
    elif "task" in path_lower or "plan" in path_lower:
        return "task"
    elif "architecture" in path_lower or "migration" in path_lower:
        return "architecture"
    else:
        return "documentation"


def ingest_directory(
    directory: Path,
    client: QdrantClient,
    collection: str,
    verbose: bool = False
) -> dict:
    """Ingest all markdown files from directory into Qdrant."""

    stats = {"files": 0, "chunks": 0, "errors": 0}
    points = []

    # Find all markdown files
    md_files = list(directory.rglob("*.md"))
    print(f"Found {len(md_files)} markdown files")

    for file_path in md_files:
        try:
            rel_path = str(file_path.relative_to(directory))

            with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
                content = f.read()

            if not content.strip():
                continue

            # Extract metadata
            category = get_file_category(rel_path)

            # Chunk the content
            for i, chunk in enumerate(chunk_markdown(content)):
                # Qdrant point IDs must be unsigned ints or UUIDs, so derive
                # a stable UUID from the md5 of path + index + text prefix.
                chunk_id = str(uuid.UUID(hashlib.md5(
                    f"{rel_path}:{i}:{chunk['text'][:100]}".encode()
                ).hexdigest()))

                # Generate embedding
                embedding = generate_embedding(chunk["text"])

                # Create point
                point = PointStruct(
                    id=chunk_id,
                    vector=embedding,
                    payload={
                        "text": chunk["text"],
                        "source": rel_path,
                        "section": chunk["section"],
                        "category": category,
                        "chunk_index": i,
                    }
                )
                points.append(point)
                stats["chunks"] += 1

                if verbose:
                    print(f"  [{category}] {rel_path} chunk {i}: {len(chunk['text'])} chars")

            stats["files"] += 1
            if not verbose:
                print(f"  Processed: {rel_path} ({stats['chunks']} chunks total)")

        except Exception as e:
            print(f"  Error processing {file_path}: {e}")
            stats["errors"] += 1

    # Batch upsert to Qdrant
    if points:
        print(f"\nUpserting {len(points)} vectors to Qdrant...")

        # Upsert in batches of 100
        batch_size = 100
        for i in range(0, len(points), batch_size):
            batch = points[i:i + batch_size]
            client.upsert(collection_name=collection, points=batch)
            print(f"  Uploaded batch {i // batch_size + 1}/{(len(points) - 1) // batch_size + 1}")

    return stats


def main():
    parser = argparse.ArgumentParser(description="Ingest markdown docs into Qdrant")
    parser.add_argument("directory", type=Path, help="Directory containing markdown files")
    parser.add_argument("--collection", default="hostuk-docs", help="Qdrant collection name")
    parser.add_argument("--recreate", action="store_true", help="Delete and recreate collection")
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    parser.add_argument("--qdrant-host", default=QDRANT_HOST, help="Qdrant host")
    parser.add_argument("--qdrant-port", type=int, default=QDRANT_PORT, help="Qdrant port")

    args = parser.parse_args()

    if not args.directory.exists():
        print(f"Error: Directory not found: {args.directory}")
        sys.exit(1)

    # Connect to Qdrant
    print(f"Connecting to Qdrant at {args.qdrant_host}:{args.qdrant_port}...")
    client = QdrantClient(host=args.qdrant_host, port=args.qdrant_port)

    # Create or recreate collection
    collections = [c.name for c in client.get_collections().collections]

    if args.recreate and args.collection in collections:
        print(f"Deleting existing collection: {args.collection}")
        client.delete_collection(args.collection)
        collections.remove(args.collection)

    if args.collection not in collections:
        print(f"Creating collection: {args.collection}")
        client.create_collection(
            collection_name=args.collection,
            vectors_config=VectorParams(size=VECTOR_DIM, distance=Distance.COSINE)
        )

    # Verify Ollama model is available
    print(f"Using embedding model: {EMBEDDING_MODEL}")
    try:
        ollama.embeddings(model=EMBEDDING_MODEL, prompt="test")
    except Exception as e:
        print(f"Error: embedding model not available ({e}). Run: ollama pull {EMBEDDING_MODEL}")
        sys.exit(1)

    # Ingest files
    print(f"\nIngesting from: {args.directory}")
    stats = ingest_directory(args.directory, client, args.collection, args.verbose)

    # Summary
    print(f"\n{'=' * 50}")
    print("Ingestion complete!")
    print(f"  Files processed: {stats['files']}")
    print(f"  Chunks created:  {stats['chunks']}")
    print(f"  Errors:          {stats['errors']}")
    print(f"  Collection:      {args.collection}")
    print(f"{'=' * 50}")


if __name__ == "__main__":
    main()
@@ -1,196 +0,0 @@
#!/usr/bin/env python3
"""
RAG Query Tool for Host UK Documentation

Query the vector database and retrieve relevant documentation chunks.

Usage:
    python query.py "how do I create a Flux button"
    python query.py "what is Vi's personality" --collection hostuk-docs
    python query.py "path sandboxing" --top 10 --category architecture

Requirements:
    pip install qdrant-client ollama
"""

import argparse
import html
import json
import os
import sys
from typing import Optional

try:
    from qdrant_client import QdrantClient
    from qdrant_client.models import Filter, FieldCondition, MatchValue
    import ollama
except ImportError:
    print("Install dependencies: pip install qdrant-client ollama")
    sys.exit(1)


# Configuration
QDRANT_HOST = os.getenv("QDRANT_HOST", "localhost")
QDRANT_PORT = int(os.getenv("QDRANT_PORT", "6333"))
EMBEDDING_MODEL = os.getenv("EMBEDDING_MODEL", "nomic-embed-text")


def generate_embedding(text: str) -> list[float]:
    """Generate embedding using Ollama."""
    response = ollama.embeddings(model=EMBEDDING_MODEL, prompt=text)
    return response["embedding"]


def query_rag(
    query: str,
    client: QdrantClient,
    collection: str,
    top_k: int = 5,
    category: Optional[str] = None,
    score_threshold: float = 0.5,
) -> list[dict]:
    """Query the RAG database and return relevant chunks."""

    # Generate query embedding
    query_embedding = generate_embedding(query)

    # Build filter if category specified
    query_filter = None
    if category:
        query_filter = Filter(
            must=[
                FieldCondition(key="category", match=MatchValue(value=category))
            ]
        )

    # Search
    results = client.query_points(
        collection_name=collection,
        query=query_embedding,
        query_filter=query_filter,
        limit=top_k,
        score_threshold=score_threshold,
    ).points

    return [
        {
            "score": hit.score,
            "text": hit.payload["text"],
            "source": hit.payload["source"],
            "section": hit.payload.get("section", ""),
            "category": hit.payload.get("category", ""),
        }
        for hit in results
    ]


def format_results(results: list[dict], query: str, fmt: str = "text") -> str:
    """Format results for display."""

    if fmt == "json":
        return json.dumps(results, indent=2)

    if not results:
        return f"No results found for: {query}"

    output = []
    output.append(f"Query: {query}")
    output.append(f"Results: {len(results)}")
    output.append("=" * 60)

    for i, r in enumerate(results, 1):
        output.append(f"\n[{i}] {r['source']} (score: {r['score']:.3f})")
        if r['section']:
            output.append(f"    Section: {r['section']}")
        output.append(f"    Category: {r['category']}")
        output.append("-" * 40)
        # Truncate long text for display
        text = r['text']
        if len(text) > 500:
            text = text[:500] + "..."
        output.append(text)
        output.append("")

    return "\n".join(output)


def format_for_context(results: list[dict], query: str) -> str:
    """Format results as context for LLM injection."""

    if not results:
        return ""

    output = []
    output.append(f'<retrieved_context query="{html.escape(query)}">')

    for r in results:
        output.append(f'\n<document source="{html.escape(r["source"])}" category="{html.escape(r["category"])}">')
        output.append(html.escape(r['text']))
        output.append("</document>")

    output.append("\n</retrieved_context>")

    return "\n".join(output)


def main():
    parser = argparse.ArgumentParser(description="Query RAG documentation")
    parser.add_argument("query", nargs="?", help="Search query")
    parser.add_argument("--collection", default="hostuk-docs", help="Qdrant collection name")
    parser.add_argument("--top", "-k", type=int, default=5, help="Number of results")
    parser.add_argument("--category", "-c", help="Filter by category")
    parser.add_argument("--threshold", "-t", type=float, default=0.5, help="Score threshold")
    parser.add_argument("--format", "-f", choices=["text", "json", "context"], default="text")
    parser.add_argument("--qdrant-host", default=QDRANT_HOST)
    parser.add_argument("--qdrant-port", type=int, default=QDRANT_PORT)
    parser.add_argument("--list-collections", action="store_true", help="List available collections")
    parser.add_argument("--stats", action="store_true", help="Show collection stats")

    args = parser.parse_args()

    # Connect to Qdrant
    client = QdrantClient(host=args.qdrant_host, port=args.qdrant_port)

    # List collections
    if args.list_collections:
        collections = client.get_collections().collections
        print("Available collections:")
        for c in collections:
            info = client.get_collection(c.name)
            print(f"  - {c.name}: {info.points_count} vectors")
        return

    # Show stats
    if args.stats:
        try:
            info = client.get_collection(args.collection)
            print(f"Collection: {args.collection}")
            print(f"  Vectors: {info.points_count}")
            print(f"  Status: {info.status}")
        except Exception:
            print(f"Collection not found: {args.collection}")
        return

    # Query required
    if not args.query:
        parser.print_help()
        return

    # Execute query
    results = query_rag(
        query=args.query,
        client=client,
        collection=args.collection,
        top_k=args.top,
        category=args.category,
        score_threshold=args.threshold,
    )

    # Format output
    if args.format == "context":
        print(format_for_context(results, args.query))
    else:
        print(format_results(results, args.query, args.format))


if __name__ == "__main__":
    main()
@@ -1,2 +0,0 @@
qdrant-client>=1.12.0,<2.0.0
ollama>=0.1.0