refactor: remove build/, release/, sdk/ — extracted to core/go-build
Build system, release publishers, and SDK generation now live at forge.lthn.ai/core/go-build v0.1.0. 110 files, 21K LOC moved. Co-Authored-By: Virgil <virgil@lethean.io>
This commit is contained in:
parent
a3f6b76337
commit
d171fe5db8
107 changed files with 0 additions and 21357 deletions
298
build/archive.go
298
build/archive.go
|
|
@ -1,298 +0,0 @@
|
|||
// Package build provides project type detection and cross-compilation for the Core build system.
|
||||
package build
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
io_interface "forge.lthn.ai/core/go-io"
|
||||
"github.com/Snider/Borg/pkg/compress"
|
||||
)
|
||||
|
||||
// ArchiveFormat specifies the compression format for archives.
|
||||
// ArchiveFormat specifies the compression format for archives.
// It only affects non-Windows targets; Windows artifacts always use zip
// (see ArchiveWithFormat).
type ArchiveFormat string

const (
	// ArchiveFormatGzip uses tar.gz (gzip compression) - widely compatible.
	ArchiveFormatGzip ArchiveFormat = "gz"
	// ArchiveFormatXZ uses tar.xz (xz/LZMA2 compression) - better compression ratio.
	ArchiveFormatXZ ArchiveFormat = "xz"
	// ArchiveFormatZip uses zip - for Windows.
	ArchiveFormatZip ArchiveFormat = "zip"
)
|
||||
|
||||
// Archive creates an archive for a single artifact using gzip compression.
|
||||
// Uses tar.gz for linux/darwin and zip for windows.
|
||||
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.gz).
|
||||
// Returns a new Artifact with Path pointing to the archive.
|
||||
// Archive creates an archive for a single artifact using gzip compression.
// Uses tar.gz for linux/darwin and zip for windows.
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.gz).
// Returns a new Artifact with Path pointing to the archive; OS, Arch, and
// Checksum are copied from the input unchanged.
func Archive(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
	// Thin wrapper: gzip is the default format.
	return ArchiveWithFormat(fs, artifact, ArchiveFormatGzip)
}
|
||||
|
||||
// ArchiveXZ creates an archive for a single artifact using xz compression.
|
||||
// Uses tar.xz for linux/darwin and zip for windows.
|
||||
// Returns a new Artifact with Path pointing to the archive.
|
||||
// ArchiveXZ creates an archive for a single artifact using xz compression.
// Uses tar.xz for linux/darwin and zip for windows (the format only applies
// to non-Windows targets).
// Returns a new Artifact with Path pointing to the archive.
func ArchiveXZ(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
	// Thin wrapper around ArchiveWithFormat with the xz format.
	return ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
}
|
||||
|
||||
// ArchiveWithFormat creates an archive for a single artifact with the specified format.
|
||||
// Uses tar.gz or tar.xz for linux/darwin and zip for windows.
|
||||
// The archive is created alongside the binary (e.g., dist/myapp_linux_amd64.tar.xz).
|
||||
// Returns a new Artifact with Path pointing to the archive.
|
||||
func ArchiveWithFormat(fs io_interface.Medium, artifact Artifact, format ArchiveFormat) (Artifact, error) {
|
||||
if artifact.Path == "" {
|
||||
return Artifact{}, errors.New("build.Archive: artifact path is empty")
|
||||
}
|
||||
|
||||
// Verify the source file exists
|
||||
info, err := fs.Stat(artifact.Path)
|
||||
if err != nil {
|
||||
return Artifact{}, fmt.Errorf("build.Archive: source file not found: %w", err)
|
||||
}
|
||||
if info.IsDir() {
|
||||
return Artifact{}, errors.New("build.Archive: source path is a directory, expected file")
|
||||
}
|
||||
|
||||
// Determine archive type based on OS and format
|
||||
var archivePath string
|
||||
var archiveFunc func(fs io_interface.Medium, src, dst string) error
|
||||
|
||||
if artifact.OS == "windows" {
|
||||
archivePath = archiveFilename(artifact, ".zip")
|
||||
archiveFunc = createZipArchive
|
||||
} else {
|
||||
switch format {
|
||||
case ArchiveFormatXZ:
|
||||
archivePath = archiveFilename(artifact, ".tar.xz")
|
||||
archiveFunc = createTarXzArchive
|
||||
default:
|
||||
archivePath = archiveFilename(artifact, ".tar.gz")
|
||||
archiveFunc = createTarGzArchive
|
||||
}
|
||||
}
|
||||
|
||||
// Create the archive
|
||||
if err := archiveFunc(fs, artifact.Path, archivePath); err != nil {
|
||||
return Artifact{}, fmt.Errorf("build.Archive: failed to create archive: %w", err)
|
||||
}
|
||||
|
||||
return Artifact{
|
||||
Path: archivePath,
|
||||
OS: artifact.OS,
|
||||
Arch: artifact.Arch,
|
||||
Checksum: artifact.Checksum,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ArchiveAll archives all artifacts using gzip compression.
|
||||
// Returns a slice of new artifacts pointing to the archives.
|
||||
// ArchiveAll archives all artifacts using gzip compression.
// Returns a slice of new artifacts pointing to the archives.
// Processing stops at the first failure; see ArchiveAllWithFormat.
func ArchiveAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
	// Thin wrapper: gzip is the default format.
	return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatGzip)
}
|
||||
|
||||
// ArchiveAllXZ archives all artifacts using xz compression.
|
||||
// Returns a slice of new artifacts pointing to the archives.
|
||||
// ArchiveAllXZ archives all artifacts using xz compression.
// Returns a slice of new artifacts pointing to the archives.
// Processing stops at the first failure; see ArchiveAllWithFormat.
func ArchiveAllXZ(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
	// Thin wrapper around ArchiveAllWithFormat with the xz format.
	return ArchiveAllWithFormat(fs, artifacts, ArchiveFormatXZ)
}
|
||||
|
||||
// ArchiveAllWithFormat archives all artifacts with the specified format.
|
||||
// Returns a slice of new artifacts pointing to the archives.
|
||||
func ArchiveAllWithFormat(fs io_interface.Medium, artifacts []Artifact, format ArchiveFormat) ([]Artifact, error) {
|
||||
if len(artifacts) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var archived []Artifact
|
||||
for _, artifact := range artifacts {
|
||||
arch, err := ArchiveWithFormat(fs, artifact, format)
|
||||
if err != nil {
|
||||
return archived, fmt.Errorf("build.ArchiveAll: failed to archive %s: %w", artifact.Path, err)
|
||||
}
|
||||
archived = append(archived, arch)
|
||||
}
|
||||
|
||||
return archived, nil
|
||||
}
|
||||
|
||||
// archiveFilename generates the archive filename based on the artifact and extension.
|
||||
// Format: dist/myapp_linux_amd64.tar.gz (binary name taken from artifact path).
|
||||
func archiveFilename(artifact Artifact, ext string) string {
|
||||
// Get the directory containing the binary (e.g., dist/linux_amd64)
|
||||
dir := filepath.Dir(artifact.Path)
|
||||
// Go up one level to the output directory (e.g., dist)
|
||||
outputDir := filepath.Dir(dir)
|
||||
|
||||
// Get the binary name without extension
|
||||
binaryName := filepath.Base(artifact.Path)
|
||||
binaryName = strings.TrimSuffix(binaryName, ".exe")
|
||||
|
||||
// Construct archive name: myapp_linux_amd64.tar.gz
|
||||
archiveName := fmt.Sprintf("%s_%s_%s%s", binaryName, artifact.OS, artifact.Arch, ext)
|
||||
|
||||
return filepath.Join(outputDir, archiveName)
|
||||
}
|
||||
|
||||
// createTarXzArchive creates a tar.xz archive containing a single file.
|
||||
// Uses Borg's compress package for xz compression.
|
||||
func createTarXzArchive(fs io_interface.Medium, src, dst string) error {
|
||||
// Open the source file
|
||||
srcFile, err := fs.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
srcInfo, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
|
||||
// Create tar archive in memory
|
||||
var tarBuf bytes.Buffer
|
||||
tarWriter := tar.NewWriter(&tarBuf)
|
||||
|
||||
// Create tar header
|
||||
header, err := tar.FileInfoHeader(srcInfo, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tar header: %w", err)
|
||||
}
|
||||
header.Name = filepath.Base(src)
|
||||
|
||||
if err := tarWriter.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("failed to write tar header: %w", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tarWriter, srcFile); err != nil {
|
||||
return fmt.Errorf("failed to write file content to tar: %w", err)
|
||||
}
|
||||
|
||||
if err := tarWriter.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close tar writer: %w", err)
|
||||
}
|
||||
|
||||
// Compress with xz using Borg
|
||||
xzData, err := compress.Compress(tarBuf.Bytes(), "xz")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compress with xz: %w", err)
|
||||
}
|
||||
|
||||
// Write to destination file
|
||||
dstFile, err := fs.Create(dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive file: %w", err)
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
if _, err := dstFile.Write(xzData); err != nil {
|
||||
return fmt.Errorf("failed to write archive file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createTarGzArchive creates a tar.gz archive containing a single file.
|
||||
func createTarGzArchive(fs io_interface.Medium, src, dst string) error {
|
||||
// Open the source file
|
||||
srcFile, err := fs.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
srcInfo, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
|
||||
// Create the destination file
|
||||
dstFile, err := fs.Create(dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive file: %w", err)
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
// Create gzip writer
|
||||
gzWriter := gzip.NewWriter(dstFile)
|
||||
defer func() { _ = gzWriter.Close() }()
|
||||
|
||||
// Create tar writer
|
||||
tarWriter := tar.NewWriter(gzWriter)
|
||||
defer func() { _ = tarWriter.Close() }()
|
||||
|
||||
// Create tar header
|
||||
header, err := tar.FileInfoHeader(srcInfo, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tar header: %w", err)
|
||||
}
|
||||
// Use just the filename, not the full path
|
||||
header.Name = filepath.Base(src)
|
||||
|
||||
// Write header
|
||||
if err := tarWriter.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("failed to write tar header: %w", err)
|
||||
}
|
||||
|
||||
// Write file content
|
||||
if _, err := io.Copy(tarWriter, srcFile); err != nil {
|
||||
return fmt.Errorf("failed to write file content to tar: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createZipArchive creates a zip archive containing a single file.
|
||||
func createZipArchive(fs io_interface.Medium, src, dst string) error {
|
||||
// Open the source file
|
||||
srcFile, err := fs.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
srcInfo, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat source file: %w", err)
|
||||
}
|
||||
|
||||
// Create the destination file
|
||||
dstFile, err := fs.Create(dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create archive file: %w", err)
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
// Create zip writer
|
||||
zipWriter := zip.NewWriter(dstFile)
|
||||
defer func() { _ = zipWriter.Close() }()
|
||||
|
||||
// Create zip header
|
||||
header, err := zip.FileInfoHeader(srcInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create zip header: %w", err)
|
||||
}
|
||||
// Use just the filename, not the full path
|
||||
header.Name = filepath.Base(src)
|
||||
header.Method = zip.Deflate
|
||||
|
||||
// Create file in archive
|
||||
writer, err := zipWriter.CreateHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create zip entry: %w", err)
|
||||
}
|
||||
|
||||
// Write file content
|
||||
if _, err := io.Copy(writer, srcFile); err != nil {
|
||||
return fmt.Errorf("failed to write file content to zip: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,646 +0,0 @@
|
|||
package build
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/Snider/Borg/pkg/compress"
|
||||
io_interface "forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupArchiveTestFile creates a test binary file in a temp directory with the standard structure.
|
||||
// Returns the path to the binary and the output directory.
|
||||
func setupArchiveTestFile(t *testing.T, name, os_, arch string) (binaryPath string, outputDir string) {
|
||||
t.Helper()
|
||||
|
||||
outputDir = t.TempDir()
|
||||
|
||||
// Create platform directory: dist/os_arch
|
||||
platformDir := filepath.Join(outputDir, os_+"_"+arch)
|
||||
err := os.MkdirAll(platformDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test binary
|
||||
binaryPath = filepath.Join(platformDir, name)
|
||||
content := []byte("#!/bin/bash\necho 'Hello, World!'\n")
|
||||
err = os.WriteFile(binaryPath, content, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
return binaryPath, outputDir
|
||||
}
|
||||
|
||||
// TestArchive_Good covers the happy paths of Archive, ArchiveXZ, and
// ArchiveWithFormat: per-OS format selection (tar.gz/tar.xz vs zip),
// archive naming, and that OS/Arch/Checksum metadata is carried through.
func TestArchive_Good(t *testing.T) {
	fs := io_interface.Local
	t.Run("creates tar.gz for linux", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)

		// Verify archive was created
		expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.gz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		// Verify OS and Arch are preserved
		assert.Equal(t, "linux", result.OS)
		assert.Equal(t, "amd64", result.Arch)

		// Verify archive content
		verifyTarGzContent(t, result.Path, "myapp")
	})

	t.Run("creates tar.gz for darwin", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "darwin",
			Arch: "arm64",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)

		expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.gz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyTarGzContent(t, result.Path, "myapp")
	})

	t.Run("creates zip for windows", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "windows",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)

		// Windows archives should strip .exe from archive name
		expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		// The zip entry itself keeps the .exe suffix.
		verifyZipContent(t, result.Path, "myapp.exe")
	})

	t.Run("preserves checksum field", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifact := Artifact{
			Path:     binaryPath,
			OS:       "linux",
			Arch:     "amd64",
			Checksum: "abc123",
		}

		result, err := Archive(fs, artifact)
		require.NoError(t, err)
		assert.Equal(t, "abc123", result.Checksum)
	})

	t.Run("creates tar.xz for linux with ArchiveXZ", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := ArchiveXZ(fs, artifact)
		require.NoError(t, err)

		expectedPath := filepath.Join(outputDir, "myapp_linux_amd64.tar.xz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyTarXzContent(t, result.Path, "myapp")
	})

	t.Run("creates tar.xz for darwin with ArchiveWithFormat", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp", "darwin", "arm64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "darwin",
			Arch: "arm64",
		}

		result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
		require.NoError(t, err)

		expectedPath := filepath.Join(outputDir, "myapp_darwin_arm64.tar.xz")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyTarXzContent(t, result.Path, "myapp")
	})

	t.Run("windows still uses zip even with xz format", func(t *testing.T) {
		binaryPath, outputDir := setupArchiveTestFile(t, "myapp.exe", "windows", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "windows",
			Arch: "amd64",
		}

		result, err := ArchiveWithFormat(fs, artifact, ArchiveFormatXZ)
		require.NoError(t, err)

		// Windows should still get .zip regardless of format
		expectedPath := filepath.Join(outputDir, "myapp_windows_amd64.zip")
		assert.Equal(t, expectedPath, result.Path)
		assert.FileExists(t, result.Path)

		verifyZipContent(t, result.Path, "myapp.exe")
	})
}
|
||||
|
||||
// TestArchive_Bad covers Archive's input validation: empty artifact path,
// non-existent source file, and a directory where a file was expected.
// Each case must return an error and an empty result Artifact.
func TestArchive_Bad(t *testing.T) {
	fs := io_interface.Local
	t.Run("returns error for empty path", func(t *testing.T) {
		artifact := Artifact{
			Path: "",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "artifact path is empty")
		assert.Empty(t, result.Path)
	})

	t.Run("returns error for non-existent file", func(t *testing.T) {
		artifact := Artifact{
			Path: "/nonexistent/path/binary",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "source file not found")
		assert.Empty(t, result.Path)
	})

	t.Run("returns error for directory path", func(t *testing.T) {
		dir := t.TempDir()

		artifact := Artifact{
			Path: dir,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Archive(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "source path is a directory")
		assert.Empty(t, result.Path)
	})
}
|
||||
|
||||
// TestArchiveAll_Good covers batch archiving across several platforms and
// verifies the empty/nil input contract (both return nil results, nil error).
func TestArchiveAll_Good(t *testing.T) {
	fs := io_interface.Local
	t.Run("archives multiple artifacts", func(t *testing.T) {
		outputDir := t.TempDir()

		// Create multiple binaries, one per target platform.
		var artifacts []Artifact
		targets := []struct {
			os_  string
			arch string
		}{
			{"linux", "amd64"},
			{"linux", "arm64"},
			{"darwin", "arm64"},
			{"windows", "amd64"},
		}

		for _, target := range targets {
			platformDir := filepath.Join(outputDir, target.os_+"_"+target.arch)
			err := os.MkdirAll(platformDir, 0755)
			require.NoError(t, err)

			name := "myapp"
			if target.os_ == "windows" {
				name = "myapp.exe"
			}

			binaryPath := filepath.Join(platformDir, name)
			err = os.WriteFile(binaryPath, []byte("binary content"), 0755)
			require.NoError(t, err)

			artifacts = append(artifacts, Artifact{
				Path: binaryPath,
				OS:   target.os_,
				Arch: target.arch,
			})
		}

		results, err := ArchiveAll(fs, artifacts)
		require.NoError(t, err)
		require.Len(t, results, 4)

		// Verify all archives were created, in input order, with metadata intact.
		for i, result := range results {
			assert.FileExists(t, result.Path)
			assert.Equal(t, artifacts[i].OS, result.OS)
			assert.Equal(t, artifacts[i].Arch, result.Arch)
		}
	})

	t.Run("returns nil for empty slice", func(t *testing.T) {
		results, err := ArchiveAll(fs, []Artifact{})
		assert.NoError(t, err)
		assert.Nil(t, results)
	})

	t.Run("returns nil for nil slice", func(t *testing.T) {
		results, err := ArchiveAll(fs, nil)
		assert.NoError(t, err)
		assert.Nil(t, results)
	})
}
|
||||
|
||||
// TestArchiveAll_Bad verifies that ArchiveAll stops at the first failure
// but still returns the artifacts archived before the error.
func TestArchiveAll_Bad(t *testing.T) {
	fs := io_interface.Local
	t.Run("returns partial results on error", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "myapp", "linux", "amd64")

		artifacts := []Artifact{
			{Path: binaryPath, OS: "linux", Arch: "amd64"},
			{Path: "/nonexistent/binary", OS: "linux", Arch: "arm64"}, // This will fail
		}

		results, err := ArchiveAll(fs, artifacts)
		assert.Error(t, err)
		// Should have the first successful result
		assert.Len(t, results, 1)
		assert.FileExists(t, results[0].Path)
	})
}
|
||||
|
||||
// TestArchiveFilename_Good pins the archive naming scheme: the archive sits
// one directory above the per-platform dir, named <name>_<os>_<arch><ext>,
// with any ".exe" suffix stripped from the binary name.
func TestArchiveFilename_Good(t *testing.T) {
	t.Run("generates correct tar.gz filename", func(t *testing.T) {
		artifact := Artifact{
			Path: "/output/linux_amd64/myapp",
			OS:   "linux",
			Arch: "amd64",
		}

		filename := archiveFilename(artifact, ".tar.gz")
		assert.Equal(t, "/output/myapp_linux_amd64.tar.gz", filename)
	})

	t.Run("generates correct zip filename", func(t *testing.T) {
		artifact := Artifact{
			Path: "/output/windows_amd64/myapp.exe",
			OS:   "windows",
			Arch: "amd64",
		}

		// ".exe" must not appear in the archive name.
		filename := archiveFilename(artifact, ".zip")
		assert.Equal(t, "/output/myapp_windows_amd64.zip", filename)
	})

	t.Run("handles nested output directories", func(t *testing.T) {
		artifact := Artifact{
			Path: "/project/dist/linux_arm64/cli",
			OS:   "linux",
			Arch: "arm64",
		}

		filename := archiveFilename(artifact, ".tar.gz")
		assert.Equal(t, "/project/dist/cli_linux_arm64.tar.gz", filename)
	})
}
|
||||
|
||||
// TestArchive_RoundTrip_Good archives real files and extracts them again,
// asserting content equality for tar.gz, tar.xz, and zip, plus permission
// preservation, a larger (64KB) payload, and that gzip actually shrinks
// highly compressible input.
func TestArchive_RoundTrip_Good(t *testing.T) {
	fs := io_interface.Local

	t.Run("tar.gz round trip preserves content", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "roundtrip-app", "linux", "amd64")

		// Read original content
		originalContent, err := os.ReadFile(binaryPath)
		require.NoError(t, err)

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		// Create archive
		archiveArtifact, err := Archive(fs, artifact)
		require.NoError(t, err)
		assert.FileExists(t, archiveArtifact.Path)

		// Extract and verify content matches
		extractedContent := extractTarGzFile(t, archiveArtifact.Path, "roundtrip-app")
		assert.Equal(t, originalContent, extractedContent)
	})

	t.Run("tar.xz round trip preserves content", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "roundtrip-xz", "linux", "arm64")

		originalContent, err := os.ReadFile(binaryPath)
		require.NoError(t, err)

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "arm64",
		}

		archiveArtifact, err := ArchiveXZ(fs, artifact)
		require.NoError(t, err)
		assert.FileExists(t, archiveArtifact.Path)

		extractedContent := extractTarXzFile(t, archiveArtifact.Path, "roundtrip-xz")
		assert.Equal(t, originalContent, extractedContent)
	})

	t.Run("zip round trip preserves content", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "roundtrip.exe", "windows", "amd64")

		originalContent, err := os.ReadFile(binaryPath)
		require.NoError(t, err)

		artifact := Artifact{
			Path: binaryPath,
			OS:   "windows",
			Arch: "amd64",
		}

		archiveArtifact, err := Archive(fs, artifact)
		require.NoError(t, err)
		assert.FileExists(t, archiveArtifact.Path)

		extractedContent := extractZipFile(t, archiveArtifact.Path, "roundtrip.exe")
		assert.Equal(t, originalContent, extractedContent)
	})

	t.Run("tar.gz preserves file permissions", func(t *testing.T) {
		binaryPath, _ := setupArchiveTestFile(t, "perms-app", "linux", "amd64")

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		archiveArtifact, err := Archive(fs, artifact)
		require.NoError(t, err)

		// Extract and verify permissions are preserved
		mode := extractTarGzFileMode(t, archiveArtifact.Path, "perms-app")
		// The original file was written with 0755 (by setupArchiveTestFile)
		assert.Equal(t, os.FileMode(0755), mode&os.ModePerm)
	})

	t.Run("round trip with large binary content", func(t *testing.T) {
		outputDir := t.TempDir()
		platformDir := filepath.Join(outputDir, "linux_amd64")
		require.NoError(t, os.MkdirAll(platformDir, 0755))

		// Create a larger file (64KB) with a non-trivial byte pattern.
		largeContent := make([]byte, 64*1024)
		for i := range largeContent {
			largeContent[i] = byte(i % 256)
		}
		binaryPath := filepath.Join(platformDir, "large-app")
		require.NoError(t, os.WriteFile(binaryPath, largeContent, 0755))

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		archiveArtifact, err := Archive(fs, artifact)
		require.NoError(t, err)

		extractedContent := extractTarGzFile(t, archiveArtifact.Path, "large-app")
		assert.Equal(t, largeContent, extractedContent)
	})

	t.Run("archive is smaller than original for tar.gz", func(t *testing.T) {
		outputDir := t.TempDir()
		platformDir := filepath.Join(outputDir, "linux_amd64")
		require.NoError(t, os.MkdirAll(platformDir, 0755))

		// Create a compressible file (repeated pattern)
		compressibleContent := make([]byte, 4096)
		for i := range compressibleContent {
			compressibleContent[i] = 'A'
		}
		binaryPath := filepath.Join(platformDir, "compressible-app")
		require.NoError(t, os.WriteFile(binaryPath, compressibleContent, 0755))

		artifact := Artifact{
			Path: binaryPath,
			OS:   "linux",
			Arch: "amd64",
		}

		archiveArtifact, err := Archive(fs, artifact)
		require.NoError(t, err)

		originalInfo, err := os.Stat(binaryPath)
		require.NoError(t, err)
		archiveInfo, err := os.Stat(archiveArtifact.Path)
		require.NoError(t, err)

		// Compressed archive should be smaller than original
		assert.Less(t, archiveInfo.Size(), originalInfo.Size())
	})
}
|
||||
|
||||
// extractTarGzFile extracts a named file from a tar.gz archive and returns its content.
|
||||
// extractTarGzFile extracts a named file from a tar.gz archive and returns its content.
// Fails the test immediately if the archive cannot be read or the entry is
// not present.
func extractTarGzFile(t *testing.T, archivePath, fileName string) []byte {
	t.Helper()

	file, err := os.Open(archivePath)
	require.NoError(t, err)
	defer func() { _ = file.Close() }()

	gzReader, err := gzip.NewReader(file)
	require.NoError(t, err)
	defer func() { _ = gzReader.Close() }()

	tarReader := tar.NewReader(gzReader)

	// Walk the tar entries until the requested name is found.
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			t.Fatalf("file %q not found in archive", fileName)
		}
		require.NoError(t, err)

		if header.Name == fileName {
			content, err := io.ReadAll(tarReader)
			require.NoError(t, err)
			return content
		}
	}
}
|
||||
|
||||
// extractTarGzFileMode extracts the file mode of a named file from a tar.gz archive.
|
||||
// extractTarGzFileMode extracts the file mode of a named file from a tar.gz archive.
// Fails the test immediately if the archive cannot be read or the entry is
// not present.
func extractTarGzFileMode(t *testing.T, archivePath, fileName string) os.FileMode {
	t.Helper()

	file, err := os.Open(archivePath)
	require.NoError(t, err)
	defer func() { _ = file.Close() }()

	gzReader, err := gzip.NewReader(file)
	require.NoError(t, err)
	defer func() { _ = gzReader.Close() }()

	tarReader := tar.NewReader(gzReader)

	// Walk the tar entries until the requested name is found.
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			t.Fatalf("file %q not found in archive", fileName)
		}
		require.NoError(t, err)

		if header.Name == fileName {
			return header.FileInfo().Mode()
		}
	}
}
|
||||
|
||||
// extractTarXzFile extracts a named file from a tar.xz archive and returns its content.
|
||||
// extractTarXzFile extracts a named file from a tar.xz archive and returns its content.
// Decompression is done in memory via Borg's compress package (mirroring how
// createTarXzArchive produces the archive). Fails the test if the entry is
// not present.
func extractTarXzFile(t *testing.T, archivePath, fileName string) []byte {
	t.Helper()

	xzData, err := os.ReadFile(archivePath)
	require.NoError(t, err)

	tarData, err := compress.Decompress(xzData)
	require.NoError(t, err)

	tarReader := tar.NewReader(bytes.NewReader(tarData))

	// Walk the tar entries until the requested name is found.
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			t.Fatalf("file %q not found in archive", fileName)
		}
		require.NoError(t, err)

		if header.Name == fileName {
			content, err := io.ReadAll(tarReader)
			require.NoError(t, err)
			return content
		}
	}
}
|
||||
|
||||
// extractZipFile extracts a named file from a zip archive and returns its content.
|
||||
// extractZipFile extracts a named file from a zip archive and returns its content.
// Fails the test immediately if the archive cannot be opened or the entry is
// not present.
func extractZipFile(t *testing.T, archivePath, fileName string) []byte {
	t.Helper()

	reader, err := zip.OpenReader(archivePath)
	require.NoError(t, err)
	defer func() { _ = reader.Close() }()

	for _, f := range reader.File {
		if f.Name == fileName {
			rc, err := f.Open()
			require.NoError(t, err)
			defer func() { _ = rc.Close() }()

			content, err := io.ReadAll(rc)
			require.NoError(t, err)
			return content
		}
	}

	t.Fatalf("file %q not found in zip archive", fileName)
	// Unreachable: Fatalf stops the test, but the compiler needs a return.
	return nil
}
|
||||
|
||||
// verifyTarGzContent opens a tar.gz file and verifies it contains the expected file.
|
||||
// verifyTarGzContent opens a tar.gz file and verifies it contains the expected file.
// It also asserts the archive contains exactly one entry.
func verifyTarGzContent(t *testing.T, archivePath, expectedName string) {
	t.Helper()

	file, err := os.Open(archivePath)
	require.NoError(t, err)
	defer func() { _ = file.Close() }()

	gzReader, err := gzip.NewReader(file)
	require.NoError(t, err)
	defer func() { _ = gzReader.Close() }()

	tarReader := tar.NewReader(gzReader)

	header, err := tarReader.Next()
	require.NoError(t, err)
	assert.Equal(t, expectedName, header.Name)

	// Verify there's only one file
	_, err = tarReader.Next()
	assert.Equal(t, io.EOF, err)
}
|
||||
|
||||
// verifyZipContent opens a zip file and verifies it contains the expected file.
|
||||
// verifyZipContent opens a zip file and verifies it contains the expected file.
// It also asserts the archive contains exactly one entry.
func verifyZipContent(t *testing.T, archivePath, expectedName string) {
	t.Helper()

	reader, err := zip.OpenReader(archivePath)
	require.NoError(t, err)
	defer func() { _ = reader.Close() }()

	require.Len(t, reader.File, 1)
	assert.Equal(t, expectedName, reader.File[0].Name)
}
|
||||
|
||||
// verifyTarXzContent opens a tar.xz file and verifies it contains the expected file.
|
||||
// verifyTarXzContent opens a tar.xz file and verifies it contains the expected file.
// It also asserts the archive contains exactly one entry. Decompression uses
// Borg's compress package, matching createTarXzArchive.
func verifyTarXzContent(t *testing.T, archivePath, expectedName string) {
	t.Helper()

	// Read the xz-compressed file
	xzData, err := os.ReadFile(archivePath)
	require.NoError(t, err)

	// Decompress with Borg
	tarData, err := compress.Decompress(xzData)
	require.NoError(t, err)

	// Read tar archive
	tarReader := tar.NewReader(bytes.NewReader(tarData))

	header, err := tarReader.Next()
	require.NoError(t, err)
	assert.Equal(t, expectedName, header.Name)

	// Verify there's only one file
	_, err = tarReader.Next()
	assert.Equal(t, io.EOF, err)
}
|
||||
|
|
@ -1,90 +0,0 @@
|
|||
// Package build provides project type detection and cross-compilation for the Core build system.
|
||||
// It supports Go, Wails, Node.js, and PHP projects with automatic detection based on
|
||||
// marker files (go.mod, wails.json, package.json, composer.json).
|
||||
package build
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// ProjectType identifies the kind of project the build system detected.
type ProjectType string

// Project type constants for build detection.
const (
	ProjectTypeGo       ProjectType = "go"       // standard Go project with go.mod
	ProjectTypeWails    ProjectType = "wails"    // Wails desktop application
	ProjectTypeNode     ProjectType = "node"     // Node.js project with package.json
	ProjectTypePHP      ProjectType = "php"      // PHP/Laravel project with composer.json
	ProjectTypeCPP      ProjectType = "cpp"      // C++ project with CMakeLists.txt
	ProjectTypeDocker   ProjectType = "docker"   // Docker-based project with Dockerfile
	ProjectTypeLinuxKit ProjectType = "linuxkit" // LinuxKit VM configuration
	ProjectTypeTaskfile ProjectType = "taskfile" // project using Taskfile automation
)
|
||||
|
||||
// Target represents a build target platform as a GOOS/GOARCH pair.
type Target struct {
	OS   string // target operating system (GOOS)
	Arch string // target architecture (GOARCH)
}

// String returns the target in GOOS/GOARCH format, e.g. "linux/amd64".
func (t Target) String() string {
	return t.OS + "/" + t.Arch
}
|
||||
|
||||
// Artifact represents a single build output file along with the platform it
// was built for and (once computed) its checksum.
type Artifact struct {
	Path     string // filesystem path to the artifact
	OS       string // GOOS the artifact targets
	Arch     string // GOARCH the artifact targets
	Checksum string // content checksum, empty until computed
}
|
||||
|
||||
// Config holds build configuration shared by all Builder implementations.
// General fields apply to every project type; the Docker- and
// LinuxKit-specific fields are only consulted by those builders.
type Config struct {
	// FS is the medium used for file operations.
	FS io.Medium
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// OutputDir is where build artifacts are placed.
	OutputDir string
	// Name is the output binary name.
	Name string
	// Version is the build version string.
	Version string
	// LDFlags are additional linker flags.
	LDFlags []string

	// Docker-specific config
	Dockerfile string            // Path to Dockerfile (default: Dockerfile)
	Registry   string            // Container registry (default: ghcr.io)
	Image      string            // Image name (owner/repo format)
	Tags       []string          // Additional tags to apply
	BuildArgs  map[string]string // Docker build arguments
	Push       bool              // Whether to push after build

	// LinuxKit-specific config
	LinuxKitConfig string   // Path to LinuxKit YAML config
	Formats        []string // Output formats (iso, qcow2, raw, vmdk)
}
|
||||
|
||||
// Builder defines the interface for project-specific build implementations.
// Implementations exist per ProjectType (Go, Wails, Docker, LinuxKit, ...);
// Detect is used during auto-detection and Build performs the actual
// compilation for each requested target.
type Builder interface {
	// Name returns the builder's identifier.
	Name() string
	// Detect checks if this builder can handle the project in the given directory.
	Detect(fs io.Medium, dir string) (bool, error)
	// Build compiles the project for the specified targets.
	Build(ctx context.Context, cfg *Config, targets []Target) ([]Artifact, error)
}
|
||||
|
|
@ -1,143 +0,0 @@
|
|||
// Package buildcmd provides project build commands with auto-detection.
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"embed"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
// init registers AddBuildCommands with the CLI framework at import time, so
// the `build` command tree is attached when the root command is assembled.
func init() {
	cli.RegisterCommands(AddBuildCommands)
}
|
||||
|
||||
// Style aliases from shared package. These are local names for the common
// CLI lipgloss styles so output formatting stays consistent across commands.
var (
	buildHeaderStyle = cli.TitleStyle
	buildTargetStyle = cli.ValueStyle
	buildSuccessStyle = cli.SuccessStyle
	buildErrorStyle = cli.ErrorStyle
	buildDimStyle = cli.DimStyle
)
|
||||
|
||||
// guiTemplate embeds the GUI wrapper project template used by
// `build from-path` and `build pwa` to scaffold a desktop app shell.
//
//go:embed all:tmpl/gui
var guiTemplate embed.FS
|
||||
|
||||
// Flags for the main build command and its subcommands. They are bound to
// the cobra flag sets in initBuildFlags and read by the Run functions.
var (
	buildType string
	ciMode bool
	targets string
	outputDir string
	doArchive bool
	doChecksum bool
	verbose bool

	// Docker/LinuxKit specific flags
	configPath string
	format string
	push bool
	imageName string

	// Signing flags
	noSign bool
	notarize bool

	// from-path subcommand
	fromPath string

	// pwa subcommand
	pwaURL string

	// sdk subcommand
	sdkSpec string
	sdkLang string
	sdkVersion string
	sdkDryRun bool
)
|
||||
|
||||
// buildCmd is the root `build` command: it auto-detects the project type in
// the current directory and delegates to runProjectBuild.
var buildCmd = &cli.Command{
	Use: "build",
	Short: i18n.T("cmd.build.short"),
	Long: i18n.T("cmd.build.long"),
	RunE: func(cmd *cli.Command, args []string) error {
		return runProjectBuild(cmd.Context(), buildType, ciMode, targets, outputDir, doArchive, doChecksum, configPath, format, push, imageName, noSign, notarize, verbose)
	},
}
|
||||
|
||||
// fromPathCmd implements `build from-path`: packages a local static web app
// directory (--path, required) as a desktop application.
var fromPathCmd = &cli.Command{
	Use: "from-path",
	Short: i18n.T("cmd.build.from_path.short"),
	RunE: func(cmd *cli.Command, args []string) error {
		if fromPath == "" {
			return errPathRequired
		}
		return runBuild(fromPath)
	},
}
|
||||
|
||||
// pwaCmd implements `build pwa`: downloads a live PWA (--url, required) and
// packages it as a desktop application.
var pwaCmd = &cli.Command{
	Use: "pwa",
	Short: i18n.T("cmd.build.pwa.short"),
	RunE: func(cmd *cli.Command, args []string) error {
		if pwaURL == "" {
			return errURLRequired
		}
		return runPwaBuild(pwaURL)
	},
}
|
||||
|
||||
// sdkBuildCmd implements `build sdk`: generates API SDKs from an OpenAPI
// spec, driven by the --spec/--lang/--version/--dry-run flags.
var sdkBuildCmd = &cli.Command{
	Use: "sdk",
	Short: i18n.T("cmd.build.sdk.short"),
	Long: i18n.T("cmd.build.sdk.long"),
	RunE: func(cmd *cli.Command, args []string) error {
		return runBuildSDK(sdkSpec, sdkLang, sdkVersion, sdkDryRun)
	},
}
|
||||
|
||||
// initBuildFlags binds all command-line flags to their package-level
// variables and attaches the subcommands to buildCmd. It is called once
// from AddBuildCommands.
func initBuildFlags() {
	// Main build command flags
	buildCmd.Flags().StringVar(&buildType, "type", "", i18n.T("cmd.build.flag.type"))
	buildCmd.Flags().BoolVar(&ciMode, "ci", false, i18n.T("cmd.build.flag.ci"))
	buildCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, i18n.T("common.flag.verbose"))
	buildCmd.Flags().StringVar(&targets, "targets", "", i18n.T("cmd.build.flag.targets"))
	buildCmd.Flags().StringVar(&outputDir, "output", "", i18n.T("cmd.build.flag.output"))
	buildCmd.Flags().BoolVar(&doArchive, "archive", true, i18n.T("cmd.build.flag.archive"))
	buildCmd.Flags().BoolVar(&doChecksum, "checksum", true, i18n.T("cmd.build.flag.checksum"))

	// Docker/LinuxKit specific
	buildCmd.Flags().StringVar(&configPath, "config", "", i18n.T("cmd.build.flag.config"))
	buildCmd.Flags().StringVar(&format, "format", "", i18n.T("cmd.build.flag.format"))
	buildCmd.Flags().BoolVar(&push, "push", false, i18n.T("cmd.build.flag.push"))
	buildCmd.Flags().StringVar(&imageName, "image", "", i18n.T("cmd.build.flag.image"))

	// Signing flags
	buildCmd.Flags().BoolVar(&noSign, "no-sign", false, i18n.T("cmd.build.flag.no_sign"))
	buildCmd.Flags().BoolVar(&notarize, "notarize", false, i18n.T("cmd.build.flag.notarize"))

	// from-path subcommand flags
	fromPathCmd.Flags().StringVar(&fromPath, "path", "", i18n.T("cmd.build.from_path.flag.path"))

	// pwa subcommand flags
	pwaCmd.Flags().StringVar(&pwaURL, "url", "", i18n.T("cmd.build.pwa.flag.url"))

	// sdk subcommand flags
	sdkBuildCmd.Flags().StringVar(&sdkSpec, "spec", "", i18n.T("common.flag.spec"))
	sdkBuildCmd.Flags().StringVar(&sdkLang, "lang", "", i18n.T("cmd.build.sdk.flag.lang"))
	sdkBuildCmd.Flags().StringVar(&sdkVersion, "version", "", i18n.T("cmd.build.sdk.flag.version"))
	sdkBuildCmd.Flags().BoolVar(&sdkDryRun, "dry-run", false, i18n.T("cmd.build.sdk.flag.dry_run"))

	// Add subcommands
	buildCmd.AddCommand(fromPathCmd)
	buildCmd.AddCommand(pwaCmd)
	buildCmd.AddCommand(sdkBuildCmd)
}
|
||||
|
||||
// AddBuildCommands registers the 'build' command and all subcommands on the
// given root command. It is the entry point handed to cli.RegisterCommands
// in this package's init.
func AddBuildCommands(root *cli.Command) {
	initBuildFlags()
	AddReleaseCommand(buildCmd)
	root.AddCommand(buildCmd)
}
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
// Package buildcmd provides project build commands with auto-detection.
|
||||
//
|
||||
// Supports building:
|
||||
// - Go projects (standard and cross-compilation)
|
||||
// - Wails desktop applications
|
||||
// - Docker images
|
||||
// - LinuxKit VM images
|
||||
// - Taskfile-based projects
|
||||
//
|
||||
// Configuration via .core/build.yaml or command-line flags.
|
||||
//
|
||||
// Subcommands:
|
||||
// - build: Auto-detect and build the current project
|
||||
// - build from-path: Build from a local static web app directory
|
||||
// - build pwa: Build from a live PWA URL
|
||||
// - build sdk: Generate API SDKs from OpenAPI spec
|
||||
package buildcmd
|
||||
|
||||
// Note: The AddBuildCommands function is defined in cmd_build.go
|
||||
// This file exists for documentation purposes and maintains the original
|
||||
// package documentation from commands.go.
|
||||
|
|
@ -1,392 +0,0 @@
|
|||
// cmd_project.go implements the main project build logic.
|
||||
//
|
||||
// This handles auto-detection of project types (Go, Wails, Docker, LinuxKit, Taskfile)
|
||||
// and orchestrates the build process including signing, archiving, and checksums.
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-devops/build/builders"
|
||||
"forge.lthn.ai/core/go-devops/build/signing"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// runProjectBuild handles the main `core build` command with auto-detection.
//
// Pipeline: load .core/build.yaml config, resolve project type (explicit
// buildType wins over detection), resolve targets (flag > config > host
// OS/arch), run the matching builder, then optionally sign (macOS host
// only), archive, and checksum the artifacts. In ciMode all styled output
// is suppressed and the final artifact list is printed as JSON; verbose
// enables per-step progress output.
func runProjectBuild(ctx context.Context, buildType string, ciMode bool, targetsFlag string, outputDir string, doArchive bool, doChecksum bool, configPath string, format string, push bool, imageName string, noSign bool, notarize bool, verbose bool) error {
	// Use local filesystem as the default medium
	fs := io.Local

	// Get current working directory as project root
	projectDir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err)
	}

	// Load configuration from .core/build.yaml (or defaults)
	buildCfg, err := build.LoadConfig(fs, projectDir)
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "load config"}), err)
	}

	// Detect project type if not specified
	var projectType build.ProjectType
	if buildType != "" {
		projectType = build.ProjectType(buildType)
	} else {
		projectType, err = build.PrimaryType(fs, projectDir)
		if err != nil {
			return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "detect project type"}), err)
		}
		if projectType == "" {
			return fmt.Errorf("%s", i18n.T("cmd.build.error.no_project_type", map[string]any{"Dir": projectDir}))
		}
	}

	// Determine targets: explicit flag > config file > current host
	var buildTargets []build.Target
	if targetsFlag != "" {
		// Parse from command line
		buildTargets, err = parseTargets(targetsFlag)
		if err != nil {
			return err
		}
	} else if len(buildCfg.Targets) > 0 {
		// Use config targets
		buildTargets = buildCfg.ToTargets()
	} else {
		// Fall back to current OS/arch
		buildTargets = []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}
	}

	// Determine output directory (default "dist", resolved under projectDir)
	if outputDir == "" {
		outputDir = "dist"
	}
	if !filepath.IsAbs(outputDir) {
		outputDir = filepath.Join(projectDir, outputDir)
	}
	outputDir = filepath.Clean(outputDir)

	// Ensure config path is absolute if provided
	if configPath != "" && !filepath.IsAbs(configPath) {
		configPath = filepath.Join(projectDir, configPath)
	}

	// Determine binary name: config binary > config name > directory name
	binaryName := buildCfg.Project.Binary
	if binaryName == "" {
		binaryName = buildCfg.Project.Name
	}
	if binaryName == "" {
		binaryName = filepath.Base(projectDir)
	}

	// Print build info (verbose mode only)
	if verbose && !ciMode {
		fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.build")), i18n.T("cmd.build.building_project"))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.type"), buildTargetStyle.Render(string(projectType)))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.output"), buildTargetStyle.Render(outputDir))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.binary"), buildTargetStyle.Render(binaryName))
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.label.targets"), buildTargetStyle.Render(formatTargets(buildTargets)))
		fmt.Println()
	}

	// Get the appropriate builder
	builder, err := getBuilder(projectType)
	if err != nil {
		return err
	}

	// Create build config for the builder
	cfg := &build.Config{
		FS: fs,
		ProjectDir: projectDir,
		OutputDir: outputDir,
		Name: binaryName,
		Version: buildCfg.Project.Name, // Could be enhanced with git describe
		LDFlags: buildCfg.Build.LDFlags,
		// Docker/LinuxKit specific
		Dockerfile: configPath, // Reuse for Dockerfile path
		LinuxKitConfig: configPath,
		Push: push,
		Image: imageName,
	}

	// Parse formats for LinuxKit
	if format != "" {
		cfg.Formats = strings.Split(format, ",")
	}

	// Execute build
	artifacts, err := builder.Build(ctx, cfg, buildTargets)
	if err != nil {
		if !ciMode {
			fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
		}
		return err
	}

	if verbose && !ciMode {
		fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.built_artifacts", map[string]any{"Count": len(artifacts)}))
		fmt.Println()
		for _, artifact := range artifacts {
			relPath, err := filepath.Rel(projectDir, artifact.Path)
			if err != nil {
				relPath = artifact.Path
			}
			fmt.Printf(" %s %s %s\n",
				buildSuccessStyle.Render("*"),
				buildTargetStyle.Render(relPath),
				buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)),
			)
		}
	}

	// Sign macOS binaries if enabled. Flags override the config: --notarize
	// forces notarization on, --no-sign disables signing entirely.
	signCfg := buildCfg.Sign
	if notarize {
		signCfg.MacOS.Notarize = true
	}
	if noSign {
		signCfg.Enabled = false
	}

	// Signing requires a darwin host (codesign/notarytool tooling).
	if signCfg.Enabled && runtime.GOOS == "darwin" {
		if verbose && !ciMode {
			fmt.Println()
			fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.sign")), i18n.T("cmd.build.signing_binaries"))
		}

		// Convert build.Artifact to signing.Artifact
		signingArtifacts := make([]signing.Artifact, len(artifacts))
		for i, a := range artifacts {
			signingArtifacts[i] = signing.Artifact{Path: a.Path, OS: a.OS, Arch: a.Arch}
		}

		if err := signing.SignBinaries(ctx, fs, signCfg, signingArtifacts); err != nil {
			if !ciMode {
				fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.signing_failed"), err)
			}
			return err
		}

		if signCfg.MacOS.Notarize {
			if err := signing.NotarizeBinaries(ctx, fs, signCfg, signingArtifacts); err != nil {
				if !ciMode {
					fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.notarization_failed"), err)
				}
				return err
			}
		}
	}

	// Archive artifacts if enabled
	var archivedArtifacts []build.Artifact
	if doArchive && len(artifacts) > 0 {
		if verbose && !ciMode {
			fmt.Println()
			fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.archive")), i18n.T("cmd.build.creating_archives"))
		}

		archivedArtifacts, err = build.ArchiveAll(fs, artifacts)
		if err != nil {
			if !ciMode {
				fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.archive_failed"), err)
			}
			return err
		}

		if verbose && !ciMode {
			for _, artifact := range archivedArtifacts {
				relPath, err := filepath.Rel(projectDir, artifact.Path)
				if err != nil {
					relPath = artifact.Path
				}
				fmt.Printf(" %s %s %s\n",
					buildSuccessStyle.Render("*"),
					buildTargetStyle.Render(relPath),
					buildDimStyle.Render(fmt.Sprintf("(%s/%s)", artifact.OS, artifact.Arch)),
				)
			}
		}
	}

	// Compute checksums if enabled (archives when present, raw binaries when
	// archiving was disabled).
	var checksummedArtifacts []build.Artifact
	if doChecksum && len(archivedArtifacts) > 0 {
		checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, archivedArtifacts, signCfg, ciMode, verbose)
		if err != nil {
			return err
		}
	} else if doChecksum && len(artifacts) > 0 && !doArchive {
		// Checksum raw binaries if archiving is disabled
		checksummedArtifacts, err = computeAndWriteChecksums(ctx, projectDir, outputDir, artifacts, signCfg, ciMode, verbose)
		if err != nil {
			return err
		}
	}

	// Output results
	if ciMode {
		// Determine which artifacts to output (prefer checksummed > archived > raw)
		var outputArtifacts []build.Artifact
		if len(checksummedArtifacts) > 0 {
			outputArtifacts = checksummedArtifacts
		} else if len(archivedArtifacts) > 0 {
			outputArtifacts = archivedArtifacts
		} else {
			outputArtifacts = artifacts
		}

		// JSON output for CI
		output, err := json.MarshalIndent(outputArtifacts, "", " ")
		if err != nil {
			return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "marshal artifacts"}), err)
		}
		fmt.Println(string(output))
	} else if !verbose {
		// Minimal output: just success with artifact count
		fmt.Printf("%s %s %s\n",
			buildSuccessStyle.Render(i18n.T("common.label.success")),
			i18n.T("cmd.build.built_artifacts", map[string]any{"Count": len(artifacts)}),
			buildDimStyle.Render(fmt.Sprintf("(%s)", outputDir)),
		)
	}

	return nil
}
|
||||
|
||||
// computeAndWriteChecksums computes checksums for artifacts and writes
// CHECKSUMS.txt into outputDir. When signing is enabled the checksum file
// is additionally GPG-signed. Returns the artifacts with their Checksum
// fields populated. ciMode suppresses styled output; verbose prints each
// artifact and checksum as it is processed.
func computeAndWriteChecksums(ctx context.Context, projectDir, outputDir string, artifacts []build.Artifact, signCfg signing.SignConfig, ciMode bool, verbose bool) ([]build.Artifact, error) {
	fs := io.Local
	if verbose && !ciMode {
		fmt.Println()
		fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.label.checksum")), i18n.T("cmd.build.computing_checksums"))
	}

	checksummedArtifacts, err := build.ChecksumAll(fs, artifacts)
	if err != nil {
		if !ciMode {
			fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.checksum_failed"), err)
		}
		return nil, err
	}

	// Write CHECKSUMS.txt
	checksumPath := filepath.Join(outputDir, "CHECKSUMS.txt")
	if err := build.WriteChecksumFile(fs, checksummedArtifacts, checksumPath); err != nil {
		if !ciMode {
			fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("common.error.failed", map[string]any{"Action": "write CHECKSUMS.txt"}), err)
		}
		return nil, err
	}

	// Sign checksums with GPG
	if signCfg.Enabled {
		if err := signing.SignChecksums(ctx, fs, signCfg, checksumPath); err != nil {
			if !ciMode {
				fmt.Printf("%s %s: %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), i18n.T("cmd.build.error.gpg_signing_failed"), err)
			}
			return nil, err
		}
	}

	if verbose && !ciMode {
		for _, artifact := range checksummedArtifacts {
			relPath, err := filepath.Rel(projectDir, artifact.Path)
			if err != nil {
				relPath = artifact.Path
			}
			fmt.Printf(" %s %s\n",
				buildSuccessStyle.Render("*"),
				buildTargetStyle.Render(relPath),
			)
			fmt.Printf(" %s\n", buildDimStyle.Render(artifact.Checksum))
		}

		relChecksumPath, err := filepath.Rel(projectDir, checksumPath)
		if err != nil {
			relChecksumPath = checksumPath
		}
		fmt.Printf(" %s %s\n",
			buildSuccessStyle.Render("*"),
			buildTargetStyle.Render(relChecksumPath),
		)
	}

	return checksummedArtifacts, nil
}
|
||||
|
||||
// parseTargets parses a comma-separated list of OS/arch pairs.
|
||||
func parseTargets(targetsFlag string) ([]build.Target, error) {
|
||||
parts := strings.Split(targetsFlag, ",")
|
||||
var targets []build.Target
|
||||
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
osArch := strings.Split(part, "/")
|
||||
if len(osArch) != 2 {
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.invalid_target", map[string]any{"Target": part}))
|
||||
}
|
||||
|
||||
targets = append(targets, build.Target{
|
||||
OS: strings.TrimSpace(osArch[0]),
|
||||
Arch: strings.TrimSpace(osArch[1]),
|
||||
})
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.no_targets"))
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
// formatTargets returns a human-readable string of targets.
|
||||
func formatTargets(targets []build.Target) string {
|
||||
var parts []string
|
||||
for _, t := range targets {
|
||||
parts = append(parts, t.String())
|
||||
}
|
||||
return strings.Join(parts, ", ")
|
||||
}
|
||||
|
||||
// getBuilder returns the appropriate builder for the project type.
|
||||
func getBuilder(projectType build.ProjectType) (build.Builder, error) {
|
||||
switch projectType {
|
||||
case build.ProjectTypeWails:
|
||||
return builders.NewWailsBuilder(), nil
|
||||
case build.ProjectTypeGo:
|
||||
return builders.NewGoBuilder(), nil
|
||||
case build.ProjectTypeDocker:
|
||||
return builders.NewDockerBuilder(), nil
|
||||
case build.ProjectTypeLinuxKit:
|
||||
return builders.NewLinuxKitBuilder(), nil
|
||||
case build.ProjectTypeTaskfile:
|
||||
return builders.NewTaskfileBuilder(), nil
|
||||
case build.ProjectTypeCPP:
|
||||
return builders.NewCPPBuilder(), nil
|
||||
case build.ProjectTypeNode:
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.node_not_implemented"))
|
||||
case build.ProjectTypePHP:
|
||||
return nil, fmt.Errorf("%s", i18n.T("cmd.build.error.php_not_implemented"))
|
||||
default:
|
||||
return nil, fmt.Errorf("%s: %s", i18n.T("cmd.build.error.unsupported_type"), projectType)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,324 +0,0 @@
|
|||
// cmd_pwa.go implements PWA and legacy GUI build functionality.
|
||||
//
|
||||
// Supports building desktop applications from:
|
||||
// - Local static web application directories
|
||||
// - Live PWA URLs (downloads and packages)
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"github.com/leaanthony/debme"
|
||||
"github.com/leaanthony/gosod"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
// Error sentinels for build commands, compared by the subcommand RunE
// functions when required flags are missing.
var (
	errPathRequired = errors.New("the --path flag is required")
	errURLRequired  = errors.New("the --url flag is required")
)
|
||||
|
||||
// runPwaBuild downloads a PWA from URL and builds it.
|
||||
func runPwaBuild(pwaURL string) error {
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.starting"), pwaURL)
|
||||
|
||||
tempDir, err := os.MkdirTemp("", "core-pwa-build-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "create temporary directory"}), err)
|
||||
}
|
||||
// defer os.RemoveAll(tempDir) // Keep temp dir for debugging
|
||||
fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.downloading_to"), tempDir)
|
||||
|
||||
if err := downloadPWA(pwaURL, tempDir); err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "download PWA"}), err)
|
||||
}
|
||||
|
||||
return runBuild(tempDir)
|
||||
}
|
||||
|
||||
// downloadPWA fetches a PWA from a URL and saves assets locally.
//
// It downloads the page at baseURL, locates the web-app manifest linked
// from the HTML, downloads the manifest's assets (start_url and icons) into
// destDir, and writes the root page as index.html. When no manifest link is
// found, the page alone is saved so plain sites can still be packaged.
// Individual asset failures are reported as warnings, not errors.
//
// NOTE(review): responses are used without checking resp.StatusCode, so a
// server error page would be saved as content — confirm this is intended.
func downloadPWA(baseURL, destDir string) error {
	// Fetch the main HTML page
	resp, err := http.Get(baseURL)
	if err != nil {
		return fmt.Errorf("%s %s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch URL"}), baseURL, err)
	}
	defer func() { _ = resp.Body.Close() }()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "read response body"}), err)
	}

	// Find the manifest URL from the HTML
	manifestURL, err := findManifestURL(string(body), baseURL)
	if err != nil {
		// If no manifest, it's not a PWA, but we can still try to package it as a simple site.
		fmt.Printf("%s %s\n", i18n.T("common.label.warning"), i18n.T("cmd.build.pwa.no_manifest"))
		if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil {
			return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err)
		}
		return nil
	}

	fmt.Printf("%s %s\n", i18n.T("cmd.build.pwa.found_manifest"), manifestURL)

	// Fetch and parse the manifest
	manifest, err := fetchManifest(manifestURL)
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "fetch or parse manifest"}), err)
	}

	// Download all assets listed in the manifest; failures are non-fatal.
	assets := collectAssets(manifest, manifestURL)
	for _, assetURL := range assets {
		if err := downloadAsset(assetURL, destDir); err != nil {
			fmt.Printf("%s %s %s: %v\n", i18n.T("common.label.warning"), i18n.T("common.error.failed", map[string]any{"Action": "download asset"}), assetURL, err)
		}
	}

	// Also save the root index.html
	if err := os.WriteFile(filepath.Join(destDir, "index.html"), body, 0644); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "write index.html"}), err)
	}

	fmt.Println(i18n.T("cmd.build.pwa.download_complete"))
	return nil
}
|
||||
|
||||
// findManifestURL extracts the manifest URL from HTML content.
|
||||
func findManifestURL(htmlContent, baseURL string) (string, error) {
|
||||
doc, err := html.Parse(strings.NewReader(htmlContent))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var manifestPath string
|
||||
var f func(*html.Node)
|
||||
f = func(n *html.Node) {
|
||||
if n.Type == html.ElementNode && n.Data == "link" {
|
||||
var rel, href string
|
||||
for _, a := range n.Attr {
|
||||
if a.Key == "rel" {
|
||||
rel = a.Val
|
||||
}
|
||||
if a.Key == "href" {
|
||||
href = a.Val
|
||||
}
|
||||
}
|
||||
if rel == "manifest" && href != "" {
|
||||
manifestPath = href
|
||||
return
|
||||
}
|
||||
}
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
f(c)
|
||||
}
|
||||
}
|
||||
f(doc)
|
||||
|
||||
if manifestPath == "" {
|
||||
return "", fmt.Errorf("%s", i18n.T("cmd.build.pwa.error.no_manifest_tag"))
|
||||
}
|
||||
|
||||
base, err := url.Parse(baseURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
manifestURL, err := base.Parse(manifestPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return manifestURL.String(), nil
|
||||
}
|
||||
|
||||
// fetchManifest downloads and parses a PWA manifest.
|
||||
func fetchManifest(manifestURL string) (map[string]any, error) {
|
||||
resp, err := http.Get(manifestURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
var manifest map[string]any
|
||||
if err := json.NewDecoder(resp.Body).Decode(&manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// collectAssets extracts downloadable asset URLs from a PWA manifest: the
// start_url plus every icon src, each resolved against manifestURL.
// Unresolvable references are silently skipped.
func collectAssets(manifest map[string]any, manifestURL string) []string {
	base, _ := url.Parse(manifestURL)
	var assets []string

	// resolve appends ref (resolved against the manifest URL) when valid.
	resolve := func(ref string) {
		if abs, err := base.Parse(ref); err == nil {
			assets = append(assets, abs.String())
		}
	}

	// Add start_url
	if startURL, ok := manifest["start_url"].(string); ok {
		resolve(startURL)
	}

	// Add icons
	if icons, ok := manifest["icons"].([]any); ok {
		for _, entry := range icons {
			iconMap, ok := entry.(map[string]any)
			if !ok {
				continue
			}
			if src, ok := iconMap["src"].(string); ok {
				resolve(src)
			}
		}
	}

	return assets
}
|
||||
|
||||
// downloadAsset fetches a single asset and saves it locally.
|
||||
func downloadAsset(assetURL, destDir string) error {
|
||||
resp, err := http.Get(assetURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
u, err := url.Parse(assetURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := filepath.Join(destDir, filepath.FromSlash(u.Path))
|
||||
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = out.Close() }()
|
||||
|
||||
_, err = io.Copy(out, resp.Body)
|
||||
return err
|
||||
}
|
||||
|
||||
// runBuild builds a desktop application from a local directory.
//
// It scaffolds a wrapper project from the embedded GUI template under
// .core/build/app, copies the static web app from fromPath into its html/
// subdirectory, then compiles it with `go mod tidy` + `go build`. The
// executable is named after the source directory, or "pwa-app" when the
// source is one of runPwaBuild's "core-pwa-build-*" temp directories.
// Any previous build directory is removed first.
func runBuild(fromPath string) error {
	fmt.Printf("%s %s\n", i18n.T("cmd.build.from_path.starting"), fromPath)

	info, err := os.Stat(fromPath)
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.invalid_path"), err)
	}
	if !info.IsDir() {
		return fmt.Errorf("%s", i18n.T("cmd.build.from_path.error.must_be_directory"))
	}

	buildDir := ".core/build/app"
	htmlDir := filepath.Join(buildDir, "html")
	appName := filepath.Base(fromPath)
	if strings.HasPrefix(appName, "core-pwa-build-") {
		appName = "pwa-app"
	}
	outputExe := appName

	// Start from a clean slate so stale template/app files never leak in.
	if err := os.RemoveAll(buildDir); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "clean build directory"}), err)
	}

	// 1. Generate the project from the embedded template
	fmt.Println(i18n.T("cmd.build.from_path.generating_template"))
	templateFS, err := debme.FS(guiTemplate, "tmpl/gui")
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "anchor template filesystem"}), err)
	}
	sod := gosod.New(templateFS)
	if sod == nil {
		return fmt.Errorf("%s", i18n.T("common.error.failed", map[string]any{"Action": "create new sod instance"}))
	}

	templateData := map[string]string{"AppName": appName}
	if err := sod.Extract(buildDir, templateData); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "extract template"}), err)
	}

	// 2. Copy the user's web app files
	fmt.Println(i18n.T("cmd.build.from_path.copying_files"))
	if err := copyDir(fromPath, htmlDir); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "copy application files"}), err)
	}

	// 3. Compile the application
	fmt.Println(i18n.T("cmd.build.from_path.compiling"))

	// Run go mod tidy
	cmd := exec.Command("go", "mod", "tidy")
	cmd.Dir = buildDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_mod_tidy"), err)
	}

	// Run go build
	cmd = exec.Command("go", "build", "-o", outputExe)
	cmd.Dir = buildDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("%s: %w", i18n.T("cmd.build.from_path.error.go_build"), err)
	}

	fmt.Printf("\n%s %s/%s\n", i18n.T("cmd.build.from_path.success"), buildDir, outputExe)
	return nil
}
|
||||
|
||||
// copyDir recursively copies a directory from src to dst.
|
||||
func copyDir(src, dst string) error {
|
||||
return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
relPath, err := filepath.Rel(src, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstPath := filepath.Join(dst, relPath)
|
||||
|
||||
if info.IsDir() {
|
||||
return os.MkdirAll(dstPath, info.Mode())
|
||||
}
|
||||
|
||||
srcFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = srcFile.Close() }()
|
||||
|
||||
dstFile, err := os.Create(dstPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = dstFile.Close() }()
|
||||
|
||||
_, err = io.Copy(dstFile, srcFile)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
|
@ -1,111 +0,0 @@
|
|||
// cmd_release.go implements the release command: build + archive + publish in one step.
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-devops/release"
|
||||
)
|
||||
|
||||
// Flag variables for release command
var (
	releaseVersion     string // --version: override the configured release version
	releaseDraft       bool   // --draft: mark the release as a draft on every publisher
	releasePrerelease  bool   // --prerelease: mark the release as a prerelease on every publisher
	releaseGoForLaunch bool   // --we-are-go-for-launch: actually publish (default is a dry run)
)
|
||||
|
||||
// releaseCmd wires `release` to runRelease. Publishing only happens when
// the operator explicitly passes --we-are-go-for-launch; otherwise the
// whole workflow runs as a dry run.
var releaseCmd = &cli.Command{
	Use:   "release",
	Short: i18n.T("cmd.build.release.short"),
	Long:  i18n.T("cmd.build.release.long"),
	RunE: func(cmd *cli.Command, args []string) error {
		// Note the inversion: dryRun == !releaseGoForLaunch.
		return runRelease(cmd.Context(), !releaseGoForLaunch, releaseVersion, releaseDraft, releasePrerelease)
	},
}
|
||||
|
||||
// init registers the release command's flags onto releaseCmd.
func init() {
	releaseCmd.Flags().BoolVar(&releaseGoForLaunch, "we-are-go-for-launch", false, i18n.T("cmd.build.release.flag.go_for_launch"))
	releaseCmd.Flags().StringVar(&releaseVersion, "version", "", i18n.T("cmd.build.release.flag.version"))
	releaseCmd.Flags().BoolVar(&releaseDraft, "draft", false, i18n.T("cmd.build.release.flag.draft"))
	releaseCmd.Flags().BoolVar(&releasePrerelease, "prerelease", false, i18n.T("cmd.build.release.flag.prerelease"))
}
|
||||
|
||||
// AddReleaseCommand adds the release subcommand to the build command.
// Called once during CLI wiring.
func AddReleaseCommand(buildCmd *cli.Command) {
	buildCmd.AddCommand(releaseCmd)
}
|
||||
|
||||
// runRelease executes the full release workflow: build + archive + checksum + publish.
//
// dryRun skips the actual publish step. version, draft, and prerelease are
// CLI overrides applied on top of the on-disk release configuration loaded
// from the current working directory.
func runRelease(ctx context.Context, dryRun bool, version string, draft, prerelease bool) error {
	// Get current directory — the release config is resolved relative to it.
	projectDir, err := os.Getwd()
	if err != nil {
		return log.E("release", "get working directory", err)
	}

	// Check for release config before doing any work.
	if !release.ConfigExists(projectDir) {
		cli.Print("%s %s\n",
			buildErrorStyle.Render(i18n.Label("error")),
			i18n.T("cmd.build.release.error.no_config"),
		)
		cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.hint.create_config")))
		return log.E("release", "config not found", nil)
	}

	// Load configuration
	cfg, err := release.LoadConfig(projectDir)
	if err != nil {
		return log.E("release", "load config", err)
	}

	// Apply CLI overrides
	if version != "" {
		cfg.SetVersion(version)
	}

	// Apply draft/prerelease overrides to all publishers
	if draft || prerelease {
		for i := range cfg.Publishers {
			if draft {
				cfg.Publishers[i].Draft = true
			}
			if prerelease {
				cfg.Publishers[i].Prerelease = true
			}
		}
	}

	// Print header
	cli.Print("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.release.label.release")), i18n.T("cmd.build.release.building_and_publishing"))
	if dryRun {
		cli.Print(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.release.dry_run_hint")))
	}
	cli.Blank()

	// Run full release (build + archive + checksum + publish)
	rel, err := release.Run(ctx, cfg, dryRun)
	if err != nil {
		return err
	}

	// Print summary
	cli.Blank()
	cli.Print("%s %s\n", buildSuccessStyle.Render(i18n.T("i18n.done.pass")), i18n.T("cmd.build.release.completed"))
	cli.Print(" %s %s\n", i18n.Label("version"), buildTargetStyle.Render(rel.Version))
	cli.Print(" %s %d\n", i18n.T("cmd.build.release.label.artifacts"), len(rel.Artifacts))

	// Publishers only actually ran when this was not a dry run.
	if !dryRun {
		for _, pub := range cfg.Publishers {
			cli.Print(" %s %s\n", i18n.T("cmd.build.release.label.published"), buildTargetStyle.Render(pub.Type))
		}
	}

	return nil
}
|
||||
|
|
@ -1,82 +0,0 @@
|
|||
// cmd_sdk.go implements SDK generation from OpenAPI specifications.
|
||||
//
|
||||
// Generates typed API clients for TypeScript, Python, Go, and PHP
|
||||
// from OpenAPI/Swagger specifications.
|
||||
|
||||
package buildcmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/sdk"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
// runBuildSDK handles the `core build sdk` command.
//
// It detects an OpenAPI spec in the current working directory (or uses
// specPath when given), then generates API clients for a single language
// (lang != "") or for every language in the SDK config. dryRun reports
// what would be generated without writing anything.
func runBuildSDK(specPath, lang, version string, dryRun bool) error {
	ctx := context.Background()

	projectDir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("%s: %w", i18n.T("common.error.failed", map[string]any{"Action": "get working directory"}), err)
	}

	// Load config — start from defaults, then apply CLI overrides.
	config := sdk.DefaultConfig()
	if specPath != "" {
		config.Spec = specPath
	}

	s := sdk.New(projectDir, config)
	if version != "" {
		s.SetVersion(version)
	}

	fmt.Printf("%s %s\n", buildHeaderStyle.Render(i18n.T("cmd.build.sdk.label")), i18n.T("cmd.build.sdk.generating"))
	if dryRun {
		fmt.Printf(" %s\n", buildDimStyle.Render(i18n.T("cmd.build.sdk.dry_run_mode")))
	}
	fmt.Println()

	// Detect spec
	detectedSpec, err := s.DetectSpec()
	if err != nil {
		fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
		return err
	}
	fmt.Printf(" %s %s\n", i18n.T("common.label.spec"), buildTargetStyle.Render(detectedSpec))

	// A dry run stops after reporting what would be generated.
	if dryRun {
		if lang != "" {
			fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.language_label"), buildTargetStyle.Render(lang))
		} else {
			fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.languages_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", ")))
		}
		fmt.Println()
		fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("cmd.build.label.ok")), i18n.T("cmd.build.sdk.would_generate"))
		return nil
	}

	if lang != "" {
		// Generate single language
		if err := s.GenerateLanguage(ctx, lang); err != nil {
			fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
			return err
		}
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(lang))
	} else {
		// Generate all
		if err := s.Generate(ctx); err != nil {
			fmt.Printf("%s %v\n", buildErrorStyle.Render(i18n.T("common.label.error")), err)
			return err
		}
		fmt.Printf(" %s %s\n", i18n.T("cmd.build.sdk.generated_label"), buildTargetStyle.Render(strings.Join(config.Languages, ", ")))
	}

	fmt.Println()
	fmt.Printf("%s %s\n", buildSuccessStyle.Render(i18n.T("common.label.success")), i18n.T("cmd.build.sdk.complete"))
	return nil
}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
module {{.AppName}}
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/wailsapp/wails/v3 v3.0.0-alpha.8
|
||||
)
|
||||
|
|
@ -1 +0,0 @@
|
|||
// This file ensures the 'html' directory is correctly embedded by the Go compiler.
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"log"
|
||||
|
||||
"github.com/wailsapp/wails/v3/pkg/application"
|
||||
)
|
||||
|
||||
//go:embed all:html
var assets embed.FS // the user's web app, embedded into the binary at compile time

// main wraps the embedded HTML assets in a Wails desktop application
// window. {{.AppName}} is substituted by the template engine at
// project-generation time.
func main() {
	app := application.New(application.Options{
		Name:        "{{.AppName}}",
		Description: "A web application enclaved by Core.",
		Assets: application.AssetOptions{
			FS: assets,
		},
	})

	// Run blocks until the application window is closed.
	if err := app.Run(); err != nil {
		log.Fatal(err)
	}
}
|
||||
|
|
@ -1,254 +0,0 @@
|
|||
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// CPPBuilder implements the Builder interface for C++ projects using CMake + Conan.
// It wraps the Makefile-based build system from the .core/build submodule.
// The struct is stateless; all configuration arrives via build.Config.
type CPPBuilder struct{}
|
||||
|
||||
// NewCPPBuilder creates a new CPPBuilder instance.
|
||||
func NewCPPBuilder() *CPPBuilder {
|
||||
return &CPPBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *CPPBuilder) Name() string {
|
||||
return "cpp"
|
||||
}
|
||||
|
||||
// Detect checks if this builder can handle the project in the given directory.
// It delegates to build.IsCPPProject (a CMakeLists.txt in dir marks a C++
// project, per this package's tests). Never returns an error.
func (b *CPPBuilder) Detect(fs io.Medium, dir string) (bool, error) {
	return build.IsCPPProject(fs, dir), nil
}
|
||||
|
||||
// Build compiles the C++ project using Make targets.
|
||||
// The build flow is: make configure → make build → make package.
|
||||
// Cross-compilation is handled via Conan profiles specified in .core/build.yaml.
|
||||
func (b *CPPBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
||||
if cfg == nil {
|
||||
return nil, errors.New("builders.CPPBuilder.Build: config is nil")
|
||||
}
|
||||
|
||||
// Validate make is available
|
||||
if err := b.validateMake(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For C++ projects, the Makefile handles everything.
|
||||
// We don't iterate per-target like Go — the Makefile's configure + build
|
||||
// produces binaries for the host platform, and cross-compilation uses
|
||||
// named Conan profiles (e.g., make gcc-linux-armv8).
|
||||
if len(targets) == 0 {
|
||||
// Default to host platform
|
||||
targets = []build.Target{{OS: runtime.GOOS, Arch: runtime.GOARCH}}
|
||||
}
|
||||
|
||||
var artifacts []build.Artifact
|
||||
|
||||
for _, target := range targets {
|
||||
built, err := b.buildTarget(ctx, cfg, target)
|
||||
if err != nil {
|
||||
return artifacts, fmt.Errorf("builders.CPPBuilder.Build: %w", err)
|
||||
}
|
||||
artifacts = append(artifacts, built...)
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// buildTarget compiles for a single target platform.
|
||||
func (b *CPPBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
||||
// Determine if this is a cross-compile or host build
|
||||
isHostBuild := target.OS == runtime.GOOS && target.Arch == runtime.GOARCH
|
||||
|
||||
if isHostBuild {
|
||||
return b.buildHost(ctx, cfg, target)
|
||||
}
|
||||
|
||||
return b.buildCross(ctx, cfg, target)
|
||||
}
|
||||
|
||||
// buildHost runs the standard make configure → make build → make package flow.
|
||||
func (b *CPPBuilder) buildHost(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
||||
fmt.Printf("Building C++ project for %s/%s (host)\n", target.OS, target.Arch)
|
||||
|
||||
// Step 1: Configure (runs conan install + cmake configure)
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, "configure"); err != nil {
|
||||
return nil, fmt.Errorf("configure failed: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Build
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, "build"); err != nil {
|
||||
return nil, fmt.Errorf("build failed: %w", err)
|
||||
}
|
||||
|
||||
// Step 3: Package
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, "package"); err != nil {
|
||||
return nil, fmt.Errorf("package failed: %w", err)
|
||||
}
|
||||
|
||||
// Discover artifacts from build/packages/
|
||||
return b.findArtifacts(cfg.FS, cfg.ProjectDir, target)
|
||||
}
|
||||
|
||||
// buildCross runs a cross-compilation using a Conan profile name.
|
||||
// The Makefile supports profile targets like: make gcc-linux-armv8
|
||||
func (b *CPPBuilder) buildCross(ctx context.Context, cfg *build.Config, target build.Target) ([]build.Artifact, error) {
|
||||
// Map target to a Conan profile name
|
||||
profile := b.targetToProfile(target)
|
||||
if profile == "" {
|
||||
return nil, fmt.Errorf("no Conan profile mapped for target %s/%s", target.OS, target.Arch)
|
||||
}
|
||||
|
||||
fmt.Printf("Building C++ project for %s/%s (cross: %s)\n", target.OS, target.Arch, profile)
|
||||
|
||||
// The Makefile exposes each profile as a top-level target
|
||||
if err := b.runMake(ctx, cfg.ProjectDir, profile); err != nil {
|
||||
return nil, fmt.Errorf("cross-compile for %s failed: %w", profile, err)
|
||||
}
|
||||
|
||||
return b.findArtifacts(cfg.FS, cfg.ProjectDir, target)
|
||||
}
|
||||
|
||||
// runMake executes a make target in the project directory.
|
||||
func (b *CPPBuilder) runMake(ctx context.Context, projectDir string, target string) error {
|
||||
cmd := exec.CommandContext(ctx, "make", target)
|
||||
cmd.Dir = projectDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Env = os.Environ()
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("make %s: %w", target, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findArtifacts searches for built packages in build/packages/.
|
||||
func (b *CPPBuilder) findArtifacts(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) {
|
||||
packagesDir := filepath.Join(projectDir, "build", "packages")
|
||||
|
||||
if !fs.IsDir(packagesDir) {
|
||||
// Fall back to searching build/release/src/ for raw binaries
|
||||
return b.findBinaries(fs, projectDir, target)
|
||||
}
|
||||
|
||||
entries, err := fs.List(packagesDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list packages directory: %w", err)
|
||||
}
|
||||
|
||||
var artifacts []build.Artifact
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
name := entry.Name()
|
||||
// Skip checksum files and hidden files
|
||||
if strings.HasSuffix(name, ".sha256") || strings.HasPrefix(name, ".") {
|
||||
continue
|
||||
}
|
||||
|
||||
artifacts = append(artifacts, build.Artifact{
|
||||
Path: filepath.Join(packagesDir, name),
|
||||
OS: target.OS,
|
||||
Arch: target.Arch,
|
||||
})
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// findBinaries searches for compiled binaries in build/release/src/.
//
// Fallback used when build/packages/ does not exist. Static libraries,
// object files, cmake files, and hidden files are skipped; on non-Windows
// targets a file must also carry an executable bit.
func (b *CPPBuilder) findBinaries(fs io.Medium, projectDir string, target build.Target) ([]build.Artifact, error) {
	binDir := filepath.Join(projectDir, "build", "release", "src")

	if !fs.IsDir(binDir) {
		return nil, fmt.Errorf("no build output found in %s", binDir)
	}

	entries, err := fs.List(binDir)
	if err != nil {
		return nil, fmt.Errorf("failed to list build directory: %w", err)
	}

	var artifacts []build.Artifact
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}

		name := entry.Name()
		// Skip non-executable files (libraries, cmake files, etc.)
		if strings.HasSuffix(name, ".a") || strings.HasSuffix(name, ".o") ||
			strings.HasSuffix(name, ".cmake") || strings.HasPrefix(name, ".") {
			continue
		}

		fullPath := filepath.Join(binDir, name)

		// On Unix, check if file is executable.
		// NOTE(review): this stats via the os package rather than the fs
		// Medium used above — presumably fs is always the local filesystem
		// here; confirm before using a remote Medium.
		if target.OS != "windows" {
			info, err := os.Stat(fullPath)
			if err != nil {
				continue
			}
			if info.Mode()&0111 == 0 {
				continue
			}
		}

		artifacts = append(artifacts, build.Artifact{
			Path: fullPath,
			OS:   target.OS,
			Arch: target.Arch,
		})
	}

	return artifacts, nil
}
|
||||
|
||||
// targetToProfile maps a build target to a Conan cross-compilation profile name.
|
||||
// Profile names match those in .core/build/cmake/profiles/.
|
||||
func (b *CPPBuilder) targetToProfile(target build.Target) string {
|
||||
key := target.OS + "/" + target.Arch
|
||||
profiles := map[string]string{
|
||||
"linux/amd64": "gcc-linux-x86_64",
|
||||
"linux/x86_64": "gcc-linux-x86_64",
|
||||
"linux/arm64": "gcc-linux-armv8",
|
||||
"linux/armv8": "gcc-linux-armv8",
|
||||
"darwin/arm64": "apple-clang-armv8",
|
||||
"darwin/armv8": "apple-clang-armv8",
|
||||
"darwin/amd64": "apple-clang-x86_64",
|
||||
"darwin/x86_64": "apple-clang-x86_64",
|
||||
"windows/amd64": "msvc-194-x86_64",
|
||||
"windows/x86_64": "msvc-194-x86_64",
|
||||
}
|
||||
|
||||
return profiles[key]
|
||||
}
|
||||
|
||||
// validateMake checks if make is available.
|
||||
func (b *CPPBuilder) validateMake() error {
|
||||
if _, err := exec.LookPath("make"); err != nil {
|
||||
return errors.New("cpp: make not found. Install build-essential (Linux) or Xcode Command Line Tools (macOS)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure CPPBuilder implements the Builder interface.
// Compile-time assertion only; it has no runtime cost.
var _ build.Builder = (*CPPBuilder)(nil)
|
||||
|
|
@ -1,149 +0,0 @@
|
|||
package builders
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestCPPBuilder_Name_Good verifies the builder registers under "cpp".
func TestCPPBuilder_Name_Good(t *testing.T) {
	builder := NewCPPBuilder()
	assert.Equal(t, "cpp", builder.Name())
}
|
||||
|
||||
// TestCPPBuilder_Detect_Good exercises Detect against the local filesystem:
// a CMakeLists.txt marks a C++ project; Go projects and empty dirs do not.
func TestCPPBuilder_Detect_Good(t *testing.T) {
	fs := io.Local

	t.Run("detects C++ project with CMakeLists.txt", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "CMakeLists.txt"), []byte("cmake_minimum_required(VERSION 3.16)"), 0644)
		require.NoError(t, err)

		builder := NewCPPBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.True(t, detected)
	})

	t.Run("returns false for non-C++ project", func(t *testing.T) {
		dir := t.TempDir()
		// A go.mod alone must not trigger the C++ detector.
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
		require.NoError(t, err)

		builder := NewCPPBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})

	t.Run("returns false for empty directory", func(t *testing.T) {
		dir := t.TempDir()

		builder := NewCPPBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})
}
|
||||
|
||||
// TestCPPBuilder_Build_Bad checks that Build rejects a nil config before
// touching make or the filesystem. (The nil ctx is safe here only because
// the nil-cfg guard returns before ctx is ever used.)
func TestCPPBuilder_Build_Bad(t *testing.T) {
	t.Run("returns error for nil config", func(t *testing.T) {
		builder := NewCPPBuilder()
		artifacts, err := builder.Build(nil, nil, []build.Target{{OS: "linux", Arch: "amd64"}})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "config is nil")
	})
}
|
||||
|
||||
// TestCPPBuilder_TargetToProfile_Good checks the OS/arch → Conan profile
// mapping, including the Go-style (amd64/arm64) and Conan-style
// (x86_64/armv8) arch spellings.
func TestCPPBuilder_TargetToProfile_Good(t *testing.T) {
	builder := NewCPPBuilder()

	tests := []struct {
		os, arch string
		expected string
	}{
		{"linux", "amd64", "gcc-linux-x86_64"},
		{"linux", "x86_64", "gcc-linux-x86_64"},
		{"linux", "arm64", "gcc-linux-armv8"},
		{"darwin", "arm64", "apple-clang-armv8"},
		{"darwin", "amd64", "apple-clang-x86_64"},
		{"windows", "amd64", "msvc-194-x86_64"},
	}

	for _, tt := range tests {
		t.Run(tt.os+"/"+tt.arch, func(t *testing.T) {
			profile := builder.targetToProfile(build.Target{OS: tt.os, Arch: tt.arch})
			assert.Equal(t, tt.expected, profile)
		})
	}
}
|
||||
|
||||
// TestCPPBuilder_TargetToProfile_Bad checks that unmapped platforms yield
// an empty profile name (callers treat "" as "no profile available").
func TestCPPBuilder_TargetToProfile_Bad(t *testing.T) {
	builder := NewCPPBuilder()

	t.Run("returns empty for unknown target", func(t *testing.T) {
		profile := builder.targetToProfile(build.Target{OS: "plan9", Arch: "mips"})
		assert.Empty(t, profile)
	})
}
|
||||
|
||||
// TestCPPBuilder_FindArtifacts_Good covers both discovery paths:
// packaged artifacts under build/packages/ and the raw-binary fallback
// under build/release/src/.
func TestCPPBuilder_FindArtifacts_Good(t *testing.T) {
	fs := io.Local

	t.Run("finds packages in build/packages", func(t *testing.T) {
		dir := t.TempDir()
		packagesDir := filepath.Join(dir, "build", "packages")
		require.NoError(t, os.MkdirAll(packagesDir, 0755))

		// Create mock package files: two real packages plus a checksum
		// that findArtifacts must skip.
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz"), []byte("pkg"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.tar.xz.sha256"), []byte("checksum"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(packagesDir, "test-1.0-linux-x86_64.rpm"), []byte("rpm"), 0644))

		builder := NewCPPBuilder()
		target := build.Target{OS: "linux", Arch: "amd64"}
		artifacts, err := builder.findArtifacts(fs, dir, target)
		require.NoError(t, err)

		// Should find tar.xz and rpm but not sha256
		assert.Len(t, artifacts, 2)
		for _, a := range artifacts {
			assert.Equal(t, "linux", a.OS)
			assert.Equal(t, "amd64", a.Arch)
			assert.False(t, filepath.Ext(a.Path) == ".sha256")
		}
	})

	t.Run("falls back to binaries in build/release/src", func(t *testing.T) {
		dir := t.TempDir()
		binDir := filepath.Join(dir, "build", "release", "src")
		require.NoError(t, os.MkdirAll(binDir, 0755))

		// Create mock binary (executable)
		binPath := filepath.Join(binDir, "test-daemon")
		require.NoError(t, os.WriteFile(binPath, []byte("binary"), 0755))

		// Create a library (should be skipped)
		require.NoError(t, os.WriteFile(filepath.Join(binDir, "libcrypto.a"), []byte("lib"), 0644))

		builder := NewCPPBuilder()
		target := build.Target{OS: "linux", Arch: "amd64"}
		artifacts, err := builder.findArtifacts(fs, dir, target)
		require.NoError(t, err)

		// Should find the executable but not the library
		assert.Len(t, artifacts, 1)
		assert.Contains(t, artifacts[0].Path, "test-daemon")
	})
}
|
||||
|
||||
// TestCPPBuilder_Interface_Good is a compile-time assertion that both the
// pointer type and the constructor's result satisfy build.Builder.
func TestCPPBuilder_Interface_Good(t *testing.T) {
	var _ build.Builder = (*CPPBuilder)(nil)
	var _ build.Builder = NewCPPBuilder()
}
|
||||
|
|
@ -1,216 +0,0 @@
|
|||
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// DockerBuilder builds Docker images.
// It shells out to the docker CLI and uses buildx for multi-platform
// builds. The struct is stateless; configuration arrives via build.Config.
type DockerBuilder struct{}
|
||||
|
||||
// NewDockerBuilder creates a new Docker builder.
|
||||
func NewDockerBuilder() *DockerBuilder {
|
||||
return &DockerBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *DockerBuilder) Name() string {
|
||||
return "docker"
|
||||
}
|
||||
|
||||
// Detect checks if a Dockerfile exists in the directory.
|
||||
func (b *DockerBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
||||
dockerfilePath := filepath.Join(dir, "Dockerfile")
|
||||
if fs.IsFile(dockerfilePath) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Build builds Docker images for the specified targets.
//
// It drives `docker buildx build`: platforms come from targets (default
// linux/amd64), the image name from cfg.Image / cfg.Name / project dir
// basename, tags from cfg.Tags (default "latest" plus cfg.Version), and
// the registry from cfg.Registry (default ghcr.io). With cfg.Push the
// image is pushed; otherwise a single-platform image is --load'ed into
// the local daemon and a multi-platform build is written to an OCI
// tarball under cfg.OutputDir.
func (b *DockerBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	// Validate docker CLI is available
	if err := b.validateDockerCli(); err != nil {
		return nil, err
	}

	// Ensure buildx is available
	if err := b.ensureBuildx(ctx); err != nil {
		return nil, err
	}

	// Determine Dockerfile path
	dockerfile := cfg.Dockerfile
	if dockerfile == "" {
		dockerfile = filepath.Join(cfg.ProjectDir, "Dockerfile")
	}

	// Validate Dockerfile exists
	if !cfg.FS.IsFile(dockerfile) {
		return nil, fmt.Errorf("docker.Build: Dockerfile not found: %s", dockerfile)
	}

	// Determine image name: explicit image > project name > directory name.
	imageName := cfg.Image
	if imageName == "" {
		imageName = cfg.Name
	}
	if imageName == "" {
		imageName = filepath.Base(cfg.ProjectDir)
	}

	// Build platform string from targets
	var platforms []string
	for _, t := range targets {
		platforms = append(platforms, fmt.Sprintf("%s/%s", t.OS, t.Arch))
	}

	// If no targets specified, use current platform
	if len(platforms) == 0 {
		platforms = []string{"linux/amd64"}
	}

	// Determine registry
	registry := cfg.Registry
	if registry == "" {
		registry = "ghcr.io"
	}

	// Determine tags. Always ends up non-empty, so imageRefs below is
	// non-empty too (imageRefs[0] is indexed safely further down).
	tags := cfg.Tags
	if len(tags) == 0 {
		tags = []string{"latest"}
		if cfg.Version != "" {
			tags = append(tags, cfg.Version)
		}
	}

	// Build full image references
	var imageRefs []string
	for _, tag := range tags {
		// Expand version template (both {{.Version}} and {{Version}} spellings)
		expandedTag := strings.ReplaceAll(tag, "{{.Version}}", cfg.Version)
		expandedTag = strings.ReplaceAll(expandedTag, "{{Version}}", cfg.Version)

		if registry != "" {
			imageRefs = append(imageRefs, fmt.Sprintf("%s/%s:%s", registry, imageName, expandedTag))
		} else {
			imageRefs = append(imageRefs, fmt.Sprintf("%s:%s", imageName, expandedTag))
		}
	}

	// Build the docker buildx command
	args := []string{"buildx", "build"}

	// Multi-platform support
	args = append(args, "--platform", strings.Join(platforms, ","))

	// Add all tags
	for _, ref := range imageRefs {
		args = append(args, "-t", ref)
	}

	// Dockerfile path
	args = append(args, "-f", dockerfile)

	// Build arguments (same version-template expansion as the tags above)
	for k, v := range cfg.BuildArgs {
		expandedValue := strings.ReplaceAll(v, "{{.Version}}", cfg.Version)
		expandedValue = strings.ReplaceAll(expandedValue, "{{Version}}", cfg.Version)
		args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, expandedValue))
	}

	// Always add VERSION build arg if version is set
	if cfg.Version != "" {
		args = append(args, "--build-arg", fmt.Sprintf("VERSION=%s", cfg.Version))
	}

	// Output to local docker images or push
	if cfg.Push {
		args = append(args, "--push")
	} else {
		// For multi-platform builds without push, we need to load or output somewhere
		if len(platforms) == 1 {
			args = append(args, "--load")
		} else {
			// Multi-platform builds can't use --load, output to tarball
			outputPath := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s.tar", imageName))
			args = append(args, "--output", fmt.Sprintf("type=oci,dest=%s", outputPath))
		}
	}

	// Build context (project directory)
	args = append(args, cfg.ProjectDir)

	// Create output directory
	if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
		return nil, fmt.Errorf("docker.Build: failed to create output directory: %w", err)
	}

	// Execute build, streaming docker's own output through.
	cmd := exec.CommandContext(ctx, "docker", args...)
	cmd.Dir = cfg.ProjectDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	fmt.Printf("Building Docker image: %s\n", imageName)
	fmt.Printf(" Platforms: %s\n", strings.Join(platforms, ", "))
	fmt.Printf(" Tags: %s\n", strings.Join(imageRefs, ", "))

	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("docker.Build: buildx build failed: %w", err)
	}

	// Create artifacts for each platform. Note: with zero targets the image
	// is still built (default platform) but no artifacts are returned.
	var artifacts []build.Artifact
	for _, t := range targets {
		artifacts = append(artifacts, build.Artifact{
			Path: imageRefs[0], // Primary image reference
			OS:   t.OS,
			Arch: t.Arch,
		})
	}

	return artifacts, nil
}
|
||||
|
||||
// validateDockerCli checks if the docker CLI is available.
|
||||
func (b *DockerBuilder) validateDockerCli() error {
|
||||
cmd := exec.Command("docker", "--version")
|
||||
if err := cmd.Run(); err != nil {
|
||||
return errors.New("docker: docker CLI not found. Install it from https://docs.docker.com/get-docker/")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureBuildx ensures docker buildx is available and has a usable builder.
// It first probes "docker buildx version" (buildx itself cannot be created
// programmatically, so a missing plugin is a hard error with install
// guidance). It then bootstraps the current builder via
// "docker buildx inspect --bootstrap"; if that fails, it attempts to create
// and select a fresh builder with "docker buildx create --use --bootstrap".
// ctx cancels any of the spawned docker commands.
func (b *DockerBuilder) ensureBuildx(ctx context.Context) error {
	// Check if buildx is available
	cmd := exec.CommandContext(ctx, "docker", "buildx", "version")
	if err := cmd.Run(); err != nil {
		return errors.New("docker: buildx is not available. Install it from https://docs.docker.com/buildx/working-with-buildx/")
	}

	// Check if we have a builder, create one if not
	cmd = exec.CommandContext(ctx, "docker", "buildx", "inspect", "--bootstrap")
	if err := cmd.Run(); err != nil {
		// Try to create a builder; stream its output so the user can watch
		// the (potentially slow) bootstrap progress.
		cmd = exec.CommandContext(ctx, "docker", "buildx", "create", "--use", "--bootstrap")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("docker: failed to create buildx builder: %w", err)
		}
	}

	return nil
}
|
||||
|
|
@ -1,83 +0,0 @@
|
|||
package builders
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDockerBuilder_Name_Good(t *testing.T) {
|
||||
builder := NewDockerBuilder()
|
||||
assert.Equal(t, "docker", builder.Name())
|
||||
}
|
||||
|
||||
func TestDockerBuilder_Detect_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
|
||||
t.Run("detects Dockerfile", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "Dockerfile"), []byte("FROM alpine\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewDockerBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
builder := NewDockerBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for non-Docker project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Create a Go project instead
|
||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewDockerBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("does not match docker-compose.yml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "docker-compose.yml"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewDockerBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("does not match Dockerfile in subdirectory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
subDir := filepath.Join(dir, "subdir")
|
||||
require.NoError(t, os.MkdirAll(subDir, 0755))
|
||||
err := os.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM alpine\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewDockerBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDockerBuilder_Interface_Good(t *testing.T) {
|
||||
// Verify DockerBuilder implements Builder interface
|
||||
var _ build.Builder = (*DockerBuilder)(nil)
|
||||
var _ build.Builder = NewDockerBuilder()
|
||||
}
|
||||
|
|
@ -1,130 +0,0 @@
|
|||
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// GoBuilder implements the Builder interface for Go projects.
// The zero value is ready to use; all work is done by shelling out to the
// local Go toolchain, so no state is held on the struct.
type GoBuilder struct{}
|
||||
|
||||
// NewGoBuilder creates a new GoBuilder instance.
|
||||
func NewGoBuilder() *GoBuilder {
|
||||
return &GoBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *GoBuilder) Name() string {
|
||||
return "go"
|
||||
}
|
||||
|
||||
// Detect checks if this builder can handle the project in the given directory.
|
||||
// Uses IsGoProject from the build package which checks for go.mod or wails.json.
|
||||
func (b *GoBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
||||
return build.IsGoProject(fs, dir), nil
|
||||
}
|
||||
|
||||
// Build compiles the Go project for the specified targets.
|
||||
// It sets GOOS, GOARCH, and CGO_ENABLED environment variables,
|
||||
// applies ldflags and trimpath, and runs go build.
|
||||
func (b *GoBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
||||
if cfg == nil {
|
||||
return nil, errors.New("builders.GoBuilder.Build: config is nil")
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
return nil, errors.New("builders.GoBuilder.Build: no targets specified")
|
||||
}
|
||||
|
||||
// Ensure output directory exists
|
||||
if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
|
||||
return nil, fmt.Errorf("builders.GoBuilder.Build: failed to create output directory: %w", err)
|
||||
}
|
||||
|
||||
var artifacts []build.Artifact
|
||||
|
||||
for _, target := range targets {
|
||||
artifact, err := b.buildTarget(ctx, cfg, target)
|
||||
if err != nil {
|
||||
return artifacts, fmt.Errorf("builders.GoBuilder.Build: failed to build %s: %w", target.String(), err)
|
||||
}
|
||||
artifacts = append(artifacts, artifact)
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// buildTarget compiles for a single target platform.
|
||||
func (b *GoBuilder) buildTarget(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) {
|
||||
// Determine output binary name
|
||||
binaryName := cfg.Name
|
||||
if binaryName == "" {
|
||||
binaryName = filepath.Base(cfg.ProjectDir)
|
||||
}
|
||||
|
||||
// Add .exe extension for Windows
|
||||
if target.OS == "windows" && !strings.HasSuffix(binaryName, ".exe") {
|
||||
binaryName += ".exe"
|
||||
}
|
||||
|
||||
// Create platform-specific output path: output/os_arch/binary
|
||||
platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
|
||||
if err := cfg.FS.EnsureDir(platformDir); err != nil {
|
||||
return build.Artifact{}, fmt.Errorf("failed to create platform directory: %w", err)
|
||||
}
|
||||
|
||||
outputPath := filepath.Join(platformDir, binaryName)
|
||||
|
||||
// Build the go build arguments
|
||||
args := []string{"build"}
|
||||
|
||||
// Add trimpath flag
|
||||
args = append(args, "-trimpath")
|
||||
|
||||
// Add ldflags if specified
|
||||
if len(cfg.LDFlags) > 0 {
|
||||
ldflags := strings.Join(cfg.LDFlags, " ")
|
||||
args = append(args, "-ldflags", ldflags)
|
||||
}
|
||||
|
||||
// Add output path
|
||||
args = append(args, "-o", outputPath)
|
||||
|
||||
// Add the project directory as the build target (current directory)
|
||||
args = append(args, ".")
|
||||
|
||||
// Create the command
|
||||
cmd := exec.CommandContext(ctx, "go", args...)
|
||||
cmd.Dir = cfg.ProjectDir
|
||||
|
||||
// Set up environment
|
||||
env := os.Environ()
|
||||
env = append(env, fmt.Sprintf("GOOS=%s", target.OS))
|
||||
env = append(env, fmt.Sprintf("GOARCH=%s", target.Arch))
|
||||
env = append(env, "CGO_ENABLED=0") // CGO disabled by default for cross-compilation
|
||||
cmd.Env = env
|
||||
|
||||
// Capture output for error messages
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return build.Artifact{}, fmt.Errorf("go build failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return build.Artifact{
|
||||
Path: outputPath,
|
||||
OS: target.OS,
|
||||
Arch: target.Arch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Ensure GoBuilder implements the Builder interface.
// Compile-time assertion only; it has no runtime cost.
var _ build.Builder = (*GoBuilder)(nil)
|
||||
|
|
@ -1,398 +0,0 @@
|
|||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupGoTestProject creates a minimal Go project for testing.
|
||||
func setupGoTestProject(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create a minimal go.mod
|
||||
goMod := `module testproject
|
||||
|
||||
go 1.21
|
||||
`
|
||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a minimal main.go
|
||||
mainGo := `package main
|
||||
|
||||
func main() {
|
||||
println("hello")
|
||||
}
|
||||
`
|
||||
err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func TestGoBuilder_Name_Good(t *testing.T) {
|
||||
builder := NewGoBuilder()
|
||||
assert.Equal(t, "go", builder.Name())
|
||||
}
|
||||
|
||||
func TestGoBuilder_Detect_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
t.Run("detects Go project with go.mod", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewGoBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects Wails project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewGoBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for non-Go project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Create a Node.js project instead
|
||||
err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewGoBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
builder := NewGoBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
}
|
||||
|
||||
// TestGoBuilder_Build_Good covers the happy paths of GoBuilder.Build.
// These are integration tests — they shell out to the real Go toolchain —
// so the whole function is skipped under "go test -short".
func TestGoBuilder_Build_Good(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	t.Run("builds for current platform", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "testbinary",
		}
		// Host OS/arch so the produced binary is actually valid here.
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify artifact properties
		artifact := artifacts[0]
		assert.Equal(t, runtime.GOOS, artifact.OS)
		assert.Equal(t, runtime.GOARCH, artifact.Arch)

		// Verify binary was created
		assert.FileExists(t, artifact.Path)

		// Verify the path is in the expected location
		expectedName := "testbinary"
		if runtime.GOOS == "windows" {
			expectedName += ".exe"
		}
		assert.Contains(t, artifact.Path, expectedName)
	})

	t.Run("builds multiple targets", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "multitest",
		}
		// Two cross-compilation targets; artifact order must match target order.
		targets := []build.Target{
			{OS: "linux", Arch: "amd64"},
			{OS: "linux", Arch: "arm64"},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 2)

		// Verify both artifacts were created
		for i, artifact := range artifacts {
			assert.Equal(t, targets[i].OS, artifact.OS)
			assert.Equal(t, targets[i].Arch, artifact.Arch)
			assert.FileExists(t, artifact.Path)
		}
	})

	t.Run("adds .exe extension for Windows", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "wintest",
		}
		targets := []build.Target{
			{OS: "windows", Arch: "amd64"},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify .exe extension
		assert.True(t, filepath.Ext(artifacts[0].Path) == ".exe")
		assert.FileExists(t, artifacts[0].Path)
	})

	t.Run("uses directory name when Name not specified", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "", // Empty name
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Binary should use the project directory base name
		baseName := filepath.Base(projectDir)
		if runtime.GOOS == "windows" {
			baseName += ".exe"
		}
		assert.Contains(t, artifacts[0].Path, baseName)
	})

	t.Run("applies ldflags", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "ldflagstest",
			LDFlags:    []string{"-s", "-w"}, // Strip debug info
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// Only verifies the build succeeds with ldflags set; the binary's
		// stripped-ness is not inspected.
		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)
		assert.FileExists(t, artifacts[0].Path)
	})

	t.Run("creates output directory if missing", func(t *testing.T) {
		projectDir := setupGoTestProject(t)
		// Nested path that does not exist yet; Build must create it.
		outputDir := filepath.Join(t.TempDir(), "nested", "output")

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "nestedtest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)
		assert.FileExists(t, artifacts[0].Path)
		assert.DirExists(t, outputDir)
	})
}
|
||||
|
||||
// TestGoBuilder_Build_Bad covers the error paths of GoBuilder.Build:
// invalid configuration, broken projects, partial multi-target failures,
// and context cancellation. Subtests that invoke the real Go toolchain
// skip themselves under "go test -short".
func TestGoBuilder_Build_Bad(t *testing.T) {
	t.Run("returns error for nil config", func(t *testing.T) {
		builder := NewGoBuilder()

		artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "config is nil")
	})

	t.Run("returns error for empty targets", func(t *testing.T) {
		projectDir := setupGoTestProject(t)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}

		artifacts, err := builder.Build(context.Background(), cfg, []build.Target{})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "no targets specified")
	})

	t.Run("returns error for invalid project directory", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: "/nonexistent/path",
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})

	t.Run("returns error for invalid Go code", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		dir := t.TempDir()

		// Create go.mod
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test\n\ngo 1.21"), 0644)
		require.NoError(t, err)

		// Create invalid Go code
		err = os.WriteFile(filepath.Join(dir, "main.go"), []byte("this is not valid go code"), 0644)
		require.NoError(t, err)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: dir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "go build failed")
		assert.Empty(t, artifacts)
	})

	t.Run("returns partial artifacts on partial failure", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		// Create a project that will fail on one target
		// Using an invalid arch for linux
		projectDir := setupGoTestProject(t)
		outputDir := t.TempDir()

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "partialtest",
		}
		// Target order matters: the valid target builds first, so exactly
		// one artifact should be returned alongside the error.
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH}, // This should succeed
			{OS: "linux", Arch: "invalid_arch"},      // This should fail
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		// Should return error for the failed build
		assert.Error(t, err)
		// Should have the successful artifact
		assert.Len(t, artifacts, 1)
	})

	t.Run("respects context cancellation", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		projectDir := setupGoTestProject(t)

		builder := NewGoBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "canceltest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// Create an already cancelled context
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		artifacts, err := builder.Build(ctx, cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})
}
|
||||
|
||||
func TestGoBuilder_Interface_Good(t *testing.T) {
|
||||
// Verify GoBuilder implements Builder interface
|
||||
var _ build.Builder = (*GoBuilder)(nil)
|
||||
var _ build.Builder = NewGoBuilder()
|
||||
}
|
||||
|
|
@ -1,271 +0,0 @@
|
|||
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// LinuxKitBuilder builds LinuxKit images.
// The zero value is ready to use; it shells out to the external
// "linuxkit" CLI, so no state is held on the struct.
type LinuxKitBuilder struct{}
|
||||
|
||||
// NewLinuxKitBuilder creates a new LinuxKit builder.
|
||||
func NewLinuxKitBuilder() *LinuxKitBuilder {
|
||||
return &LinuxKitBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *LinuxKitBuilder) Name() string {
|
||||
return "linuxkit"
|
||||
}
|
||||
|
||||
// Detect checks if a linuxkit.yml or .yml config exists in the directory.
|
||||
func (b *LinuxKitBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
||||
// Check for linuxkit.yml
|
||||
if fs.IsFile(filepath.Join(dir, "linuxkit.yml")) {
|
||||
return true, nil
|
||||
}
|
||||
// Check for .core/linuxkit/
|
||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
||||
if fs.IsDir(lkDir) {
|
||||
entries, err := fs.List(lkDir)
|
||||
if err == nil {
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Build builds LinuxKit images for the specified targets.
//
// The config file is taken from cfg.LinuxKitConfig, or auto-detected:
// first a top-level linuxkit.yml, then the first .yml file found in
// .core/linuxkit/. Output formats come from cfg.Formats (defaulting to
// "qcow2-bios"), the output directory from cfg.OutputDir (defaulting to
// <project>/dist). Non-linux targets are skipped with a notice since
// LinuxKit only produces Linux images. One image is built per
// (target, format) pair by invoking the external "linuxkit" CLI, and the
// resulting files are returned as artifacts.
func (b *LinuxKitBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
	// Validate linuxkit CLI is available
	if err := b.validateLinuxKitCli(); err != nil {
		return nil, err
	}

	// Determine config file path
	configPath := cfg.LinuxKitConfig
	if configPath == "" {
		// Auto-detect
		if cfg.FS.IsFile(filepath.Join(cfg.ProjectDir, "linuxkit.yml")) {
			configPath = filepath.Join(cfg.ProjectDir, "linuxkit.yml")
		} else {
			// Look in .core/linuxkit/ — the first .yml file (in listing
			// order) is used; listing errors simply leave configPath empty.
			lkDir := filepath.Join(cfg.ProjectDir, ".core", "linuxkit")
			if cfg.FS.IsDir(lkDir) {
				entries, err := cfg.FS.List(lkDir)
				if err == nil {
					for _, entry := range entries {
						if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".yml") {
							configPath = filepath.Join(lkDir, entry.Name())
							break
						}
					}
				}
			}
		}
	}

	if configPath == "" {
		return nil, errors.New("linuxkit.Build: no LinuxKit config file found. Specify with --config or create linuxkit.yml")
	}

	// Validate config file exists
	if !cfg.FS.IsFile(configPath) {
		return nil, fmt.Errorf("linuxkit.Build: config file not found: %s", configPath)
	}

	// Determine output formats
	formats := cfg.Formats
	if len(formats) == 0 {
		formats = []string{"qcow2-bios"} // Default to QEMU-compatible format
	}

	// Create output directory
	outputDir := cfg.OutputDir
	if outputDir == "" {
		outputDir = filepath.Join(cfg.ProjectDir, "dist")
	}
	if err := cfg.FS.EnsureDir(outputDir); err != nil {
		return nil, fmt.Errorf("linuxkit.Build: failed to create output directory: %w", err)
	}

	// Determine base name from config file or project name
	baseName := cfg.Name
	if baseName == "" {
		baseName = strings.TrimSuffix(filepath.Base(configPath), ".yml")
	}

	// If no targets, default to linux/amd64
	if len(targets) == 0 {
		targets = []build.Target{{OS: "linux", Arch: "amd64"}}
	}

	var artifacts []build.Artifact

	// Build for each target and format
	for _, target := range targets {
		// LinuxKit only supports Linux
		if target.OS != "linux" {
			fmt.Printf("Skipping %s/%s (LinuxKit only supports Linux)\n", target.OS, target.Arch)
			continue
		}

		for _, format := range formats {
			// Name is per-arch only; different formats are distinguished by
			// their file extensions, not the name.
			outputName := fmt.Sprintf("%s-%s", baseName, target.Arch)

			args := b.buildLinuxKitArgs(configPath, format, outputName, outputDir, target.Arch)

			// Stream linuxkit's output so the user sees build progress.
			cmd := exec.CommandContext(ctx, "linuxkit", args...)
			cmd.Dir = cfg.ProjectDir
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr

			fmt.Printf("Building LinuxKit image: %s (%s, %s)\n", outputName, format, target.Arch)

			if err := cmd.Run(); err != nil {
				return nil, fmt.Errorf("linuxkit.Build: build failed for %s/%s: %w", target.Arch, format, err)
			}

			// Determine the actual output file path
			artifactPath := b.getArtifactPath(outputDir, outputName, format)

			// Verify the artifact was created
			if !cfg.FS.Exists(artifactPath) {
				// Try alternate naming conventions (linuxkit sometimes adds
				// firmware suffixes like "-bios"/"-efi" before the extension).
				artifactPath = b.findArtifact(cfg.FS, outputDir, outputName, format)
				if artifactPath == "" {
					return nil, fmt.Errorf("linuxkit.Build: artifact not found after build: expected %s", b.getArtifactPath(outputDir, outputName, format))
				}
			}

			artifacts = append(artifacts, build.Artifact{
				Path: artifactPath,
				OS:   target.OS,
				Arch: target.Arch,
			})
		}
	}

	return artifacts, nil
}
|
||||
|
||||
// buildLinuxKitArgs builds the arguments for linuxkit build command.
|
||||
func (b *LinuxKitBuilder) buildLinuxKitArgs(configPath, format, outputName, outputDir, arch string) []string {
|
||||
args := []string{"build"}
|
||||
|
||||
// Output format
|
||||
args = append(args, "--format", format)
|
||||
|
||||
// Output name
|
||||
args = append(args, "--name", outputName)
|
||||
|
||||
// Output directory
|
||||
args = append(args, "--dir", outputDir)
|
||||
|
||||
// Architecture (if not amd64)
|
||||
if arch != "amd64" {
|
||||
args = append(args, "--arch", arch)
|
||||
}
|
||||
|
||||
// Config file
|
||||
args = append(args, configPath)
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// getArtifactPath returns the expected path of the built artifact.
|
||||
func (b *LinuxKitBuilder) getArtifactPath(outputDir, outputName, format string) string {
|
||||
ext := b.getFormatExtension(format)
|
||||
return filepath.Join(outputDir, outputName+ext)
|
||||
}
|
||||
|
||||
// findArtifact searches for the built artifact with various naming conventions.
|
||||
func (b *LinuxKitBuilder) findArtifact(fs io.Medium, outputDir, outputName, format string) string {
|
||||
// LinuxKit can create files with different suffixes
|
||||
extensions := []string{
|
||||
b.getFormatExtension(format),
|
||||
"-bios" + b.getFormatExtension(format),
|
||||
"-efi" + b.getFormatExtension(format),
|
||||
}
|
||||
|
||||
for _, ext := range extensions {
|
||||
path := filepath.Join(outputDir, outputName+ext)
|
||||
if fs.Exists(path) {
|
||||
return path
|
||||
}
|
||||
}
|
||||
|
||||
// Try to find any file matching the output name
|
||||
entries, err := fs.List(outputDir)
|
||||
if err == nil {
|
||||
for _, entry := range entries {
|
||||
if strings.HasPrefix(entry.Name(), outputName) {
|
||||
match := filepath.Join(outputDir, entry.Name())
|
||||
// Return first match that looks like an image
|
||||
ext := filepath.Ext(match)
|
||||
if ext == ".iso" || ext == ".qcow2" || ext == ".raw" || ext == ".vmdk" || ext == ".vhd" {
|
||||
return match
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// getFormatExtension returns the file extension for a LinuxKit output format.
|
||||
func (b *LinuxKitBuilder) getFormatExtension(format string) string {
|
||||
switch format {
|
||||
case "iso", "iso-bios", "iso-efi":
|
||||
return ".iso"
|
||||
case "raw", "raw-bios", "raw-efi":
|
||||
return ".raw"
|
||||
case "qcow2", "qcow2-bios", "qcow2-efi":
|
||||
return ".qcow2"
|
||||
case "vmdk":
|
||||
return ".vmdk"
|
||||
case "vhd":
|
||||
return ".vhd"
|
||||
case "gcp":
|
||||
return ".img.tar.gz"
|
||||
case "aws":
|
||||
return ".raw"
|
||||
default:
|
||||
return "." + strings.TrimSuffix(format, "-bios")
|
||||
}
|
||||
}
|
||||
|
||||
// validateLinuxKitCli checks if the linuxkit CLI is available.
// It first consults PATH via exec.LookPath, then probes well-known
// install locations with os.Stat.
//
// NOTE(review): Build invokes the bare "linuxkit" command, so a binary
// found only at one of the hard-coded paths below (but absent from PATH)
// passes validation yet would still fail at build time — confirm these
// locations are always on PATH in supported environments.
func (b *LinuxKitBuilder) validateLinuxKitCli() error {
	// Check PATH first
	if _, err := exec.LookPath("linuxkit"); err == nil {
		return nil
	}

	// Check common locations
	paths := []string{
		"/usr/local/bin/linuxkit",
		"/opt/homebrew/bin/linuxkit",
	}

	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return nil
		}
	}

	return errors.New("linuxkit: linuxkit CLI not found. Install with: brew install linuxkit (macOS) or see https://github.com/linuxkit/linuxkit")
}
|
||||
|
|
@ -1,224 +0,0 @@
|
|||
package builders
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLinuxKitBuilder_Name_Good(t *testing.T) {
|
||||
builder := NewLinuxKitBuilder()
|
||||
assert.Equal(t, "linuxkit", builder.Name())
|
||||
}
|
||||
|
||||
func TestLinuxKitBuilder_Detect_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
|
||||
t.Run("detects linuxkit.yml in root", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "linuxkit.yml"), []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects .core/linuxkit/*.yml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
||||
require.NoError(t, os.MkdirAll(lkDir, 0755))
|
||||
err := os.WriteFile(filepath.Join(lkDir, "server.yml"), []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects .core/linuxkit with multiple yml files", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
||||
require.NoError(t, os.MkdirAll(lkDir, 0755))
|
||||
err := os.WriteFile(filepath.Join(lkDir, "server.yml"), []byte("kernel:\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(lkDir, "desktop.yml"), []byte("kernel:\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for non-LinuxKit project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for empty .core/linuxkit directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
||||
require.NoError(t, os.MkdirAll(lkDir, 0755))
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false when .core/linuxkit has only non-yml files", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
||||
require.NoError(t, os.MkdirAll(lkDir, 0755))
|
||||
err := os.WriteFile(filepath.Join(lkDir, "README.md"), []byte("# LinuxKit\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("ignores subdirectories in .core/linuxkit", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
lkDir := filepath.Join(dir, ".core", "linuxkit")
|
||||
subDir := filepath.Join(lkDir, "subdir")
|
||||
require.NoError(t, os.MkdirAll(subDir, 0755))
|
||||
// Put yml in subdir only, not in lkDir itself
|
||||
err := os.WriteFile(filepath.Join(subDir, "server.yml"), []byte("kernel:\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewLinuxKitBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitBuilder_GetFormatExtension_Good(t *testing.T) {
|
||||
builder := NewLinuxKitBuilder()
|
||||
|
||||
tests := []struct {
|
||||
format string
|
||||
expected string
|
||||
}{
|
||||
{"iso", ".iso"},
|
||||
{"iso-bios", ".iso"},
|
||||
{"iso-efi", ".iso"},
|
||||
{"raw", ".raw"},
|
||||
{"raw-bios", ".raw"},
|
||||
{"raw-efi", ".raw"},
|
||||
{"qcow2", ".qcow2"},
|
||||
{"qcow2-bios", ".qcow2"},
|
||||
{"qcow2-efi", ".qcow2"},
|
||||
{"vmdk", ".vmdk"},
|
||||
{"vhd", ".vhd"},
|
||||
{"gcp", ".img.tar.gz"},
|
||||
{"aws", ".raw"},
|
||||
{"custom", ".custom"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.format, func(t *testing.T) {
|
||||
ext := builder.getFormatExtension(tc.format)
|
||||
assert.Equal(t, tc.expected, ext)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitBuilder_GetArtifactPath_Good(t *testing.T) {
|
||||
builder := NewLinuxKitBuilder()
|
||||
|
||||
t.Run("constructs correct path", func(t *testing.T) {
|
||||
path := builder.getArtifactPath("/dist", "server-amd64", "iso")
|
||||
assert.Equal(t, "/dist/server-amd64.iso", path)
|
||||
})
|
||||
|
||||
t.Run("constructs correct path for qcow2", func(t *testing.T) {
|
||||
path := builder.getArtifactPath("/output/linuxkit", "server-arm64", "qcow2-bios")
|
||||
assert.Equal(t, "/output/linuxkit/server-arm64.qcow2", path)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitBuilder_BuildLinuxKitArgs_Good(t *testing.T) {
|
||||
builder := NewLinuxKitBuilder()
|
||||
|
||||
t.Run("builds args for amd64 without --arch", func(t *testing.T) {
|
||||
args := builder.buildLinuxKitArgs("/config.yml", "iso", "output", "/dist", "amd64")
|
||||
assert.Contains(t, args, "build")
|
||||
assert.Contains(t, args, "--format")
|
||||
assert.Contains(t, args, "iso")
|
||||
assert.Contains(t, args, "--name")
|
||||
assert.Contains(t, args, "output")
|
||||
assert.Contains(t, args, "--dir")
|
||||
assert.Contains(t, args, "/dist")
|
||||
assert.Contains(t, args, "/config.yml")
|
||||
assert.NotContains(t, args, "--arch")
|
||||
})
|
||||
|
||||
t.Run("builds args for arm64 with --arch", func(t *testing.T) {
|
||||
args := builder.buildLinuxKitArgs("/config.yml", "qcow2", "output", "/dist", "arm64")
|
||||
assert.Contains(t, args, "--arch")
|
||||
assert.Contains(t, args, "arm64")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitBuilder_FindArtifact_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
builder := NewLinuxKitBuilder()
|
||||
|
||||
t.Run("finds artifact with exact extension", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
artifactPath := filepath.Join(dir, "server-amd64.iso")
|
||||
require.NoError(t, os.WriteFile(artifactPath, []byte("fake iso"), 0644))
|
||||
|
||||
found := builder.findArtifact(fs, dir, "server-amd64", "iso")
|
||||
assert.Equal(t, artifactPath, found)
|
||||
})
|
||||
|
||||
t.Run("returns empty for missing artifact", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
found := builder.findArtifact(fs, dir, "nonexistent", "iso")
|
||||
assert.Empty(t, found)
|
||||
})
|
||||
|
||||
t.Run("finds artifact with alternate naming", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
// Create file matching the name prefix + known image extension
|
||||
artifactPath := filepath.Join(dir, "server-amd64.qcow2")
|
||||
require.NoError(t, os.WriteFile(artifactPath, []byte("fake qcow2"), 0644))
|
||||
|
||||
found := builder.findArtifact(fs, dir, "server-amd64", "qcow2")
|
||||
assert.Equal(t, artifactPath, found)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitBuilder_Interface_Good(t *testing.T) {
|
||||
// Verify LinuxKitBuilder implements Builder interface
|
||||
var _ build.Builder = (*LinuxKitBuilder)(nil)
|
||||
var _ build.Builder = NewLinuxKitBuilder()
|
||||
}
|
||||
|
|
@ -1,276 +0,0 @@
|
|||
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// TaskfileBuilder is a generic builder that drives a project through its
// Taskfile (https://taskfile.dev/). It is language-agnostic: any project
// that defines a "build" task can be built with it.
type TaskfileBuilder struct{}
|
||||
|
||||
// NewTaskfileBuilder creates a new Taskfile builder.
|
||||
func NewTaskfileBuilder() *TaskfileBuilder {
|
||||
return &TaskfileBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *TaskfileBuilder) Name() string {
|
||||
return "taskfile"
|
||||
}
|
||||
|
||||
// Detect checks if a Taskfile exists in the directory.
|
||||
func (b *TaskfileBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
||||
// Check for Taskfile.yml, Taskfile.yaml, or Taskfile
|
||||
taskfiles := []string{
|
||||
"Taskfile.yml",
|
||||
"Taskfile.yaml",
|
||||
"Taskfile",
|
||||
"taskfile.yml",
|
||||
"taskfile.yaml",
|
||||
}
|
||||
|
||||
for _, tf := range taskfiles {
|
||||
if fs.IsFile(filepath.Join(dir, tf)) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Build runs the Taskfile build task for each target platform.
|
||||
func (b *TaskfileBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
||||
// Validate task CLI is available
|
||||
if err := b.validateTaskCli(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create output directory
|
||||
outputDir := cfg.OutputDir
|
||||
if outputDir == "" {
|
||||
outputDir = filepath.Join(cfg.ProjectDir, "dist")
|
||||
}
|
||||
if err := cfg.FS.EnsureDir(outputDir); err != nil {
|
||||
return nil, fmt.Errorf("taskfile.Build: failed to create output directory: %w", err)
|
||||
}
|
||||
|
||||
var artifacts []build.Artifact
|
||||
|
||||
// If no targets specified, just run the build task once
|
||||
if len(targets) == 0 {
|
||||
if err := b.runTask(ctx, cfg, "", ""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try to find artifacts in output directory
|
||||
found := b.findArtifacts(cfg.FS, outputDir)
|
||||
artifacts = append(artifacts, found...)
|
||||
} else {
|
||||
// Run build task for each target
|
||||
for _, target := range targets {
|
||||
if err := b.runTask(ctx, cfg, target.OS, target.Arch); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try to find artifacts for this target
|
||||
found := b.findArtifactsForTarget(cfg.FS, outputDir, target)
|
||||
artifacts = append(artifacts, found...)
|
||||
}
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// runTask executes the Taskfile build task.
|
||||
func (b *TaskfileBuilder) runTask(ctx context.Context, cfg *build.Config, goos, goarch string) error {
|
||||
// Build task command
|
||||
args := []string{"build"}
|
||||
|
||||
// Pass variables if targets are specified
|
||||
if goos != "" {
|
||||
args = append(args, fmt.Sprintf("GOOS=%s", goos))
|
||||
}
|
||||
if goarch != "" {
|
||||
args = append(args, fmt.Sprintf("GOARCH=%s", goarch))
|
||||
}
|
||||
if cfg.OutputDir != "" {
|
||||
args = append(args, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir))
|
||||
}
|
||||
if cfg.Name != "" {
|
||||
args = append(args, fmt.Sprintf("NAME=%s", cfg.Name))
|
||||
}
|
||||
if cfg.Version != "" {
|
||||
args = append(args, fmt.Sprintf("VERSION=%s", cfg.Version))
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "task", args...)
|
||||
cmd.Dir = cfg.ProjectDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
// Set environment variables
|
||||
cmd.Env = os.Environ()
|
||||
if goos != "" {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("GOOS=%s", goos))
|
||||
}
|
||||
if goarch != "" {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("GOARCH=%s", goarch))
|
||||
}
|
||||
if cfg.OutputDir != "" {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("OUTPUT_DIR=%s", cfg.OutputDir))
|
||||
}
|
||||
if cfg.Name != "" {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("NAME=%s", cfg.Name))
|
||||
}
|
||||
if cfg.Version != "" {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("VERSION=%s", cfg.Version))
|
||||
}
|
||||
|
||||
if goos != "" && goarch != "" {
|
||||
fmt.Printf("Running task build for %s/%s\n", goos, goarch)
|
||||
} else {
|
||||
fmt.Println("Running task build")
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("taskfile.Build: task build failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findArtifacts searches for built artifacts in the output directory.
|
||||
func (b *TaskfileBuilder) findArtifacts(fs io.Medium, outputDir string) []build.Artifact {
|
||||
var artifacts []build.Artifact
|
||||
|
||||
entries, err := fs.List(outputDir)
|
||||
if err != nil {
|
||||
return artifacts
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip common non-artifact files
|
||||
name := entry.Name()
|
||||
if strings.HasPrefix(name, ".") || name == "CHECKSUMS.txt" {
|
||||
continue
|
||||
}
|
||||
|
||||
artifacts = append(artifacts, build.Artifact{
|
||||
Path: filepath.Join(outputDir, name),
|
||||
OS: "",
|
||||
Arch: "",
|
||||
})
|
||||
}
|
||||
|
||||
return artifacts
|
||||
}
|
||||
|
||||
// findArtifactsForTarget searches for built artifacts for a specific target.
|
||||
func (b *TaskfileBuilder) findArtifactsForTarget(fs io.Medium, outputDir string, target build.Target) []build.Artifact {
|
||||
var artifacts []build.Artifact
|
||||
|
||||
// 1. Look for platform-specific subdirectory: output/os_arch/
|
||||
platformSubdir := filepath.Join(outputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
|
||||
if fs.IsDir(platformSubdir) {
|
||||
entries, _ := fs.List(platformSubdir)
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
// Handle .app bundles on macOS
|
||||
if target.OS == "darwin" && strings.HasSuffix(entry.Name(), ".app") {
|
||||
artifacts = append(artifacts, build.Artifact{
|
||||
Path: filepath.Join(platformSubdir, entry.Name()),
|
||||
OS: target.OS,
|
||||
Arch: target.Arch,
|
||||
})
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Skip hidden files
|
||||
if strings.HasPrefix(entry.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
artifacts = append(artifacts, build.Artifact{
|
||||
Path: filepath.Join(platformSubdir, entry.Name()),
|
||||
OS: target.OS,
|
||||
Arch: target.Arch,
|
||||
})
|
||||
}
|
||||
if len(artifacts) > 0 {
|
||||
return artifacts
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Look for files matching the target pattern in the root output dir
|
||||
patterns := []string{
|
||||
fmt.Sprintf("*-%s-%s*", target.OS, target.Arch),
|
||||
fmt.Sprintf("*_%s_%s*", target.OS, target.Arch),
|
||||
fmt.Sprintf("*-%s*", target.Arch),
|
||||
}
|
||||
|
||||
for _, pattern := range patterns {
|
||||
entries, _ := fs.List(outputDir)
|
||||
for _, entry := range entries {
|
||||
match := entry.Name()
|
||||
// Simple glob matching
|
||||
if b.matchPattern(match, pattern) {
|
||||
fullPath := filepath.Join(outputDir, match)
|
||||
if fs.IsDir(fullPath) {
|
||||
continue
|
||||
}
|
||||
|
||||
artifacts = append(artifacts, build.Artifact{
|
||||
Path: fullPath,
|
||||
OS: target.OS,
|
||||
Arch: target.Arch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(artifacts) > 0 {
|
||||
break // Found matches, stop looking
|
||||
}
|
||||
}
|
||||
|
||||
return artifacts
|
||||
}
|
||||
|
||||
// matchPattern implements glob matching for Taskfile artifacts.
|
||||
func (b *TaskfileBuilder) matchPattern(name, pattern string) bool {
|
||||
matched, _ := filepath.Match(pattern, name)
|
||||
return matched
|
||||
}
|
||||
|
||||
// validateTaskCli checks if the task CLI is available.
|
||||
func (b *TaskfileBuilder) validateTaskCli() error {
|
||||
// Check PATH first
|
||||
if _, err := exec.LookPath("task"); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check common locations
|
||||
paths := []string{
|
||||
"/usr/local/bin/task",
|
||||
"/opt/homebrew/bin/task",
|
||||
}
|
||||
|
||||
for _, p := range paths {
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.New("taskfile: task CLI not found. Install with: brew install go-task (macOS), go install github.com/go-task/task/v3/cmd/task@latest, or see https://taskfile.dev/installation/")
|
||||
}
|
||||
|
|
@ -1,234 +0,0 @@
|
|||
package builders
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestTaskfileBuilder_Name_Good(t *testing.T) {
|
||||
builder := NewTaskfileBuilder()
|
||||
assert.Equal(t, "taskfile", builder.Name())
|
||||
}
|
||||
|
||||
func TestTaskfileBuilder_Detect_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
|
||||
t.Run("detects Taskfile.yml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "Taskfile.yml"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects Taskfile.yaml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "Taskfile.yaml"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects Taskfile (no extension)", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "Taskfile"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects lowercase taskfile.yml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "taskfile.yml"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("detects lowercase taskfile.yaml", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "taskfile.yaml"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for empty directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("returns false for non-Taskfile project", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(dir, "Makefile"), []byte("all:\n\techo hello\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
|
||||
t.Run("does not match Taskfile in subdirectory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
subDir := filepath.Join(dir, "subdir")
|
||||
require.NoError(t, os.MkdirAll(subDir, 0755))
|
||||
err := os.WriteFile(filepath.Join(subDir, "Taskfile.yml"), []byte("version: '3'\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
builder := NewTaskfileBuilder()
|
||||
detected, err := builder.Detect(fs, dir)
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, detected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTaskfileBuilder_FindArtifacts_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
builder := NewTaskfileBuilder()
|
||||
|
||||
t.Run("finds files in output directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp"), []byte("binary"), 0755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp.tar.gz"), []byte("archive"), 0644))
|
||||
|
||||
artifacts := builder.findArtifacts(fs, dir)
|
||||
assert.Len(t, artifacts, 2)
|
||||
})
|
||||
|
||||
t.Run("skips hidden files", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp"), []byte("binary"), 0755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, ".hidden"), []byte("hidden"), 0644))
|
||||
|
||||
artifacts := builder.findArtifacts(fs, dir)
|
||||
assert.Len(t, artifacts, 1)
|
||||
assert.Contains(t, artifacts[0].Path, "myapp")
|
||||
})
|
||||
|
||||
t.Run("skips CHECKSUMS.txt", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp"), []byte("binary"), 0755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "CHECKSUMS.txt"), []byte("sha256"), 0644))
|
||||
|
||||
artifacts := builder.findArtifacts(fs, dir)
|
||||
assert.Len(t, artifacts, 1)
|
||||
assert.Contains(t, artifacts[0].Path, "myapp")
|
||||
})
|
||||
|
||||
t.Run("skips directories", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp"), []byte("binary"), 0755))
|
||||
require.NoError(t, os.MkdirAll(filepath.Join(dir, "subdir"), 0755))
|
||||
|
||||
artifacts := builder.findArtifacts(fs, dir)
|
||||
assert.Len(t, artifacts, 1)
|
||||
})
|
||||
|
||||
t.Run("returns empty for empty directory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
artifacts := builder.findArtifacts(fs, dir)
|
||||
assert.Empty(t, artifacts)
|
||||
})
|
||||
|
||||
t.Run("returns empty for nonexistent directory", func(t *testing.T) {
|
||||
artifacts := builder.findArtifacts(fs, "/nonexistent/path")
|
||||
assert.Empty(t, artifacts)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTaskfileBuilder_FindArtifactsForTarget_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
builder := NewTaskfileBuilder()
|
||||
|
||||
t.Run("finds artifacts in platform subdirectory", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
platformDir := filepath.Join(dir, "linux_amd64")
|
||||
require.NoError(t, os.MkdirAll(platformDir, 0755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(platformDir, "myapp"), []byte("binary"), 0755))
|
||||
|
||||
target := build.Target{OS: "linux", Arch: "amd64"}
|
||||
artifacts := builder.findArtifactsForTarget(fs, dir, target)
|
||||
assert.Len(t, artifacts, 1)
|
||||
assert.Equal(t, "linux", artifacts[0].OS)
|
||||
assert.Equal(t, "amd64", artifacts[0].Arch)
|
||||
})
|
||||
|
||||
t.Run("finds artifacts by name pattern in root", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp-linux-amd64"), []byte("binary"), 0755))
|
||||
|
||||
target := build.Target{OS: "linux", Arch: "amd64"}
|
||||
artifacts := builder.findArtifactsForTarget(fs, dir, target)
|
||||
assert.NotEmpty(t, artifacts)
|
||||
})
|
||||
|
||||
t.Run("returns empty when no matching artifacts", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "myapp"), []byte("binary"), 0755))
|
||||
|
||||
target := build.Target{OS: "linux", Arch: "arm64"}
|
||||
artifacts := builder.findArtifactsForTarget(fs, dir, target)
|
||||
assert.Empty(t, artifacts)
|
||||
})
|
||||
|
||||
t.Run("handles .app bundles on darwin", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
platformDir := filepath.Join(dir, "darwin_arm64")
|
||||
appDir := filepath.Join(platformDir, "MyApp.app")
|
||||
require.NoError(t, os.MkdirAll(appDir, 0755))
|
||||
|
||||
target := build.Target{OS: "darwin", Arch: "arm64"}
|
||||
artifacts := builder.findArtifactsForTarget(fs, dir, target)
|
||||
assert.Len(t, artifacts, 1)
|
||||
assert.Contains(t, artifacts[0].Path, "MyApp.app")
|
||||
})
|
||||
}
|
||||
|
||||
func TestTaskfileBuilder_MatchPattern_Good(t *testing.T) {
|
||||
builder := NewTaskfileBuilder()
|
||||
|
||||
t.Run("matches simple glob", func(t *testing.T) {
|
||||
assert.True(t, builder.matchPattern("myapp-linux-amd64", "*-linux-amd64"))
|
||||
})
|
||||
|
||||
t.Run("does not match different pattern", func(t *testing.T) {
|
||||
assert.False(t, builder.matchPattern("myapp-linux-amd64", "*-darwin-arm64"))
|
||||
})
|
||||
|
||||
t.Run("matches wildcard", func(t *testing.T) {
|
||||
assert.True(t, builder.matchPattern("test_linux_arm64.bin", "*_linux_arm64*"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestTaskfileBuilder_Interface_Good(t *testing.T) {
|
||||
// Verify TaskfileBuilder implements Builder interface
|
||||
var _ build.Builder = (*TaskfileBuilder)(nil)
|
||||
var _ build.Builder = NewTaskfileBuilder()
|
||||
}
|
||||
|
|
@ -1,248 +0,0 @@
|
|||
// Package builders provides build implementations for different project types.
|
||||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// WailsBuilder builds Wails desktop applications. It handles both Wails v3
// (via Taskfile delegation) and Wails v2 (via the `wails build` CLI).
type WailsBuilder struct{}
|
||||
|
||||
// NewWailsBuilder creates a new WailsBuilder instance.
|
||||
func NewWailsBuilder() *WailsBuilder {
|
||||
return &WailsBuilder{}
|
||||
}
|
||||
|
||||
// Name returns the builder's identifier.
|
||||
func (b *WailsBuilder) Name() string {
|
||||
return "wails"
|
||||
}
|
||||
|
||||
// Detect checks if this builder can handle the project in the given directory.
|
||||
// Uses IsWailsProject from the build package which checks for wails.json.
|
||||
func (b *WailsBuilder) Detect(fs io.Medium, dir string) (bool, error) {
|
||||
return build.IsWailsProject(fs, dir), nil
|
||||
}
|
||||
|
||||
// Build compiles the Wails project for the specified targets.
|
||||
// It detects the Wails version and chooses the appropriate build strategy:
|
||||
// - Wails v3: Delegates to Taskfile (error if missing)
|
||||
// - Wails v2: Uses 'wails build' command
|
||||
func (b *WailsBuilder) Build(ctx context.Context, cfg *build.Config, targets []build.Target) ([]build.Artifact, error) {
|
||||
if cfg == nil {
|
||||
return nil, errors.New("builders.WailsBuilder.Build: config is nil")
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
return nil, errors.New("builders.WailsBuilder.Build: no targets specified")
|
||||
}
|
||||
|
||||
// Detect Wails version
|
||||
isV3 := b.isWailsV3(cfg.FS, cfg.ProjectDir)
|
||||
|
||||
if isV3 {
|
||||
// Wails v3 strategy: Delegate to Taskfile
|
||||
taskBuilder := NewTaskfileBuilder()
|
||||
if detected, _ := taskBuilder.Detect(cfg.FS, cfg.ProjectDir); detected {
|
||||
return taskBuilder.Build(ctx, cfg, targets)
|
||||
}
|
||||
return nil, errors.New("wails v3 projects require a Taskfile for building")
|
||||
}
|
||||
|
||||
// Wails v2 strategy: Use 'wails build'
|
||||
// Ensure output directory exists
|
||||
if err := cfg.FS.EnsureDir(cfg.OutputDir); err != nil {
|
||||
return nil, fmt.Errorf("builders.WailsBuilder.Build: failed to create output directory: %w", err)
|
||||
}
|
||||
|
||||
// Note: Wails v2 handles frontend installation/building automatically via wails.json config
|
||||
|
||||
var artifacts []build.Artifact
|
||||
|
||||
for _, target := range targets {
|
||||
artifact, err := b.buildV2Target(ctx, cfg, target)
|
||||
if err != nil {
|
||||
return artifacts, fmt.Errorf("builders.WailsBuilder.Build: failed to build %s: %w", target.String(), err)
|
||||
}
|
||||
artifacts = append(artifacts, artifact)
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// isWailsV3 checks if the project uses Wails v3 by inspecting go.mod.
|
||||
func (b *WailsBuilder) isWailsV3(fs io.Medium, dir string) bool {
|
||||
goModPath := filepath.Join(dir, "go.mod")
|
||||
content, err := fs.Read(goModPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(content, "github.com/wailsapp/wails/v3")
|
||||
}
|
||||
|
||||
// buildV2Target compiles for a single target platform using wails (v2).
|
||||
func (b *WailsBuilder) buildV2Target(ctx context.Context, cfg *build.Config, target build.Target) (build.Artifact, error) {
|
||||
// Determine output binary name
|
||||
binaryName := cfg.Name
|
||||
if binaryName == "" {
|
||||
binaryName = filepath.Base(cfg.ProjectDir)
|
||||
}
|
||||
|
||||
// Build the wails build arguments
|
||||
args := []string{"build"}
|
||||
|
||||
// Platform
|
||||
args = append(args, "-platform", fmt.Sprintf("%s/%s", target.OS, target.Arch))
|
||||
|
||||
// Output (Wails v2 uses -o for the binary name, relative to build/bin usually, but we want to control it)
|
||||
// Actually, Wails v2 is opinionated about output dir (build/bin).
|
||||
// We might need to copy artifacts after build if we want them in cfg.OutputDir.
|
||||
// For now, let's try to let Wails do its thing and find the artifact.
|
||||
|
||||
// Create the command
|
||||
cmd := exec.CommandContext(ctx, "wails", args...)
|
||||
cmd.Dir = cfg.ProjectDir
|
||||
|
||||
// Capture output for error messages
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return build.Artifact{}, fmt.Errorf("wails build failed: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
// Wails v2 typically outputs to build/bin
|
||||
// We need to move/copy it to our desired output dir
|
||||
|
||||
// Construct the source path where Wails v2 puts the binary
|
||||
wailsOutputDir := filepath.Join(cfg.ProjectDir, "build", "bin")
|
||||
|
||||
// Find the artifact in Wails output dir
|
||||
sourcePath, err := b.findArtifact(cfg.FS, wailsOutputDir, binaryName, target)
|
||||
if err != nil {
|
||||
return build.Artifact{}, fmt.Errorf("failed to find Wails v2 build artifact: %w", err)
|
||||
}
|
||||
|
||||
// Move/Copy to our output dir
|
||||
// Create platform specific dir in our output
|
||||
platformDir := filepath.Join(cfg.OutputDir, fmt.Sprintf("%s_%s", target.OS, target.Arch))
|
||||
if err := cfg.FS.EnsureDir(platformDir); err != nil {
|
||||
return build.Artifact{}, fmt.Errorf("failed to create output dir: %w", err)
|
||||
}
|
||||
|
||||
destPath := filepath.Join(platformDir, filepath.Base(sourcePath))
|
||||
|
||||
// Simple copy using the medium
|
||||
content, err := cfg.FS.Read(sourcePath)
|
||||
if err != nil {
|
||||
return build.Artifact{}, err
|
||||
}
|
||||
if err := cfg.FS.Write(destPath, content); err != nil {
|
||||
return build.Artifact{}, err
|
||||
}
|
||||
|
||||
return build.Artifact{
|
||||
Path: destPath,
|
||||
OS: target.OS,
|
||||
Arch: target.Arch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// findArtifact locates the built artifact based on the target platform.
|
||||
func (b *WailsBuilder) findArtifact(fs io.Medium, platformDir, binaryName string, target build.Target) (string, error) {
|
||||
var candidates []string
|
||||
|
||||
switch target.OS {
|
||||
case "windows":
|
||||
// Look for NSIS installer first, then plain exe
|
||||
candidates = []string{
|
||||
filepath.Join(platformDir, binaryName+"-installer.exe"),
|
||||
filepath.Join(platformDir, binaryName+".exe"),
|
||||
filepath.Join(platformDir, binaryName+"-amd64-installer.exe"),
|
||||
}
|
||||
case "darwin":
|
||||
// Look for .dmg, then .app bundle, then plain binary
|
||||
candidates = []string{
|
||||
filepath.Join(platformDir, binaryName+".dmg"),
|
||||
filepath.Join(platformDir, binaryName+".app"),
|
||||
filepath.Join(platformDir, binaryName),
|
||||
}
|
||||
default:
|
||||
// Linux and others: look for plain binary
|
||||
candidates = []string{
|
||||
filepath.Join(platformDir, binaryName),
|
||||
}
|
||||
}
|
||||
|
||||
// Try each candidate
|
||||
for _, candidate := range candidates {
|
||||
if fs.Exists(candidate) {
|
||||
return candidate, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If no specific candidate found, try to find any executable or package in the directory
|
||||
entries, err := fs.List(platformDir)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read platform directory: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
name := entry.Name()
|
||||
// Skip common non-artifact files
|
||||
if strings.HasSuffix(name, ".go") || strings.HasSuffix(name, ".json") {
|
||||
continue
|
||||
}
|
||||
|
||||
path := filepath.Join(platformDir, name)
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// On Unix, check if it's executable; on Windows, check for .exe
|
||||
if target.OS == "windows" {
|
||||
if strings.HasSuffix(name, ".exe") {
|
||||
return path, nil
|
||||
}
|
||||
} else if info.Mode()&0111 != 0 || entry.IsDir() {
|
||||
// Executable file or directory (.app bundle)
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no artifact found in %s", platformDir)
|
||||
}
|
||||
|
||||
// detectPackageManager detects the frontend package manager based on lock files.
|
||||
// Returns "bun", "pnpm", "yarn", or "npm" (default).
|
||||
func detectPackageManager(fs io.Medium, dir string) string {
|
||||
// Check in priority order: bun, pnpm, yarn, npm
|
||||
lockFiles := []struct {
|
||||
file string
|
||||
manager string
|
||||
}{
|
||||
{"bun.lockb", "bun"},
|
||||
{"pnpm-lock.yaml", "pnpm"},
|
||||
{"yarn.lock", "yarn"},
|
||||
{"package-lock.json", "npm"},
|
||||
}
|
||||
|
||||
for _, lf := range lockFiles {
|
||||
if fs.IsFile(filepath.Join(dir, lf.file)) {
|
||||
return lf.manager
|
||||
}
|
||||
}
|
||||
|
||||
// Default to npm if no lock file found
|
||||
return "npm"
|
||||
}
|
||||
|
||||
// Ensure WailsBuilder implements the Builder interface.
|
||||
var _ build.Builder = (*WailsBuilder)(nil)
|
||||
|
|
@ -1,416 +0,0 @@
|
|||
package builders
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupWailsTestProject creates a minimal Wails project structure for testing:
// a wails.json, a go.mod requiring wails v3, a trivial main.go, and a
// Taskfile.yml whose build task just creates the expected output file.
// Returns the project root (cleaned up automatically via t.TempDir).
func setupWailsTestProject(t *testing.T) string {
	t.Helper()
	dir := t.TempDir()

	// Create wails.json
	wailsJSON := `{
  "name": "testapp",
  "outputfilename": "testapp"
}`
	err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte(wailsJSON), 0644)
	require.NoError(t, err)

	// Create a minimal go.mod
	goMod := `module testapp

go 1.21

require github.com/wailsapp/wails/v3 v3.0.0
`
	err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
	require.NoError(t, err)

	// Create a minimal main.go
	mainGo := `package main

func main() {
	println("hello wails")
}
`
	err = os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644)
	require.NoError(t, err)

	// Create a minimal Taskfile.yml
	taskfile := `version: '3'
tasks:
  build:
    cmds:
      - mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}
      - touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp
`
	err = os.WriteFile(filepath.Join(dir, "Taskfile.yml"), []byte(taskfile), 0644)
	require.NoError(t, err)

	return dir
}
|
||||
|
||||
// setupWailsV2TestProject creates a Wails v2 project structure: a bare
// wails.json plus a go.mod that requires wails v2 instead of v3.
func setupWailsV2TestProject(t *testing.T) string {
	t.Helper()
	dir := t.TempDir()

	// wails.json
	err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
	require.NoError(t, err)

	// go.mod with v2
	goMod := `module testapp
go 1.21
require github.com/wailsapp/wails/v2 v2.8.0
`
	err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644)
	require.NoError(t, err)

	return dir
}
|
||||
|
||||
// TestWailsBuilder_Build_Taskfile_Good verifies that Build delegates to a
// project Taskfile when one is present. Skipped in -short mode and when the
// `task` binary is not on PATH.
func TestWailsBuilder_Build_Taskfile_Good(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	// Check if task is available
	if _, err := exec.LookPath("task"); err != nil {
		t.Skip("task not installed, skipping test")
	}

	t.Run("delegates to Taskfile if present", func(t *testing.T) {
		fs := io.Local
		projectDir := setupWailsTestProject(t)
		outputDir := t.TempDir()

		// Create a Taskfile that just touches a file
		taskfile := `version: '3'
tasks:
  build:
    cmds:
      - mkdir -p {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}
      - touch {{.OUTPUT_DIR}}/{{.GOOS}}_{{.GOARCH}}/testapp
`
		err := os.WriteFile(filepath.Join(projectDir, "Taskfile.yml"), []byte(taskfile), 0644)
		require.NoError(t, err)

		builder := NewWailsBuilder()
		cfg := &build.Config{
			FS:         fs,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "testapp",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		assert.NotEmpty(t, artifacts)
	})
}
|
||||
|
||||
// TestWailsBuilder_Name_Good verifies the builder reports its name as "wails".
func TestWailsBuilder_Name_Good(t *testing.T) {
	builder := NewWailsBuilder()
	assert.Equal(t, "wails", builder.Name())
}
|
||||
|
||||
// TestWailsBuilder_Build_V2_Good checks that Build at least attempts a Wails
// v2 build. Skipped in -short mode and when `wails` is not on PATH; the build
// error itself is tolerated because the fixture is not a complete v2 project.
func TestWailsBuilder_Build_V2_Good(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	if _, err := exec.LookPath("wails"); err != nil {
		t.Skip("wails not installed, skipping integration test")
	}

	t.Run("builds v2 project", func(t *testing.T) {
		fs := io.Local
		projectDir := setupWailsV2TestProject(t)
		outputDir := t.TempDir()

		builder := NewWailsBuilder()
		cfg := &build.Config{
			FS:         fs,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "testapp",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// This will likely fail in a real run because we can't easily mock the full wails v2 build process
		// (which needs a valid project with main.go etc).
		// But it validates we are trying to run the command.
		// For now, we just verify it attempts the build - error is expected
		_, _ = builder.Build(context.Background(), cfg, targets)
	})
}
|
||||
|
||||
// TestWailsBuilder_Detect_Good verifies Detect recognises a directory as a
// Wails project exactly when a wails.json file is present, and rejects
// Go-only, Node-only, and empty directories.
func TestWailsBuilder_Detect_Good(t *testing.T) {
	fs := io.Local
	t.Run("detects Wails project with wails.json", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
		require.NoError(t, err)

		builder := NewWailsBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.True(t, detected)
	})

	t.Run("returns false for Go-only project", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module test"), 0644)
		require.NoError(t, err)

		builder := NewWailsBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})

	t.Run("returns false for Node.js project", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "package.json"), []byte("{}"), 0644)
		require.NoError(t, err)

		builder := NewWailsBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})

	t.Run("returns false for empty directory", func(t *testing.T) {
		dir := t.TempDir()

		builder := NewWailsBuilder()
		detected, err := builder.Detect(fs, dir)
		assert.NoError(t, err)
		assert.False(t, detected)
	})
}
|
||||
|
||||
// TestDetectPackageManager_Good verifies lock-file detection for each package
// manager, the npm fallback, and the bun > pnpm > yarn > npm priority order
// when multiple lock files coexist.
func TestDetectPackageManager_Good(t *testing.T) {
	fs := io.Local
	t.Run("detects bun from bun.lockb", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644)
		require.NoError(t, err)

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "bun", result)
	})

	t.Run("detects pnpm from pnpm-lock.yaml", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644)
		require.NoError(t, err)

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "pnpm", result)
	})

	t.Run("detects yarn from yarn.lock", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644)
		require.NoError(t, err)

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "yarn", result)
	})

	t.Run("detects npm from package-lock.json", func(t *testing.T) {
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644)
		require.NoError(t, err)

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "npm", result)
	})

	t.Run("defaults to npm when no lock file", func(t *testing.T) {
		dir := t.TempDir()

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "npm", result)
	})

	t.Run("prefers bun over other lock files", func(t *testing.T) {
		dir := t.TempDir()
		// Create multiple lock files
		require.NoError(t, os.WriteFile(filepath.Join(dir, "bun.lockb"), []byte(""), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "bun", result)
	})

	t.Run("prefers pnpm over yarn and npm", func(t *testing.T) {
		dir := t.TempDir()
		// Create multiple lock files (no bun)
		require.NoError(t, os.WriteFile(filepath.Join(dir, "pnpm-lock.yaml"), []byte(""), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "pnpm", result)
	})

	t.Run("prefers yarn over npm", func(t *testing.T) {
		dir := t.TempDir()
		// Create multiple lock files (no bun or pnpm)
		require.NoError(t, os.WriteFile(filepath.Join(dir, "yarn.lock"), []byte(""), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(dir, "package-lock.json"), []byte(""), 0644))

		result := detectPackageManager(fs, dir)
		assert.Equal(t, "yarn", result)
	})
}
|
||||
|
||||
// TestWailsBuilder_Build_Bad verifies Build's input validation: a nil config
// and an empty target list must both be rejected with descriptive errors.
func TestWailsBuilder_Build_Bad(t *testing.T) {
	t.Run("returns error for nil config", func(t *testing.T) {
		builder := NewWailsBuilder()

		artifacts, err := builder.Build(context.Background(), nil, []build.Target{{OS: "linux", Arch: "amd64"}})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "config is nil")
	})

	t.Run("returns error for empty targets", func(t *testing.T) {
		projectDir := setupWailsTestProject(t)

		builder := NewWailsBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}

		artifacts, err := builder.Build(context.Background(), cfg, []build.Target{})
		assert.Error(t, err)
		assert.Nil(t, artifacts)
		assert.Contains(t, err.Error(), "no targets specified")
	})
}
|
||||
|
||||
// TestWailsBuilder_Build_Good runs a real build for the current platform and
// checks the returned artifact's OS/Arch. Skipped in -short mode and when
// `wails3` is not on PATH.
func TestWailsBuilder_Build_Good(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	// Check if wails3 is available in PATH
	if _, err := exec.LookPath("wails3"); err != nil {
		t.Skip("wails3 not installed, skipping integration test")
	}

	t.Run("builds for current platform", func(t *testing.T) {
		projectDir := setupWailsTestProject(t)
		outputDir := t.TempDir()

		builder := NewWailsBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  outputDir,
			Name:       "testapp",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		artifacts, err := builder.Build(context.Background(), cfg, targets)
		require.NoError(t, err)
		require.Len(t, artifacts, 1)

		// Verify artifact properties
		artifact := artifacts[0]
		assert.Equal(t, runtime.GOOS, artifact.OS)
		assert.Equal(t, runtime.GOARCH, artifact.Arch)
	})
}
|
||||
|
||||
// TestWailsBuilder_Interface_Good asserts at compile time that both the
// pointer type and the constructor result satisfy build.Builder.
func TestWailsBuilder_Interface_Good(t *testing.T) {
	// Verify WailsBuilder implements Builder interface
	var _ build.Builder = (*WailsBuilder)(nil)
	var _ build.Builder = NewWailsBuilder()
}
|
||||
|
||||
// TestWailsBuilder_Ugly covers awkward edge cases: a project with no frontend
// directory must not panic, and an already-cancelled context must abort the
// build with an error and no artifacts.
func TestWailsBuilder_Ugly(t *testing.T) {
	t.Run("handles nonexistent frontend directory gracefully", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		// Create a Wails project without a frontend directory
		dir := t.TempDir()
		err := os.WriteFile(filepath.Join(dir, "wails.json"), []byte("{}"), 0644)
		require.NoError(t, err)

		builder := NewWailsBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: dir,
			OutputDir:  t.TempDir(),
			Name:       "test",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// This will fail because wails3 isn't set up, but it shouldn't panic
		// due to missing frontend directory
		_, err = builder.Build(context.Background(), cfg, targets)
		// We expect an error (wails3 build will fail), but not a panic
		// The error should be about wails3 build, not about frontend
		if err != nil {
			assert.NotContains(t, err.Error(), "frontend dependencies")
		}
	})

	t.Run("handles context cancellation", func(t *testing.T) {
		if testing.Short() {
			t.Skip("skipping integration test in short mode")
		}

		projectDir := setupWailsTestProject(t)

		builder := NewWailsBuilder()
		cfg := &build.Config{
			FS:         io.Local,
			ProjectDir: projectDir,
			OutputDir:  t.TempDir(),
			Name:       "canceltest",
		}
		targets := []build.Target{
			{OS: runtime.GOOS, Arch: runtime.GOARCH},
		}

		// Create an already cancelled context
		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		artifacts, err := builder.Build(ctx, cfg, targets)
		assert.Error(t, err)
		assert.Empty(t, artifacts)
	})
}
|
||||
|
|
@ -1,99 +0,0 @@
|
|||
// Package build provides project type detection and cross-compilation for the Core build system.
|
||||
package build
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"strings"
|
||||
|
||||
io_interface "forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Checksum computes SHA256 for an artifact and returns the artifact with the Checksum field filled.
|
||||
func Checksum(fs io_interface.Medium, artifact Artifact) (Artifact, error) {
|
||||
if artifact.Path == "" {
|
||||
return Artifact{}, errors.New("build.Checksum: artifact path is empty")
|
||||
}
|
||||
|
||||
// Open the file
|
||||
file, err := fs.Open(artifact.Path)
|
||||
if err != nil {
|
||||
return Artifact{}, fmt.Errorf("build.Checksum: failed to open file: %w", err)
|
||||
}
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
// Compute SHA256 hash
|
||||
hasher := sha256.New()
|
||||
if _, err := io.Copy(hasher, file); err != nil {
|
||||
return Artifact{}, fmt.Errorf("build.Checksum: failed to hash file: %w", err)
|
||||
}
|
||||
|
||||
checksum := hex.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
return Artifact{
|
||||
Path: artifact.Path,
|
||||
OS: artifact.OS,
|
||||
Arch: artifact.Arch,
|
||||
Checksum: checksum,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ChecksumAll computes checksums for all artifacts.
|
||||
// Returns a slice of artifacts with their Checksum fields filled.
|
||||
func ChecksumAll(fs io_interface.Medium, artifacts []Artifact) ([]Artifact, error) {
|
||||
if len(artifacts) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var checksummed []Artifact
|
||||
for _, artifact := range artifacts {
|
||||
cs, err := Checksum(fs, artifact)
|
||||
if err != nil {
|
||||
return checksummed, fmt.Errorf("build.ChecksumAll: failed to checksum %s: %w", artifact.Path, err)
|
||||
}
|
||||
checksummed = append(checksummed, cs)
|
||||
}
|
||||
|
||||
return checksummed, nil
|
||||
}
|
||||
|
||||
// WriteChecksumFile writes a CHECKSUMS.txt file with the format:
|
||||
//
|
||||
// sha256hash filename1
|
||||
// sha256hash filename2
|
||||
//
|
||||
// The artifacts should have their Checksum fields filled (call ChecksumAll first).
|
||||
// Filenames are relative to the output directory (just the basename).
|
||||
func WriteChecksumFile(fs io_interface.Medium, artifacts []Artifact, path string) error {
|
||||
if len(artifacts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build the content
|
||||
var lines []string
|
||||
for _, artifact := range artifacts {
|
||||
if artifact.Checksum == "" {
|
||||
return fmt.Errorf("build.WriteChecksumFile: artifact %s has no checksum", artifact.Path)
|
||||
}
|
||||
filename := filepath.Base(artifact.Path)
|
||||
lines = append(lines, fmt.Sprintf("%s %s", artifact.Checksum, filename))
|
||||
}
|
||||
|
||||
// Sort lines for consistent output
|
||||
slices.Sort(lines)
|
||||
|
||||
content := strings.Join(lines, "\n") + "\n"
|
||||
|
||||
// Write the file using the medium (which handles directory creation in Write)
|
||||
if err := fs.Write(path, content); err != nil {
|
||||
return fmt.Errorf("build.WriteChecksumFile: failed to write file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,282 +0,0 @@
|
|||
package build
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupChecksumTestFile creates a test file with known content and returns
// its path. The file lives in a fresh per-call temp dir (cleaned up
// automatically), so repeated calls never collide.
func setupChecksumTestFile(t *testing.T, content string) string {
	t.Helper()

	dir := t.TempDir()
	path := filepath.Join(dir, "testfile")
	err := os.WriteFile(path, []byte(content), 0644)
	require.NoError(t, err)

	return path
}
|
||||
|
||||
// TestChecksum_Good exercises the happy path of Checksum against real files
// on the local medium: known-answer hash, field preservation, hex length,
// and determinism across identical/differing content.
func TestChecksum_Good(t *testing.T) {
	fs := io.Local
	t.Run("computes SHA256 checksum", func(t *testing.T) {
		// Known SHA256 of "Hello, World!\n"
		path := setupChecksumTestFile(t, "Hello, World!\n")
		expectedChecksum := "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31"

		artifact := Artifact{
			Path: path,
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Checksum(fs, artifact)
		require.NoError(t, err)
		assert.Equal(t, expectedChecksum, result.Checksum)
	})

	t.Run("preserves artifact fields", func(t *testing.T) {
		path := setupChecksumTestFile(t, "test content")

		artifact := Artifact{
			Path: path,
			OS:   "darwin",
			Arch: "arm64",
		}

		result, err := Checksum(fs, artifact)
		require.NoError(t, err)

		assert.Equal(t, path, result.Path)
		assert.Equal(t, "darwin", result.OS)
		assert.Equal(t, "arm64", result.Arch)
		assert.NotEmpty(t, result.Checksum)
	})

	t.Run("produces 64 character hex string", func(t *testing.T) {
		path := setupChecksumTestFile(t, "any content")

		artifact := Artifact{Path: path, OS: "linux", Arch: "amd64"}

		result, err := Checksum(fs, artifact)
		require.NoError(t, err)

		// SHA256 produces 32 bytes = 64 hex characters
		assert.Len(t, result.Checksum, 64)
	})

	t.Run("different content produces different checksums", func(t *testing.T) {
		path1 := setupChecksumTestFile(t, "content one")
		path2 := setupChecksumTestFile(t, "content two")

		result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		assert.NotEqual(t, result1.Checksum, result2.Checksum)
	})

	t.Run("same content produces same checksum", func(t *testing.T) {
		content := "identical content"
		path1 := setupChecksumTestFile(t, content)
		path2 := setupChecksumTestFile(t, content)

		result1, err := Checksum(fs, Artifact{Path: path1, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		result2, err := Checksum(fs, Artifact{Path: path2, OS: "linux", Arch: "amd64"})
		require.NoError(t, err)

		assert.Equal(t, result1.Checksum, result2.Checksum)
	})
}
|
||||
|
||||
// TestChecksum_Bad verifies Checksum rejects an empty path and a missing
// file with descriptive errors and an empty result.
func TestChecksum_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns error for empty path", func(t *testing.T) {
		artifact := Artifact{
			Path: "",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Checksum(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "artifact path is empty")
		assert.Empty(t, result.Checksum)
	})

	t.Run("returns error for non-existent file", func(t *testing.T) {
		artifact := Artifact{
			Path: "/nonexistent/path/file",
			OS:   "linux",
			Arch: "amd64",
		}

		result, err := Checksum(fs, artifact)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to open file")
		assert.Empty(t, result.Checksum)
	})
}
|
||||
|
||||
// TestChecksumAll_Good verifies ChecksumAll fills checksums for every
// artifact while preserving their other fields, and returns nil for empty
// or nil input.
func TestChecksumAll_Good(t *testing.T) {
	fs := io.Local
	t.Run("checksums multiple artifacts", func(t *testing.T) {
		paths := []string{
			setupChecksumTestFile(t, "content one"),
			setupChecksumTestFile(t, "content two"),
			setupChecksumTestFile(t, "content three"),
		}

		artifacts := []Artifact{
			{Path: paths[0], OS: "linux", Arch: "amd64"},
			{Path: paths[1], OS: "darwin", Arch: "arm64"},
			{Path: paths[2], OS: "windows", Arch: "amd64"},
		}

		results, err := ChecksumAll(fs, artifacts)
		require.NoError(t, err)
		require.Len(t, results, 3)

		for i, result := range results {
			assert.Equal(t, artifacts[i].Path, result.Path)
			assert.Equal(t, artifacts[i].OS, result.OS)
			assert.Equal(t, artifacts[i].Arch, result.Arch)
			assert.NotEmpty(t, result.Checksum)
		}
	})

	t.Run("returns nil for empty slice", func(t *testing.T) {
		results, err := ChecksumAll(fs, []Artifact{})
		assert.NoError(t, err)
		assert.Nil(t, results)
	})

	t.Run("returns nil for nil slice", func(t *testing.T) {
		results, err := ChecksumAll(fs, nil)
		assert.NoError(t, err)
		assert.Nil(t, results)
	})
}
|
||||
|
||||
// TestChecksumAll_Bad verifies ChecksumAll stops at the first failure but
// still returns the artifacts successfully checksummed before it.
func TestChecksumAll_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns partial results on error", func(t *testing.T) {
		path := setupChecksumTestFile(t, "valid content")

		artifacts := []Artifact{
			{Path: path, OS: "linux", Arch: "amd64"},
			{Path: "/nonexistent/file", OS: "linux", Arch: "arm64"}, // This will fail
		}

		results, err := ChecksumAll(fs, artifacts)
		assert.Error(t, err)
		// Should have the first successful result
		assert.Len(t, results, 1)
		assert.NotEmpty(t, results[0].Checksum)
	})
}
|
||||
|
||||
// TestWriteChecksumFile_Good verifies the CHECKSUMS.txt output: sorted
// "checksum basename" lines, parent-directory creation, basename-only
// filenames, and no-op behaviour for empty/nil artifact lists.
func TestWriteChecksumFile_Good(t *testing.T) {
	fs := io.Local
	t.Run("writes checksum file with correct format", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/output/app_linux_amd64.tar.gz", Checksum: "abc123def456", OS: "linux", Arch: "amd64"},
			{Path: "/output/app_darwin_arm64.tar.gz", Checksum: "789xyz000111", OS: "darwin", Arch: "arm64"},
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		require.NoError(t, err)

		// Read and verify content
		content, err := os.ReadFile(checksumPath)
		require.NoError(t, err)

		lines := strings.Split(strings.TrimSpace(string(content)), "\n")
		require.Len(t, lines, 2)

		// Lines should be sorted alphabetically
		assert.Equal(t, "789xyz000111 app_darwin_arm64.tar.gz", lines[0])
		assert.Equal(t, "abc123def456 app_linux_amd64.tar.gz", lines[1])
	})

	t.Run("creates parent directories", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "nested", "deep", "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/output/app.tar.gz", Checksum: "abc123", OS: "linux", Arch: "amd64"},
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		require.NoError(t, err)
		assert.FileExists(t, checksumPath)
	})

	t.Run("does nothing for empty artifacts", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		err := WriteChecksumFile(fs, []Artifact{}, checksumPath)
		require.NoError(t, err)

		// File should not exist
		_, err = os.Stat(checksumPath)
		assert.True(t, os.IsNotExist(err))
	})

	t.Run("does nothing for nil artifacts", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		err := WriteChecksumFile(fs, nil, checksumPath)
		require.NoError(t, err)
	})

	t.Run("uses only basename for filenames", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/some/deep/nested/path/myapp_linux_amd64.tar.gz", Checksum: "checksum123", OS: "linux", Arch: "amd64"},
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		require.NoError(t, err)

		content, err := os.ReadFile(checksumPath)
		require.NoError(t, err)

		// Should only contain the basename
		assert.Contains(t, string(content), "myapp_linux_amd64.tar.gz")
		assert.NotContains(t, string(content), "/some/deep/nested/path/")
	})
}
|
||||
|
||||
// TestWriteChecksumFile_Bad verifies an artifact with an empty Checksum
// field is rejected (callers are expected to run ChecksumAll first).
func TestWriteChecksumFile_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns error for artifact without checksum", func(t *testing.T) {
		dir := t.TempDir()
		checksumPath := filepath.Join(dir, "CHECKSUMS.txt")

		artifacts := []Artifact{
			{Path: "/output/app.tar.gz", Checksum: "", OS: "linux", Arch: "amd64"}, // No checksum
		}

		err := WriteChecksumFile(fs, artifacts, checksumPath)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "has no checksum")
	})
}
|
||||
181
build/config.go
181
build/config.go
|
|
@ -1,181 +0,0 @@
|
|||
// Package build provides project type detection and cross-compilation for the Core build system.
|
||||
// This file handles configuration loading from .core/build.yaml files.
|
||||
package build
|
||||
|
||||
import (
	"errors"
	"fmt"
	"iter"
	"os"
	"path/filepath"

	"forge.lthn.ai/core/go-devops/build/signing"
	"forge.lthn.ai/core/go-io"
	"gopkg.in/yaml.v3"
)
|
||||
|
||||
// ConfigFileName is the name of the build configuration file.
const ConfigFileName = "build.yaml"

// ConfigDir is the directory where build configuration is stored.
const ConfigDir = ".core"

// BuildConfig holds the complete build configuration loaded from .core/build.yaml.
// This is distinct from Config, which holds runtime build parameters.
// Zero-value fields are filled in by applyDefaults after loading.
type BuildConfig struct {
	// Version is the config file format version.
	Version int `yaml:"version"`
	// Project contains project metadata.
	Project Project `yaml:"project"`
	// Build contains build settings.
	Build Build `yaml:"build"`
	// Targets defines the build targets.
	Targets []TargetConfig `yaml:"targets"`
	// Sign contains code signing configuration.
	Sign signing.SignConfig `yaml:"sign,omitempty"`
}

// Project holds project metadata.
type Project struct {
	// Name is the project name.
	Name string `yaml:"name"`
	// Description is a brief description of the project.
	Description string `yaml:"description"`
	// Main is the path to the main package (e.g., ./cmd/core).
	Main string `yaml:"main"`
	// Binary is the output binary name.
	Binary string `yaml:"binary"`
}

// Build holds build-time settings.
type Build struct {
	// CGO enables CGO for the build.
	CGO bool `yaml:"cgo"`
	// Flags are additional build flags (e.g., ["-trimpath"]).
	Flags []string `yaml:"flags"`
	// LDFlags are linker flags (e.g., ["-s", "-w"]).
	LDFlags []string `yaml:"ldflags"`
	// Env are additional environment variables.
	Env []string `yaml:"env"`
}

// TargetConfig defines a build target in the config file.
// This is separate from Target to allow for additional config-specific fields.
type TargetConfig struct {
	// OS is the target operating system (e.g., "linux", "darwin", "windows").
	OS string `yaml:"os"`
	// Arch is the target architecture (e.g., "amd64", "arm64").
	Arch string `yaml:"arch"`
}
|
||||
|
||||
// LoadConfig loads build configuration from the .core/build.yaml file in the given directory.
|
||||
// If the config file does not exist, it returns DefaultConfig().
|
||||
// Returns an error if the file exists but cannot be parsed.
|
||||
func LoadConfig(fs io.Medium, dir string) (*BuildConfig, error) {
|
||||
configPath := filepath.Join(dir, ConfigDir, ConfigFileName)
|
||||
|
||||
content, err := fs.Read(configPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return DefaultConfig(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("build.LoadConfig: failed to read config file: %w", err)
|
||||
}
|
||||
|
||||
var cfg BuildConfig
|
||||
data := []byte(content)
|
||||
if err := yaml.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("build.LoadConfig: failed to parse config file: %w", err)
|
||||
}
|
||||
|
||||
// Apply defaults for any missing fields
|
||||
applyDefaults(&cfg)
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// DefaultConfig returns sensible defaults for Go projects.
|
||||
func DefaultConfig() *BuildConfig {
|
||||
return &BuildConfig{
|
||||
Version: 1,
|
||||
Project: Project{
|
||||
Name: "",
|
||||
Main: ".",
|
||||
Binary: "",
|
||||
},
|
||||
Build: Build{
|
||||
CGO: false,
|
||||
Flags: []string{"-trimpath"},
|
||||
LDFlags: []string{"-s", "-w"},
|
||||
Env: []string{},
|
||||
},
|
||||
Targets: []TargetConfig{
|
||||
{OS: "linux", Arch: "amd64"},
|
||||
{OS: "linux", Arch: "arm64"},
|
||||
{OS: "darwin", Arch: "arm64"},
|
||||
{OS: "windows", Arch: "amd64"},
|
||||
},
|
||||
Sign: signing.DefaultSignConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
// applyDefaults fills in default values for any empty fields in the config.
|
||||
func applyDefaults(cfg *BuildConfig) {
|
||||
defaults := DefaultConfig()
|
||||
|
||||
if cfg.Version == 0 {
|
||||
cfg.Version = defaults.Version
|
||||
}
|
||||
|
||||
if cfg.Project.Main == "" {
|
||||
cfg.Project.Main = defaults.Project.Main
|
||||
}
|
||||
|
||||
if cfg.Build.Flags == nil {
|
||||
cfg.Build.Flags = defaults.Build.Flags
|
||||
}
|
||||
|
||||
if cfg.Build.LDFlags == nil {
|
||||
cfg.Build.LDFlags = defaults.Build.LDFlags
|
||||
}
|
||||
|
||||
if cfg.Build.Env == nil {
|
||||
cfg.Build.Env = defaults.Build.Env
|
||||
}
|
||||
|
||||
if len(cfg.Targets) == 0 {
|
||||
cfg.Targets = defaults.Targets
|
||||
}
|
||||
|
||||
// Expand environment variables in sign config
|
||||
cfg.Sign.ExpandEnv()
|
||||
}
|
||||
|
||||
// ConfigPath returns the path to the build config file for a given
// directory, i.e. <dir>/.core/build.yaml.
func ConfigPath(dir string) string {
	return filepath.Join(dir, ConfigDir, ConfigFileName)
}
|
||||
|
||||
// ConfigExists reports whether a build config file exists in the given
// directory.
func ConfigExists(fs io.Medium, dir string) bool {
	return fileExists(fs, ConfigPath(dir))
}
|
||||
|
||||
// TargetsIter returns an iterator for the build targets.
|
||||
func (cfg *BuildConfig) TargetsIter() iter.Seq[TargetConfig] {
|
||||
return func(yield func(TargetConfig) bool) {
|
||||
for _, t := range cfg.Targets {
|
||||
if !yield(t) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ToTargets converts TargetConfig slice to Target slice for use with builders.
|
||||
func (cfg *BuildConfig) ToTargets() []Target {
|
||||
targets := make([]Target, len(cfg.Targets))
|
||||
for i, t := range cfg.Targets {
|
||||
targets[i] = Target{OS: t.OS, Arch: t.Arch}
|
||||
}
|
||||
return targets
|
||||
}
|
||||
|
|
@ -1,324 +0,0 @@
|
|||
package build
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupConfigTestDir creates a temp directory with optional .core/build.yaml
// content. Passing an empty configContent yields a directory with no config
// file at all (used to exercise the defaults path).
func setupConfigTestDir(t *testing.T, configContent string) string {
	t.Helper()
	dir := t.TempDir()

	if configContent != "" {
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.WriteFile(configPath, []byte(configContent), 0644)
		require.NoError(t, err)
	}

	return dir
}
|
||||
|
||||
// TestLoadConfig_Good covers the happy paths of LoadConfig: a fully
// populated config file, a missing file (pure defaults), a partial file
// (defaults back-filled), and explicitly empty arrays (preserved as-is).
func TestLoadConfig_Good(t *testing.T) {
	fs := io.Local
	t.Run("loads valid config", func(t *testing.T) {
		content := `
version: 1
project:
  name: myapp
  description: A test application
  main: ./cmd/myapp
  binary: myapp
build:
  cgo: true
  flags:
    - -trimpath
    - -race
  ldflags:
    - -s
    - -w
  env:
    - FOO=bar
targets:
  - os: linux
    arch: amd64
  - os: darwin
    arch: arm64
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Every explicitly-set field should round-trip unchanged.
		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, "myapp", cfg.Project.Name)
		assert.Equal(t, "A test application", cfg.Project.Description)
		assert.Equal(t, "./cmd/myapp", cfg.Project.Main)
		assert.Equal(t, "myapp", cfg.Project.Binary)
		assert.True(t, cfg.Build.CGO)
		assert.Equal(t, []string{"-trimpath", "-race"}, cfg.Build.Flags)
		assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags)
		assert.Equal(t, []string{"FOO=bar"}, cfg.Build.Env)
		assert.Len(t, cfg.Targets, 2)
		assert.Equal(t, "linux", cfg.Targets[0].OS)
		assert.Equal(t, "amd64", cfg.Targets[0].Arch)
		assert.Equal(t, "darwin", cfg.Targets[1].OS)
		assert.Equal(t, "arm64", cfg.Targets[1].Arch)
	})

	t.Run("returns defaults when config file missing", func(t *testing.T) {
		dir := t.TempDir()

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// A missing file is not an error: the full default config is returned.
		defaults := DefaultConfig()
		assert.Equal(t, defaults.Version, cfg.Version)
		assert.Equal(t, defaults.Project.Main, cfg.Project.Main)
		assert.Equal(t, defaults.Build.CGO, cfg.Build.CGO)
		assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags)
		assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags)
		assert.Equal(t, defaults.Targets, cfg.Targets)
	})

	t.Run("applies defaults for missing fields", func(t *testing.T) {
		content := `
version: 2
project:
  name: partial
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Explicit values preserved
		assert.Equal(t, 2, cfg.Version)
		assert.Equal(t, "partial", cfg.Project.Name)

		// Defaults applied
		defaults := DefaultConfig()
		assert.Equal(t, defaults.Project.Main, cfg.Project.Main)
		assert.Equal(t, defaults.Build.Flags, cfg.Build.Flags)
		assert.Equal(t, defaults.Build.LDFlags, cfg.Build.LDFlags)
		assert.Equal(t, defaults.Targets, cfg.Targets)
	})

	t.Run("preserves empty arrays when explicitly set", func(t *testing.T) {
		content := `
version: 1
project:
  name: noflags
build:
  flags: []
  ldflags: []
targets:
  - os: linux
    arch: amd64
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Empty arrays are preserved (not replaced with defaults)
		assert.Empty(t, cfg.Build.Flags)
		assert.Empty(t, cfg.Build.LDFlags)
		// Targets explicitly set
		assert.Len(t, cfg.Targets, 1)
	})
}

// TestLoadConfig_Bad covers the failure paths of LoadConfig: malformed
// YAML and a config path that exists but cannot be read as a file.
func TestLoadConfig_Bad(t *testing.T) {
	fs := io.Local
	t.Run("returns error for invalid YAML", func(t *testing.T) {
		content := `
version: 1
project:
  name: [invalid yaml
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(fs, dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to parse config file")
	})

	t.Run("returns error for unreadable file", func(t *testing.T) {
		dir := t.TempDir()
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		// Create config as a directory instead of file
		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.Mkdir(configPath, 0755)
		require.NoError(t, err)

		cfg, err := LoadConfig(fs, dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to read config file")
	})
}
|
||||
|
||||
func TestDefaultConfig_Good(t *testing.T) {
|
||||
t.Run("returns sensible defaults", func(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
|
||||
assert.Equal(t, 1, cfg.Version)
|
||||
assert.Equal(t, ".", cfg.Project.Main)
|
||||
assert.Empty(t, cfg.Project.Name)
|
||||
assert.Empty(t, cfg.Project.Binary)
|
||||
assert.False(t, cfg.Build.CGO)
|
||||
assert.Contains(t, cfg.Build.Flags, "-trimpath")
|
||||
assert.Contains(t, cfg.Build.LDFlags, "-s")
|
||||
assert.Contains(t, cfg.Build.LDFlags, "-w")
|
||||
assert.Empty(t, cfg.Build.Env)
|
||||
|
||||
// Default targets cover common platforms
|
||||
assert.Len(t, cfg.Targets, 4)
|
||||
hasLinuxAmd64 := false
|
||||
hasDarwinArm64 := false
|
||||
hasWindowsAmd64 := false
|
||||
for _, t := range cfg.Targets {
|
||||
if t.OS == "linux" && t.Arch == "amd64" {
|
||||
hasLinuxAmd64 = true
|
||||
}
|
||||
if t.OS == "darwin" && t.Arch == "arm64" {
|
||||
hasDarwinArm64 = true
|
||||
}
|
||||
if t.OS == "windows" && t.Arch == "amd64" {
|
||||
hasWindowsAmd64 = true
|
||||
}
|
||||
}
|
||||
assert.True(t, hasLinuxAmd64)
|
||||
assert.True(t, hasDarwinArm64)
|
||||
assert.True(t, hasWindowsAmd64)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigPath_Good(t *testing.T) {
|
||||
t.Run("returns correct path", func(t *testing.T) {
|
||||
path := ConfigPath("/project/root")
|
||||
assert.Equal(t, "/project/root/.core/build.yaml", path)
|
||||
})
|
||||
}
|
||||
|
||||
// TestConfigExists_Good verifies ConfigExists for present, missing, and
// missing-parent-directory configurations.
func TestConfigExists_Good(t *testing.T) {
	fs := io.Local
	t.Run("returns true when config exists", func(t *testing.T) {
		dir := setupConfigTestDir(t, "version: 1")
		assert.True(t, ConfigExists(fs, dir))
	})

	t.Run("returns false when config missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(fs, dir))
	})

	t.Run("returns false when .core dir missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(fs, dir))
	})
}
|
||||
|
||||
func TestLoadConfig_Good_SignConfig(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
coreDir := filepath.Join(tmpDir, ".core")
|
||||
_ = os.MkdirAll(coreDir, 0755)
|
||||
|
||||
configContent := `version: 1
|
||||
sign:
|
||||
enabled: true
|
||||
gpg:
|
||||
key: "ABCD1234"
|
||||
macos:
|
||||
identity: "Developer ID Application: Test"
|
||||
notarize: true
|
||||
`
|
||||
_ = os.WriteFile(filepath.Join(coreDir, "build.yaml"), []byte(configContent), 0644)
|
||||
|
||||
cfg, err := LoadConfig(io.Local, tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if !cfg.Sign.Enabled {
|
||||
t.Error("expected Sign.Enabled to be true")
|
||||
}
|
||||
if cfg.Sign.GPG.Key != "ABCD1234" {
|
||||
t.Errorf("expected GPG.Key 'ABCD1234', got %q", cfg.Sign.GPG.Key)
|
||||
}
|
||||
if cfg.Sign.MacOS.Identity != "Developer ID Application: Test" {
|
||||
t.Errorf("expected MacOS.Identity, got %q", cfg.Sign.MacOS.Identity)
|
||||
}
|
||||
if !cfg.Sign.MacOS.Notarize {
|
||||
t.Error("expected MacOS.Notarize to be true")
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildConfig_ToTargets_Good verifies the TargetConfig -> Target
// conversion preserves order and handles the empty case.
func TestBuildConfig_ToTargets_Good(t *testing.T) {
	t.Run("converts TargetConfig to Target", func(t *testing.T) {
		cfg := &BuildConfig{
			Targets: []TargetConfig{
				{OS: "linux", Arch: "amd64"},
				{OS: "darwin", Arch: "arm64"},
				{OS: "windows", Arch: "386"},
			},
		}

		targets := cfg.ToTargets()
		require.Len(t, targets, 3)

		// Order must match the config order.
		assert.Equal(t, Target{OS: "linux", Arch: "amd64"}, targets[0])
		assert.Equal(t, Target{OS: "darwin", Arch: "arm64"}, targets[1])
		assert.Equal(t, Target{OS: "windows", Arch: "386"}, targets[2])
	})

	t.Run("returns empty slice for no targets", func(t *testing.T) {
		cfg := &BuildConfig{
			Targets: []TargetConfig{},
		}

		targets := cfg.ToTargets()
		assert.Empty(t, targets)
	})
}

// TestLoadConfig_Testdata tests loading from the testdata fixture.
// This is an integration-style check against a realistic on-disk project
// layout under testdata/config-project.
func TestLoadConfig_Testdata(t *testing.T) {
	fs := io.Local
	abs, err := filepath.Abs("testdata/config-project")
	require.NoError(t, err)

	t.Run("loads config-project fixture", func(t *testing.T) {
		cfg, err := LoadConfig(fs, abs)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, "example-cli", cfg.Project.Name)
		assert.Equal(t, "An example CLI application", cfg.Project.Description)
		assert.Equal(t, "./cmd/example", cfg.Project.Main)
		assert.Equal(t, "example", cfg.Project.Binary)
		assert.False(t, cfg.Build.CGO)
		assert.Equal(t, []string{"-trimpath"}, cfg.Build.Flags)
		assert.Equal(t, []string{"-s", "-w"}, cfg.Build.LDFlags)
		assert.Len(t, cfg.Targets, 3)
	})
}
|
||||
|
|
@ -1,94 +0,0 @@
|
|||
package build
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Marker files for project type detection.
const (
	markerGoMod       = "go.mod"
	markerWails       = "wails.json"
	markerNodePackage = "package.json"
	markerComposer    = "composer.json"
)

// projectMarker maps a marker file to its project type.
type projectMarker struct {
	file        string      // file name probed for in the project root
	projectType ProjectType // type reported when the file is present
}

// markers defines the detection order. More specific types come first.
// Wails projects have both wails.json and go.mod, so wails is checked first.
var markers = []projectMarker{
	{markerWails, ProjectTypeWails},
	{markerGoMod, ProjectTypeGo},
	{markerNodePackage, ProjectTypeNode},
	{markerComposer, ProjectTypePHP},
}
|
||||
|
||||
// Discover detects project types in the given directory by checking for marker files.
|
||||
// Returns a slice of detected project types, ordered by priority (most specific first).
|
||||
// For example, a Wails project returns [wails, go] since it has both wails.json and go.mod.
|
||||
func Discover(fs io.Medium, dir string) ([]ProjectType, error) {
|
||||
var detected []ProjectType
|
||||
|
||||
for _, m := range markers {
|
||||
path := filepath.Join(dir, m.file)
|
||||
if fileExists(fs, path) {
|
||||
// Avoid duplicates (shouldn't happen with current markers, but defensive)
|
||||
if !slices.Contains(detected, m.projectType) {
|
||||
detected = append(detected, m.projectType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return detected, nil
|
||||
}
|
||||
|
||||
// PrimaryType returns the most specific project type detected in the directory.
|
||||
// Returns empty string if no project type is detected.
|
||||
func PrimaryType(fs io.Medium, dir string) (ProjectType, error) {
|
||||
types, err := Discover(fs, dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(types) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return types[0], nil
|
||||
}
|
||||
|
||||
// IsGoProject checks if the directory contains a Go project (go.mod or wails.json).
|
||||
func IsGoProject(fs io.Medium, dir string) bool {
|
||||
return fileExists(fs, filepath.Join(dir, markerGoMod)) ||
|
||||
fileExists(fs, filepath.Join(dir, markerWails))
|
||||
}
|
||||
|
||||
// IsWailsProject checks if the directory contains a Wails project.
|
||||
func IsWailsProject(fs io.Medium, dir string) bool {
|
||||
return fileExists(fs, filepath.Join(dir, markerWails))
|
||||
}
|
||||
|
||||
// IsNodeProject checks if the directory contains a Node.js project.
|
||||
func IsNodeProject(fs io.Medium, dir string) bool {
|
||||
return fileExists(fs, filepath.Join(dir, markerNodePackage))
|
||||
}
|
||||
|
||||
// IsPHPProject checks if the directory contains a PHP project.
|
||||
func IsPHPProject(fs io.Medium, dir string) bool {
|
||||
return fileExists(fs, filepath.Join(dir, markerComposer))
|
||||
}
|
||||
|
||||
// IsCPPProject checks if the directory contains a C++ project (CMakeLists.txt).
|
||||
func IsCPPProject(fs io.Medium, dir string) bool {
|
||||
return fileExists(fs, filepath.Join(dir, "CMakeLists.txt"))
|
||||
}
|
||||
|
||||
// fileExists checks if a file exists and is not a directory.
// Underlying stat failures (missing path, permission denied) surface as a
// false return rather than an error, which is why Discover never fails on
// unreadable directories.
func fileExists(fs io.Medium, path string) bool {
	return fs.IsFile(path)
}
|
||||
|
|
@ -1,228 +0,0 @@
|
|||
package build
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupTestDir creates a temporary directory with the specified marker files.
|
||||
func setupTestDir(t *testing.T, markers ...string) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
for _, m := range markers {
|
||||
path := filepath.Join(dir, m)
|
||||
err := os.WriteFile(path, []byte("{}"), 0644)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// TestDiscover_Good verifies single-type, multi-type, priority-ordered,
// and empty-directory detection.
func TestDiscover_Good(t *testing.T) {
	fs := io.Local
	t.Run("detects Go project", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeGo}, types)
	})

	t.Run("detects Wails project with priority over Go", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json", "go.mod")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		// wails is listed first in the markers table, so it leads the result.
		assert.Equal(t, []ProjectType{ProjectTypeWails, ProjectTypeGo}, types)
	})

	t.Run("detects Node.js project", func(t *testing.T) {
		dir := setupTestDir(t, "package.json")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeNode}, types)
	})

	t.Run("detects PHP project", func(t *testing.T) {
		dir := setupTestDir(t, "composer.json")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypePHP}, types)
	})

	t.Run("detects multiple project types", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod", "package.json")
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, []ProjectType{ProjectTypeGo, ProjectTypeNode}, types)
	})

	t.Run("empty directory returns empty slice", func(t *testing.T) {
		dir := t.TempDir()
		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Empty(t, types)
	})
}

// TestDiscover_Bad verifies Discover degrades gracefully on paths it
// cannot stat and on markers that are directories rather than files.
func TestDiscover_Bad(t *testing.T) {
	fs := io.Local
	t.Run("non-existent directory returns empty slice", func(t *testing.T) {
		types, err := Discover(fs, "/non/existent/path")
		assert.NoError(t, err) // os.Stat fails silently in fileExists
		assert.Empty(t, types)
	})

	t.Run("directory marker is ignored", func(t *testing.T) {
		dir := t.TempDir()
		// Create go.mod as a directory instead of a file
		err := os.Mkdir(filepath.Join(dir, "go.mod"), 0755)
		require.NoError(t, err)

		types, err := Discover(fs, dir)
		assert.NoError(t, err)
		assert.Empty(t, types)
	})
}

// TestPrimaryType_Good verifies PrimaryType returns the highest-priority
// detected type, or the empty string when nothing is detected.
func TestPrimaryType_Good(t *testing.T) {
	fs := io.Local
	t.Run("returns wails for wails project", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json", "go.mod")
		primary, err := PrimaryType(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, ProjectTypeWails, primary)
	})

	t.Run("returns go for go-only project", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		primary, err := PrimaryType(fs, dir)
		assert.NoError(t, err)
		assert.Equal(t, ProjectTypeGo, primary)
	})

	t.Run("returns empty string for empty directory", func(t *testing.T) {
		dir := t.TempDir()
		primary, err := PrimaryType(fs, dir)
		assert.NoError(t, err)
		assert.Empty(t, primary)
	})
}
|
||||
|
||||
// TestIsGoProject_Good: go.mod or wails.json both count as a Go project.
func TestIsGoProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with go.mod", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		assert.True(t, IsGoProject(fs, dir))
	})

	t.Run("true with wails.json", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json")
		assert.True(t, IsGoProject(fs, dir))
	})

	t.Run("false without markers", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, IsGoProject(fs, dir))
	})
}

// TestIsWailsProject_Good: only wails.json marks a Wails project.
func TestIsWailsProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with wails.json", func(t *testing.T) {
		dir := setupTestDir(t, "wails.json")
		assert.True(t, IsWailsProject(fs, dir))
	})

	t.Run("false with only go.mod", func(t *testing.T) {
		dir := setupTestDir(t, "go.mod")
		assert.False(t, IsWailsProject(fs, dir))
	})
}

// TestIsNodeProject_Good: package.json marks a Node.js project.
func TestIsNodeProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with package.json", func(t *testing.T) {
		dir := setupTestDir(t, "package.json")
		assert.True(t, IsNodeProject(fs, dir))
	})

	t.Run("false without package.json", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, IsNodeProject(fs, dir))
	})
}

// TestIsPHPProject_Good: composer.json marks a PHP project.
func TestIsPHPProject_Good(t *testing.T) {
	fs := io.Local
	t.Run("true with composer.json", func(t *testing.T) {
		dir := setupTestDir(t, "composer.json")
		assert.True(t, IsPHPProject(fs, dir))
	})

	t.Run("false without composer.json", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, IsPHPProject(fs, dir))
	})
}

// TestTarget_Good verifies Target.String renders as "os/arch".
func TestTarget_Good(t *testing.T) {
	target := Target{OS: "linux", Arch: "amd64"}
	assert.Equal(t, "linux/amd64", target.String())
}

// TestFileExists_Good verifies fileExists: true only for regular files,
// false for directories and missing paths.
func TestFileExists_Good(t *testing.T) {
	fs := io.Local
	t.Run("returns true for existing file", func(t *testing.T) {
		dir := t.TempDir()
		path := filepath.Join(dir, "test.txt")
		err := os.WriteFile(path, []byte("content"), 0644)
		require.NoError(t, err)
		assert.True(t, fileExists(fs, path))
	})

	t.Run("returns false for directory", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, fileExists(fs, dir))
	})

	t.Run("returns false for non-existent path", func(t *testing.T) {
		assert.False(t, fileExists(fs, "/non/existent/file"))
	})
}
|
||||
|
||||
// TestDiscover_Testdata tests discovery using the testdata fixtures.
// These serve as integration tests with realistic project structures.
func TestDiscover_Testdata(t *testing.T) {
	fs := io.Local
	testdataDir, err := filepath.Abs("testdata")
	require.NoError(t, err)

	tests := []struct {
		name     string
		dir      string        // fixture directory under testdata/
		expected []ProjectType // detection result, priority order
	}{
		{"go-project", "go-project", []ProjectType{ProjectTypeGo}},
		{"wails-project", "wails-project", []ProjectType{ProjectTypeWails, ProjectTypeGo}},
		{"node-project", "node-project", []ProjectType{ProjectTypeNode}},
		{"php-project", "php-project", []ProjectType{ProjectTypePHP}},
		{"multi-project", "multi-project", []ProjectType{ProjectTypeGo, ProjectTypeNode}},
		{"empty-project", "empty-project", []ProjectType{}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			dir := filepath.Join(testdataDir, tt.dir)
			types, err := Discover(fs, dir)
			assert.NoError(t, err)
			// Discover returns a nil slice for no matches; assert.Empty
			// accepts both nil and []ProjectType{}, so branch on length.
			if len(tt.expected) == 0 {
				assert.Empty(t, types)
			} else {
				assert.Equal(t, tt.expected, types)
			}
		})
	}
}
|
||||
|
|
@ -1,104 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// MacOSSigner signs binaries using macOS codesign.
type MacOSSigner struct {
	config MacOSConfig // signing identity and Apple notarization credentials
}

// Compile-time interface check.
var _ Signer = (*MacOSSigner)(nil)

// NewMacOSSigner creates a new macOS signer.
func NewMacOSSigner(cfg MacOSConfig) *MacOSSigner {
	return &MacOSSigner{config: cfg}
}

// Name returns "codesign".
func (s *MacOSSigner) Name() string {
	return "codesign"
}
|
||||
|
||||
// Available checks if running on macOS with codesign and identity configured.
|
||||
func (s *MacOSSigner) Available() bool {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return false
|
||||
}
|
||||
if s.config.Identity == "" {
|
||||
return false
|
||||
}
|
||||
_, err := exec.LookPath("codesign")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Sign codesigns a binary with hardened runtime.
// The fs parameter is unused here; it is part of the Signer interface.
// Returns an error (with captured tool output) if codesign fails.
func (s *MacOSSigner) Sign(ctx context.Context, fs io.Medium, binary string) error {
	if !s.Available() {
		return errors.New("codesign.Sign: codesign not available")
	}

	cmd := exec.CommandContext(ctx, "codesign",
		"--sign", s.config.Identity,
		"--timestamp",
		"--options", "runtime", // Hardened runtime for notarization
		"--force",
		binary,
	)

	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("codesign.Sign: %w\nOutput: %s", err, string(output))
	}

	return nil
}

// Notarize submits binary to Apple for notarization and staples the ticket.
// This blocks until Apple responds (typically 1-5 minutes).
// Pipeline: zip the binary, `xcrun notarytool submit --wait`, then
// `xcrun stapler staple` the ticket onto the binary.
func (s *MacOSSigner) Notarize(ctx context.Context, fs io.Medium, binary string) error {
	if s.config.AppleID == "" || s.config.TeamID == "" || s.config.AppPassword == "" {
		return errors.New("codesign.Notarize: missing Apple credentials (apple_id, team_id, app_password)")
	}

	// Create ZIP for submission (-j strips directory paths inside the zip).
	zipPath := binary + ".zip"
	zipCmd := exec.CommandContext(ctx, "zip", "-j", zipPath, binary)
	if output, err := zipCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("codesign.Notarize: failed to create zip: %w\nOutput: %s", err, string(output))
	}
	// Best-effort cleanup of the temporary zip; deletion errors are ignored.
	defer func() { _ = fs.Delete(zipPath) }()

	// Submit to Apple and wait
	submitCmd := exec.CommandContext(ctx, "xcrun", "notarytool", "submit",
		zipPath,
		"--apple-id", s.config.AppleID,
		"--team-id", s.config.TeamID,
		"--password", s.config.AppPassword,
		"--wait",
	)
	if output, err := submitCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("codesign.Notarize: notarization failed: %w\nOutput: %s", err, string(output))
	}

	// Staple the ticket
	stapleCmd := exec.CommandContext(ctx, "xcrun", "stapler", "staple", binary)
	if output, err := stapleCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("codesign.Notarize: failed to staple: %w\nOutput: %s", err, string(output))
	}

	return nil
}

// ShouldNotarize returns true if notarization is enabled.
func (s *MacOSSigner) ShouldNotarize() bool {
	return s.config.Notarize
}
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestMacOSSigner_Good_Name verifies the signer's identifier.
func TestMacOSSigner_Good_Name(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})
	assert.Equal(t, "codesign", s.Name())
}

// TestMacOSSigner_Good_Available: Available is platform-dependent, so on
// macOS we only check it does not panic; elsewhere it must be false.
func TestMacOSSigner_Good_Available(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{Identity: "Developer ID Application: Test"})

	if runtime.GOOS == "darwin" {
		// Just verify it doesn't panic
		_ = s.Available()
	} else {
		assert.False(t, s.Available())
	}
}

// TestMacOSSigner_Bad_NoIdentity: no identity means never available.
func TestMacOSSigner_Bad_NoIdentity(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{})
	assert.False(t, s.Available())
}

// TestMacOSSigner_Sign_Bad verifies Sign fails fast off-macOS.
func TestMacOSSigner_Sign_Bad(t *testing.T) {
	t.Run("fails when not available", func(t *testing.T) {
		if runtime.GOOS == "darwin" {
			t.Skip("skipping on macOS")
		}
		fs := io.Local
		s := NewMacOSSigner(MacOSConfig{Identity: "test"})
		err := s.Sign(context.Background(), fs, "test")
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "not available")
	})
}

// TestMacOSSigner_Notarize_Bad verifies Notarize rejects missing credentials
// before attempting any external commands.
func TestMacOSSigner_Notarize_Bad(t *testing.T) {
	fs := io.Local
	t.Run("fails with missing credentials", func(t *testing.T) {
		s := NewMacOSSigner(MacOSConfig{})
		err := s.Notarize(context.Background(), fs, "test")
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "missing Apple credentials")
	})
}

// TestMacOSSigner_ShouldNotarize mirrors the Notarize config flag.
func TestMacOSSigner_ShouldNotarize(t *testing.T) {
	s := NewMacOSSigner(MacOSConfig{Notarize: true})
	assert.True(t, s.ShouldNotarize())

	s2 := NewMacOSSigner(MacOSConfig{Notarize: false})
	assert.False(t, s2.ShouldNotarize())
}
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// GPGSigner signs files using GPG.
type GPGSigner struct {
	KeyID string // GPG key ID or fingerprint passed to --local-user
}

// Compile-time interface check.
var _ Signer = (*GPGSigner)(nil)

// NewGPGSigner creates a new GPG signer.
func NewGPGSigner(keyID string) *GPGSigner {
	return &GPGSigner{KeyID: keyID}
}

// Name returns "gpg".
func (s *GPGSigner) Name() string {
	return "gpg"
}
|
||||
|
||||
// Available checks if gpg is installed and key is configured.
|
||||
func (s *GPGSigner) Available() bool {
|
||||
if s.KeyID == "" {
|
||||
return false
|
||||
}
|
||||
_, err := exec.LookPath("gpg")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Sign creates a detached ASCII-armored signature.
// For file.txt, creates file.txt.asc
// The fs parameter is unused here; it is part of the Signer interface.
func (s *GPGSigner) Sign(ctx context.Context, fs io.Medium, file string) error {
	if !s.Available() {
		return errors.New("gpg.Sign: gpg not available or key not configured")
	}

	cmd := exec.CommandContext(ctx, "gpg",
		"--detach-sign",
		"--armor",
		"--local-user", s.KeyID,
		"--output", file+".asc",
		file,
	)

	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("gpg.Sign: %w\nOutput: %s", err, string(output))
	}

	return nil
}
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestGPGSigner_Good_Name verifies the signer's identifier.
func TestGPGSigner_Good_Name(t *testing.T) {
	s := NewGPGSigner("ABCD1234")
	assert.Equal(t, "gpg", s.Name())
}

// TestGPGSigner_Good_Available: gpg may or may not be installed on the
// test host, so only verify the call does not panic.
func TestGPGSigner_Good_Available(t *testing.T) {
	s := NewGPGSigner("ABCD1234")
	_ = s.Available()
}

// TestGPGSigner_Bad_NoKey: an empty key means never available.
func TestGPGSigner_Bad_NoKey(t *testing.T) {
	s := NewGPGSigner("")
	assert.False(t, s.Available())
}

// TestGPGSigner_Sign_Bad verifies Sign fails fast without a key.
func TestGPGSigner_Sign_Bad(t *testing.T) {
	fs := io.Local
	t.Run("fails when no key", func(t *testing.T) {
		s := NewGPGSigner("")
		err := s.Sign(context.Background(), fs, "test.txt")
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "not available or key not configured")
	})
}
|
||||
|
|
@ -1,97 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Artifact represents a build output that can be signed.
// This mirrors build.Artifact to avoid import cycles.
type Artifact struct {
	Path string // filesystem path to the binary/archive
	OS   string // target operating system (e.g. "darwin")
	Arch string // target architecture (e.g. "arm64")
}

// SignBinaries signs macOS binaries in the artifacts list.
// Only signs darwin binaries when running on macOS with a configured identity.
// Disabled signing, non-darwin hosts, and an unconfigured signer are all
// treated as a silent no-op (nil error) by design.
func SignBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error {
	if !cfg.Enabled {
		return nil
	}

	// Only sign on macOS
	if runtime.GOOS != "darwin" {
		return nil
	}

	signer := NewMacOSSigner(cfg.MacOS)
	if !signer.Available() {
		return nil // Silently skip if not configured
	}

	for _, artifact := range artifacts {
		// Cross-compiled non-darwin binaries cannot be codesigned; skip them.
		if artifact.OS != "darwin" {
			continue
		}

		fmt.Printf("  Signing %s...\n", artifact.Path)
		if err := signer.Sign(ctx, fs, artifact.Path); err != nil {
			return fmt.Errorf("failed to sign %s: %w", artifact.Path, err)
		}
	}

	return nil
}

// NotarizeBinaries notarizes macOS binaries if enabled.
// Unlike SignBinaries, an unavailable codesign here is an error: the user
// explicitly requested notarization, so a silent skip would be surprising.
func NotarizeBinaries(ctx context.Context, fs io.Medium, cfg SignConfig, artifacts []Artifact) error {
	if !cfg.Enabled || !cfg.MacOS.Notarize {
		return nil
	}

	if runtime.GOOS != "darwin" {
		return nil
	}

	signer := NewMacOSSigner(cfg.MacOS)
	if !signer.Available() {
		return errors.New("notarization requested but codesign not available")
	}

	for _, artifact := range artifacts {
		if artifact.OS != "darwin" {
			continue
		}

		fmt.Printf("  Notarizing %s (this may take a few minutes)...\n", artifact.Path)
		if err := signer.Notarize(ctx, fs, artifact.Path); err != nil {
			return fmt.Errorf("failed to notarize %s: %w", artifact.Path, err)
		}
	}

	return nil
}

// SignChecksums signs the checksums file with GPG.
// A missing/unconfigured GPG setup is a silent no-op, matching SignBinaries.
func SignChecksums(ctx context.Context, fs io.Medium, cfg SignConfig, checksumFile string) error {
	if !cfg.Enabled {
		return nil
	}

	signer := NewGPGSigner(cfg.GPG.Key)
	if !signer.Available() {
		return nil // Silently skip if not configured
	}

	fmt.Printf("  Signing %s with GPG...\n", checksumFile)
	if err := signer.Sign(ctx, fs, checksumFile); err != nil {
		return fmt.Errorf("failed to sign checksums: %w", err)
	}

	return nil
}
|
||||
|
|
@ -1,83 +0,0 @@
|
|||
// Package signing provides code signing for build artifacts.
|
||||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Signer defines the interface for code signing implementations.
|
||||
type Signer interface {
|
||||
// Name returns the signer's identifier.
|
||||
Name() string
|
||||
// Available checks if this signer can be used.
|
||||
Available() bool
|
||||
// Sign signs the artifact at the given path.
|
||||
Sign(ctx context.Context, fs io.Medium, path string) error
|
||||
}
|
||||
|
||||
// SignConfig holds signing configuration from .core/build.yaml.
|
||||
type SignConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
GPG GPGConfig `yaml:"gpg,omitempty"`
|
||||
MacOS MacOSConfig `yaml:"macos,omitempty"`
|
||||
Windows WindowsConfig `yaml:"windows,omitempty"`
|
||||
}
|
||||
|
||||
// GPGConfig holds GPG signing configuration.
|
||||
type GPGConfig struct {
|
||||
Key string `yaml:"key"` // Key ID or fingerprint, supports $ENV
|
||||
}
|
||||
|
||||
// MacOSConfig holds macOS codesign configuration.
|
||||
type MacOSConfig struct {
|
||||
Identity string `yaml:"identity"` // Developer ID Application: ...
|
||||
Notarize bool `yaml:"notarize"` // Submit to Apple for notarization
|
||||
AppleID string `yaml:"apple_id"` // Apple account email
|
||||
TeamID string `yaml:"team_id"` // Team ID
|
||||
AppPassword string `yaml:"app_password"` // App-specific password
|
||||
}
|
||||
|
||||
// WindowsConfig holds Windows signtool configuration (placeholder).
|
||||
type WindowsConfig struct {
|
||||
Certificate string `yaml:"certificate"` // Path to .pfx
|
||||
Password string `yaml:"password"` // Certificate password
|
||||
}
|
||||
|
||||
// DefaultSignConfig returns sensible defaults.
|
||||
func DefaultSignConfig() SignConfig {
|
||||
return SignConfig{
|
||||
Enabled: true,
|
||||
GPG: GPGConfig{
|
||||
Key: os.Getenv("GPG_KEY_ID"),
|
||||
},
|
||||
MacOS: MacOSConfig{
|
||||
Identity: os.Getenv("CODESIGN_IDENTITY"),
|
||||
AppleID: os.Getenv("APPLE_ID"),
|
||||
TeamID: os.Getenv("APPLE_TEAM_ID"),
|
||||
AppPassword: os.Getenv("APPLE_APP_PASSWORD"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ExpandEnv expands environment variables in config values.
|
||||
func (c *SignConfig) ExpandEnv() {
|
||||
c.GPG.Key = expandEnv(c.GPG.Key)
|
||||
c.MacOS.Identity = expandEnv(c.MacOS.Identity)
|
||||
c.MacOS.AppleID = expandEnv(c.MacOS.AppleID)
|
||||
c.MacOS.TeamID = expandEnv(c.MacOS.TeamID)
|
||||
c.MacOS.AppPassword = expandEnv(c.MacOS.AppPassword)
|
||||
c.Windows.Certificate = expandEnv(c.Windows.Certificate)
|
||||
c.Windows.Password = expandEnv(c.Windows.Password)
|
||||
}
|
||||
|
||||
// expandEnv expands $VAR or ${VAR} in a string.
|
||||
func expandEnv(s string) string {
|
||||
if strings.HasPrefix(s, "$") {
|
||||
return os.ExpandEnv(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
|
@ -1,343 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSignBinaries_Good_SkipsNonDarwin(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{
|
||||
Identity: "Developer ID Application: Test",
|
||||
},
|
||||
}
|
||||
|
||||
// Create fake artifact for linux
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "linux", Arch: "amd64"},
|
||||
}
|
||||
|
||||
// Should not error even though binary doesn't exist (skips non-darwin)
|
||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignBinaries_Good_DisabledConfig(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignBinaries_Good_SkipsOnNonMacOS(t *testing.T) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
t.Skip("Skipping on macOS - this tests non-macOS behavior")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{
|
||||
Identity: "Developer ID Application: Test",
|
||||
},
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := SignBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotarizeBinaries_Good_DisabledConfig(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotarizeBinaries_Good_NotarizeDisabled(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{
|
||||
Notarize: false,
|
||||
},
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/tmp/test-binary", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(ctx, fs, cfg, artifacts)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignChecksums_Good_SkipsNoKey(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
GPG: GPGConfig{
|
||||
Key: "", // No key configured
|
||||
},
|
||||
}
|
||||
|
||||
// Should silently skip when no key
|
||||
err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignChecksums_Good_Disabled(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fs := io.Local
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
}
|
||||
|
||||
err := SignChecksums(ctx, fs, cfg, "/tmp/CHECKSUMS.txt")
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultSignConfig(t *testing.T) {
|
||||
cfg := DefaultSignConfig()
|
||||
assert.True(t, cfg.Enabled)
|
||||
}
|
||||
|
||||
func TestSignConfig_ExpandEnv(t *testing.T) {
|
||||
t.Setenv("TEST_KEY", "ABC")
|
||||
cfg := SignConfig{
|
||||
GPG: GPGConfig{Key: "$TEST_KEY"},
|
||||
}
|
||||
cfg.ExpandEnv()
|
||||
assert.Equal(t, "ABC", cfg.GPG.Key)
|
||||
}
|
||||
|
||||
func TestWindowsSigner_Good(t *testing.T) {
|
||||
fs := io.Local
|
||||
s := NewWindowsSigner(WindowsConfig{})
|
||||
assert.Equal(t, "signtool", s.Name())
|
||||
assert.False(t, s.Available())
|
||||
assert.NoError(t, s.Sign(context.Background(), fs, "test.exe"))
|
||||
}
|
||||
|
||||
// mockSigner is a test double that records calls to Sign.
|
||||
type mockSigner struct {
|
||||
name string
|
||||
available bool
|
||||
signedPaths []string
|
||||
signError error
|
||||
}
|
||||
|
||||
func (m *mockSigner) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
func (m *mockSigner) Available() bool {
|
||||
return m.available
|
||||
}
|
||||
|
||||
func (m *mockSigner) Sign(ctx context.Context, fs io.Medium, path string) error {
|
||||
m.signedPaths = append(m.signedPaths, path)
|
||||
return m.signError
|
||||
}
|
||||
|
||||
// Verify mockSigner implements Signer
|
||||
var _ Signer = (*mockSigner)(nil)
|
||||
|
||||
func TestSignBinaries_Good_MockSigner(t *testing.T) {
|
||||
t.Run("signs only darwin artifacts", func(t *testing.T) {
|
||||
artifacts := []Artifact{
|
||||
{Path: "/dist/linux_amd64/myapp", OS: "linux", Arch: "amd64"},
|
||||
{Path: "/dist/darwin_arm64/myapp", OS: "darwin", Arch: "arm64"},
|
||||
{Path: "/dist/windows_amd64/myapp.exe", OS: "windows", Arch: "amd64"},
|
||||
{Path: "/dist/darwin_amd64/myapp", OS: "darwin", Arch: "amd64"},
|
||||
}
|
||||
|
||||
// SignBinaries filters to darwin only and calls signer.Sign for each.
|
||||
// We can verify the logic by checking that non-darwin artifacts are skipped.
|
||||
// Since SignBinaries uses NewMacOSSigner internally, we test the filtering
|
||||
// by passing only darwin artifacts and confirming non-darwin are skipped.
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{Identity: ""},
|
||||
}
|
||||
|
||||
// With empty identity, Available() returns false, so Sign is never called.
|
||||
// This verifies the short-circuit behavior.
|
||||
ctx := context.Background()
|
||||
err := SignBinaries(ctx, io.Local, cfg, artifacts)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("skips all when enabled is false", func(t *testing.T) {
|
||||
artifacts := []Artifact{
|
||||
{Path: "/dist/darwin_arm64/myapp", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
cfg := SignConfig{Enabled: false}
|
||||
err := SignBinaries(context.Background(), io.Local, cfg, artifacts)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("handles empty artifact list", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{Identity: "Developer ID"},
|
||||
}
|
||||
err := SignBinaries(context.Background(), io.Local, cfg, []Artifact{})
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSignChecksums_Good_MockSigner(t *testing.T) {
|
||||
t.Run("skips when GPG key is empty", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
GPG: GPGConfig{Key: ""},
|
||||
}
|
||||
|
||||
err := SignChecksums(context.Background(), io.Local, cfg, "/tmp/CHECKSUMS.txt")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("skips when disabled", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
GPG: GPGConfig{Key: "ABCD1234"},
|
||||
}
|
||||
|
||||
err := SignChecksums(context.Background(), io.Local, cfg, "/tmp/CHECKSUMS.txt")
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNotarizeBinaries_Good_MockSigner(t *testing.T) {
|
||||
t.Run("skips when notarize is false", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{Notarize: false},
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/dist/darwin_arm64/myapp", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(context.Background(), io.Local, cfg, artifacts)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("skips when disabled", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
Enabled: false,
|
||||
MacOS: MacOSConfig{Notarize: true},
|
||||
}
|
||||
|
||||
artifacts := []Artifact{
|
||||
{Path: "/dist/darwin_arm64/myapp", OS: "darwin", Arch: "arm64"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(context.Background(), io.Local, cfg, artifacts)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("handles empty artifact list", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
Enabled: true,
|
||||
MacOS: MacOSConfig{Notarize: true, Identity: "Dev ID"},
|
||||
}
|
||||
|
||||
err := NotarizeBinaries(context.Background(), io.Local, cfg, []Artifact{})
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestExpandEnv_Good(t *testing.T) {
|
||||
t.Run("expands all config fields", func(t *testing.T) {
|
||||
t.Setenv("TEST_GPG_KEY", "GPG123")
|
||||
t.Setenv("TEST_IDENTITY", "Developer ID Application: Test")
|
||||
t.Setenv("TEST_APPLE_ID", "test@apple.com")
|
||||
t.Setenv("TEST_TEAM_ID", "TEAM123")
|
||||
t.Setenv("TEST_APP_PASSWORD", "secret")
|
||||
t.Setenv("TEST_CERT_PATH", "/path/to/cert.pfx")
|
||||
t.Setenv("TEST_CERT_PASS", "certpass")
|
||||
|
||||
cfg := SignConfig{
|
||||
GPG: GPGConfig{Key: "$TEST_GPG_KEY"},
|
||||
MacOS: MacOSConfig{
|
||||
Identity: "$TEST_IDENTITY",
|
||||
AppleID: "$TEST_APPLE_ID",
|
||||
TeamID: "$TEST_TEAM_ID",
|
||||
AppPassword: "$TEST_APP_PASSWORD",
|
||||
},
|
||||
Windows: WindowsConfig{
|
||||
Certificate: "$TEST_CERT_PATH",
|
||||
Password: "$TEST_CERT_PASS",
|
||||
},
|
||||
}
|
||||
|
||||
cfg.ExpandEnv()
|
||||
|
||||
assert.Equal(t, "GPG123", cfg.GPG.Key)
|
||||
assert.Equal(t, "Developer ID Application: Test", cfg.MacOS.Identity)
|
||||
assert.Equal(t, "test@apple.com", cfg.MacOS.AppleID)
|
||||
assert.Equal(t, "TEAM123", cfg.MacOS.TeamID)
|
||||
assert.Equal(t, "secret", cfg.MacOS.AppPassword)
|
||||
assert.Equal(t, "/path/to/cert.pfx", cfg.Windows.Certificate)
|
||||
assert.Equal(t, "certpass", cfg.Windows.Password)
|
||||
})
|
||||
|
||||
t.Run("preserves non-env values", func(t *testing.T) {
|
||||
cfg := SignConfig{
|
||||
GPG: GPGConfig{Key: "literal-key"},
|
||||
MacOS: MacOSConfig{
|
||||
Identity: "Developer ID Application: Literal",
|
||||
},
|
||||
}
|
||||
|
||||
cfg.ExpandEnv()
|
||||
|
||||
assert.Equal(t, "literal-key", cfg.GPG.Key)
|
||||
assert.Equal(t, "Developer ID Application: Literal", cfg.MacOS.Identity)
|
||||
})
|
||||
}
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
package signing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// WindowsSigner signs binaries using Windows signtool (placeholder).
|
||||
type WindowsSigner struct {
|
||||
config WindowsConfig
|
||||
}
|
||||
|
||||
// Compile-time interface check.
|
||||
var _ Signer = (*WindowsSigner)(nil)
|
||||
|
||||
// NewWindowsSigner creates a new Windows signer.
|
||||
func NewWindowsSigner(cfg WindowsConfig) *WindowsSigner {
|
||||
return &WindowsSigner{config: cfg}
|
||||
}
|
||||
|
||||
// Name returns "signtool".
|
||||
func (s *WindowsSigner) Name() string {
|
||||
return "signtool"
|
||||
}
|
||||
|
||||
// Available returns false (not yet implemented).
|
||||
func (s *WindowsSigner) Available() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Sign is a placeholder that does nothing.
|
||||
func (s *WindowsSigner) Sign(ctx context.Context, fs io.Medium, binary string) error {
|
||||
// TODO: Implement Windows signing
|
||||
return nil
|
||||
}
|
||||
25
build/testdata/config-project/.core/build.yaml
vendored
25
build/testdata/config-project/.core/build.yaml
vendored
|
|
@ -1,25 +0,0 @@
|
|||
# Example build configuration for Core build system
|
||||
version: 1
|
||||
|
||||
project:
|
||||
name: example-cli
|
||||
description: An example CLI application
|
||||
main: ./cmd/example
|
||||
binary: example
|
||||
|
||||
build:
|
||||
cgo: false
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -s
|
||||
- -w
|
||||
env: []
|
||||
|
||||
targets:
|
||||
- os: linux
|
||||
arch: amd64
|
||||
- os: darwin
|
||||
arch: arm64
|
||||
- os: windows
|
||||
arch: amd64
|
||||
2
build/testdata/cpp-project/CMakeLists.txt
vendored
2
build/testdata/cpp-project/CMakeLists.txt
vendored
|
|
@ -1,2 +0,0 @@
|
|||
cmake_minimum_required(VERSION 3.16)
|
||||
project(TestCPP)
|
||||
0
build/testdata/empty-project/.gitkeep
vendored
0
build/testdata/empty-project/.gitkeep
vendored
3
build/testdata/go-project/go.mod
vendored
3
build/testdata/go-project/go.mod
vendored
|
|
@ -1,3 +0,0 @@
|
|||
module example.com/go-project
|
||||
|
||||
go 1.21
|
||||
3
build/testdata/multi-project/go.mod
vendored
3
build/testdata/multi-project/go.mod
vendored
|
|
@ -1,3 +0,0 @@
|
|||
module example.com/multi-project
|
||||
|
||||
go 1.21
|
||||
4
build/testdata/multi-project/package.json
vendored
4
build/testdata/multi-project/package.json
vendored
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"name": "multi-project",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
4
build/testdata/node-project/package.json
vendored
4
build/testdata/node-project/package.json
vendored
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"name": "node-project",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
4
build/testdata/php-project/composer.json
vendored
4
build/testdata/php-project/composer.json
vendored
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"name": "vendor/php-project",
|
||||
"type": "library"
|
||||
}
|
||||
3
build/testdata/wails-project/go.mod
vendored
3
build/testdata/wails-project/go.mod
vendored
|
|
@ -1,3 +0,0 @@
|
|||
module example.com/wails-project
|
||||
|
||||
go 1.21
|
||||
4
build/testdata/wails-project/wails.json
vendored
4
build/testdata/wails-project/wails.json
vendored
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"name": "wails-project",
|
||||
"outputfilename": "wails-project"
|
||||
}
|
||||
239
cmd/ci/ci.go
239
cmd/ci/ci.go
|
|
@ -1,239 +0,0 @@
|
|||
package ci
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-devops/release"
|
||||
)
|
||||
|
||||
// Style aliases from shared
|
||||
var (
|
||||
headerStyle = cli.RepoStyle
|
||||
successStyle = cli.SuccessStyle
|
||||
errorStyle = cli.ErrorStyle
|
||||
dimStyle = cli.DimStyle
|
||||
valueStyle = cli.ValueStyle
|
||||
)
|
||||
|
||||
// Flag variables for ci command
|
||||
var (
|
||||
ciGoForLaunch bool
|
||||
ciVersion string
|
||||
ciDraft bool
|
||||
ciPrerelease bool
|
||||
)
|
||||
|
||||
// Flag variables for changelog subcommand
|
||||
var (
|
||||
changelogFromRef string
|
||||
changelogToRef string
|
||||
)
|
||||
|
||||
var ciCmd = &cli.Command{
|
||||
Use: "ci",
|
||||
Short: i18n.T("cmd.ci.short"),
|
||||
Long: i18n.T("cmd.ci.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
dryRun := !ciGoForLaunch
|
||||
return runCIPublish(dryRun, ciVersion, ciDraft, ciPrerelease)
|
||||
},
|
||||
}
|
||||
|
||||
var ciInitCmd = &cli.Command{
|
||||
Use: "init",
|
||||
Short: i18n.T("cmd.ci.init.short"),
|
||||
Long: i18n.T("cmd.ci.init.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runCIReleaseInit()
|
||||
},
|
||||
}
|
||||
|
||||
var ciChangelogCmd = &cli.Command{
|
||||
Use: "changelog",
|
||||
Short: i18n.T("cmd.ci.changelog.short"),
|
||||
Long: i18n.T("cmd.ci.changelog.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runChangelog(changelogFromRef, changelogToRef)
|
||||
},
|
||||
}
|
||||
|
||||
var ciVersionCmd = &cli.Command{
|
||||
Use: "version",
|
||||
Short: i18n.T("cmd.ci.version.short"),
|
||||
Long: i18n.T("cmd.ci.version.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runCIReleaseVersion()
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Main ci command flags
|
||||
ciCmd.Flags().BoolVar(&ciGoForLaunch, "we-are-go-for-launch", false, i18n.T("cmd.ci.flag.go_for_launch"))
|
||||
ciCmd.Flags().StringVar(&ciVersion, "version", "", i18n.T("cmd.ci.flag.version"))
|
||||
ciCmd.Flags().BoolVar(&ciDraft, "draft", false, i18n.T("cmd.ci.flag.draft"))
|
||||
ciCmd.Flags().BoolVar(&ciPrerelease, "prerelease", false, i18n.T("cmd.ci.flag.prerelease"))
|
||||
|
||||
// Changelog subcommand flags
|
||||
ciChangelogCmd.Flags().StringVar(&changelogFromRef, "from", "", i18n.T("cmd.ci.changelog.flag.from"))
|
||||
ciChangelogCmd.Flags().StringVar(&changelogToRef, "to", "", i18n.T("cmd.ci.changelog.flag.to"))
|
||||
|
||||
// Add subcommands
|
||||
ciCmd.AddCommand(ciInitCmd)
|
||||
ciCmd.AddCommand(ciChangelogCmd)
|
||||
ciCmd.AddCommand(ciVersionCmd)
|
||||
}
|
||||
|
||||
// runCIPublish publishes pre-built artifacts from dist/.
|
||||
func runCIPublish(dryRun bool, version string, draft, prerelease bool) error {
|
||||
ctx := context.Background()
|
||||
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return cli.WrapVerb(err, "get", "working directory")
|
||||
}
|
||||
|
||||
cfg, err := release.LoadConfig(projectDir)
|
||||
if err != nil {
|
||||
return cli.WrapVerb(err, "load", "config")
|
||||
}
|
||||
|
||||
if version != "" {
|
||||
cfg.SetVersion(version)
|
||||
}
|
||||
|
||||
if draft || prerelease {
|
||||
for i := range cfg.Publishers {
|
||||
if draft {
|
||||
cfg.Publishers[i].Draft = true
|
||||
}
|
||||
if prerelease {
|
||||
cfg.Publishers[i].Prerelease = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cli.Print("%s %s\n", headerStyle.Render(i18n.T("cmd.ci.label.ci")), i18n.T("cmd.ci.publishing"))
|
||||
if dryRun {
|
||||
cli.Print(" %s\n", dimStyle.Render(i18n.T("cmd.ci.dry_run_hint")))
|
||||
} else {
|
||||
cli.Print(" %s\n", successStyle.Render(i18n.T("cmd.ci.go_for_launch")))
|
||||
}
|
||||
cli.Blank()
|
||||
|
||||
if len(cfg.Publishers) == 0 {
|
||||
return errors.New(i18n.T("cmd.ci.error.no_publishers"))
|
||||
}
|
||||
|
||||
rel, err := release.Publish(ctx, cfg, dryRun)
|
||||
if err != nil {
|
||||
cli.Print("%s %v\n", errorStyle.Render(i18n.Label("error")), err)
|
||||
return err
|
||||
}
|
||||
|
||||
cli.Blank()
|
||||
cli.Print("%s %s\n", successStyle.Render(i18n.T("i18n.done.pass")), i18n.T("cmd.ci.publish_completed"))
|
||||
cli.Print(" %s %s\n", i18n.Label("version"), valueStyle.Render(rel.Version))
|
||||
cli.Print(" %s %d\n", i18n.T("cmd.ci.label.artifacts"), len(rel.Artifacts))
|
||||
|
||||
if !dryRun {
|
||||
for _, pub := range cfg.Publishers {
|
||||
cli.Print(" %s %s\n", i18n.T("cmd.ci.label.published"), valueStyle.Render(pub.Type))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runCIReleaseInit scaffolds a release config.
|
||||
func runCIReleaseInit() error {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return cli.Err("%s: %w", i18n.T("i18n.fail.get", "working directory"), err)
|
||||
}
|
||||
|
||||
cli.Print("%s %s\n\n", dimStyle.Render(i18n.Label("init")), i18n.T("cmd.ci.init.initializing"))
|
||||
|
||||
if release.ConfigExists(cwd) {
|
||||
cli.Text(i18n.T("cmd.ci.init.already_initialized"))
|
||||
return nil
|
||||
}
|
||||
|
||||
cfg := release.DefaultConfig()
|
||||
if err := release.WriteConfig(cfg, cwd); err != nil {
|
||||
return cli.Err("%s: %w", i18n.T("i18n.fail.create", "config"), err)
|
||||
}
|
||||
|
||||
cli.Blank()
|
||||
cli.Print("%s %s\n", successStyle.Render("v"), i18n.T("cmd.ci.init.created_config"))
|
||||
cli.Blank()
|
||||
cli.Text(i18n.T("cmd.ci.init.next_steps"))
|
||||
cli.Print(" %s\n", i18n.T("cmd.ci.init.edit_config"))
|
||||
cli.Print(" %s\n", i18n.T("cmd.ci.init.run_ci"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runChangelog generates a changelog between two git refs.
|
||||
func runChangelog(fromRef, toRef string) error {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return cli.Err("%s: %w", i18n.T("i18n.fail.get", "working directory"), err)
|
||||
}
|
||||
|
||||
if fromRef == "" || toRef == "" {
|
||||
tag, err := latestTag(cwd)
|
||||
if err == nil {
|
||||
if fromRef == "" {
|
||||
fromRef = tag
|
||||
}
|
||||
if toRef == "" {
|
||||
toRef = "HEAD"
|
||||
}
|
||||
} else {
|
||||
cli.Text(i18n.T("cmd.ci.changelog.no_tags"))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
cli.Print("%s %s..%s\n\n", dimStyle.Render(i18n.T("cmd.ci.changelog.generating")), fromRef, toRef)
|
||||
|
||||
changelog, err := release.Generate(cwd, fromRef, toRef)
|
||||
if err != nil {
|
||||
return cli.Err("%s: %w", i18n.T("i18n.fail.generate", "changelog"), err)
|
||||
}
|
||||
|
||||
cli.Text(changelog)
|
||||
return nil
|
||||
}
|
||||
|
||||
// runCIReleaseVersion shows the determined version.
|
||||
func runCIReleaseVersion() error {
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return cli.WrapVerb(err, "get", "working directory")
|
||||
}
|
||||
|
||||
version, err := release.DetermineVersion(projectDir)
|
||||
if err != nil {
|
||||
return cli.WrapVerb(err, "determine", "version")
|
||||
}
|
||||
|
||||
cli.Print("%s %s\n", i18n.Label("version"), valueStyle.Render(version))
|
||||
return nil
|
||||
}
|
||||
|
||||
func latestTag(dir string) (string, error) {
|
||||
cmd := exec.Command("git", "describe", "--tags", "--abbrev=0")
|
||||
cmd.Dir = dir
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
}
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
// Package ci provides release lifecycle commands for CI/CD pipelines.
|
||||
//
|
||||
// Commands:
|
||||
// - ci init: scaffold release config
|
||||
// - ci changelog: generate changelog from git history
|
||||
// - ci version: show determined version
|
||||
// - ci publish: publish pre-built artifacts (dry-run by default)
|
||||
//
|
||||
// Configuration via .core/release.yaml.
|
||||
package ci
|
||||
|
||||
import (
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.RegisterCommands(AddCICommands)
|
||||
}
|
||||
|
||||
// AddCICommands registers the 'ci' command and all subcommands.
|
||||
func AddCICommands(root *cli.Command) {
|
||||
root.AddCommand(ciCmd)
|
||||
}
|
||||
136
cmd/sdk/cmd.go
136
cmd/sdk/cmd.go
|
|
@ -1,136 +0,0 @@
|
|||
// Package sdkcmd provides SDK validation and API compatibility commands.
|
||||
//
|
||||
// Commands:
|
||||
// - sdk diff: check for breaking API changes between spec versions
|
||||
// - sdk validate: validate OpenAPI spec syntax
|
||||
//
|
||||
// For SDK generation, use: core build sdk
|
||||
package sdkcmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/sdk"
|
||||
"forge.lthn.ai/core/cli/pkg/cli"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.RegisterCommands(AddSDKCommands)
|
||||
}
|
||||
|
||||
// SDK styles (aliases to shared)
|
||||
var (
|
||||
sdkHeaderStyle = cli.TitleStyle
|
||||
sdkSuccessStyle = cli.SuccessStyle
|
||||
sdkErrorStyle = cli.ErrorStyle
|
||||
sdkDimStyle = cli.DimStyle
|
||||
)
|
||||
|
||||
var sdkCmd = &cli.Command{
|
||||
Use: "sdk",
|
||||
Short: i18n.T("cmd.sdk.short"),
|
||||
Long: i18n.T("cmd.sdk.long"),
|
||||
}
|
||||
|
||||
var diffBasePath string
|
||||
var diffSpecPath string
|
||||
|
||||
var sdkDiffCmd = &cli.Command{
|
||||
Use: "diff",
|
||||
Short: i18n.T("cmd.sdk.diff.short"),
|
||||
Long: i18n.T("cmd.sdk.diff.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runSDKDiff(diffBasePath, diffSpecPath)
|
||||
},
|
||||
}
|
||||
|
||||
var validateSpecPath string
|
||||
|
||||
var sdkValidateCmd = &cli.Command{
|
||||
Use: "validate",
|
||||
Short: i18n.T("cmd.sdk.validate.short"),
|
||||
Long: i18n.T("cmd.sdk.validate.long"),
|
||||
RunE: func(cmd *cli.Command, args []string) error {
|
||||
return runSDKValidate(validateSpecPath)
|
||||
},
|
||||
}
|
||||
|
||||
// AddSDKCommands registers the 'sdk' command and all subcommands.
|
||||
func AddSDKCommands(root *cli.Command) {
|
||||
// sdk diff flags
|
||||
sdkDiffCmd.Flags().StringVar(&diffBasePath, "base", "", i18n.T("cmd.sdk.diff.flag.base"))
|
||||
sdkDiffCmd.Flags().StringVar(&diffSpecPath, "spec", "", i18n.T("cmd.sdk.diff.flag.spec"))
|
||||
|
||||
// sdk validate flags
|
||||
sdkValidateCmd.Flags().StringVar(&validateSpecPath, "spec", "", i18n.T("common.flag.spec"))
|
||||
|
||||
// Add subcommands
|
||||
sdkCmd.AddCommand(sdkDiffCmd)
|
||||
sdkCmd.AddCommand(sdkValidateCmd)
|
||||
|
||||
root.AddCommand(sdkCmd)
|
||||
}
|
||||
|
||||
func runSDKDiff(basePath, specPath string) error {
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("i18n.fail.get", "working directory"), err)
|
||||
}
|
||||
|
||||
if specPath == "" {
|
||||
s := sdk.New(projectDir, nil)
|
||||
specPath, err = s.DetectSpec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if basePath == "" {
|
||||
return errors.New(i18n.T("cmd.sdk.diff.error.base_required"))
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", sdkHeaderStyle.Render(i18n.T("cmd.sdk.diff.label")), i18n.ProgressSubject("check", "breaking changes"))
|
||||
fmt.Printf(" %s %s\n", i18n.T("cmd.sdk.diff.base_label"), sdkDimStyle.Render(basePath))
|
||||
fmt.Printf(" %s %s\n", i18n.Label("current"), sdkDimStyle.Render(specPath))
|
||||
fmt.Println()
|
||||
|
||||
result, err := sdk.Diff(basePath, specPath)
|
||||
if err != nil {
|
||||
return cli.Exit(2, cli.Wrap(err, i18n.Label("error")))
|
||||
}
|
||||
|
||||
if result.Breaking {
|
||||
fmt.Printf("%s %s\n", sdkErrorStyle.Render(i18n.T("cmd.sdk.diff.breaking")), result.Summary)
|
||||
for _, change := range result.Changes {
|
||||
fmt.Printf(" - %s\n", change)
|
||||
}
|
||||
return cli.Exit(1, cli.Err("%s", result.Summary))
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", sdkSuccessStyle.Render(i18n.T("cmd.sdk.label.ok")), result.Summary)
|
||||
return nil
|
||||
}
|
||||
|
||||
func runSDKValidate(specPath string) error {
|
||||
projectDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", i18n.T("i18n.fail.get", "working directory"), err)
|
||||
}
|
||||
|
||||
s := sdk.New(projectDir, &sdk.Config{Spec: specPath})
|
||||
|
||||
fmt.Printf("%s %s\n", sdkHeaderStyle.Render(i18n.T("cmd.sdk.label.sdk")), i18n.T("cmd.sdk.validate.validating"))
|
||||
|
||||
detectedPath, err := s.DetectSpec()
|
||||
if err != nil {
|
||||
fmt.Printf("%s %v\n", sdkErrorStyle.Render(i18n.Label("error")), err)
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf(" %s %s\n", i18n.Label("spec"), sdkDimStyle.Render(detectedPath))
|
||||
fmt.Printf("%s %s\n", sdkSuccessStyle.Render(i18n.T("cmd.sdk.label.ok")), i18n.T("cmd.sdk.validate.valid"))
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,314 +0,0 @@
|
|||
// Package release provides release automation with changelog generation and publishing.
|
||||
package release
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
// ConventionalCommit represents a parsed conventional commit.
|
||||
type ConventionalCommit struct {
|
||||
Type string // feat, fix, etc.
|
||||
Scope string // optional scope in parentheses
|
||||
Description string // commit description
|
||||
Hash string // short commit hash
|
||||
Breaking bool // has breaking change indicator
|
||||
}
|
||||
|
||||
// commitTypeLabels maps commit types to human-readable labels for the changelog.
|
||||
var commitTypeLabels = map[string]string{
|
||||
"feat": "Features",
|
||||
"fix": "Bug Fixes",
|
||||
"perf": "Performance Improvements",
|
||||
"refactor": "Code Refactoring",
|
||||
"docs": "Documentation",
|
||||
"style": "Styles",
|
||||
"test": "Tests",
|
||||
"build": "Build System",
|
||||
"ci": "Continuous Integration",
|
||||
"chore": "Chores",
|
||||
"revert": "Reverts",
|
||||
}
|
||||
|
||||
// commitTypeOrder defines the order of sections in the changelog.
|
||||
var commitTypeOrder = []string{
|
||||
"feat",
|
||||
"fix",
|
||||
"perf",
|
||||
"refactor",
|
||||
"docs",
|
||||
"style",
|
||||
"test",
|
||||
"build",
|
||||
"ci",
|
||||
"chore",
|
||||
"revert",
|
||||
}
|
||||
|
||||
// conventionalCommitRegex matches conventional commit format.
|
||||
// Examples: "feat: add feature", "fix(scope): fix bug", "feat!: breaking change"
|
||||
var conventionalCommitRegex = regexp.MustCompile(`^(\w+)(?:\(([^)]+)\))?(!)?:\s*(.+)$`)
|
||||
|
||||
// Generate generates a markdown changelog from git commits between two refs.
|
||||
// If fromRef is empty, it uses the previous tag or initial commit.
|
||||
// If toRef is empty, it uses HEAD.
|
||||
func Generate(dir, fromRef, toRef string) (string, error) {
|
||||
if toRef == "" {
|
||||
toRef = "HEAD"
|
||||
}
|
||||
|
||||
// If fromRef is empty, try to find previous tag
|
||||
if fromRef == "" {
|
||||
prevTag, err := getPreviousTag(dir, toRef)
|
||||
if err != nil {
|
||||
// No previous tag, use initial commit
|
||||
fromRef = ""
|
||||
} else {
|
||||
fromRef = prevTag
|
||||
}
|
||||
}
|
||||
|
||||
// Get commits between refs
|
||||
commits, err := getCommits(dir, fromRef, toRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("changelog.Generate: failed to get commits: %w", err)
|
||||
}
|
||||
|
||||
// Parse conventional commits
|
||||
var parsedCommits []ConventionalCommit
|
||||
for _, commit := range commits {
|
||||
parsed := parseConventionalCommit(commit)
|
||||
if parsed != nil {
|
||||
parsedCommits = append(parsedCommits, *parsed)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate markdown
|
||||
return formatChangelog(parsedCommits, toRef), nil
|
||||
}
|
||||
|
||||
// GenerateWithConfig generates a changelog with filtering based on config.
|
||||
func GenerateWithConfig(dir, fromRef, toRef string, cfg *ChangelogConfig) (string, error) {
|
||||
if toRef == "" {
|
||||
toRef = "HEAD"
|
||||
}
|
||||
|
||||
// If fromRef is empty, try to find previous tag
|
||||
if fromRef == "" {
|
||||
prevTag, err := getPreviousTag(dir, toRef)
|
||||
if err != nil {
|
||||
fromRef = ""
|
||||
} else {
|
||||
fromRef = prevTag
|
||||
}
|
||||
}
|
||||
|
||||
// Get commits between refs
|
||||
commits, err := getCommits(dir, fromRef, toRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("changelog.GenerateWithConfig: failed to get commits: %w", err)
|
||||
}
|
||||
|
||||
// Build include/exclude sets
|
||||
includeSet := make(map[string]bool)
|
||||
excludeSet := make(map[string]bool)
|
||||
for _, t := range cfg.Include {
|
||||
includeSet[t] = true
|
||||
}
|
||||
for _, t := range cfg.Exclude {
|
||||
excludeSet[t] = true
|
||||
}
|
||||
|
||||
// Parse and filter conventional commits
|
||||
var parsedCommits []ConventionalCommit
|
||||
for _, commit := range commits {
|
||||
parsed := parseConventionalCommit(commit)
|
||||
if parsed == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply filters
|
||||
if len(includeSet) > 0 && !includeSet[parsed.Type] {
|
||||
continue
|
||||
}
|
||||
if excludeSet[parsed.Type] {
|
||||
continue
|
||||
}
|
||||
|
||||
parsedCommits = append(parsedCommits, *parsed)
|
||||
}
|
||||
|
||||
return formatChangelog(parsedCommits, toRef), nil
|
||||
}
|
||||
|
||||
// getPreviousTag resolves the most recent tag reachable from the parent of
// ref via `git describe --tags --abbrev=0 <ref>^`. It returns an error when
// no such tag exists or the ref is invalid.
func getPreviousTag(dir, ref string) (string, error) {
	describe := exec.Command("git", "describe", "--tags", "--abbrev=0", ref+"^")
	describe.Dir = dir
	out, err := describe.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
|
||||
|
||||
// getCommits lists non-merge commits reachable from toRef (and, when
// fromRef is set, not reachable from fromRef) as "hash subject" strings,
// one per commit, in `git log --oneline` order.
func getCommits(dir, fromRef, toRef string) ([]string, error) {
	// An empty fromRef means "everything up to toRef"; otherwise use a
	// fromRef..toRef range.
	rangeSpec := toRef
	if fromRef != "" {
		rangeSpec = fromRef + ".." + toRef
	}

	logCmd := exec.Command("git", "log", "--oneline", "--no-merges", rangeSpec)
	logCmd.Dir = dir
	output, err := logCmd.Output()
	if err != nil {
		return nil, err
	}

	// Collect non-empty lines from the log output.
	var commits []string
	scanner := bufio.NewScanner(bytes.NewReader(output))
	for scanner.Scan() {
		if line := scanner.Text(); line != "" {
			commits = append(commits, line)
		}
	}
	return commits, scanner.Err()
}
|
||||
|
||||
// parseConventionalCommit parses a git log --oneline output into a ConventionalCommit.
|
||||
// Returns nil if the commit doesn't follow conventional commit format.
|
||||
func parseConventionalCommit(commitLine string) *ConventionalCommit {
|
||||
// Split hash and subject
|
||||
parts := strings.SplitN(commitLine, " ", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil
|
||||
}
|
||||
|
||||
hash := parts[0]
|
||||
subject := parts[1]
|
||||
|
||||
// Match conventional commit format
|
||||
matches := conventionalCommitRegex.FindStringSubmatch(subject)
|
||||
if matches == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &ConventionalCommit{
|
||||
Type: strings.ToLower(matches[1]),
|
||||
Scope: matches[2],
|
||||
Breaking: matches[3] == "!",
|
||||
Description: matches[4],
|
||||
Hash: hash,
|
||||
}
|
||||
}
|
||||
|
||||
// formatChangelog formats parsed commits into markdown.
|
||||
func formatChangelog(commits []ConventionalCommit, version string) string {
|
||||
if len(commits) == 0 {
|
||||
return fmt.Sprintf("## %s\n\nNo notable changes.", version)
|
||||
}
|
||||
|
||||
// Group commits by type
|
||||
grouped := make(map[string][]ConventionalCommit)
|
||||
var breaking []ConventionalCommit
|
||||
|
||||
for _, commit := range commits {
|
||||
if commit.Breaking {
|
||||
breaking = append(breaking, commit)
|
||||
}
|
||||
grouped[commit.Type] = append(grouped[commit.Type], commit)
|
||||
}
|
||||
|
||||
var buf strings.Builder
|
||||
buf.WriteString(fmt.Sprintf("## %s\n\n", version))
|
||||
|
||||
// Breaking changes first
|
||||
if len(breaking) > 0 {
|
||||
buf.WriteString("### BREAKING CHANGES\n\n")
|
||||
for _, commit := range breaking {
|
||||
buf.WriteString(formatCommitLine(commit))
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
// Other sections in order
|
||||
for _, commitType := range commitTypeOrder {
|
||||
commits, ok := grouped[commitType]
|
||||
if !ok || len(commits) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
label, ok := commitTypeLabels[commitType]
|
||||
if !ok {
|
||||
label = cases.Title(language.English).String(commitType)
|
||||
}
|
||||
|
||||
buf.WriteString(fmt.Sprintf("### %s\n\n", label))
|
||||
for _, commit := range commits {
|
||||
buf.WriteString(formatCommitLine(commit))
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
// Any remaining types not in the order list
|
||||
var remainingTypes []string
|
||||
for commitType := range grouped {
|
||||
if !slices.Contains(commitTypeOrder, commitType) {
|
||||
remainingTypes = append(remainingTypes, commitType)
|
||||
}
|
||||
}
|
||||
slices.Sort(remainingTypes)
|
||||
|
||||
for _, commitType := range remainingTypes {
|
||||
commits := grouped[commitType]
|
||||
label := cases.Title(language.English).String(commitType)
|
||||
buf.WriteString(fmt.Sprintf("### %s\n\n", label))
|
||||
for _, commit := range commits {
|
||||
buf.WriteString(formatCommitLine(commit))
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(buf.String(), "\n")
|
||||
}
|
||||
|
||||
// formatCommitLine formats a single commit as a changelog line.
|
||||
func formatCommitLine(commit ConventionalCommit) string {
|
||||
var buf strings.Builder
|
||||
buf.WriteString("- ")
|
||||
|
||||
if commit.Scope != "" {
|
||||
buf.WriteString(fmt.Sprintf("**%s**: ", commit.Scope))
|
||||
}
|
||||
|
||||
buf.WriteString(commit.Description)
|
||||
buf.WriteString(fmt.Sprintf(" (%s)\n", commit.Hash))
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// ParseCommitType extracts the type from a conventional commit subject.
|
||||
// Returns empty string if not a conventional commit.
|
||||
func ParseCommitType(subject string) string {
|
||||
matches := conventionalCommitRegex.FindStringSubmatch(subject)
|
||||
if matches == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.ToLower(matches[1])
|
||||
}
|
||||
|
|
@ -1,695 +0,0 @@
|
|||
package release
|
||||
|
||||
import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
|
||||
|
||||
func TestParseConventionalCommit_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected *ConventionalCommit
|
||||
}{
|
||||
{
|
||||
name: "feat without scope",
|
||||
input: "abc1234 feat: add new feature",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "feat",
|
||||
Scope: "",
|
||||
Description: "add new feature",
|
||||
Hash: "abc1234",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fix with scope",
|
||||
input: "def5678 fix(auth): resolve login issue",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "fix",
|
||||
Scope: "auth",
|
||||
Description: "resolve login issue",
|
||||
Hash: "def5678",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "breaking change with exclamation",
|
||||
input: "ghi9012 feat!: breaking API change",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "feat",
|
||||
Scope: "",
|
||||
Description: "breaking API change",
|
||||
Hash: "ghi9012",
|
||||
Breaking: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "breaking change with scope",
|
||||
input: "jkl3456 fix(api)!: remove deprecated endpoint",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "fix",
|
||||
Scope: "api",
|
||||
Description: "remove deprecated endpoint",
|
||||
Hash: "jkl3456",
|
||||
Breaking: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "perf type",
|
||||
input: "mno7890 perf: optimize database queries",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "perf",
|
||||
Scope: "",
|
||||
Description: "optimize database queries",
|
||||
Hash: "mno7890",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "chore type",
|
||||
input: "pqr1234 chore: update dependencies",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "chore",
|
||||
Scope: "",
|
||||
Description: "update dependencies",
|
||||
Hash: "pqr1234",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "uppercase type normalizes to lowercase",
|
||||
input: "stu5678 FEAT: uppercase type",
|
||||
expected: &ConventionalCommit{
|
||||
Type: "feat",
|
||||
Scope: "",
|
||||
Description: "uppercase type",
|
||||
Hash: "stu5678",
|
||||
Breaking: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := parseConventionalCommit(tc.input)
|
||||
assert.NotNil(t, result)
|
||||
assert.Equal(t, tc.expected.Type, result.Type)
|
||||
assert.Equal(t, tc.expected.Scope, result.Scope)
|
||||
assert.Equal(t, tc.expected.Description, result.Description)
|
||||
assert.Equal(t, tc.expected.Hash, result.Hash)
|
||||
assert.Equal(t, tc.expected.Breaking, result.Breaking)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestParseConventionalCommit_Bad verifies that subjects which do not
// follow the conventional-commit format (and bare hashes) parse to nil.
func TestParseConventionalCommit_Bad(t *testing.T) {
	tests := []struct {
		name  string
		input string
	}{
		{
			name:  "non-conventional commit",
			input: "abc1234 Update README",
		},
		{
			name:  "missing colon",
			input: "def5678 feat add feature",
		},
		{
			name:  "empty subject",
			input: "ghi9012",
		},
		{
			name:  "just hash",
			input: "abc1234",
		},
		{
			name:  "merge commit",
			input: "abc1234 Merge pull request #123",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := parseConventionalCommit(tc.input)
			assert.Nil(t, result)
		})
	}
}
|
||||
|
||||
func TestFormatChangelog_Good(t *testing.T) {
|
||||
t.Run("formats commits by type", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "feat", Description: "add feature A", Hash: "abc1234"},
|
||||
{Type: "fix", Description: "fix bug B", Hash: "def5678"},
|
||||
{Type: "feat", Description: "add feature C", Hash: "ghi9012"},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "## v1.0.0")
|
||||
assert.Contains(t, result, "### Features")
|
||||
assert.Contains(t, result, "### Bug Fixes")
|
||||
assert.Contains(t, result, "- add feature A (abc1234)")
|
||||
assert.Contains(t, result, "- fix bug B (def5678)")
|
||||
assert.Contains(t, result, "- add feature C (ghi9012)")
|
||||
})
|
||||
|
||||
t.Run("includes scope in output", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "feat", Scope: "api", Description: "add endpoint", Hash: "abc1234"},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "**api**: add endpoint")
|
||||
})
|
||||
|
||||
t.Run("breaking changes first", func(t *testing.T) {
|
||||
commits := []ConventionalCommit{
|
||||
{Type: "feat", Description: "normal feature", Hash: "abc1234"},
|
||||
{Type: "feat", Description: "breaking feature", Hash: "def5678", Breaking: true},
|
||||
}
|
||||
|
||||
result := formatChangelog(commits, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "### BREAKING CHANGES")
|
||||
// Breaking changes section should appear before Features
|
||||
breakingPos := indexOf(result, "BREAKING CHANGES")
|
||||
featuresPos := indexOf(result, "Features")
|
||||
assert.Less(t, breakingPos, featuresPos)
|
||||
})
|
||||
|
||||
t.Run("empty commits returns minimal changelog", func(t *testing.T) {
|
||||
result := formatChangelog([]ConventionalCommit{}, "v1.0.0")
|
||||
|
||||
assert.Contains(t, result, "## v1.0.0")
|
||||
assert.Contains(t, result, "No notable changes")
|
||||
})
|
||||
}
|
||||
|
||||
// TestParseCommitType_Good checks type extraction across plain, scoped,
// and breaking-change subjects.
func TestParseCommitType_Good(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{"feat: add feature", "feat"},
		{"fix(scope): fix bug", "fix"},
		{"perf!: breaking perf", "perf"},
		{"chore: update deps", "chore"},
	}

	for _, tc := range tests {
		t.Run(tc.input, func(t *testing.T) {
			result := ParseCommitType(tc.input)
			assert.Equal(t, tc.expected, result)
		})
	}
}
|
||||
|
||||
// TestParseCommitType_Bad verifies that non-conventional subjects yield an
// empty type.
func TestParseCommitType_Bad(t *testing.T) {
	tests := []struct {
		input string
	}{
		{"not a conventional commit"},
		{"Update README"},
		{"Merge branch 'main'"},
	}

	for _, tc := range tests {
		t.Run(tc.input, func(t *testing.T) {
			result := ParseCommitType(tc.input)
			assert.Empty(t, result)
		})
	}
}
|
||||
|
||||
// TestGenerateWithConfig_ConfigValues sanity-checks that ChangelogConfig
// include/exclude lists hold the values they were constructed with.
func TestGenerateWithConfig_ConfigValues(t *testing.T) {
	t.Run("config filters are parsed correctly", func(t *testing.T) {
		cfg := &ChangelogConfig{
			Include: []string{"feat", "fix"},
			Exclude: []string{"chore", "docs"},
		}

		// Verify the config values
		assert.Contains(t, cfg.Include, "feat")
		assert.Contains(t, cfg.Include, "fix")
		assert.Contains(t, cfg.Exclude, "chore")
		assert.Contains(t, cfg.Exclude, "docs")
	})
}
|
||||
|
||||
// indexOf returns the position of the first occurrence of substr in s, or
// -1 if not found. An empty substr matches at position 0.
//
// The previous implementation hand-rolled an O(len(s)*len(substr))
// byte-by-byte scan; strings.Index has identical semantics (including the
// empty-substring case) and an optimized implementation.
func indexOf(s, substr string) int {
	return strings.Index(s, substr)
}
|
||||
|
||||
// setupChangelogGitRepo creates a temporary directory with an initialized
// git repository and a configured user identity so commits made by the
// test helpers succeed. The directory is removed automatically via
// t.TempDir's cleanup.
func setupChangelogGitRepo(t *testing.T) string {
	t.Helper()
	dir := t.TempDir()

	// Initialize git repo
	cmd := exec.Command("git", "init")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	// Configure git user for commits (git refuses to commit without one)
	cmd = exec.Command("git", "config", "user.email", "test@example.com")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	return dir
}
|
||||
|
||||
// createChangelogCommit appends the message to a scratch file, stages
// everything, and commits with the message as the subject, so each call
// produces exactly one new commit in dir.
func createChangelogCommit(t *testing.T, dir, message string) {
	t.Helper()

	// Create or modify a file. The read error is deliberately ignored:
	// the file does not exist on the first call.
	filePath := filepath.Join(dir, "changelog_test.txt")
	content, _ := os.ReadFile(filePath)
	content = append(content, []byte(message+"\n")...)
	require.NoError(t, os.WriteFile(filePath, content, 0644))

	// Stage and commit
	cmd := exec.Command("git", "add", ".")
	cmd.Dir = dir
	require.NoError(t, cmd.Run())

	cmd = exec.Command("git", "commit", "-m", message)
	cmd.Dir = dir
	require.NoError(t, cmd.Run())
}
|
||||
|
||||
// createChangelogTag creates a lightweight tag at HEAD in the given
// repository directory.
func createChangelogTag(t *testing.T, dir, tag string) {
	t.Helper()
	cmd := exec.Command("git", "tag", tag)
	cmd.Dir = dir
	require.NoError(t, cmd.Run())
}
|
||||
|
||||
func TestGenerate_Good(t *testing.T) {
|
||||
t.Run("generates changelog from commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: add new feature")
|
||||
createChangelogCommit(t, dir, "fix: resolve bug")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "## HEAD")
|
||||
assert.Contains(t, changelog, "### Features")
|
||||
assert.Contains(t, changelog, "add new feature")
|
||||
assert.Contains(t, changelog, "### Bug Fixes")
|
||||
assert.Contains(t, changelog, "resolve bug")
|
||||
})
|
||||
|
||||
t.Run("generates changelog between tags", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: initial feature")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogTag(t, dir, "v1.1.0")
|
||||
|
||||
changelog, err := Generate(dir, "v1.0.0", "v1.1.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "## v1.1.0")
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.Contains(t, changelog, "bug fix")
|
||||
// Should NOT contain the initial feature
|
||||
assert.NotContains(t, changelog, "initial feature")
|
||||
})
|
||||
|
||||
t.Run("handles empty changelog when no conventional commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "Update README")
|
||||
createChangelogCommit(t, dir, "Merge branch main")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "No notable changes")
|
||||
})
|
||||
|
||||
t.Run("uses previous tag when fromRef is empty", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: old feature")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.NotContains(t, changelog, "old feature")
|
||||
})
|
||||
|
||||
t.Run("includes breaking changes", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat!: breaking API change")
|
||||
createChangelogCommit(t, dir, "feat: normal feature")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "### BREAKING CHANGES")
|
||||
assert.Contains(t, changelog, "breaking API change")
|
||||
})
|
||||
|
||||
t.Run("includes scope in output", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat(api): add endpoint")
|
||||
|
||||
changelog, err := Generate(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "**api**:")
|
||||
})
|
||||
}
|
||||
|
||||
// TestGenerate_Bad verifies that Generate surfaces git errors, e.g. when
// run in a directory that is not a git repository.
func TestGenerate_Bad(t *testing.T) {
	t.Run("returns error for non-git directory", func(t *testing.T) {
		dir := t.TempDir()

		_, err := Generate(dir, "", "HEAD")
		assert.Error(t, err)
	})
}
|
||||
|
||||
func TestGenerateWithConfig_Good(t *testing.T) {
|
||||
t.Run("filters commits by include list", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogCommit(t, dir, "chore: update deps")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.NotContains(t, changelog, "bug fix")
|
||||
assert.NotContains(t, changelog, "update deps")
|
||||
})
|
||||
|
||||
t.Run("filters commits by exclude list", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogCommit(t, dir, "chore: update deps")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Exclude: []string{"chore"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.Contains(t, changelog, "bug fix")
|
||||
assert.NotContains(t, changelog, "update deps")
|
||||
})
|
||||
|
||||
t.Run("combines include and exclude filters", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
createChangelogCommit(t, dir, "fix: bug fix")
|
||||
createChangelogCommit(t, dir, "perf: performance")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat", "fix", "perf"},
|
||||
Exclude: []string{"perf"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.Contains(t, changelog, "bug fix")
|
||||
assert.NotContains(t, changelog, "performance")
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetCommits_Good(t *testing.T) {
|
||||
t.Run("returns all commits when fromRef is empty", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogCommit(t, dir, "feat: second")
|
||||
createChangelogCommit(t, dir, "feat: third")
|
||||
|
||||
commits, err := getCommits(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, commits, 3)
|
||||
})
|
||||
|
||||
t.Run("returns commits between refs", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: second")
|
||||
createChangelogCommit(t, dir, "feat: third")
|
||||
|
||||
commits, err := getCommits(dir, "v1.0.0", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, commits, 2)
|
||||
})
|
||||
|
||||
t.Run("excludes merge commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: regular commit")
|
||||
// Merge commits are excluded by --no-merges flag
|
||||
// We can verify by checking the count matches expected
|
||||
|
||||
commits, err := getCommits(dir, "", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, commits, 1)
|
||||
assert.Contains(t, commits[0], "regular commit")
|
||||
})
|
||||
|
||||
t.Run("returns empty slice for no commits in range", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: only commit")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
|
||||
commits, err := getCommits(dir, "v1.0.0", "HEAD")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Empty(t, commits)
|
||||
})
|
||||
}
|
||||
|
||||
// TestGetCommits_Bad verifies error propagation from git for invalid refs
// and for directories that are not repositories.
func TestGetCommits_Bad(t *testing.T) {
	t.Run("returns error for invalid ref", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: commit")

		_, err := getCommits(dir, "nonexistent-tag", "HEAD")
		assert.Error(t, err)
	})

	t.Run("returns error for non-git directory", func(t *testing.T) {
		dir := t.TempDir()

		_, err := getCommits(dir, "", "HEAD")
		assert.Error(t, err)
	})
}
|
||||
|
||||
// TestGetPreviousTag_Good verifies tag resolution relative to both an
// explicit tag ref and HEAD.
func TestGetPreviousTag_Good(t *testing.T) {
	t.Run("returns previous tag", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: first")
		createChangelogTag(t, dir, "v1.0.0")
		createChangelogCommit(t, dir, "feat: second")
		createChangelogTag(t, dir, "v1.1.0")

		tag, err := getPreviousTag(dir, "v1.1.0")
		require.NoError(t, err)
		assert.Equal(t, "v1.0.0", tag)
	})

	t.Run("returns tag before HEAD", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: first")
		createChangelogTag(t, dir, "v1.0.0")
		createChangelogCommit(t, dir, "feat: second")

		tag, err := getPreviousTag(dir, "HEAD")
		require.NoError(t, err)
		assert.Equal(t, "v1.0.0", tag)
	})
}
|
||||
|
||||
// TestGetPreviousTag_Bad verifies that tag resolution fails when no
// earlier tag exists or the ref is invalid.
func TestGetPreviousTag_Bad(t *testing.T) {
	t.Run("returns error when no previous tag exists", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: first")
		createChangelogTag(t, dir, "v1.0.0")

		// v1.0.0^ has no tag before it
		_, err := getPreviousTag(dir, "v1.0.0")
		assert.Error(t, err)
	})

	t.Run("returns error for invalid ref", func(t *testing.T) {
		dir := setupChangelogGitRepo(t)
		createChangelogCommit(t, dir, "feat: commit")

		_, err := getPreviousTag(dir, "nonexistent")
		assert.Error(t, err)
	})
}
|
||||
|
||||
// TestFormatCommitLine_Good pins the exact bullet format, with and without
// a scope prefix.
func TestFormatCommitLine_Good(t *testing.T) {
	t.Run("formats commit without scope", func(t *testing.T) {
		commit := ConventionalCommit{
			Type:        "feat",
			Description: "add feature",
			Hash:        "abc1234",
		}

		result := formatCommitLine(commit)
		assert.Equal(t, "- add feature (abc1234)\n", result)
	})

	t.Run("formats commit with scope", func(t *testing.T) {
		commit := ConventionalCommit{
			Type:        "fix",
			Scope:       "api",
			Description: "fix bug",
			Hash:        "def5678",
		}

		result := formatCommitLine(commit)
		assert.Equal(t, "- **api**: fix bug (def5678)\n", result)
	})
}
|
||||
|
||||
// TestFormatChangelog_Ugly covers commit types outside commitTypeOrder:
// they get title-cased section labels and are emitted after the known
// sections, sorted alphabetically.
func TestFormatChangelog_Ugly(t *testing.T) {
	t.Run("handles custom commit type not in order", func(t *testing.T) {
		commits := []ConventionalCommit{
			{Type: "custom", Description: "custom type", Hash: "abc1234"},
		}

		result := formatChangelog(commits, "v1.0.0")

		assert.Contains(t, result, "### Custom")
		assert.Contains(t, result, "custom type")
	})

	t.Run("handles multiple custom commit types", func(t *testing.T) {
		commits := []ConventionalCommit{
			{Type: "alpha", Description: "alpha feature", Hash: "abc1234"},
			{Type: "beta", Description: "beta feature", Hash: "def5678"},
		}

		result := formatChangelog(commits, "v1.0.0")

		// Should be sorted alphabetically for custom types
		assert.Contains(t, result, "### Alpha")
		assert.Contains(t, result, "### Beta")
	})
}
|
||||
|
||||
// TestGenerateWithConfig_Bad verifies that git errors propagate through
// the filtered generation path as well.
func TestGenerateWithConfig_Bad(t *testing.T) {
	t.Run("returns error for non-git directory", func(t *testing.T) {
		dir := t.TempDir()
		cfg := &ChangelogConfig{
			Include: []string{"feat"},
		}

		_, err := GenerateWithConfig(dir, "", "HEAD", cfg)
		assert.Error(t, err)
	})
}
|
||||
|
||||
func TestGenerateWithConfig_EdgeCases(t *testing.T) {
|
||||
t.Run("uses HEAD when toRef is empty", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
// Pass empty toRef
|
||||
changelog, err := GenerateWithConfig(dir, "", "", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "## HEAD")
|
||||
})
|
||||
|
||||
t.Run("handles previous tag lookup failure gracefully", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: first")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
// No tags exist, should still work
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "first")
|
||||
})
|
||||
|
||||
t.Run("uses explicit fromRef when provided", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: old feature")
|
||||
createChangelogTag(t, dir, "v1.0.0")
|
||||
createChangelogCommit(t, dir, "feat: new feature")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
// Use explicit fromRef
|
||||
changelog, err := GenerateWithConfig(dir, "v1.0.0", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "new feature")
|
||||
assert.NotContains(t, changelog, "old feature")
|
||||
})
|
||||
|
||||
t.Run("skips non-conventional commits", func(t *testing.T) {
|
||||
dir := setupChangelogGitRepo(t)
|
||||
createChangelogCommit(t, dir, "feat: conventional commit")
|
||||
createChangelogCommit(t, dir, "Update README")
|
||||
|
||||
cfg := &ChangelogConfig{
|
||||
Include: []string{"feat"},
|
||||
}
|
||||
|
||||
changelog, err := GenerateWithConfig(dir, "", "HEAD", cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, changelog, "conventional commit")
|
||||
assert.NotContains(t, changelog, "Update README")
|
||||
})
|
||||
}
|
||||
|
|
@ -1,328 +0,0 @@
|
|||
// Package release provides release automation with changelog generation and publishing.
|
||||
package release
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"iter"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// ConfigFileName is the name of the release configuration file.
const ConfigFileName = "release.yaml"

// ConfigDir is the directory where release configuration is stored.
const ConfigDir = ".core"

// Config holds the complete release configuration loaded from .core/release.yaml.
// The yaml tags define the on-disk schema; the unexported fields carry
// runtime state and are never serialized.
type Config struct {
	// Version is the config file format version.
	Version int `yaml:"version"`
	// Project contains project metadata.
	Project ProjectConfig `yaml:"project"`
	// Build contains build settings for the release.
	Build BuildConfig `yaml:"build"`
	// Publishers defines where to publish the release.
	Publishers []PublisherConfig `yaml:"publishers"`
	// Changelog configures changelog generation.
	Changelog ChangelogConfig `yaml:"changelog"`
	// SDK configures SDK generation. Optional; nil when absent from YAML.
	SDK *SDKConfig `yaml:"sdk,omitempty"`

	// Internal fields (not serialized)
	projectDir string // Set by LoadConfig
	version    string // Set by CLI flag
}
|
||||
|
||||
// ProjectConfig holds project metadata for releases.
type ProjectConfig struct {
	// Name is the project name.
	Name string `yaml:"name"`
	// Repository is the GitHub repository in owner/repo format.
	Repository string `yaml:"repository"`
}
|
||||
|
||||
// BuildConfig holds build settings for releases.
type BuildConfig struct {
	// Targets defines the build targets (one per OS/arch pair).
	Targets []TargetConfig `yaml:"targets"`
}
|
||||
|
||||
// TargetConfig defines a single cross-compilation build target.
type TargetConfig struct {
	// OS is the target operating system (e.g., "linux", "darwin", "windows").
	OS string `yaml:"os"`
	// Arch is the target architecture (e.g., "amd64", "arm64").
	Arch string `yaml:"arch"`
}
|
||||
|
||||
// PublisherConfig holds configuration for a single publisher entry.
// Only the fields relevant to the selected Type are consulted; the rest
// stay at their zero values.
type PublisherConfig struct {
	// Type is the publisher type (e.g., "github", "linuxkit", "docker").
	Type string `yaml:"type"`
	// Prerelease marks the release as a prerelease.
	Prerelease bool `yaml:"prerelease"`
	// Draft creates the release as a draft.
	Draft bool `yaml:"draft"`

	// LinuxKit-specific configuration.
	// Config is the path to the LinuxKit YAML configuration file.
	Config string `yaml:"config,omitempty"`
	// Formats are the output formats to build (iso, raw, qcow2, vmdk).
	Formats []string `yaml:"formats,omitempty"`
	// Platforms are the target platforms (linux/amd64, linux/arm64).
	Platforms []string `yaml:"platforms,omitempty"`

	// Docker-specific configuration.
	// Registry is the container registry (default: ghcr.io).
	Registry string `yaml:"registry,omitempty"`
	// Image is the image name in owner/repo format.
	Image string `yaml:"image,omitempty"`
	// Dockerfile is the path to the Dockerfile (default: Dockerfile).
	Dockerfile string `yaml:"dockerfile,omitempty"`
	// Tags are the image tags to apply.
	Tags []string `yaml:"tags,omitempty"`
	// BuildArgs are additional Docker build arguments.
	BuildArgs map[string]string `yaml:"build_args,omitempty"`

	// npm-specific configuration.
	// Package is the npm package name (e.g., "@host-uk/core").
	Package string `yaml:"package,omitempty"`
	// Access is the npm access level: "public" or "restricted".
	Access string `yaml:"access,omitempty"`

	// Homebrew-specific configuration.
	// Tap is the Homebrew tap repository (e.g., "host-uk/homebrew-tap").
	Tap string `yaml:"tap,omitempty"`
	// Formula is the formula name (defaults to project name).
	Formula string `yaml:"formula,omitempty"`

	// Scoop-specific configuration.
	// Bucket is the Scoop bucket repository (e.g., "host-uk/scoop-bucket").
	Bucket string `yaml:"bucket,omitempty"`

	// AUR-specific configuration.
	// Maintainer is the AUR package maintainer (e.g., "Name <email>").
	Maintainer string `yaml:"maintainer,omitempty"`

	// Chocolatey-specific configuration.
	// Push determines whether to push to Chocolatey (false = generate only).
	Push bool `yaml:"push,omitempty"`

	// Official repo configuration (for Homebrew, Scoop).
	// When enabled, generates files for a PR to the official repos instead of
	// (or in addition to) publishing directly.
	Official *OfficialConfig `yaml:"official,omitempty"`
}
|
||||
|
||||
// OfficialConfig holds configuration for generating files for official repo PRs.
type OfficialConfig struct {
	// Enabled determines whether to generate files for official repos.
	Enabled bool `yaml:"enabled"`
	// Output is the directory to write generated files (publisher-specific
	// default when empty, e.g. dist/aur for the AUR publisher).
	Output string `yaml:"output,omitempty"`
}
|
||||
|
||||
// SDKConfig holds SDK generation configuration.
type SDKConfig struct {
	// Spec is the path to the OpenAPI spec file.
	Spec string `yaml:"spec,omitempty"`
	// Languages lists the SDK languages to generate.
	Languages []string `yaml:"languages,omitempty"`
	// Output is the directory generated SDKs are written to (default: sdk/).
	Output string `yaml:"output,omitempty"`
	// Package holds package naming options for the generated SDKs.
	Package SDKPackageConfig `yaml:"package,omitempty"`
	// Diff configures spec diffing between releases.
	Diff SDKDiffConfig `yaml:"diff,omitempty"`
	// Publish configures publishing SDKs into a monorepo.
	Publish SDKPublishConfig `yaml:"publish,omitempty"`
}
|
||||
|
||||
// SDKPackageConfig holds package naming configuration for generated SDKs.
type SDKPackageConfig struct {
	// Name is the package name used by generated SDKs.
	Name string `yaml:"name,omitempty"`
	// Version is the package version used by generated SDKs.
	Version string `yaml:"version,omitempty"`
}
|
||||
|
||||
// SDKDiffConfig holds OpenAPI diff configuration.
type SDKDiffConfig struct {
	// Enabled turns on spec diffing between releases.
	Enabled bool `yaml:"enabled,omitempty"`
	// FailOnBreaking aborts the release when the diff contains breaking changes.
	FailOnBreaking bool `yaml:"fail_on_breaking,omitempty"`
}
|
||||
|
||||
// SDKPublishConfig holds monorepo publish configuration for generated SDKs.
type SDKPublishConfig struct {
	// Repo is the target monorepo repository.
	Repo string `yaml:"repo,omitempty"`
	// Path is the subdirectory within the monorepo to publish into.
	Path string `yaml:"path,omitempty"`
}
|
||||
|
||||
// ChangelogConfig holds changelog generation settings. Include/Exclude refer
// to conventional-commit type prefixes (feat, fix, chore, ...).
type ChangelogConfig struct {
	// Include specifies commit types to include in the changelog.
	Include []string `yaml:"include"`
	// Exclude specifies commit types to exclude from the changelog.
	Exclude []string `yaml:"exclude"`
}
|
||||
|
||||
// PublishersIter returns an iterator for the publishers.
|
||||
func (c *Config) PublishersIter() iter.Seq[PublisherConfig] {
|
||||
return func(yield func(PublisherConfig) bool) {
|
||||
for _, p := range c.Publishers {
|
||||
if !yield(p) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LoadConfig loads release configuration from the .core/release.yaml file in the given directory.
|
||||
// If the config file does not exist, it returns DefaultConfig().
|
||||
// Returns an error if the file exists but cannot be parsed.
|
||||
func LoadConfig(dir string) (*Config, error) {
|
||||
configPath := filepath.Join(dir, ConfigDir, ConfigFileName)
|
||||
|
||||
// Convert to absolute path for io.Local
|
||||
absPath, err := filepath.Abs(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.LoadConfig: failed to resolve path: %w", err)
|
||||
}
|
||||
|
||||
content, err := io.Local.Read(absPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
cfg := DefaultConfig()
|
||||
cfg.projectDir = dir
|
||||
return cfg, nil
|
||||
}
|
||||
return nil, fmt.Errorf("release.LoadConfig: failed to read config file: %w", err)
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
if err := yaml.Unmarshal([]byte(content), &cfg); err != nil {
|
||||
return nil, fmt.Errorf("release.LoadConfig: failed to parse config file: %w", err)
|
||||
}
|
||||
|
||||
// Apply defaults for any missing fields
|
||||
applyDefaults(&cfg)
|
||||
cfg.projectDir = dir
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// DefaultConfig returns sensible defaults for release configuration.
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Version: 1,
|
||||
Project: ProjectConfig{
|
||||
Name: "",
|
||||
Repository: "",
|
||||
},
|
||||
Build: BuildConfig{
|
||||
Targets: []TargetConfig{
|
||||
{OS: "linux", Arch: "amd64"},
|
||||
{OS: "linux", Arch: "arm64"},
|
||||
{OS: "darwin", Arch: "arm64"},
|
||||
{OS: "windows", Arch: "amd64"},
|
||||
},
|
||||
},
|
||||
Publishers: []PublisherConfig{
|
||||
{
|
||||
Type: "github",
|
||||
Prerelease: false,
|
||||
Draft: false,
|
||||
},
|
||||
},
|
||||
Changelog: ChangelogConfig{
|
||||
Include: []string{"feat", "fix", "perf", "refactor"},
|
||||
Exclude: []string{"chore", "docs", "style", "test", "ci"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// applyDefaults fills in default values for any empty fields in the config.
|
||||
func applyDefaults(cfg *Config) {
|
||||
defaults := DefaultConfig()
|
||||
|
||||
if cfg.Version == 0 {
|
||||
cfg.Version = defaults.Version
|
||||
}
|
||||
|
||||
if len(cfg.Build.Targets) == 0 {
|
||||
cfg.Build.Targets = defaults.Build.Targets
|
||||
}
|
||||
|
||||
if len(cfg.Publishers) == 0 {
|
||||
cfg.Publishers = defaults.Publishers
|
||||
}
|
||||
|
||||
if len(cfg.Changelog.Include) == 0 && len(cfg.Changelog.Exclude) == 0 {
|
||||
cfg.Changelog.Include = defaults.Changelog.Include
|
||||
cfg.Changelog.Exclude = defaults.Changelog.Exclude
|
||||
}
|
||||
}
|
||||
|
||||
// SetProjectDir sets the project directory on the config. Used when the
// config was constructed without LoadConfig (which sets it automatically).
func (c *Config) SetProjectDir(dir string) {
	c.projectDir = dir
}
|
||||
|
||||
// SetVersion sets the version override on the config, typically from a CLI
// flag. The override is stored in the unexported version field and is not
// serialized back to release.yaml.
func (c *Config) SetVersion(version string) {
	c.version = version
}
|
||||
|
||||
// ConfigPath returns the path to the release config file for a given
// directory: <dir>/.core/release.yaml (using the OS path separator).
func ConfigPath(dir string) string {
	return filepath.Join(dir, ConfigDir, ConfigFileName)
}
|
||||
|
||||
// ConfigExists checks if a release config file exists in the given directory.
|
||||
func ConfigExists(dir string) bool {
|
||||
configPath := ConfigPath(dir)
|
||||
absPath, err := filepath.Abs(configPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return io.Local.IsFile(absPath)
|
||||
}
|
||||
|
||||
// GetRepository returns the "owner/repo" repository from the config, or the
// empty string when not configured.
func (c *Config) GetRepository() string {
	return c.Project.Repository
}
|
||||
|
||||
// GetProjectName returns the project name from the config, or the empty
// string when not configured.
func (c *Config) GetProjectName() string {
	return c.Project.Name
}
|
||||
|
||||
// WriteConfig writes the config to the .core/release.yaml file.
|
||||
func WriteConfig(cfg *Config, dir string) error {
|
||||
configPath := ConfigPath(dir)
|
||||
|
||||
// Convert to absolute path for io.Local
|
||||
absPath, err := filepath.Abs(configPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to resolve path: %w", err)
|
||||
}
|
||||
|
||||
// Ensure directory exists
|
||||
configDir := filepath.Dir(absPath)
|
||||
if err := io.Local.EnsureDir(configDir); err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to create directory: %w", err)
|
||||
}
|
||||
|
||||
data, err := yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to marshal config: %w", err)
|
||||
}
|
||||
|
||||
if err := io.Local.Write(absPath, string(data)); err != nil {
|
||||
return fmt.Errorf("release.WriteConfig: failed to write config file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,363 +0,0 @@
|
|||
package release
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupConfigTestDir creates a temp directory with optional .core/release.yaml
// content. When configContent is empty, no .core directory is created at all,
// which lets callers exercise the "config missing" path.
func setupConfigTestDir(t *testing.T, configContent string) string {
	t.Helper()
	dir := t.TempDir()

	if configContent != "" {
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.WriteFile(configPath, []byte(configContent), 0644)
		require.NoError(t, err)
	}

	return dir
}
|
||||
|
||||
// TestLoadConfig_Good covers the success paths of LoadConfig: a fully
// specified file, a missing file (defaults), a partial file (defaults merged),
// and the projectDir side effect.
func TestLoadConfig_Good(t *testing.T) {
	t.Run("loads valid config", func(t *testing.T) {
		// Every top-level section populated; nothing should be defaulted.
		content := `
version: 1
project:
  name: myapp
  repository: owner/repo
build:
  targets:
    - os: linux
      arch: amd64
    - os: darwin
      arch: arm64
publishers:
  - type: github
    prerelease: true
    draft: false
changelog:
  include:
    - feat
    - fix
  exclude:
    - chore
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		assert.Equal(t, 1, cfg.Version)
		assert.Equal(t, "myapp", cfg.Project.Name)
		assert.Equal(t, "owner/repo", cfg.Project.Repository)
		assert.Len(t, cfg.Build.Targets, 2)
		assert.Equal(t, "linux", cfg.Build.Targets[0].OS)
		assert.Equal(t, "amd64", cfg.Build.Targets[0].Arch)
		assert.Equal(t, "darwin", cfg.Build.Targets[1].OS)
		assert.Equal(t, "arm64", cfg.Build.Targets[1].Arch)
		assert.Len(t, cfg.Publishers, 1)
		assert.Equal(t, "github", cfg.Publishers[0].Type)
		assert.True(t, cfg.Publishers[0].Prerelease)
		assert.False(t, cfg.Publishers[0].Draft)
		assert.Equal(t, []string{"feat", "fix"}, cfg.Changelog.Include)
		assert.Equal(t, []string{"chore"}, cfg.Changelog.Exclude)
	})

	t.Run("returns defaults when config file missing", func(t *testing.T) {
		// No .core directory at all: LoadConfig must not error.
		dir := t.TempDir()

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		defaults := DefaultConfig()
		assert.Equal(t, defaults.Version, cfg.Version)
		assert.Equal(t, defaults.Build.Targets, cfg.Build.Targets)
		assert.Equal(t, defaults.Publishers, cfg.Publishers)
		assert.Equal(t, defaults.Changelog.Include, cfg.Changelog.Include)
		assert.Equal(t, defaults.Changelog.Exclude, cfg.Changelog.Exclude)
	})

	t.Run("applies defaults for missing fields", func(t *testing.T) {
		// Only version + project name supplied; the rest should come from defaults.
		content := `
version: 2
project:
  name: partial
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		require.NotNil(t, cfg)

		// Explicit values preserved
		assert.Equal(t, 2, cfg.Version)
		assert.Equal(t, "partial", cfg.Project.Name)

		// Defaults applied
		defaults := DefaultConfig()
		assert.Equal(t, defaults.Build.Targets, cfg.Build.Targets)
		assert.Equal(t, defaults.Publishers, cfg.Publishers)
	})

	t.Run("sets project directory on load", func(t *testing.T) {
		dir := setupConfigTestDir(t, "version: 1")

		cfg, err := LoadConfig(dir)
		require.NoError(t, err)
		assert.Equal(t, dir, cfg.projectDir)
	})
}
|
||||
|
||||
// TestLoadConfig_Bad covers LoadConfig failure modes: malformed YAML and an
// unreadable config path. Both must return a nil config and a wrapped error.
func TestLoadConfig_Bad(t *testing.T) {
	t.Run("returns error for invalid YAML", func(t *testing.T) {
		// Unterminated flow sequence -> yaml.Unmarshal error.
		content := `
version: 1
project:
  name: [invalid yaml
`
		dir := setupConfigTestDir(t, content)

		cfg, err := LoadConfig(dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to parse config file")
	})

	t.Run("returns error for unreadable file", func(t *testing.T) {
		dir := t.TempDir()
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		// Create config as a directory instead of file so Read fails with a
		// non-IsNotExist error, hitting the "failed to read" branch.
		configPath := filepath.Join(coreDir, ConfigFileName)
		err = os.Mkdir(configPath, 0755)
		require.NoError(t, err)

		cfg, err := LoadConfig(dir)
		assert.Error(t, err)
		assert.Nil(t, cfg)
		assert.Contains(t, err.Error(), "failed to read config file")
	})
}
|
||||
|
||||
// TestDefaultConfig_Good pins the shape of DefaultConfig: version 1, empty
// project metadata, a four-entry target matrix, a single GitHub publisher,
// and conventional-commit changelog filters.
func TestDefaultConfig_Good(t *testing.T) {
	t.Run("returns sensible defaults", func(t *testing.T) {
		cfg := DefaultConfig()

		assert.Equal(t, 1, cfg.Version)
		assert.Empty(t, cfg.Project.Name)
		assert.Empty(t, cfg.Project.Repository)

		// Default targets: checked by membership, not order.
		assert.Len(t, cfg.Build.Targets, 4)
		hasLinuxAmd64 := false
		hasDarwinArm64 := false
		hasWindowsAmd64 := false
		for _, target := range cfg.Build.Targets {
			if target.OS == "linux" && target.Arch == "amd64" {
				hasLinuxAmd64 = true
			}
			if target.OS == "darwin" && target.Arch == "arm64" {
				hasDarwinArm64 = true
			}
			if target.OS == "windows" && target.Arch == "amd64" {
				hasWindowsAmd64 = true
			}
		}
		assert.True(t, hasLinuxAmd64)
		assert.True(t, hasDarwinArm64)
		assert.True(t, hasWindowsAmd64)

		// Default publisher: GitHub, neither prerelease nor draft.
		assert.Len(t, cfg.Publishers, 1)
		assert.Equal(t, "github", cfg.Publishers[0].Type)
		assert.False(t, cfg.Publishers[0].Prerelease)
		assert.False(t, cfg.Publishers[0].Draft)

		// Default changelog settings
		assert.Contains(t, cfg.Changelog.Include, "feat")
		assert.Contains(t, cfg.Changelog.Include, "fix")
		assert.Contains(t, cfg.Changelog.Exclude, "chore")
		assert.Contains(t, cfg.Changelog.Exclude, "docs")
	})
}
|
||||
|
||||
func TestConfigPath_Good(t *testing.T) {
|
||||
t.Run("returns correct path", func(t *testing.T) {
|
||||
path := ConfigPath("/project/root")
|
||||
assert.Equal(t, "/project/root/.core/release.yaml", path)
|
||||
})
|
||||
}
|
||||
|
||||
// TestConfigExists_Good checks the three observable states of ConfigExists.
// NOTE(review): the last two subtests exercise the same setup (a bare
// t.TempDir() has no .core directory either way) — kept for intent clarity.
func TestConfigExists_Good(t *testing.T) {
	t.Run("returns true when config exists", func(t *testing.T) {
		dir := setupConfigTestDir(t, "version: 1")
		assert.True(t, ConfigExists(dir))
	})

	t.Run("returns false when config missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(dir))
	})

	t.Run("returns false when .core dir missing", func(t *testing.T) {
		dir := t.TempDir()
		assert.False(t, ConfigExists(dir))
	})
}
|
||||
|
||||
// TestWriteConfig_Good verifies a write/reload round trip and that WriteConfig
// creates the .core directory on demand.
func TestWriteConfig_Good(t *testing.T) {
	t.Run("writes config to file", func(t *testing.T) {
		dir := t.TempDir()

		cfg := DefaultConfig()
		cfg.Project.Name = "testapp"
		cfg.Project.Repository = "owner/testapp"

		err := WriteConfig(cfg, dir)
		require.NoError(t, err)

		// Verify file exists
		assert.True(t, ConfigExists(dir))

		// Reload and verify the round trip preserved project metadata.
		loaded, err := LoadConfig(dir)
		require.NoError(t, err)
		assert.Equal(t, "testapp", loaded.Project.Name)
		assert.Equal(t, "owner/testapp", loaded.Project.Repository)
	})

	t.Run("creates .core directory if missing", func(t *testing.T) {
		dir := t.TempDir()

		cfg := DefaultConfig()
		err := WriteConfig(cfg, dir)
		require.NoError(t, err)

		// Check directory was created
		coreDir := filepath.Join(dir, ConfigDir)
		info, err := os.Stat(coreDir)
		require.NoError(t, err)
		assert.True(t, info.IsDir())
	})
}
|
||||
|
||||
// TestConfig_GetRepository_Good pins the GetRepository accessor, including
// the zero-value case.
func TestConfig_GetRepository_Good(t *testing.T) {
	t.Run("returns repository", func(t *testing.T) {
		cfg := &Config{
			Project: ProjectConfig{
				Repository: "owner/repo",
			},
		}
		assert.Equal(t, "owner/repo", cfg.GetRepository())
	})

	t.Run("returns empty string when not set", func(t *testing.T) {
		cfg := &Config{}
		assert.Empty(t, cfg.GetRepository())
	})
}
|
||||
|
||||
// TestConfig_GetProjectName_Good pins the GetProjectName accessor, including
// the zero-value case.
func TestConfig_GetProjectName_Good(t *testing.T) {
	t.Run("returns project name", func(t *testing.T) {
		cfg := &Config{
			Project: ProjectConfig{
				Name: "myapp",
			},
		}
		assert.Equal(t, "myapp", cfg.GetProjectName())
	})

	t.Run("returns empty string when not set", func(t *testing.T) {
		cfg := &Config{}
		assert.Empty(t, cfg.GetProjectName())
	})
}
|
||||
|
||||
// TestConfig_SetVersion_Good verifies SetVersion stores the override in the
// unexported version field (white-box: same package).
func TestConfig_SetVersion_Good(t *testing.T) {
	t.Run("sets version override", func(t *testing.T) {
		cfg := &Config{}
		cfg.SetVersion("v1.2.3")
		assert.Equal(t, "v1.2.3", cfg.version)
	})
}
|
||||
|
||||
// TestConfig_SetProjectDir_Good verifies SetProjectDir stores the directory
// in the unexported projectDir field (white-box: same package).
func TestConfig_SetProjectDir_Good(t *testing.T) {
	t.Run("sets project directory", func(t *testing.T) {
		cfg := &Config{}
		cfg.SetProjectDir("/path/to/project")
		assert.Equal(t, "/path/to/project", cfg.projectDir)
	})
}
|
||||
|
||||
// TestWriteConfig_Bad covers WriteConfig failure modes via filesystem
// permissions. Both subtests are skipped for root, which ignores mode bits.
// NOTE(review): os.Geteuid returns -1 on Windows and chmod 0555 does not make
// a directory read-only there, so these subtests are only meaningful on Unix.
func TestWriteConfig_Bad(t *testing.T) {
	t.Run("returns error for unwritable directory", func(t *testing.T) {
		if os.Geteuid() == 0 {
			t.Skip("root can write to any directory")
		}
		dir := t.TempDir()

		// Create .core directory and make it unwritable
		coreDir := filepath.Join(dir, ConfigDir)
		err := os.MkdirAll(coreDir, 0755)
		require.NoError(t, err)

		// Make directory read-only; restore afterwards so TempDir cleanup works.
		err = os.Chmod(coreDir, 0555)
		require.NoError(t, err)
		defer func() { _ = os.Chmod(coreDir, 0755) }()

		cfg := DefaultConfig()
		err = WriteConfig(cfg, dir)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to write config file")
	})

	t.Run("returns error when directory creation fails", func(t *testing.T) {
		if os.Geteuid() == 0 {
			t.Skip("root can create directories anywhere")
		}
		// Use a path that doesn't exist and can't be created
		cfg := DefaultConfig()
		err := WriteConfig(cfg, "/nonexistent/path/that/cannot/be/created")
		assert.Error(t, err)
	})
}
|
||||
|
||||
// TestApplyDefaults_Good verifies the merge semantics of applyDefaults:
// zero values are filled in, explicit values are preserved, and changelog
// defaults apply only when both include and exclude are empty.
func TestApplyDefaults_Good(t *testing.T) {
	t.Run("applies version default when zero", func(t *testing.T) {
		cfg := &Config{Version: 0}
		applyDefaults(cfg)
		assert.Equal(t, 1, cfg.Version)
	})

	t.Run("preserves existing version", func(t *testing.T) {
		cfg := &Config{Version: 2}
		applyDefaults(cfg)
		assert.Equal(t, 2, cfg.Version)
	})

	t.Run("applies changelog defaults only when both empty", func(t *testing.T) {
		cfg := &Config{
			Changelog: ChangelogConfig{
				Include: []string{"feat"},
			},
		}
		applyDefaults(cfg)
		// Should not apply defaults because Include is set
		assert.Equal(t, []string{"feat"}, cfg.Changelog.Include)
		assert.Empty(t, cfg.Changelog.Exclude)
	})
}
|
||||
|
|
@ -1,314 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// aurTemplates embeds the default PKGBUILD/.SRCINFO templates; renderTemplate
// falls back to these when no project override exists under .core/.
//
//go:embed templates/aur/*.tmpl
var aurTemplates embed.FS
|
||||
|
||||
// AURConfig holds AUR-specific configuration parsed from a publisher's
// extended config map.
type AURConfig struct {
	// Package is the AUR package name; "-bin" is appended when forming the
	// AUR repository name. Defaults to the project name when empty.
	Package string
	// Maintainer is the package maintainer (e.g., "Name <email>"). Required.
	Maintainer string
	// Official config for generating files for official repo PRs; nil when
	// not configured.
	Official *OfficialConfig
}
|
||||
|
||||
// AURPublisher publishes releases to the Arch User Repository. It is
// stateless; all inputs arrive via Publish.
type AURPublisher struct{}
|
||||
|
||||
// NewAURPublisher creates a new AUR publisher.
func NewAURPublisher() *AURPublisher {
	return &AURPublisher{}
}
|
||||
|
||||
// Name returns the publisher's identifier ("aur"), used to match publisher
// entries in the release configuration.
func (p *AURPublisher) Name() string {
	return "aur"
}
|
||||
|
||||
// Publish publishes the release to AUR.
//
// Resolution order: maintainer must be configured; the repository comes from
// relCfg, falling back to detection from the project directory; the project
// name comes from relCfg, falling back to the last path segment of the
// repository; the AUR package name defaults to the project name.
//
// When dryRun is true the rendered PKGBUILD/.SRCINFO are printed but nothing
// is written or pushed.
func (p *AURPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	cfg := p.parseConfig(pubCfg, relCfg)

	if cfg.Maintainer == "" {
		return errors.New("aur.Publish: maintainer is required (set publish.aur.maintainer in config)")
	}

	repo := ""
	if relCfg != nil {
		repo = relCfg.GetRepository()
	}
	if repo == "" {
		detectedRepo, err := detectRepository(release.ProjectDir)
		if err != nil {
			return fmt.Errorf("aur.Publish: could not determine repository: %w", err)
		}
		repo = detectedRepo
	}

	projectName := ""
	if relCfg != nil {
		projectName = relCfg.GetProjectName()
	}
	if projectName == "" {
		// Fall back to the "repo" part of "owner/repo".
		parts := strings.Split(repo, "/")
		projectName = parts[len(parts)-1]
	}

	packageName := cfg.Package
	if packageName == "" {
		packageName = projectName
	}

	// AUR pkgver must not carry the leading "v".
	version := strings.TrimPrefix(release.Version, "v")
	checksums := buildChecksumMap(release.Artifacts)

	data := aurTemplateData{
		PackageName: packageName,
		Description: fmt.Sprintf("%s CLI", projectName),
		Repository:  repo,
		Version:     version,
		License:     "MIT", // NOTE(review): hard-coded license — confirm per project
		BinaryName:  projectName,
		Maintainer:  cfg.Maintainer,
		Checksums:   checksums,
	}

	if dryRun {
		return p.dryRunPublish(release.FS, data, cfg)
	}

	return p.executePublish(ctx, release.ProjectDir, data, cfg, release)
}
|
||||
|
||||
// aurTemplateData is the data model passed to the PKGBUILD/.SRCINFO templates.
type aurTemplateData struct {
	PackageName string      // AUR package name (without the -bin suffix)
	Description string      // one-line package description
	Repository  string      // "owner/repo" source repository
	Version     string      // release version, without leading "v"
	License     string      // SPDX license identifier
	BinaryName  string      // installed binary name
	Maintainer  string      // "Name <email>" maintainer line
	Checksums   ChecksumMap // per-artifact checksums for the source entries
}
|
||||
|
||||
func (p *AURPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) AURConfig {
|
||||
cfg := AURConfig{}
|
||||
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if pkg, ok := ext["package"].(string); ok && pkg != "" {
|
||||
cfg.Package = pkg
|
||||
}
|
||||
if maintainer, ok := ext["maintainer"].(string); ok && maintainer != "" {
|
||||
cfg.Maintainer = maintainer
|
||||
}
|
||||
if official, ok := ext["official"].(map[string]any); ok {
|
||||
cfg.Official = &OfficialConfig{}
|
||||
if enabled, ok := official["enabled"].(bool); ok {
|
||||
cfg.Official.Enabled = enabled
|
||||
}
|
||||
if output, ok := official["output"].(string); ok {
|
||||
cfg.Official.Output = output
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// dryRunPublish renders both AUR templates and prints them to stdout along
// with the package metadata and the would-be push target, without touching
// the filesystem or the network. The cfg parameter is currently unused but
// kept for parity with executePublish.
func (p *AURPublisher) dryRunPublish(m io.Medium, data aurTemplateData, cfg AURConfig) error {
	fmt.Println()
	fmt.Println("=== DRY RUN: AUR Publish ===")
	fmt.Println()
	fmt.Printf("Package:    %s-bin\n", data.PackageName)
	fmt.Printf("Version:    %s\n", data.Version)
	fmt.Printf("Maintainer: %s\n", data.Maintainer)
	fmt.Printf("Repository: %s\n", data.Repository)
	fmt.Println()

	pkgbuild, err := p.renderTemplate(m, "templates/aur/PKGBUILD.tmpl", data)
	if err != nil {
		return fmt.Errorf("aur.dryRunPublish: %w", err)
	}
	fmt.Println("Generated PKGBUILD:")
	fmt.Println("---")
	fmt.Println(pkgbuild)
	fmt.Println("---")
	fmt.Println()

	srcinfo, err := p.renderTemplate(m, "templates/aur/.SRCINFO.tmpl", data)
	if err != nil {
		return fmt.Errorf("aur.dryRunPublish: %w", err)
	}
	fmt.Println("Generated .SRCINFO:")
	fmt.Println("---")
	fmt.Println(srcinfo)
	fmt.Println("---")
	fmt.Println()

	fmt.Printf("Would push to AUR: ssh://aur@aur.archlinux.org/%s-bin.git\n", data.PackageName)
	fmt.Println()
	fmt.Println("=== END DRY RUN ===")

	return nil
}
|
||||
|
||||
// executePublish renders the AUR files and then does one or both of:
//   - writes PKGBUILD/.SRCINFO to the configured "official" output directory
//     (relative paths are resolved against projectDir);
//   - pushes the files to the AUR git remote — skipped entirely when
//     official mode is enabled, i.e. official mode is exclusive.
func (p *AURPublisher) executePublish(ctx context.Context, projectDir string, data aurTemplateData, cfg AURConfig, release *Release) error {
	pkgbuild, err := p.renderTemplate(release.FS, "templates/aur/PKGBUILD.tmpl", data)
	if err != nil {
		return fmt.Errorf("aur.Publish: failed to render PKGBUILD: %w", err)
	}

	srcinfo, err := p.renderTemplate(release.FS, "templates/aur/.SRCINFO.tmpl", data)
	if err != nil {
		return fmt.Errorf("aur.Publish: failed to render .SRCINFO: %w", err)
	}

	// If official config is enabled, write to output directory
	if cfg.Official != nil && cfg.Official.Enabled {
		output := cfg.Official.Output
		if output == "" {
			output = filepath.Join(projectDir, "dist", "aur")
		} else if !filepath.IsAbs(output) {
			output = filepath.Join(projectDir, output)
		}

		if err := release.FS.EnsureDir(output); err != nil {
			return fmt.Errorf("aur.Publish: failed to create output directory: %w", err)
		}

		pkgbuildPath := filepath.Join(output, "PKGBUILD")
		if err := release.FS.Write(pkgbuildPath, pkgbuild); err != nil {
			return fmt.Errorf("aur.Publish: failed to write PKGBUILD: %w", err)
		}

		srcinfoPath := filepath.Join(output, ".SRCINFO")
		if err := release.FS.Write(srcinfoPath, srcinfo); err != nil {
			return fmt.Errorf("aur.Publish: failed to write .SRCINFO: %w", err)
		}
		fmt.Printf("Wrote AUR files: %s\n", output)
	}

	// Push to AUR if not in official-only mode
	if cfg.Official == nil || !cfg.Official.Enabled {
		if err := p.pushToAUR(ctx, data, pkgbuild, srcinfo); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func (p *AURPublisher) pushToAUR(ctx context.Context, data aurTemplateData, pkgbuild, srcinfo string) error {
|
||||
aurURL := fmt.Sprintf("ssh://aur@aur.archlinux.org/%s-bin.git", data.PackageName)
|
||||
|
||||
tmpDir, err := os.MkdirTemp("", "aur-package-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("aur.Publish: failed to create temp directory: %w", err)
|
||||
}
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Clone existing AUR repo (or initialize new one)
|
||||
fmt.Printf("Cloning AUR package %s-bin...\n", data.PackageName)
|
||||
cmd := exec.CommandContext(ctx, "git", "clone", aurURL, tmpDir)
|
||||
if err := cmd.Run(); err != nil {
|
||||
// If clone fails, init a new repo
|
||||
cmd = exec.CommandContext(ctx, "git", "init", tmpDir)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("aur.Publish: failed to initialize repo: %w", err)
|
||||
}
|
||||
cmd = exec.CommandContext(ctx, "git", "-C", tmpDir, "remote", "add", "origin", aurURL)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("aur.Publish: failed to add remote: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write files
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, "PKGBUILD"), []byte(pkgbuild), 0644); err != nil {
|
||||
return fmt.Errorf("aur.Publish: failed to write PKGBUILD: %w", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, ".SRCINFO"), []byte(srcinfo), 0644); err != nil {
|
||||
return fmt.Errorf("aur.Publish: failed to write .SRCINFO: %w", err)
|
||||
}
|
||||
|
||||
commitMsg := fmt.Sprintf("Update to %s", data.Version)
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "add", ".")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("aur.Publish: git add failed: %w", err)
|
||||
}
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg)
|
||||
cmd.Dir = tmpDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("aur.Publish: git commit failed: %w", err)
|
||||
}
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "push", "origin", "master")
|
||||
cmd.Dir = tmpDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("aur.Publish: git push failed: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Published to AUR: https://aur.archlinux.org/packages/%s-bin\n", data.PackageName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AURPublisher) renderTemplate(m io.Medium, name string, data aurTemplateData) (string, error) {
|
||||
var content []byte
|
||||
var err error
|
||||
|
||||
// Try custom template from medium
|
||||
customPath := filepath.Join(".core", name)
|
||||
if m != nil && m.IsFile(customPath) {
|
||||
customContent, err := m.Read(customPath)
|
||||
if err == nil {
|
||||
content = []byte(customContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to embedded template
|
||||
if content == nil {
|
||||
content, err = aurTemplates.ReadFile(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read template %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
tmpl, err := template.New(filepath.Base(name)).Parse(string(content))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, data); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template %s: %w", name, err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// Ensure build package is used: keeps the build import compiling even if
// no other reference to it remains in this file.
var _ = build.Artifact{}
|
||||
|
|
@ -1,226 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestAURPublisher_Name_Good verifies the publisher's identifier.
func TestAURPublisher_Name_Good(t *testing.T) {
	t.Run("returns aur", func(t *testing.T) {
		p := NewAURPublisher()
		assert.Equal(t, "aur", p.Name())
	})
}

// TestAURPublisher_ParseConfig_Good covers parseConfig happy paths:
// defaults, extended-config fields, and the nested "official" section.
func TestAURPublisher_ParseConfig_Good(t *testing.T) {
	p := NewAURPublisher()

	t.Run("uses defaults when no extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{Type: "aur"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Empty(t, cfg.Package)
		assert.Empty(t, cfg.Maintainer)
		assert.Nil(t, cfg.Official)
	})

	t.Run("parses package and maintainer from extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "aur",
			Extended: map[string]any{
				"package":    "mypackage",
				"maintainer": "John Doe <john@example.com>",
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Equal(t, "mypackage", cfg.Package)
		assert.Equal(t, "John Doe <john@example.com>", cfg.Maintainer)
	})

	t.Run("parses official config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "aur",
			Extended: map[string]any{
				"official": map[string]any{
					"enabled": true,
					"output":  "dist/aur-files",
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		require.NotNil(t, cfg.Official)
		assert.True(t, cfg.Official.Enabled)
		assert.Equal(t, "dist/aur-files", cfg.Official.Output)
	})

	t.Run("handles missing official fields", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "aur",
			Extended: map[string]any{
				"official": map[string]any{},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		// An empty "official" map still produces a non-nil Official with
		// zero-value fields.
		require.NotNil(t, cfg.Official)
		assert.False(t, cfg.Official.Enabled)
		assert.Empty(t, cfg.Official.Output)
	})
}

// TestAURPublisher_RenderTemplate_Good renders the embedded PKGBUILD and
// .SRCINFO templates and spot-checks the generated fields.
func TestAURPublisher_RenderTemplate_Good(t *testing.T) {
	p := NewAURPublisher()

	t.Run("renders PKGBUILD template with data", func(t *testing.T) {
		data := aurTemplateData{
			PackageName: "myapp",
			Description: "My awesome CLI",
			Repository:  "owner/myapp",
			Version:     "1.2.3",
			License:     "MIT",
			BinaryName:  "myapp",
			Maintainer:  "John Doe <john@example.com>",
			Checksums: ChecksumMap{
				LinuxAmd64: "abc123",
				LinuxArm64: "def456",
			},
		}

		result, err := p.renderTemplate(io.Local, "templates/aur/PKGBUILD.tmpl", data)
		require.NoError(t, err)

		assert.Contains(t, result, "# Maintainer: John Doe <john@example.com>")
		assert.Contains(t, result, "pkgname=myapp-bin")
		assert.Contains(t, result, "pkgver=1.2.3")
		assert.Contains(t, result, `pkgdesc="My awesome CLI"`)
		assert.Contains(t, result, "url=\"https://github.com/owner/myapp\"")
		assert.Contains(t, result, "license=('MIT')")
		assert.Contains(t, result, "sha256sums_x86_64=('abc123')")
		assert.Contains(t, result, "sha256sums_aarch64=('def456')")
	})

	t.Run("renders .SRCINFO template with data", func(t *testing.T) {
		data := aurTemplateData{
			PackageName: "myapp",
			Description: "My CLI",
			Repository:  "owner/myapp",
			Version:     "1.0.0",
			License:     "MIT",
			BinaryName:  "myapp",
			Maintainer:  "Test <test@test.com>",
			Checksums: ChecksumMap{
				LinuxAmd64: "checksum1",
				LinuxArm64: "checksum2",
			},
		}

		result, err := p.renderTemplate(io.Local, "templates/aur/.SRCINFO.tmpl", data)
		require.NoError(t, err)

		assert.Contains(t, result, "pkgbase = myapp-bin")
		assert.Contains(t, result, "pkgdesc = My CLI")
		assert.Contains(t, result, "pkgver = 1.0.0")
		assert.Contains(t, result, "arch = x86_64")
		assert.Contains(t, result, "arch = aarch64")
		assert.Contains(t, result, "sha256sums_x86_64 = checksum1")
		assert.Contains(t, result, "sha256sums_aarch64 = checksum2")
		assert.Contains(t, result, "pkgname = myapp-bin")
	})
}

// TestAURPublisher_RenderTemplate_Bad checks the error path for a
// template name that exists neither on disk nor in the embedded FS.
func TestAURPublisher_RenderTemplate_Bad(t *testing.T) {
	p := NewAURPublisher()

	t.Run("returns error for non-existent template", func(t *testing.T) {
		data := aurTemplateData{}
		_, err := p.renderTemplate(io.Local, "templates/aur/nonexistent.tmpl", data)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to read template")
	})
}

// TestAURPublisher_DryRunPublish_Good captures stdout (dryRunPublish
// prints via fmt) and asserts on the report's key lines.
func TestAURPublisher_DryRunPublish_Good(t *testing.T) {
	p := NewAURPublisher()

	t.Run("outputs expected dry run information", func(t *testing.T) {
		// Redirect stdout into a pipe for the duration of the call.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		data := aurTemplateData{
			PackageName: "myapp",
			Version:     "1.0.0",
			Maintainer:  "John Doe <john@example.com>",
			Repository:  "owner/repo",
			BinaryName:  "myapp",
			Checksums:   ChecksumMap{},
		}
		cfg := AURConfig{
			Maintainer: "John Doe <john@example.com>",
		}

		err := p.dryRunPublish(io.Local, data, cfg)

		// Close the write end first so ReadFrom sees EOF, then restore stdout.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: AUR Publish")
		assert.Contains(t, output, "Package: myapp-bin")
		assert.Contains(t, output, "Version: 1.0.0")
		assert.Contains(t, output, "Maintainer: John Doe <john@example.com>")
		assert.Contains(t, output, "Repository: owner/repo")
		assert.Contains(t, output, "Generated PKGBUILD:")
		assert.Contains(t, output, "Generated .SRCINFO:")
		assert.Contains(t, output, "Would push to AUR: ssh://aur@aur.archlinux.org/myapp-bin.git")
		assert.Contains(t, output, "END DRY RUN")
	})
}

// TestAURPublisher_Publish_Bad verifies Publish rejects a configuration
// without a maintainer before attempting any git/SSH work.
func TestAURPublisher_Publish_Bad(t *testing.T) {
	p := NewAURPublisher()

	t.Run("fails when maintainer not configured", func(t *testing.T) {
		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/project",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "aur"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "maintainer is required")
	})
}

// TestAURConfig_Defaults_Good documents the zero-value defaults produced
// by parseConfig when no extended config is supplied.
func TestAURConfig_Defaults_Good(t *testing.T) {
	t.Run("has sensible defaults", func(t *testing.T) {
		p := NewAURPublisher()
		pubCfg := PublisherConfig{Type: "aur"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Empty(t, cfg.Package)
		assert.Empty(t, cfg.Maintainer)
		assert.Nil(t, cfg.Official)
	})
}
|
||||
|
|
@ -1,295 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-i18n"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// chocolateyTemplates holds the embedded default nuspec and install
// script templates; renderTemplate falls back to these when no
// project-local override exists.
//
//go:embed templates/chocolatey/*.tmpl templates/chocolatey/tools/*.tmpl
var chocolateyTemplates embed.FS

// ChocolateyConfig holds Chocolatey-specific configuration.
type ChocolateyConfig struct {
	// Package is the Chocolatey package name.
	Package string
	// Push determines whether to push to Chocolatey (false = generate only).
	Push bool
	// Official config for generating files for official repo PRs.
	Official *OfficialConfig
}
|
||||
|
||||
// ChocolateyPublisher publishes releases to Chocolatey. It is stateless;
// all inputs arrive through Publish.
type ChocolateyPublisher struct{}

// NewChocolateyPublisher creates a new Chocolatey publisher.
func NewChocolateyPublisher() *ChocolateyPublisher {
	return &ChocolateyPublisher{}
}

// Name returns the publisher's identifier ("chocolatey").
func (p *ChocolateyPublisher) Name() string {
	return "chocolatey"
}
|
||||
|
||||
// Publish publishes the release to Chocolatey.
//
// Resolution chain: repository comes from relCfg, else is detected from
// the project directory; project name comes from relCfg, else the last
// path segment of the repository; package name comes from the extended
// config, else the project name. In dry-run mode only a report is
// printed; otherwise package files are written (and optionally pushed).
func (p *ChocolateyPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	cfg := p.parseConfig(pubCfg, relCfg)

	// Repository: explicit config first, then auto-detection.
	repo := ""
	if relCfg != nil {
		repo = relCfg.GetRepository()
	}
	if repo == "" {
		detectedRepo, err := detectRepository(release.ProjectDir)
		if err != nil {
			return fmt.Errorf("chocolatey.Publish: could not determine repository: %w", err)
		}
		repo = detectedRepo
	}

	// Project name: explicit config first, then last repo path segment.
	projectName := ""
	if relCfg != nil {
		projectName = relCfg.GetProjectName()
	}
	if projectName == "" {
		parts := strings.Split(repo, "/")
		projectName = parts[len(parts)-1]
	}

	packageName := cfg.Package
	if packageName == "" {
		packageName = projectName
	}

	// Chocolatey versions have no "v" prefix.
	version := strings.TrimPrefix(release.Version, "v")
	checksums := buildChecksumMap(release.Artifacts)

	// Extract authors from repository (the owner segment).
	authors := strings.Split(repo, "/")[0]

	data := chocolateyTemplateData{
		PackageName: packageName,
		Title:       fmt.Sprintf("%s CLI", i18n.Title(projectName)),
		Description: fmt.Sprintf("%s CLI", projectName),
		Repository:  repo,
		Version:     version,
		License:     "MIT",
		BinaryName:  projectName,
		Authors:     authors,
		Tags:        fmt.Sprintf("cli %s", projectName),
		Checksums:   checksums,
	}

	if dryRun {
		return p.dryRunPublish(release.FS, data, cfg)
	}

	return p.executePublish(ctx, release.ProjectDir, data, cfg, release)
}
|
||||
|
||||
// chocolateyTemplateData is the data model handed to the nuspec and
// install-script templates.
type chocolateyTemplateData struct {
	PackageName string      // Chocolatey package id
	Title       string      // human-readable package title
	Description string      // short package description
	Repository  string      // owner/repo identifier
	Version     string      // version without the leading "v"
	License     string      // license name placed in the nuspec
	BinaryName  string      // name of the shipped executable
	Authors     string      // package authors (repository owner)
	Tags        string      // space-separated search tags
	Checksums   ChecksumMap // per-platform sha256 checksums
}
|
||||
|
||||
func (p *ChocolateyPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) ChocolateyConfig {
|
||||
cfg := ChocolateyConfig{
|
||||
Push: false, // Default to generate only
|
||||
}
|
||||
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if pkg, ok := ext["package"].(string); ok && pkg != "" {
|
||||
cfg.Package = pkg
|
||||
}
|
||||
if push, ok := ext["push"].(bool); ok {
|
||||
cfg.Push = push
|
||||
}
|
||||
if official, ok := ext["official"].(map[string]any); ok {
|
||||
cfg.Official = &OfficialConfig{}
|
||||
if enabled, ok := official["enabled"].(bool); ok {
|
||||
cfg.Official.Enabled = enabled
|
||||
}
|
||||
if output, ok := official["output"].(string); ok {
|
||||
cfg.Official.Output = output
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// dryRunPublish renders the package files and prints what a real publish
// would do, without writing anything to disk or contacting Chocolatey.
func (p *ChocolateyPublisher) dryRunPublish(m io.Medium, data chocolateyTemplateData, cfg ChocolateyConfig) error {
	fmt.Println()
	fmt.Println("=== DRY RUN: Chocolatey Publish ===")
	fmt.Println()
	fmt.Printf("Package: %s\n", data.PackageName)
	fmt.Printf("Version: %s\n", data.Version)
	fmt.Printf("Push: %t\n", cfg.Push)
	fmt.Printf("Repository: %s\n", data.Repository)
	fmt.Println()

	// Render both templates so a dry run surfaces template errors too.
	nuspec, err := p.renderTemplate(m, "templates/chocolatey/package.nuspec.tmpl", data)
	if err != nil {
		return fmt.Errorf("chocolatey.dryRunPublish: %w", err)
	}
	fmt.Println("Generated package.nuspec:")
	fmt.Println("---")
	fmt.Println(nuspec)
	fmt.Println("---")
	fmt.Println()

	install, err := p.renderTemplate(m, "templates/chocolatey/tools/chocolateyinstall.ps1.tmpl", data)
	if err != nil {
		return fmt.Errorf("chocolatey.dryRunPublish: %w", err)
	}
	fmt.Println("Generated chocolateyinstall.ps1:")
	fmt.Println("---")
	fmt.Println(install)
	fmt.Println("---")
	fmt.Println()

	if cfg.Push {
		fmt.Println("Would push to Chocolatey community repo")
	} else {
		fmt.Println("Would generate package files only (push=false)")
	}
	fmt.Println()
	fmt.Println("=== END DRY RUN ===")

	return nil
}
|
||||
|
||||
// executePublish renders and writes the Chocolatey package files under
// dist/chocolatey (or the configured "official" output directory) and,
// when cfg.Push is set, packs and pushes the package.
func (p *ChocolateyPublisher) executePublish(ctx context.Context, projectDir string, data chocolateyTemplateData, cfg ChocolateyConfig, release *Release) error {
	nuspec, err := p.renderTemplate(release.FS, "templates/chocolatey/package.nuspec.tmpl", data)
	if err != nil {
		return fmt.Errorf("chocolatey.Publish: failed to render nuspec: %w", err)
	}

	install, err := p.renderTemplate(release.FS, "templates/chocolatey/tools/chocolateyinstall.ps1.tmpl", data)
	if err != nil {
		return fmt.Errorf("chocolatey.Publish: failed to render install script: %w", err)
	}

	// Create package directory. The "official" output overrides the
	// default location; relative paths are anchored at the project dir.
	output := filepath.Join(projectDir, "dist", "chocolatey")
	if cfg.Official != nil && cfg.Official.Enabled && cfg.Official.Output != "" {
		output = cfg.Official.Output
		if !filepath.IsAbs(output) {
			output = filepath.Join(projectDir, output)
		}
	}

	toolsDir := filepath.Join(output, "tools")
	if err := release.FS.EnsureDir(toolsDir); err != nil {
		return fmt.Errorf("chocolatey.Publish: failed to create output directory: %w", err)
	}

	// Write files: <pkg>.nuspec at the root, install script under tools/.
	nuspecPath := filepath.Join(output, fmt.Sprintf("%s.nuspec", data.PackageName))
	if err := release.FS.Write(nuspecPath, nuspec); err != nil {
		return fmt.Errorf("chocolatey.Publish: failed to write nuspec: %w", err)
	}

	installPath := filepath.Join(toolsDir, "chocolateyinstall.ps1")
	if err := release.FS.Write(installPath, install); err != nil {
		return fmt.Errorf("chocolatey.Publish: failed to write install script: %w", err)
	}

	fmt.Printf("Wrote Chocolatey package files: %s\n", output)

	// Push to Chocolatey if configured
	if cfg.Push {
		if err := p.pushToChocolatey(ctx, output, data); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// pushToChocolatey packs the generated package with `choco pack` and
// pushes the resulting .nupkg to the community repository. Requires the
// CHOCOLATEY_API_KEY environment variable and the choco CLI on PATH.
func (p *ChocolateyPublisher) pushToChocolatey(ctx context.Context, packageDir string, data chocolateyTemplateData) error {
	// Check for CHOCOLATEY_API_KEY
	apiKey := os.Getenv("CHOCOLATEY_API_KEY")
	if apiKey == "" {
		return errors.New("chocolatey.Publish: CHOCOLATEY_API_KEY environment variable is required for push")
	}

	// Pack the package. The .nupkg name follows choco's <id>.<version>.nupkg
	// convention, so we can predict the output path before packing.
	nupkgPath := filepath.Join(packageDir, fmt.Sprintf("%s.%s.nupkg", data.PackageName, data.Version))

	cmd := exec.CommandContext(ctx, "choco", "pack", filepath.Join(packageDir, fmt.Sprintf("%s.nuspec", data.PackageName)), "-OutputDirectory", packageDir)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("chocolatey.Publish: choco pack failed: %w", err)
	}

	// Push the package.
	// NOTE(review): the API key is passed as a command-line argument and is
	// therefore visible in the process list while choco runs — consider
	// `choco apikey` or config-based auth; confirm against choco docs.
	cmd = exec.CommandContext(ctx, "choco", "push", nupkgPath, "--source", "https://push.chocolatey.org/", "--api-key", apiKey)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("chocolatey.Publish: choco push failed: %w", err)
	}

	fmt.Printf("Published to Chocolatey: https://community.chocolatey.org/packages/%s\n", data.PackageName)
	return nil
}
|
||||
|
||||
func (p *ChocolateyPublisher) renderTemplate(m io.Medium, name string, data chocolateyTemplateData) (string, error) {
|
||||
var content []byte
|
||||
var err error
|
||||
|
||||
// Try custom template from medium
|
||||
customPath := filepath.Join(".core", name)
|
||||
if m != nil && m.IsFile(customPath) {
|
||||
customContent, err := m.Read(customPath)
|
||||
if err == nil {
|
||||
content = []byte(customContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to embedded template
|
||||
if content == nil {
|
||||
content, err = chocolateyTemplates.ReadFile(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read template %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
tmpl, err := template.New(filepath.Base(name)).Parse(string(content))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, data); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template %s: %w", name, err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// Ensure build package is used: keeps the build import compiling even if
// no other reference to it remains in this file.
var _ = build.Artifact{}
|
||||
|
|
@ -1,323 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestChocolateyPublisher_Name_Good verifies the publisher's identifier.
func TestChocolateyPublisher_Name_Good(t *testing.T) {
	t.Run("returns chocolatey", func(t *testing.T) {
		p := NewChocolateyPublisher()
		assert.Equal(t, "chocolatey", p.Name())
	})
}

// TestChocolateyPublisher_ParseConfig_Good covers parseConfig: defaults,
// extended fields, the nested "official" section, and nil/partial input.
func TestChocolateyPublisher_ParseConfig_Good(t *testing.T) {
	p := NewChocolateyPublisher()

	t.Run("uses defaults when no extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{Type: "chocolatey"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Empty(t, cfg.Package)
		assert.False(t, cfg.Push)
		assert.Nil(t, cfg.Official)
	})

	t.Run("parses package and push from extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "chocolatey",
			Extended: map[string]any{
				"package": "mypackage",
				"push":    true,
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Equal(t, "mypackage", cfg.Package)
		assert.True(t, cfg.Push)
	})

	t.Run("parses official config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "chocolatey",
			Extended: map[string]any{
				"official": map[string]any{
					"enabled": true,
					"output":  "dist/choco",
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		require.NotNil(t, cfg.Official)
		assert.True(t, cfg.Official.Enabled)
		assert.Equal(t, "dist/choco", cfg.Official.Output)
	})

	t.Run("handles missing official fields", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "chocolatey",
			Extended: map[string]any{
				"official": map[string]any{},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		// An empty "official" map still yields a non-nil Official with
		// zero-value fields.
		require.NotNil(t, cfg.Official)
		assert.False(t, cfg.Official.Enabled)
		assert.Empty(t, cfg.Official.Output)
	})

	t.Run("handles nil extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type:     "chocolatey",
			Extended: nil,
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Empty(t, cfg.Package)
		assert.False(t, cfg.Push)
		assert.Nil(t, cfg.Official)
	})

	t.Run("defaults push to false when not specified", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "chocolatey",
			Extended: map[string]any{
				"package": "mypackage",
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg)

		assert.False(t, cfg.Push)
	})
}

// TestChocolateyPublisher_RenderTemplate_Good renders the embedded
// nuspec and install-script templates and spot-checks the output.
func TestChocolateyPublisher_RenderTemplate_Good(t *testing.T) {
	p := NewChocolateyPublisher()

	t.Run("renders nuspec template with data", func(t *testing.T) {
		data := chocolateyTemplateData{
			PackageName: "myapp",
			Title:       "MyApp CLI",
			Description: "My awesome CLI",
			Repository:  "owner/myapp",
			Version:     "1.2.3",
			License:     "MIT",
			BinaryName:  "myapp",
			Authors:     "owner",
			Tags:        "cli myapp",
			Checksums:   ChecksumMap{},
		}

		result, err := p.renderTemplate(io.Local, "templates/chocolatey/package.nuspec.tmpl", data)
		require.NoError(t, err)

		assert.Contains(t, result, `<id>myapp</id>`)
		assert.Contains(t, result, `<version>1.2.3</version>`)
		assert.Contains(t, result, `<title>MyApp CLI</title>`)
		assert.Contains(t, result, `<authors>owner</authors>`)
		assert.Contains(t, result, `<description>My awesome CLI</description>`)
		assert.Contains(t, result, `<tags>cli myapp</tags>`)
		assert.Contains(t, result, "projectUrl>https://github.com/owner/myapp")
		assert.Contains(t, result, "releaseNotes>https://github.com/owner/myapp/releases/tag/v1.2.3")
	})

	t.Run("renders install script template with data", func(t *testing.T) {
		data := chocolateyTemplateData{
			PackageName: "myapp",
			Repository:  "owner/myapp",
			Version:     "1.2.3",
			BinaryName:  "myapp",
			Checksums: ChecksumMap{
				WindowsAmd64: "abc123def456",
			},
		}

		result, err := p.renderTemplate(io.Local, "templates/chocolatey/tools/chocolateyinstall.ps1.tmpl", data)
		require.NoError(t, err)

		assert.Contains(t, result, "$ErrorActionPreference = 'Stop'")
		assert.Contains(t, result, "https://github.com/owner/myapp/releases/download/v1.2.3/myapp-windows-amd64.zip")
		assert.Contains(t, result, "packageName = 'myapp'")
		assert.Contains(t, result, "checksum64 = 'abc123def456'")
		assert.Contains(t, result, "checksumType64 = 'sha256'")
		assert.Contains(t, result, "Install-ChocolateyZipPackage")
	})
}

// TestChocolateyPublisher_RenderTemplate_Bad checks the error path for a
// template name that exists neither on disk nor in the embedded FS.
func TestChocolateyPublisher_RenderTemplate_Bad(t *testing.T) {
	p := NewChocolateyPublisher()

	t.Run("returns error for non-existent template", func(t *testing.T) {
		data := chocolateyTemplateData{}
		_, err := p.renderTemplate(io.Local, "templates/chocolatey/nonexistent.tmpl", data)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to read template")
	})
}

// TestChocolateyPublisher_DryRunPublish_Good captures stdout
// (dryRunPublish prints via fmt) and asserts on the report's key lines
// for both push modes.
func TestChocolateyPublisher_DryRunPublish_Good(t *testing.T) {
	p := NewChocolateyPublisher()

	t.Run("outputs expected dry run information", func(t *testing.T) {
		// Redirect stdout into a pipe for the duration of the call.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		data := chocolateyTemplateData{
			PackageName: "myapp",
			Version:     "1.0.0",
			Repository:  "owner/repo",
			BinaryName:  "myapp",
			Authors:     "owner",
			Tags:        "cli myapp",
			Checksums:   ChecksumMap{},
		}
		cfg := ChocolateyConfig{
			Push: false,
		}

		err := p.dryRunPublish(io.Local, data, cfg)

		// Close the write end first so ReadFrom sees EOF, then restore stdout.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: Chocolatey Publish")
		assert.Contains(t, output, "Package: myapp")
		assert.Contains(t, output, "Version: 1.0.0")
		assert.Contains(t, output, "Push: false")
		assert.Contains(t, output, "Repository: owner/repo")
		assert.Contains(t, output, "Generated package.nuspec:")
		assert.Contains(t, output, "Generated chocolateyinstall.ps1:")
		assert.Contains(t, output, "Would generate package files only (push=false)")
		assert.Contains(t, output, "END DRY RUN")
	})

	t.Run("shows push message when push is enabled", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		data := chocolateyTemplateData{
			PackageName: "myapp",
			Version:     "1.0.0",
			BinaryName:  "myapp",
			Authors:     "owner",
			Tags:        "cli",
			Checksums:   ChecksumMap{},
		}
		cfg := ChocolateyConfig{
			Push: true,
		}

		err := p.dryRunPublish(io.Local, data, cfg)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "Push: true")
		assert.Contains(t, output, "Would push to Chocolatey community repo")
	})
}

// TestChocolateyPublisher_ExecutePublish_Bad verifies that pushing
// without CHOCOLATEY_API_KEY fails before invoking the choco CLI.
func TestChocolateyPublisher_ExecutePublish_Bad(t *testing.T) {
	p := NewChocolateyPublisher()

	t.Run("fails when CHOCOLATEY_API_KEY not set for push", func(t *testing.T) {
		// Ensure CHOCOLATEY_API_KEY is not set; restore it afterwards.
		oldKey := os.Getenv("CHOCOLATEY_API_KEY")
		_ = os.Unsetenv("CHOCOLATEY_API_KEY")
		defer func() {
			if oldKey != "" {
				_ = os.Setenv("CHOCOLATEY_API_KEY", oldKey)
			}
		}()

		// Create a temp directory for the test
		tmpDir, err := os.MkdirTemp("", "choco-test-*")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		data := chocolateyTemplateData{
			PackageName: "testpkg",
			Version:     "1.0.0",
			BinaryName:  "testpkg",
			Repository:  "owner/repo",
			Authors:     "owner",
			Tags:        "cli",
			Checksums:   ChecksumMap{},
		}

		err = p.pushToChocolatey(context.TODO(), tmpDir, data)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "CHOCOLATEY_API_KEY environment variable is required")
	})
}

// TestChocolateyConfig_Defaults_Good documents the zero-value defaults
// produced by parseConfig when no extended config is supplied.
func TestChocolateyConfig_Defaults_Good(t *testing.T) {
	t.Run("has sensible defaults", func(t *testing.T) {
		p := NewChocolateyPublisher()
		pubCfg := PublisherConfig{Type: "chocolatey"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		cfg := p.parseConfig(pubCfg, relCfg)

		assert.Empty(t, cfg.Package)
		assert.False(t, cfg.Push)
		assert.Nil(t, cfg.Official)
	})
}

// TestChocolateyTemplateData_Good is a field-coverage check on the
// template data struct.
func TestChocolateyTemplateData_Good(t *testing.T) {
	t.Run("struct has all expected fields", func(t *testing.T) {
		data := chocolateyTemplateData{
			PackageName: "myapp",
			Title:       "MyApp CLI",
			Description: "description",
			Repository:  "org/repo",
			Version:     "1.0.0",
			License:     "MIT",
			BinaryName:  "myapp",
			Authors:     "org",
			Tags:        "cli tool",
			Checksums: ChecksumMap{
				WindowsAmd64: "hash1",
			},
		}

		assert.Equal(t, "myapp", data.PackageName)
		assert.Equal(t, "MyApp CLI", data.Title)
		assert.Equal(t, "description", data.Description)
		assert.Equal(t, "org/repo", data.Repository)
		assert.Equal(t, "1.0.0", data.Version)
		assert.Equal(t, "MIT", data.License)
		assert.Equal(t, "myapp", data.BinaryName)
		assert.Equal(t, "org", data.Authors)
		assert.Equal(t, "cli tool", data.Tags)
		assert.Equal(t, "hash1", data.Checksums.WindowsAmd64)
	})
}
|
||||
|
|
@ -1,279 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DockerConfig holds configuration for the Docker publisher.
type DockerConfig struct {
	// Registry is the container registry (default: ghcr.io).
	Registry string `yaml:"registry"`
	// Image is the image name in owner/repo format.
	Image string `yaml:"image"`
	// Dockerfile is the path to the Dockerfile (default: Dockerfile).
	Dockerfile string `yaml:"dockerfile"`
	// Platforms are the target platforms (linux/amd64, linux/arm64).
	Platforms []string `yaml:"platforms"`
	// Tags are additional tags to apply (supports {{.Version}} template).
	Tags []string `yaml:"tags"`
	// BuildArgs are additional build arguments.
	BuildArgs map[string]string `yaml:"build_args"`
}

// DockerPublisher builds and publishes Docker images. It is stateless;
// all inputs arrive through Publish.
type DockerPublisher struct{}

// NewDockerPublisher creates a new Docker publisher.
func NewDockerPublisher() *DockerPublisher {
	return &DockerPublisher{}
}

// Name returns the publisher's identifier ("docker").
func (p *DockerPublisher) Name() string {
	return "docker"
}
|
||||
|
||||
// Publish builds and pushes Docker images.
//
// It validates the docker CLI and the Dockerfile up front so both the
// dry-run and real paths fail fast on a broken environment.
func (p *DockerPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	// Validate docker CLI is available
	if err := validateDockerCli(); err != nil {
		return err
	}

	// Parse Docker-specific config from publisher config
	dockerCfg := p.parseConfig(pubCfg, relCfg, release.ProjectDir)

	// Validate Dockerfile exists
	if !release.FS.Exists(dockerCfg.Dockerfile) {
		return fmt.Errorf("docker.Publish: Dockerfile not found: %s", dockerCfg.Dockerfile)
	}

	if dryRun {
		return p.dryRunPublish(release, dockerCfg)
	}

	return p.executePublish(ctx, release, dockerCfg)
}
|
||||
|
||||
// parseConfig extracts Docker-specific configuration.
|
||||
func (p *DockerPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig, projectDir string) DockerConfig {
|
||||
cfg := DockerConfig{
|
||||
Registry: "ghcr.io",
|
||||
Image: "",
|
||||
Dockerfile: filepath.Join(projectDir, "Dockerfile"),
|
||||
Platforms: []string{"linux/amd64", "linux/arm64"},
|
||||
Tags: []string{"latest", "{{.Version}}"},
|
||||
BuildArgs: make(map[string]string),
|
||||
}
|
||||
|
||||
// Try to get image from repository config
|
||||
if relCfg != nil && relCfg.GetRepository() != "" {
|
||||
cfg.Image = relCfg.GetRepository()
|
||||
}
|
||||
|
||||
// Override from extended config if present
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if registry, ok := ext["registry"].(string); ok && registry != "" {
|
||||
cfg.Registry = registry
|
||||
}
|
||||
if image, ok := ext["image"].(string); ok && image != "" {
|
||||
cfg.Image = image
|
||||
}
|
||||
if dockerfile, ok := ext["dockerfile"].(string); ok && dockerfile != "" {
|
||||
if filepath.IsAbs(dockerfile) {
|
||||
cfg.Dockerfile = dockerfile
|
||||
} else {
|
||||
cfg.Dockerfile = filepath.Join(projectDir, dockerfile)
|
||||
}
|
||||
}
|
||||
if platforms, ok := ext["platforms"].([]any); ok && len(platforms) > 0 {
|
||||
cfg.Platforms = make([]string, 0, len(platforms))
|
||||
for _, plat := range platforms {
|
||||
if s, ok := plat.(string); ok {
|
||||
cfg.Platforms = append(cfg.Platforms, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
if tags, ok := ext["tags"].([]any); ok && len(tags) > 0 {
|
||||
cfg.Tags = make([]string, 0, len(tags))
|
||||
for _, tag := range tags {
|
||||
if s, ok := tag.(string); ok {
|
||||
cfg.Tags = append(cfg.Tags, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
if buildArgs, ok := ext["build_args"].(map[string]any); ok {
|
||||
for k, v := range buildArgs {
|
||||
if s, ok := v.(string); ok {
|
||||
cfg.BuildArgs[k] = s
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// dryRunPublish shows what would be done without actually building.
|
||||
func (p *DockerPublisher) dryRunPublish(release *Release, cfg DockerConfig) error {
|
||||
fmt.Println()
|
||||
fmt.Println("=== DRY RUN: Docker Build & Push ===")
|
||||
fmt.Println()
|
||||
fmt.Printf("Version: %s\n", release.Version)
|
||||
fmt.Printf("Registry: %s\n", cfg.Registry)
|
||||
fmt.Printf("Image: %s\n", cfg.Image)
|
||||
fmt.Printf("Dockerfile: %s\n", cfg.Dockerfile)
|
||||
fmt.Printf("Platforms: %s\n", strings.Join(cfg.Platforms, ", "))
|
||||
fmt.Println()
|
||||
|
||||
// Resolve tags
|
||||
tags := p.resolveTags(cfg.Tags, release.Version)
|
||||
fmt.Println("Tags to be applied:")
|
||||
for _, tag := range tags {
|
||||
fullTag := p.buildFullTag(cfg.Registry, cfg.Image, tag)
|
||||
fmt.Printf(" - %s\n", fullTag)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
fmt.Println("Would execute command:")
|
||||
args := p.buildBuildxArgs(cfg, tags, release.Version)
|
||||
fmt.Printf(" docker %s\n", strings.Join(args, " "))
|
||||
|
||||
if len(cfg.BuildArgs) > 0 {
|
||||
fmt.Println()
|
||||
fmt.Println("Build arguments:")
|
||||
for k, v := range cfg.BuildArgs {
|
||||
fmt.Printf(" %s=%s\n", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("=== END DRY RUN ===")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// executePublish builds and pushes Docker images.
|
||||
func (p *DockerPublisher) executePublish(ctx context.Context, release *Release, cfg DockerConfig) error {
|
||||
// Ensure buildx is available and builder is set up
|
||||
if err := p.ensureBuildx(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve tags
|
||||
tags := p.resolveTags(cfg.Tags, release.Version)
|
||||
|
||||
// Build the docker buildx command
|
||||
args := p.buildBuildxArgs(cfg, tags, release.Version)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", args...)
|
||||
cmd.Dir = release.ProjectDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
fmt.Printf("Building and pushing Docker image: %s\n", cfg.Image)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("docker.Publish: buildx build failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolveTags expands template variables in tags.
|
||||
func (p *DockerPublisher) resolveTags(tags []string, version string) []string {
|
||||
resolved := make([]string, 0, len(tags))
|
||||
for _, tag := range tags {
|
||||
// Replace {{.Version}} with actual version
|
||||
resolvedTag := strings.ReplaceAll(tag, "{{.Version}}", version)
|
||||
// Also support simpler {{Version}} syntax
|
||||
resolvedTag = strings.ReplaceAll(resolvedTag, "{{Version}}", version)
|
||||
resolved = append(resolved, resolvedTag)
|
||||
}
|
||||
return resolved
|
||||
}
|
||||
|
||||
// buildFullTag builds the full image tag including registry.
|
||||
func (p *DockerPublisher) buildFullTag(registry, image, tag string) string {
|
||||
if registry != "" {
|
||||
return fmt.Sprintf("%s/%s:%s", registry, image, tag)
|
||||
}
|
||||
return fmt.Sprintf("%s:%s", image, tag)
|
||||
}
|
||||
|
||||
// buildBuildxArgs builds the arguments for docker buildx build command.
|
||||
func (p *DockerPublisher) buildBuildxArgs(cfg DockerConfig, tags []string, version string) []string {
|
||||
args := []string{"buildx", "build"}
|
||||
|
||||
// Multi-platform support
|
||||
if len(cfg.Platforms) > 0 {
|
||||
args = append(args, "--platform", strings.Join(cfg.Platforms, ","))
|
||||
}
|
||||
|
||||
// Add all tags
|
||||
for _, tag := range tags {
|
||||
fullTag := p.buildFullTag(cfg.Registry, cfg.Image, tag)
|
||||
args = append(args, "-t", fullTag)
|
||||
}
|
||||
|
||||
// Dockerfile path
|
||||
dockerfilePath := cfg.Dockerfile
|
||||
args = append(args, "-f", dockerfilePath)
|
||||
|
||||
// Build arguments
|
||||
for k, v := range cfg.BuildArgs {
|
||||
// Expand version in build args
|
||||
expandedValue := strings.ReplaceAll(v, "{{.Version}}", version)
|
||||
expandedValue = strings.ReplaceAll(expandedValue, "{{Version}}", version)
|
||||
args = append(args, "--build-arg", fmt.Sprintf("%s=%s", k, expandedValue))
|
||||
}
|
||||
|
||||
// Always add VERSION build arg
|
||||
args = append(args, "--build-arg", fmt.Sprintf("VERSION=%s", version))
|
||||
|
||||
// Push the image
|
||||
args = append(args, "--push")
|
||||
|
||||
// Build context (current directory)
|
||||
args = append(args, ".")
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// ensureBuildx ensures docker buildx is available and has a builder.
// It probes "docker buildx version" first; if buildx exists but
// "buildx inspect --bootstrap" fails (no usable builder), it attempts to
// create and select one via "buildx create --use --bootstrap".
func (p *DockerPublisher) ensureBuildx(ctx context.Context) error {
	// Check if buildx is available
	cmd := exec.CommandContext(ctx, "docker", "buildx", "version")
	if err := cmd.Run(); err != nil {
		return errors.New("docker: buildx is not available. Install it from https://docs.docker.com/buildx/working-with-buildx/")
	}

	// Check if we have a builder, create one if not
	cmd = exec.CommandContext(ctx, "docker", "buildx", "inspect", "--bootstrap")
	if err := cmd.Run(); err != nil {
		// Try to create a builder; stream docker's output so the user can
		// see bootstrap progress or failure details.
		cmd = exec.CommandContext(ctx, "docker", "buildx", "create", "--use", "--bootstrap")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("docker: failed to create buildx builder: %w", err)
		}
	}

	return nil
}
|
||||
|
||||
// validateDockerCli checks if the docker CLI is available.
// It actually invokes "docker --version" rather than just looking the
// binary up on PATH, so a broken installation is also detected.
func validateDockerCli() error {
	if err := exec.Command("docker", "--version").Run(); err != nil {
		return errors.New("docker: docker CLI not found. Install it from https://docs.docker.com/get-docker/")
	}
	return nil
}
|
||||
|
|
@ -1,810 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDockerPublisher_Name_Good verifies the publisher's identifier string.
func TestDockerPublisher_Name_Good(t *testing.T) {
	t.Run("returns docker", func(t *testing.T) {
		p := NewDockerPublisher()
		assert.Equal(t, "docker", p.Name())
	})
}
|
||||
|
||||
// TestDockerPublisher_ParseConfig_Good covers parseConfig's defaults,
// extended-config overrides, and absolute-vs-relative Dockerfile paths.
func TestDockerPublisher_ParseConfig_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("uses defaults when no extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{Type: "docker"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		assert.Equal(t, "ghcr.io", cfg.Registry)
		assert.Equal(t, "owner/repo", cfg.Image)
		assert.Equal(t, "/project/Dockerfile", cfg.Dockerfile)
		assert.Equal(t, []string{"linux/amd64", "linux/arm64"}, cfg.Platforms)
		assert.Equal(t, []string{"latest", "{{.Version}}"}, cfg.Tags)
	})

	t.Run("parses extended config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"registry":   "docker.io",
				"image":      "myorg/myimage",
				"dockerfile": "docker/Dockerfile.prod",
				"platforms":  []any{"linux/amd64"},
				"tags":       []any{"latest", "stable", "{{.Version}}"},
				"build_args": map[string]any{
					"GO_VERSION": "1.21",
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		assert.Equal(t, "docker.io", cfg.Registry)
		assert.Equal(t, "myorg/myimage", cfg.Image)
		assert.Equal(t, "/project/docker/Dockerfile.prod", cfg.Dockerfile)
		assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms)
		assert.Equal(t, []string{"latest", "stable", "{{.Version}}"}, cfg.Tags)
		assert.Equal(t, "1.21", cfg.BuildArgs["GO_VERSION"])
	})

	t.Run("handles absolute dockerfile path", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"dockerfile": "/absolute/path/Dockerfile",
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}
		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		// Absolute paths must be used verbatim, not joined onto projectDir.
		assert.Equal(t, "/absolute/path/Dockerfile", cfg.Dockerfile)
	})
}
|
||||
|
||||
// TestDockerPublisher_ResolveTags_Good verifies version-template expansion
// for both {{.Version}} and the shorthand {{Version}} syntax.
func TestDockerPublisher_ResolveTags_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("resolves version template", func(t *testing.T) {
		tags := p.resolveTags([]string{"latest", "{{.Version}}", "stable"}, "v1.2.3")

		assert.Equal(t, []string{"latest", "v1.2.3", "stable"}, tags)
	})

	t.Run("handles simple version syntax", func(t *testing.T) {
		tags := p.resolveTags([]string{"{{Version}}"}, "v1.0.0")

		assert.Equal(t, []string{"v1.0.0"}, tags)
	})

	t.Run("handles no templates", func(t *testing.T) {
		tags := p.resolveTags([]string{"latest", "stable"}, "v1.2.3")

		assert.Equal(t, []string{"latest", "stable"}, tags)
	})
}
|
||||
|
||||
// TestDockerPublisher_BuildFullTag_Good is a table-driven test of
// registry/image/tag joining, including the empty-registry case.
func TestDockerPublisher_BuildFullTag_Good(t *testing.T) {
	p := NewDockerPublisher()

	tests := []struct {
		name     string
		registry string
		image    string
		tag      string
		expected string
	}{
		{
			name:     "with registry",
			registry: "ghcr.io",
			image:    "owner/repo",
			tag:      "v1.0.0",
			expected: "ghcr.io/owner/repo:v1.0.0",
		},
		{
			name:     "without registry",
			registry: "",
			image:    "myimage",
			tag:      "latest",
			expected: "myimage:latest",
		},
		{
			name:     "docker hub",
			registry: "docker.io",
			image:    "library/nginx",
			tag:      "alpine",
			expected: "docker.io/library/nginx:alpine",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tag := p.buildFullTag(tc.registry, tc.image, tc.tag)
			assert.Equal(t, tc.expected, tag)
		})
	}
}
|
||||
|
||||
// TestDockerPublisher_BuildBuildxArgs_Good checks the structure of the
// generated "docker buildx build" argument list: platform flags, tags,
// Dockerfile path, --push, the build context, and build-arg handling
// (including the implicit VERSION arg and template expansion).
func TestDockerPublisher_BuildBuildxArgs_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("builds basic args", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{"linux/amd64", "linux/arm64"},
			BuildArgs:  make(map[string]string),
		}
		tags := []string{"latest", "v1.0.0"}

		args := p.buildBuildxArgs(cfg, tags, "v1.0.0")

		assert.Contains(t, args, "buildx")
		assert.Contains(t, args, "build")
		assert.Contains(t, args, "--platform")
		assert.Contains(t, args, "linux/amd64,linux/arm64")
		assert.Contains(t, args, "-t")
		assert.Contains(t, args, "ghcr.io/owner/repo:latest")
		assert.Contains(t, args, "ghcr.io/owner/repo:v1.0.0")
		assert.Contains(t, args, "-f")
		assert.Contains(t, args, "/project/Dockerfile")
		assert.Contains(t, args, "--push")
		assert.Contains(t, args, ".")
	})

	t.Run("includes build args", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{"linux/amd64"},
			BuildArgs: map[string]string{
				"GO_VERSION": "1.21",
				"APP_NAME":   "myapp",
			},
		}
		tags := []string{"latest"}

		args := p.buildBuildxArgs(cfg, tags, "v1.0.0")

		assert.Contains(t, args, "--build-arg")
		// Check that build args are present (order may vary)
		foundGoVersion := false
		foundAppName := false
		foundVersion := false
		for i, arg := range args {
			if arg == "--build-arg" && i+1 < len(args) {
				if args[i+1] == "GO_VERSION=1.21" {
					foundGoVersion = true
				}
				if args[i+1] == "APP_NAME=myapp" {
					foundAppName = true
				}
				if args[i+1] == "VERSION=v1.0.0" {
					foundVersion = true
				}
			}
		}
		assert.True(t, foundGoVersion, "GO_VERSION build arg not found")
		assert.True(t, foundAppName, "APP_NAME build arg not found")
		assert.True(t, foundVersion, "VERSION build arg not found")
	})

	t.Run("expands version in build args", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{"linux/amd64"},
			BuildArgs: map[string]string{
				"APP_VERSION": "{{.Version}}",
			},
		}
		tags := []string{"latest"}

		args := p.buildBuildxArgs(cfg, tags, "v2.0.0")

		foundExpandedVersion := false
		for i, arg := range args {
			if arg == "--build-arg" && i+1 < len(args) {
				if args[i+1] == "APP_VERSION=v2.0.0" {
					foundExpandedVersion = true
				}
			}
		}
		assert.True(t, foundExpandedVersion, "APP_VERSION should be expanded to v2.0.0")
	})
}
|
||||
|
||||
// TestDockerPublisher_Publish_Bad is an integration-level failure test:
// Publish must error when the configured Dockerfile does not exist.
func TestDockerPublisher_Publish_Bad(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	p := NewDockerPublisher()

	t.Run("fails when dockerfile not found", func(t *testing.T) {
		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/nonexistent",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"dockerfile": "/nonexistent/Dockerfile",
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "Dockerfile not found")
	})
}
|
||||
|
||||
// TestDockerConfig_Defaults_Good sanity-checks the default DockerConfig
// produced by parseConfig with no extended configuration.
func TestDockerConfig_Defaults_Good(t *testing.T) {
	t.Run("has sensible defaults", func(t *testing.T) {
		p := NewDockerPublisher()
		pubCfg := PublisherConfig{Type: "docker"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		// Verify defaults
		assert.Equal(t, "ghcr.io", cfg.Registry)
		assert.Equal(t, "owner/repo", cfg.Image)
		assert.Len(t, cfg.Platforms, 2)
		assert.Contains(t, cfg.Platforms, "linux/amd64")
		assert.Contains(t, cfg.Platforms, "linux/arm64")
		assert.Contains(t, cfg.Tags, "latest")
	})
}
|
||||
|
||||
// TestDockerPublisher_DryRunPublish_Good captures stdout (by swapping
// os.Stdout for a pipe) and verifies the dry-run report contains the
// version, registry, image, Dockerfile, platforms, resolved tags, the
// docker command preview, and build arguments when configured.
func TestDockerPublisher_DryRunPublish_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("outputs expected dry run information", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/project",
			FS:         io.Local,
		}
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{"linux/amd64", "linux/arm64"},
			Tags:       []string{"latest", "{{.Version}}"},
			BuildArgs:  make(map[string]string),
		}

		err := p.dryRunPublish(release, cfg)

		// Restore stdout before asserting so test failures print normally.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: Docker Build & Push")
		assert.Contains(t, output, "Version: v1.0.0")
		assert.Contains(t, output, "Registry: ghcr.io")
		assert.Contains(t, output, "Image: owner/repo")
		assert.Contains(t, output, "Dockerfile: /project/Dockerfile")
		assert.Contains(t, output, "Platforms: linux/amd64, linux/arm64")
		assert.Contains(t, output, "Tags to be applied:")
		assert.Contains(t, output, "ghcr.io/owner/repo:latest")
		assert.Contains(t, output, "ghcr.io/owner/repo:v1.0.0")
		assert.Contains(t, output, "Would execute command:")
		assert.Contains(t, output, "docker buildx build")
		assert.Contains(t, output, "END DRY RUN")
	})

	t.Run("shows build args when present", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/project",
			FS:         io.Local,
		}
		cfg := DockerConfig{
			Registry:   "docker.io",
			Image:      "myorg/myapp",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{"linux/amd64"},
			Tags:       []string{"latest"},
			BuildArgs: map[string]string{
				"GO_VERSION": "1.21",
				"APP_NAME":   "myapp",
			},
		}

		err := p.dryRunPublish(release, cfg)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "Build arguments:")
		assert.Contains(t, output, "GO_VERSION=1.21")
		assert.Contains(t, output, "APP_NAME=myapp")
	})

	t.Run("handles single platform", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v2.0.0",
			ProjectDir: "/project",
			FS:         io.Local,
		}
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/project/Dockerfile.prod",
			Platforms:  []string{"linux/amd64"},
			Tags:       []string{"stable"},
			BuildArgs:  make(map[string]string),
		}

		err := p.dryRunPublish(release, cfg)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "Platforms: linux/amd64")
		assert.Contains(t, output, "ghcr.io/owner/repo:stable")
	})
}
|
||||
|
||||
// TestDockerPublisher_ParseConfig_EdgeCases_Good exercises parseConfig
// with nil/empty release configs, override precedence, and non-string
// values inside build_args.
func TestDockerPublisher_ParseConfig_EdgeCases_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("handles nil release config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"image": "custom/image",
			},
		}

		cfg := p.parseConfig(pubCfg, nil, "/project")

		assert.Equal(t, "custom/image", cfg.Image)
		assert.Equal(t, "ghcr.io", cfg.Registry)
	})

	t.Run("handles empty repository in release config", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"image": "fallback/image",
			},
		}
		relCfg := &mockReleaseConfig{repository: ""}

		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		assert.Equal(t, "fallback/image", cfg.Image)
	})

	t.Run("extended config overrides repository image", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"image": "override/image",
			},
		}
		relCfg := &mockReleaseConfig{repository: "original/repo"}

		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		assert.Equal(t, "override/image", cfg.Image)
	})

	t.Run("handles mixed build args types", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"build_args": map[string]any{
					"STRING_ARG": "value",
					"INT_ARG":    123, // Non-string value should be skipped
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		cfg := p.parseConfig(pubCfg, relCfg, "/project")

		assert.Equal(t, "value", cfg.BuildArgs["STRING_ARG"])
		_, exists := cfg.BuildArgs["INT_ARG"]
		assert.False(t, exists, "non-string build arg should not be included")
	})
}
|
||||
|
||||
// TestDockerPublisher_ResolveTags_EdgeCases_Good covers empty tag lists,
// multiple placeholders per tag, and mixed template syntaxes.
func TestDockerPublisher_ResolveTags_EdgeCases_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("handles empty tags", func(t *testing.T) {
		tags := p.resolveTags([]string{}, "v1.0.0")
		assert.Empty(t, tags)
	})

	t.Run("handles multiple version placeholders", func(t *testing.T) {
		tags := p.resolveTags([]string{"{{.Version}}", "prefix-{{.Version}}", "{{.Version}}-suffix"}, "v1.2.3")
		assert.Equal(t, []string{"v1.2.3", "prefix-v1.2.3", "v1.2.3-suffix"}, tags)
	})

	t.Run("handles mixed template formats", func(t *testing.T) {
		tags := p.resolveTags([]string{"{{.Version}}", "{{Version}}", "latest"}, "v3.0.0")
		assert.Equal(t, []string{"v3.0.0", "v3.0.0", "latest"}, tags)
	})
}
|
||||
|
||||
// TestDockerPublisher_BuildBuildxArgs_EdgeCases_Good checks edge cases of
// argument generation: no --platform when platforms is empty, version
// template expansion in build args, and empty-registry tag formatting.
func TestDockerPublisher_BuildBuildxArgs_EdgeCases_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("handles empty platforms", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{},
			BuildArgs:  make(map[string]string),
		}

		args := p.buildBuildxArgs(cfg, []string{"latest"}, "v1.0.0")

		assert.Contains(t, args, "buildx")
		assert.Contains(t, args, "build")
		// Should not have --platform if empty
		foundPlatform := false
		for i, arg := range args {
			if arg == "--platform" {
				foundPlatform = true
				// Check the next arg exists (it shouldn't be empty)
				if i+1 < len(args) && args[i+1] == "" {
					t.Error("platform argument should not be empty string")
				}
			}
		}
		assert.False(t, foundPlatform, "should not include --platform when platforms is empty")
	})

	t.Run("handles version expansion in build args", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "/Dockerfile",
			Platforms:  []string{"linux/amd64"},
			BuildArgs: map[string]string{
				"VERSION":      "{{.Version}}",
				"SIMPLE_VER":   "{{Version}}",
				"STATIC_VALUE": "static",
			},
		}

		args := p.buildBuildxArgs(cfg, []string{"latest"}, "v2.5.0")

		foundVersionArg := false
		foundSimpleArg := false
		foundStaticArg := false
		foundAutoVersion := false

		for i, arg := range args {
			if arg == "--build-arg" && i+1 < len(args) {
				switch args[i+1] {
				case "VERSION=v2.5.0":
					foundVersionArg = true
				case "SIMPLE_VER=v2.5.0":
					foundSimpleArg = true
				case "STATIC_VALUE=static":
					foundStaticArg = true
				}
				// Auto-added VERSION build arg
				if args[i+1] == "VERSION=v2.5.0" {
					foundAutoVersion = true
				}
			}
		}

		// Note: VERSION is both in BuildArgs and auto-added, so we just check it exists
		assert.True(t, foundVersionArg || foundAutoVersion, "VERSION build arg not found")
		assert.True(t, foundSimpleArg, "SIMPLE_VER build arg not expanded")
		assert.True(t, foundStaticArg, "STATIC_VALUE build arg not found")
	})

	t.Run("handles empty registry", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "",
			Image:      "localimage",
			Dockerfile: "/Dockerfile",
			Platforms:  []string{"linux/amd64"},
			BuildArgs:  make(map[string]string),
		}

		args := p.buildBuildxArgs(cfg, []string{"latest"}, "v1.0.0")

		assert.Contains(t, args, "-t")
		assert.Contains(t, args, "localimage:latest")
	})
}
|
||||
|
||||
// TestDockerPublisher_Publish_DryRun_Good drives Publish in dry-run mode
// against real temp-dir Dockerfiles (default and custom paths). Skipped
// when the docker CLI is absent, since Publish validates it up front.
func TestDockerPublisher_Publish_DryRun_Good(t *testing.T) {
	// Skip if docker CLI is not available - dry run still validates docker is installed
	if err := validateDockerCli(); err != nil {
		t.Skip("skipping test: docker CLI not available")
	}

	p := NewDockerPublisher()

	t.Run("dry run succeeds with valid Dockerfile", func(t *testing.T) {
		// Create temp directory with Dockerfile
		tmpDir, err := os.MkdirTemp("", "docker-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
		err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644)
		require.NoError(t, err)

		// Capture stdout so the dry-run report can be inspected.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "docker"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "DRY RUN: Docker Build & Push")
	})

	t.Run("dry run uses custom dockerfile path", func(t *testing.T) {
		// Create temp directory with custom Dockerfile
		tmpDir, err := os.MkdirTemp("", "docker-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		customDir := filepath.Join(tmpDir, "docker")
		err = os.MkdirAll(customDir, 0755)
		require.NoError(t, err)

		dockerfilePath := filepath.Join(customDir, "Dockerfile.prod")
		err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644)
		require.NoError(t, err)

		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"dockerfile": "docker/Dockerfile.prod",
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "Dockerfile.prod")
	})
}
|
||||
|
||||
// TestDockerPublisher_Publish_Validation_Bad covers Publish's two up-front
// validation failures. Each subtest skips itself depending on whether the
// docker CLI is installed, so exactly one branch runs per environment.
func TestDockerPublisher_Publish_Validation_Bad(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("fails when Dockerfile not found with docker installed", func(t *testing.T) {
		if err := validateDockerCli(); err != nil {
			t.Skip("skipping test: docker CLI not available")
		}

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/nonexistent/path",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "docker"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "Dockerfile not found")
	})

	t.Run("fails when docker CLI not available", func(t *testing.T) {
		if err := validateDockerCli(); err == nil {
			t.Skip("skipping test: docker CLI is available")
		}

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/tmp",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "docker"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "docker CLI not found")
	})
}
|
||||
|
||||
// TestValidateDockerCli_Good tolerates both environments: with docker
// installed it expects nil, without it the specific error message.
func TestValidateDockerCli_Good(t *testing.T) {
	t.Run("returns nil when docker is installed", func(t *testing.T) {
		err := validateDockerCli()
		if err != nil {
			// Docker is not installed, which is fine for this test
			assert.Contains(t, err.Error(), "docker CLI not found")
		}
		// If err is nil, docker is installed - that's OK
	})
}
|
||||
|
||||
// TestDockerPublisher_Publish_WithCLI_Good covers DockerPublisher.Publish
// end-to-end in dry-run mode (full and minimal config) and the
// missing-Dockerfile failure in non-dry-run mode. The whole test is
// skipped when the docker CLI is unavailable.
func TestDockerPublisher_Publish_WithCLI_Good(t *testing.T) {
	// These tests run only when docker CLI is available
	if err := validateDockerCli(); err != nil {
		t.Skip("skipping test: docker CLI not available")
	}

	p := NewDockerPublisher()

	t.Run("dry run succeeds with all config options", func(t *testing.T) {
		tmpDir, err := os.MkdirTemp("", "docker-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		// Publish requires a Dockerfile in the project directory.
		dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
		err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644)
		require.NoError(t, err)

		// Redirect stdout through a pipe so the dry-run report can be asserted on.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"registry":   "docker.io",
				"image":      "myorg/myapp",
				"platforms":  []any{"linux/amd64", "linux/arm64"},
				"tags":       []any{"latest", "{{.Version}}", "stable"},
				"build_args": map[string]any{"GO_VERSION": "1.21"},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)

		// Restore stdout before asserting so test failures print normally.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "DRY RUN: Docker Build & Push")
		assert.Contains(t, output, "docker.io")
		assert.Contains(t, output, "myorg/myapp")
	})

	t.Run("dry run with nil relCfg uses extended image", func(t *testing.T) {
		tmpDir, err := os.MkdirTemp("", "docker-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		dockerfilePath := filepath.Join(tmpDir, "Dockerfile")
		err = os.WriteFile(dockerfilePath, []byte("FROM alpine:latest\n"), 0644)
		require.NoError(t, err)

		// Capture stdout for the dry-run report.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"image": "standalone/image",
			},
		}

		err = p.Publish(context.TODO(), release, pubCfg, nil, true) // nil relCfg

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "standalone/image")
	})

	t.Run("fails with non-existent Dockerfile in non-dry-run", func(t *testing.T) {
		tmpDir, err := os.MkdirTemp("", "docker-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		// Don't create a Dockerfile
		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "docker"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err = p.Publish(context.TODO(), release, pubCfg, relCfg, false)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "Dockerfile not found")
	})
}
|
||||
|
|
@ -1,234 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GitHubPublisher publishes releases to GitHub using the gh CLI.
// The zero value is usable; it carries no state of its own.
type GitHubPublisher struct{}

// NewGitHubPublisher constructs a ready-to-use GitHub publisher.
func NewGitHubPublisher() *GitHubPublisher {
	p := &GitHubPublisher{}
	return p
}

// Name reports the identifier under which this publisher is registered.
func (p *GitHubPublisher) Name() string {
	const id = "github"
	return id
}
|
||||
|
||||
// Publish publishes the release to GitHub.
|
||||
// Uses the gh CLI for creating releases and uploading assets.
|
||||
func (p *GitHubPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
|
||||
// Determine repository
|
||||
repo := ""
|
||||
if relCfg != nil {
|
||||
repo = relCfg.GetRepository()
|
||||
}
|
||||
if repo == "" {
|
||||
// Try to detect from git remote
|
||||
detectedRepo, err := detectRepository(release.ProjectDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("github.Publish: could not determine repository: %w", err)
|
||||
}
|
||||
repo = detectedRepo
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return p.dryRunPublish(release, pubCfg, repo)
|
||||
}
|
||||
|
||||
// Validate gh CLI is available and authenticated for actual publish
|
||||
if err := validateGhCli(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.executePublish(ctx, release, pubCfg, repo)
|
||||
}
|
||||
|
||||
// dryRunPublish shows what would be done without actually publishing.
|
||||
func (p *GitHubPublisher) dryRunPublish(release *Release, pubCfg PublisherConfig, repo string) error {
|
||||
fmt.Println()
|
||||
fmt.Println("=== DRY RUN: GitHub Release ===")
|
||||
fmt.Println()
|
||||
fmt.Printf("Repository: %s\n", repo)
|
||||
fmt.Printf("Version: %s\n", release.Version)
|
||||
fmt.Printf("Draft: %t\n", pubCfg.Draft)
|
||||
fmt.Printf("Prerelease: %t\n", pubCfg.Prerelease)
|
||||
fmt.Println()
|
||||
|
||||
fmt.Println("Would create release with command:")
|
||||
args := p.buildCreateArgs(release, pubCfg, repo)
|
||||
fmt.Printf(" gh %s\n", strings.Join(args, " "))
|
||||
fmt.Println()
|
||||
|
||||
if len(release.Artifacts) > 0 {
|
||||
fmt.Println("Would upload artifacts:")
|
||||
for _, artifact := range release.Artifacts {
|
||||
fmt.Printf(" - %s\n", filepath.Base(artifact.Path))
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Changelog:")
|
||||
fmt.Println("---")
|
||||
fmt.Println(release.Changelog)
|
||||
fmt.Println("---")
|
||||
fmt.Println()
|
||||
fmt.Println("=== END DRY RUN ===")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// executePublish actually creates the release and uploads artifacts.
|
||||
func (p *GitHubPublisher) executePublish(ctx context.Context, release *Release, pubCfg PublisherConfig, repo string) error {
|
||||
// Build the release create command
|
||||
args := p.buildCreateArgs(release, pubCfg, repo)
|
||||
|
||||
// Add artifact paths to the command
|
||||
for _, artifact := range release.Artifacts {
|
||||
args = append(args, artifact.Path)
|
||||
}
|
||||
|
||||
// Execute gh release create
|
||||
cmd := exec.CommandContext(ctx, "gh", args...)
|
||||
cmd.Dir = release.ProjectDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("github.Publish: gh release create failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildCreateArgs builds the arguments for gh release create.
|
||||
func (p *GitHubPublisher) buildCreateArgs(release *Release, pubCfg PublisherConfig, repo string) []string {
|
||||
args := []string{"release", "create", release.Version}
|
||||
|
||||
// Add repository flag
|
||||
if repo != "" {
|
||||
args = append(args, "--repo", repo)
|
||||
}
|
||||
|
||||
// Add title
|
||||
args = append(args, "--title", release.Version)
|
||||
|
||||
// Add notes (changelog)
|
||||
if release.Changelog != "" {
|
||||
args = append(args, "--notes", release.Changelog)
|
||||
} else {
|
||||
args = append(args, "--generate-notes")
|
||||
}
|
||||
|
||||
// Add draft flag
|
||||
if pubCfg.Draft {
|
||||
args = append(args, "--draft")
|
||||
}
|
||||
|
||||
// Add prerelease flag
|
||||
if pubCfg.Prerelease {
|
||||
args = append(args, "--prerelease")
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// validateGhCli checks that the gh CLI is installed and authenticated.
// Installation is probed with `gh --version`; authentication with
// `gh auth status`, whose combined output must contain "Logged in".
func validateGhCli() error {
	if err := exec.Command("gh", "--version").Run(); err != nil {
		return errors.New("github: gh CLI not found. Install it from https://cli.github.com")
	}

	// gh auth status reports on stderr, so inspect combined output.
	out, err := exec.Command("gh", "auth", "status").CombinedOutput()
	if err != nil || !strings.Contains(string(out), "Logged in") {
		return errors.New("github: not authenticated with gh CLI. Run 'gh auth login' first")
	}

	return nil
}
|
||||
|
||||
// detectRepository detects the GitHub repository from git remote.
|
||||
func detectRepository(dir string) (string, error) {
|
||||
cmd := exec.Command("git", "remote", "get-url", "origin")
|
||||
cmd.Dir = dir
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get git remote: %w", err)
|
||||
}
|
||||
|
||||
url := strings.TrimSpace(string(output))
|
||||
return parseGitHubRepo(url)
|
||||
}
|
||||
|
||||
// parseGitHubRepo extracts owner/repo from a GitHub URL.
// Supports:
//   - git@github.com:owner/repo.git
//   - https://github.com/owner/repo.git
//   - https://github.com/owner/repo
//
// Any other URL shape yields an error.
func parseGitHubRepo(url string) (string, error) {
	prefixes := [...]string{
		"git@github.com:",     // SSH format
		"https://github.com/", // HTTPS format
	}
	for _, prefix := range prefixes {
		if strings.HasPrefix(url, prefix) {
			return strings.TrimSuffix(url[len(prefix):], ".git"), nil
		}
	}
	return "", fmt.Errorf("not a GitHub URL: %s", url)
}
|
||||
|
||||
// UploadArtifact uploads a single artifact to an existing release.
// This can be used to add artifacts to a release after creation.
// gh's output is streamed to this process's stdout/stderr.
func UploadArtifact(ctx context.Context, repo, version, artifactPath string) error {
	cmd := exec.CommandContext(ctx, "gh", "release", "upload", version, artifactPath, "--repo", repo)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("github.UploadArtifact: failed to upload %s: %w", artifactPath, err)
	}
	return nil
}
|
||||
|
||||
// DeleteRelease deletes a release by tag name, answering gh's
// confirmation prompt with --yes. gh's output is streamed to this
// process's stdout/stderr.
func DeleteRelease(ctx context.Context, repo, version string) error {
	cmd := exec.CommandContext(ctx, "gh", "release", "delete", version, "--repo", repo, "--yes")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("github.DeleteRelease: failed to delete %s: %w", version, err)
	}
	return nil
}
|
||||
|
||||
// ReleaseExists checks if a release exists for the given version by
// probing `gh release view`; any command failure (missing release,
// missing repo, network error) reads as "does not exist".
func ReleaseExists(ctx context.Context, repo, version string) bool {
	err := exec.CommandContext(ctx, "gh", "release", "view", version, "--repo", repo).Run()
	return err == nil
}
|
||||
|
|
@ -1,560 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestParseGitHubRepo_Good verifies that the supported GitHub remote URL
// shapes (SSH and HTTPS, with or without a .git suffix) all reduce to
// "owner/repo".
func TestParseGitHubRepo_Good(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "SSH URL",
			input:    "git@github.com:owner/repo.git",
			expected: "owner/repo",
		},
		{
			name:     "HTTPS URL with .git",
			input:    "https://github.com/owner/repo.git",
			expected: "owner/repo",
		},
		{
			name:     "HTTPS URL without .git",
			input:    "https://github.com/owner/repo",
			expected: "owner/repo",
		},
		{
			name:     "SSH URL without .git",
			input:    "git@github.com:owner/repo",
			expected: "owner/repo",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result, err := parseGitHubRepo(tc.input)
			assert.NoError(t, err)
			assert.Equal(t, tc.expected, result)
		})
	}
}

// TestParseGitHubRepo_Bad verifies that non-GitHub remotes and plain
// paths are rejected with an error.
func TestParseGitHubRepo_Bad(t *testing.T) {
	tests := []struct {
		name  string
		input string
	}{
		{
			name:  "GitLab URL",
			input: "https://gitlab.com/owner/repo.git",
		},
		{
			name:  "Bitbucket URL",
			input: "git@bitbucket.org:owner/repo.git",
		},
		{
			name:  "Random URL",
			input: "https://example.com/something",
		},
		{
			name:  "Not a URL",
			input: "owner/repo",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			_, err := parseGitHubRepo(tc.input)
			assert.Error(t, err)
		})
	}
}
|
||||
|
||||
// TestGitHubPublisher_Name_Good pins the publisher's registry identifier.
func TestGitHubPublisher_Name_Good(t *testing.T) {
	t.Run("returns github", func(t *testing.T) {
		p := NewGitHubPublisher()
		assert.Equal(t, "github", p.Name())
	})
}

// TestNewRelease_Good checks that NewRelease copies its arguments through
// to the corresponding Release fields (nil artifacts stay nil).
func TestNewRelease_Good(t *testing.T) {
	t.Run("creates release struct", func(t *testing.T) {
		r := NewRelease("v1.0.0", nil, "changelog", "/project", io.Local)
		assert.Equal(t, "v1.0.0", r.Version)
		assert.Equal(t, "changelog", r.Changelog)
		assert.Equal(t, "/project", r.ProjectDir)
		assert.Nil(t, r.Artifacts)
	})
}

// TestNewPublisherConfig_Good checks that NewPublisherConfig copies its
// arguments through, both with and without an Extended payload.
func TestNewPublisherConfig_Good(t *testing.T) {
	t.Run("creates config struct", func(t *testing.T) {
		cfg := NewPublisherConfig("github", true, false, nil)
		assert.Equal(t, "github", cfg.Type)
		assert.True(t, cfg.Prerelease)
		assert.False(t, cfg.Draft)
		assert.Nil(t, cfg.Extended)
	})

	t.Run("creates config with extended", func(t *testing.T) {
		ext := map[string]any{"key": "value"}
		cfg := NewPublisherConfig("docker", false, false, ext)
		assert.Equal(t, "docker", cfg.Type)
		assert.Equal(t, ext, cfg.Extended)
	})
}
|
||||
|
||||
// TestBuildCreateArgs_Good covers the gh CLI argument assembly: the base
// "release create" invocation, the --draft/--prerelease flags, the
// changelog/--generate-notes choice, and the optional --repo flag.
func TestBuildCreateArgs_Good(t *testing.T) {
	p := NewGitHubPublisher()

	t.Run("basic args", func(t *testing.T) {
		release := &Release{
			Version:   "v1.0.0",
			Changelog: "## v1.0.0\n\nChanges",
			FS:        io.Local,
		}
		cfg := PublisherConfig{
			Type: "github",
		}

		args := p.buildCreateArgs(release, cfg, "owner/repo")

		assert.Contains(t, args, "release")
		assert.Contains(t, args, "create")
		assert.Contains(t, args, "v1.0.0")
		assert.Contains(t, args, "--repo")
		assert.Contains(t, args, "owner/repo")
		assert.Contains(t, args, "--title")
		assert.Contains(t, args, "--notes")
	})

	t.Run("with draft flag", func(t *testing.T) {
		release := &Release{
			Version: "v1.0.0",
			FS:      io.Local,
		}
		cfg := PublisherConfig{
			Type:  "github",
			Draft: true,
		}

		args := p.buildCreateArgs(release, cfg, "owner/repo")

		assert.Contains(t, args, "--draft")
	})

	t.Run("with prerelease flag", func(t *testing.T) {
		release := &Release{
			Version: "v1.0.0",
			FS:      io.Local,
		}
		cfg := PublisherConfig{
			Type:       "github",
			Prerelease: true,
		}

		args := p.buildCreateArgs(release, cfg, "owner/repo")

		assert.Contains(t, args, "--prerelease")
	})

	t.Run("generates notes when no changelog", func(t *testing.T) {
		// An empty changelog must switch the command to --generate-notes.
		release := &Release{
			Version:   "v1.0.0",
			Changelog: "",
			FS:        io.Local,
		}
		cfg := PublisherConfig{
			Type: "github",
		}

		args := p.buildCreateArgs(release, cfg, "owner/repo")

		assert.Contains(t, args, "--generate-notes")
	})

	t.Run("with draft and prerelease flags", func(t *testing.T) {
		release := &Release{
			Version: "v1.0.0-alpha",
			FS:      io.Local,
		}
		cfg := PublisherConfig{
			Type:       "github",
			Draft:      true,
			Prerelease: true,
		}

		args := p.buildCreateArgs(release, cfg, "owner/repo")

		assert.Contains(t, args, "--draft")
		assert.Contains(t, args, "--prerelease")
	})

	t.Run("without repo includes version", func(t *testing.T) {
		// With an empty repo, --repo must be omitted entirely.
		release := &Release{
			Version:   "v2.0.0",
			Changelog: "Some changes",
			FS:        io.Local,
		}
		cfg := PublisherConfig{
			Type: "github",
		}

		args := p.buildCreateArgs(release, cfg, "")

		assert.Contains(t, args, "release")
		assert.Contains(t, args, "create")
		assert.Contains(t, args, "v2.0.0")
		assert.NotContains(t, args, "--repo")
	})
}
|
||||
|
||||
// TestGitHubPublisher_DryRunPublish_Good captures stdout and asserts on
// the dry-run report: the header/footer markers, release metadata, the
// artifact listing, and the draft/prerelease flags.
func TestGitHubPublisher_DryRunPublish_Good(t *testing.T) {
	p := NewGitHubPublisher()

	t.Run("outputs expected dry run information", func(t *testing.T) {
		// Redirect stdout through a pipe so the report can be inspected.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "## Changes\n\n- Feature A\n- Bug fix B",
			ProjectDir: "/project",
			FS:         io.Local,
		}
		cfg := PublisherConfig{
			Type:       "github",
			Draft:      false,
			Prerelease: false,
		}

		err := p.dryRunPublish(release, cfg, "owner/repo")

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: GitHub Release")
		assert.Contains(t, output, "Repository: owner/repo")
		assert.Contains(t, output, "Version: v1.0.0")
		assert.Contains(t, output, "Draft: false")
		assert.Contains(t, output, "Prerelease: false")
		assert.Contains(t, output, "Would create release with command:")
		assert.Contains(t, output, "gh release create")
		assert.Contains(t, output, "Changelog:")
		assert.Contains(t, output, "## Changes")
		assert.Contains(t, output, "END DRY RUN")
	})

	t.Run("shows artifacts when present", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: "/project",
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/dist/myapp-darwin-amd64.tar.gz"},
				{Path: "/dist/myapp-linux-amd64.tar.gz"},
			},
		}
		cfg := PublisherConfig{Type: "github"}

		err := p.dryRunPublish(release, cfg, "owner/repo")

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		// Artifacts are listed by base name, not full path.
		assert.Contains(t, output, "Would upload artifacts:")
		assert.Contains(t, output, "myapp-darwin-amd64.tar.gz")
		assert.Contains(t, output, "myapp-linux-amd64.tar.gz")
	})

	t.Run("shows draft and prerelease flags", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0-beta",
			Changelog:  "Beta release",
			ProjectDir: "/project",
			FS:         io.Local,
		}
		cfg := PublisherConfig{
			Type:       "github",
			Draft:      true,
			Prerelease: true,
		}

		err := p.dryRunPublish(release, cfg, "owner/repo")

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "Draft: true")
		assert.Contains(t, output, "Prerelease: true")
		assert.Contains(t, output, "--draft")
		assert.Contains(t, output, "--prerelease")
	})
}
|
||||
|
||||
// TestGitHubPublisher_Publish_Good confirms that a dry-run Publish uses
// the repository supplied by the release config — and that dry run works
// without the gh CLI being installed.
func TestGitHubPublisher_Publish_Good(t *testing.T) {
	p := NewGitHubPublisher()

	t.Run("dry run uses repository from config", func(t *testing.T) {
		// Capture stdout to inspect the dry-run report.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: "/tmp",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}
		relCfg := &mockReleaseConfig{repository: "custom/repo"}

		// Dry run should succeed without needing gh CLI
		err := p.Publish(context.TODO(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "Repository: custom/repo")
	})
}
|
||||
|
||||
// TestGitHubPublisher_Publish_Bad covers the failure paths of Publish:
// a real publish without a usable gh CLI, and repository detection
// failing when no repo is configured and the project is not a git repo.
// Skipped in -short mode because it shells out.
func TestGitHubPublisher_Publish_Bad(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	p := NewGitHubPublisher()

	t.Run("fails when gh CLI not available and not dry run", func(t *testing.T) {
		// This test will fail if gh is installed but not authenticated
		// or succeed if gh is not installed
		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: "/nonexistent",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}
		relCfg := &mockReleaseConfig{repository: "owner/repo"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, false)

		// Should fail due to either gh not found or not authenticated
		assert.Error(t, err)
	})

	t.Run("fails when repository cannot be detected", func(t *testing.T) {
		// Create a temp directory that is NOT a git repo
		tmpDir, err := os.MkdirTemp("", "github-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}
		relCfg := &mockReleaseConfig{repository: ""} // Empty repository

		err = p.Publish(context.Background(), release, pubCfg, relCfg, true)

		// Should fail because detectRepository will fail on non-git dir
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "could not determine repository")
	})
}
|
||||
|
||||
// TestDetectRepository_Good builds throwaway git repos with SSH and
// HTTPS origin remotes and checks that detectRepository resolves both
// to "owner/repo".
func TestDetectRepository_Good(t *testing.T) {
	t.Run("detects repository from git remote", func(t *testing.T) {
		// Create a temp git repo
		tmpDir, err := os.MkdirTemp("", "git-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		// Initialize git repo and set remote
		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:test-owner/test-repo.git")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		repo, err := detectRepository(tmpDir)
		require.NoError(t, err)
		assert.Equal(t, "test-owner/test-repo", repo)
	})

	t.Run("detects repository from HTTPS remote", func(t *testing.T) {
		tmpDir, err := os.MkdirTemp("", "git-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		cmd = exec.Command("git", "remote", "add", "origin", "https://github.com/another-owner/another-repo.git")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		repo, err := detectRepository(tmpDir)
		require.NoError(t, err)
		assert.Equal(t, "another-owner/another-repo", repo)
	})
}

// TestDetectRepository_Bad covers the failure paths: not a git repo,
// missing directory, and an origin remote that is not on GitHub.
func TestDetectRepository_Bad(t *testing.T) {
	t.Run("fails when not a git repository", func(t *testing.T) {
		tmpDir, err := os.MkdirTemp("", "no-git-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		_, err = detectRepository(tmpDir)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to get git remote")
	})

	t.Run("fails when directory does not exist", func(t *testing.T) {
		_, err := detectRepository("/nonexistent/directory/that/does/not/exist")
		assert.Error(t, err)
	})

	t.Run("fails when remote is not GitHub", func(t *testing.T) {
		tmpDir, err := os.MkdirTemp("", "git-test")
		require.NoError(t, err)
		defer func() { _ = os.RemoveAll(tmpDir) }()

		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		cmd = exec.Command("git", "remote", "add", "origin", "git@gitlab.com:owner/repo.git")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		_, err = detectRepository(tmpDir)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "not a GitHub URL")
	})
}
|
||||
|
||||
// TestValidateGhCli_Bad runs validateGhCli against the real environment
// and, when it errors, checks the message is one of the two documented
// failures (gh missing, or installed but not authenticated).
func TestValidateGhCli_Bad(t *testing.T) {
	// This test verifies the error messages from validateGhCli
	// We can't easily mock exec.Command, but we can at least
	// verify the function exists and returns expected error types
	t.Run("returns error when gh not installed", func(t *testing.T) {
		// We can't force gh to not be installed, but we can verify
		// the function signature works correctly
		err := validateGhCli()
		if err != nil {
			// Either gh is not installed or not authenticated
			assert.True(t,
				strings.Contains(err.Error(), "gh CLI not found") ||
					strings.Contains(err.Error(), "not authenticated"),
				"unexpected error: %s", err.Error())
		}
		// If err is nil, gh is installed and authenticated - that's OK too
	})
}
|
||||
|
||||
// TestGitHubPublisher_ExecutePublish_Good exercises the executePublish
// code path against a deliberately non-existent repo/artifact, proving
// the command is assembled and run (it is expected to fail). Skipped
// when gh is unavailable.
func TestGitHubPublisher_ExecutePublish_Good(t *testing.T) {
	// These tests run only when gh CLI is available and authenticated
	if err := validateGhCli(); err != nil {
		t.Skip("skipping test: gh CLI not available or not authenticated")
	}

	p := NewGitHubPublisher()

	t.Run("executePublish builds command with artifacts", func(t *testing.T) {
		// We test the command building by checking that it fails appropriately
		// with a non-existent release (rather than testing actual release creation)
		release := &Release{
			Version:    "v999.999.999-test-nonexistent",
			Changelog:  "Test changelog",
			ProjectDir: "/tmp",
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/tmp/nonexistent-artifact.tar.gz"},
			},
		}
		cfg := PublisherConfig{
			Type:       "github",
			Draft:      true,
			Prerelease: true,
		}

		// This will fail because the artifact doesn't exist, but it proves
		// the code path runs
		err := p.executePublish(context.Background(), release, cfg, "test-owner/test-repo-nonexistent")
		assert.Error(t, err) // Expected to fail
	})
}

// TestReleaseExists_Good smoke-tests ReleaseExists against real
// repositories; network-dependent, so only the negative case is
// asserted. Skipped when gh is unavailable.
func TestReleaseExists_Good(t *testing.T) {
	// These tests run only when gh CLI is available
	if err := validateGhCli(); err != nil {
		t.Skip("skipping test: gh CLI not available or not authenticated")
	}

	t.Run("returns false for non-existent release", func(t *testing.T) {
		ctx := context.Background()
		// Use a non-existent repo and version
		exists := ReleaseExists(ctx, "nonexistent-owner-12345/nonexistent-repo-67890", "v999.999.999")
		assert.False(t, exists)
	})

	t.Run("checks release existence", func(t *testing.T) {
		ctx := context.Background()
		// Test against a known public repository with releases
		// This tests the true path if the release exists
		exists := ReleaseExists(ctx, "cli/cli", "v2.0.0")
		// We don't assert the result since it depends on network access
		// and the release may or may not exist
		_ = exists // Just verify function runs without panic
	})
}
|
||||
|
|
@ -1,372 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
//go:embed templates/homebrew/*.tmpl
|
||||
var homebrewTemplates embed.FS
|
||||
|
||||
// HomebrewConfig holds Homebrew-specific configuration.
// Publish requires either a non-empty Tap or Official.Enabled to be set.
type HomebrewConfig struct {
	// Tap is the Homebrew tap repository (e.g., "host-uk/homebrew-tap").
	Tap string
	// Formula is the formula name (defaults to project name).
	Formula string
	// Official config for generating files for official repo PRs.
	// May be nil when no official-repo output is requested.
	Official *OfficialConfig
}

// OfficialConfig holds configuration for generating files for official repo PRs.
type OfficialConfig struct {
	// Enabled determines whether to generate files for official repos.
	Enabled bool
	// Output is the directory to write generated files.
	Output string
}
|
||||
|
||||
// HomebrewPublisher publishes releases to Homebrew.
// The zero value is usable; it carries no state of its own.
type HomebrewPublisher struct{}

// NewHomebrewPublisher constructs a ready-to-use Homebrew publisher.
func NewHomebrewPublisher() *HomebrewPublisher {
	p := &HomebrewPublisher{}
	return p
}

// Name reports the identifier under which this publisher is registered.
func (p *HomebrewPublisher) Name() string {
	const id = "homebrew"
	return id
}
|
||||
|
||||
// Publish publishes the release to Homebrew.
|
||||
func (p *HomebrewPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
|
||||
// Parse config
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
// Validate configuration
|
||||
if cfg.Tap == "" && (cfg.Official == nil || !cfg.Official.Enabled) {
|
||||
return errors.New("homebrew.Publish: tap is required (set publish.homebrew.tap in config)")
|
||||
}
|
||||
|
||||
// Get repository and project info
|
||||
repo := ""
|
||||
if relCfg != nil {
|
||||
repo = relCfg.GetRepository()
|
||||
}
|
||||
if repo == "" {
|
||||
detectedRepo, err := detectRepository(release.ProjectDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: could not determine repository: %w", err)
|
||||
}
|
||||
repo = detectedRepo
|
||||
}
|
||||
|
||||
projectName := ""
|
||||
if relCfg != nil {
|
||||
projectName = relCfg.GetProjectName()
|
||||
}
|
||||
if projectName == "" {
|
||||
parts := strings.Split(repo, "/")
|
||||
projectName = parts[len(parts)-1]
|
||||
}
|
||||
|
||||
formulaName := cfg.Formula
|
||||
if formulaName == "" {
|
||||
formulaName = projectName
|
||||
}
|
||||
|
||||
// Strip leading 'v' from version
|
||||
version := strings.TrimPrefix(release.Version, "v")
|
||||
|
||||
// Build checksums map from artifacts
|
||||
checksums := buildChecksumMap(release.Artifacts)
|
||||
|
||||
// Template data
|
||||
data := homebrewTemplateData{
|
||||
FormulaClass: toFormulaClass(formulaName),
|
||||
Description: fmt.Sprintf("%s CLI", projectName),
|
||||
Repository: repo,
|
||||
Version: version,
|
||||
License: "MIT",
|
||||
BinaryName: projectName,
|
||||
Checksums: checksums,
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return p.dryRunPublish(release.FS, data, cfg)
|
||||
}
|
||||
|
||||
return p.executePublish(ctx, release.ProjectDir, data, cfg, release)
|
||||
}
|
||||
|
||||
// homebrewTemplateData holds data for Homebrew templates.
type homebrewTemplateData struct {
	// FormulaClass is the Ruby class name derived via toFormulaClass.
	FormulaClass string
	// Description is the formula description (Publish sets "<project> CLI").
	Description string
	// Repository is the GitHub "owner/repo" the formula downloads from.
	Repository string
	// Version is the release version with any leading 'v' stripped.
	Version string
	// License is the SPDX license identifier (Publish sets "MIT").
	License string
	// BinaryName is the installed binary's name.
	BinaryName string
	// Checksums carries the per-platform archive checksums.
	Checksums ChecksumMap
}

// ChecksumMap holds checksums for different platform/arch combinations.
type ChecksumMap struct {
	DarwinAmd64  string
	DarwinArm64  string
	LinuxAmd64   string
	LinuxArm64   string
	WindowsAmd64 string
	WindowsArm64 string
}
|
||||
|
||||
// parseConfig extracts Homebrew-specific configuration.
|
||||
func (p *HomebrewPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) HomebrewConfig {
|
||||
cfg := HomebrewConfig{
|
||||
Tap: "",
|
||||
Formula: "",
|
||||
}
|
||||
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if tap, ok := ext["tap"].(string); ok && tap != "" {
|
||||
cfg.Tap = tap
|
||||
}
|
||||
if formula, ok := ext["formula"].(string); ok && formula != "" {
|
||||
cfg.Formula = formula
|
||||
}
|
||||
if official, ok := ext["official"].(map[string]any); ok {
|
||||
cfg.Official = &OfficialConfig{}
|
||||
if enabled, ok := official["enabled"].(bool); ok {
|
||||
cfg.Official.Enabled = enabled
|
||||
}
|
||||
if output, ok := official["output"].(string); ok {
|
||||
cfg.Official.Output = output
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// dryRunPublish shows what would be done.
|
||||
func (p *HomebrewPublisher) dryRunPublish(m io.Medium, data homebrewTemplateData, cfg HomebrewConfig) error {
|
||||
fmt.Println()
|
||||
fmt.Println("=== DRY RUN: Homebrew Publish ===")
|
||||
fmt.Println()
|
||||
fmt.Printf("Formula: %s\n", data.FormulaClass)
|
||||
fmt.Printf("Version: %s\n", data.Version)
|
||||
fmt.Printf("Tap: %s\n", cfg.Tap)
|
||||
fmt.Printf("Repository: %s\n", data.Repository)
|
||||
fmt.Println()
|
||||
|
||||
// Generate and show formula
|
||||
formula, err := p.renderTemplate(m, "templates/homebrew/formula.rb.tmpl", data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("homebrew.dryRunPublish: %w", err)
|
||||
}
|
||||
fmt.Println("Generated formula.rb:")
|
||||
fmt.Println("---")
|
||||
fmt.Println(formula)
|
||||
fmt.Println("---")
|
||||
fmt.Println()
|
||||
|
||||
if cfg.Tap != "" {
|
||||
fmt.Printf("Would commit to tap: %s\n", cfg.Tap)
|
||||
}
|
||||
if cfg.Official != nil && cfg.Official.Enabled {
|
||||
output := cfg.Official.Output
|
||||
if output == "" {
|
||||
output = "dist/homebrew"
|
||||
}
|
||||
fmt.Printf("Would write files for official PR to: %s\n", output)
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("=== END DRY RUN ===")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// executePublish creates the formula and commits to tap.
|
||||
func (p *HomebrewPublisher) executePublish(ctx context.Context, projectDir string, data homebrewTemplateData, cfg HomebrewConfig, release *Release) error {
|
||||
// Generate formula
|
||||
formula, err := p.renderTemplate(release.FS, "templates/homebrew/formula.rb.tmpl", data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to render formula: %w", err)
|
||||
}
|
||||
|
||||
// If official config is enabled, write to output directory
|
||||
if cfg.Official != nil && cfg.Official.Enabled {
|
||||
output := cfg.Official.Output
|
||||
if output == "" {
|
||||
output = filepath.Join(projectDir, "dist", "homebrew")
|
||||
} else if !filepath.IsAbs(output) {
|
||||
output = filepath.Join(projectDir, output)
|
||||
}
|
||||
|
||||
if err := release.FS.EnsureDir(output); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to create output directory: %w", err)
|
||||
}
|
||||
|
||||
formulaPath := filepath.Join(output, fmt.Sprintf("%s.rb", strings.ToLower(data.FormulaClass)))
|
||||
if err := release.FS.Write(formulaPath, formula); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to write formula: %w", err)
|
||||
}
|
||||
fmt.Printf("Wrote Homebrew formula for official PR: %s\n", formulaPath)
|
||||
}
|
||||
|
||||
// If tap is configured, commit to it
|
||||
if cfg.Tap != "" {
|
||||
if err := p.commitToTap(ctx, cfg.Tap, data, formula); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// commitToTap commits the formula to the tap repository.
|
||||
func (p *HomebrewPublisher) commitToTap(ctx context.Context, tap string, data homebrewTemplateData, formula string) error {
|
||||
// Clone tap repo to temp directory
|
||||
tmpDir, err := os.MkdirTemp("", "homebrew-tap-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to create temp directory: %w", err)
|
||||
}
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Clone the tap
|
||||
fmt.Printf("Cloning tap %s...\n", tap)
|
||||
cmd := exec.CommandContext(ctx, "gh", "repo", "clone", tap, tmpDir, "--", "--depth=1")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to clone tap: %w", err)
|
||||
}
|
||||
|
||||
// Ensure Formula directory exists
|
||||
formulaDir := filepath.Join(tmpDir, "Formula")
|
||||
if err := os.MkdirAll(formulaDir, 0755); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to create Formula directory: %w", err)
|
||||
}
|
||||
|
||||
// Write formula
|
||||
formulaPath := filepath.Join(formulaDir, fmt.Sprintf("%s.rb", strings.ToLower(data.FormulaClass)))
|
||||
if err := os.WriteFile(formulaPath, []byte(formula), 0644); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: failed to write formula: %w", err)
|
||||
}
|
||||
|
||||
// Git add, commit, push
|
||||
commitMsg := fmt.Sprintf("Update %s to %s", data.FormulaClass, data.Version)
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "add", ".")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: git add failed: %w", err)
|
||||
}
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg)
|
||||
cmd.Dir = tmpDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: git commit failed: %w", err)
|
||||
}
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "push")
|
||||
cmd.Dir = tmpDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("homebrew.Publish: git push failed: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Updated Homebrew tap: %s\n", tap)
|
||||
return nil
|
||||
}
|
||||
|
||||
// renderTemplate renders an embedded template with the given data.
|
||||
func (p *HomebrewPublisher) renderTemplate(m io.Medium, name string, data homebrewTemplateData) (string, error) {
|
||||
var content []byte
|
||||
var err error
|
||||
|
||||
// Try custom template from medium
|
||||
customPath := filepath.Join(".core", name)
|
||||
if m != nil && m.IsFile(customPath) {
|
||||
customContent, err := m.Read(customPath)
|
||||
if err == nil {
|
||||
content = []byte(customContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to embedded template
|
||||
if content == nil {
|
||||
content, err = homebrewTemplates.ReadFile(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read template %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
tmpl, err := template.New(filepath.Base(name)).Parse(string(content))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, data); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template %s: %w", name, err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// toFormulaClass converts a kebab-case package name to a Ruby class name
// in PascalCase, e.g. "my-cli-tool" -> "MyCliTool".
func toFormulaClass(name string) string {
	var b strings.Builder
	for _, part := range strings.Split(name, "-") {
		if part == "" {
			continue
		}
		// Uppercase only the first byte; the rest of the segment is kept
		// verbatim (so "CLI" stays "CLI").
		b.WriteString(strings.ToUpper(part[:1]))
		b.WriteString(part[1:])
	}
	return b.String()
}
|
||||
|
||||
// buildChecksumMap extracts checksums from artifacts into a structured map.
|
||||
func buildChecksumMap(artifacts []build.Artifact) ChecksumMap {
|
||||
checksums := ChecksumMap{}
|
||||
|
||||
for _, a := range artifacts {
|
||||
// Parse artifact name to determine platform
|
||||
name := filepath.Base(a.Path)
|
||||
checksum := a.Checksum
|
||||
|
||||
switch {
|
||||
case strings.Contains(name, "darwin-amd64"):
|
||||
checksums.DarwinAmd64 = checksum
|
||||
case strings.Contains(name, "darwin-arm64"):
|
||||
checksums.DarwinArm64 = checksum
|
||||
case strings.Contains(name, "linux-amd64"):
|
||||
checksums.LinuxAmd64 = checksum
|
||||
case strings.Contains(name, "linux-arm64"):
|
||||
checksums.LinuxArm64 = checksum
|
||||
case strings.Contains(name, "windows-amd64"):
|
||||
checksums.WindowsAmd64 = checksum
|
||||
case strings.Contains(name, "windows-arm64"):
|
||||
checksums.WindowsArm64 = checksum
|
||||
}
|
||||
}
|
||||
|
||||
return checksums
|
||||
}
|
||||
|
|
@ -1,347 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHomebrewPublisher_Name_Good(t *testing.T) {
|
||||
t.Run("returns homebrew", func(t *testing.T) {
|
||||
p := NewHomebrewPublisher()
|
||||
assert.Equal(t, "homebrew", p.Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestHomebrewPublisher_ParseConfig_Good(t *testing.T) {
|
||||
p := NewHomebrewPublisher()
|
||||
|
||||
t.Run("uses defaults when no extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{Type: "homebrew"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Tap)
|
||||
assert.Empty(t, cfg.Formula)
|
||||
assert.Nil(t, cfg.Official)
|
||||
})
|
||||
|
||||
t.Run("parses tap and formula from extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "homebrew",
|
||||
Extended: map[string]any{
|
||||
"tap": "host-uk/homebrew-tap",
|
||||
"formula": "myformula",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Equal(t, "host-uk/homebrew-tap", cfg.Tap)
|
||||
assert.Equal(t, "myformula", cfg.Formula)
|
||||
})
|
||||
|
||||
t.Run("parses official config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "homebrew",
|
||||
Extended: map[string]any{
|
||||
"official": map[string]any{
|
||||
"enabled": true,
|
||||
"output": "dist/brew",
|
||||
},
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
require.NotNil(t, cfg.Official)
|
||||
assert.True(t, cfg.Official.Enabled)
|
||||
assert.Equal(t, "dist/brew", cfg.Official.Output)
|
||||
})
|
||||
|
||||
t.Run("handles missing official fields", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "homebrew",
|
||||
Extended: map[string]any{
|
||||
"official": map[string]any{},
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
require.NotNil(t, cfg.Official)
|
||||
assert.False(t, cfg.Official.Enabled)
|
||||
assert.Empty(t, cfg.Official.Output)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHomebrewPublisher_ToFormulaClass_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "simple name",
|
||||
input: "core",
|
||||
expected: "Core",
|
||||
},
|
||||
{
|
||||
name: "kebab case",
|
||||
input: "my-cli-tool",
|
||||
expected: "MyCliTool",
|
||||
},
|
||||
{
|
||||
name: "already capitalised",
|
||||
input: "CLI",
|
||||
expected: "CLI",
|
||||
},
|
||||
{
|
||||
name: "single letter",
|
||||
input: "x",
|
||||
expected: "X",
|
||||
},
|
||||
{
|
||||
name: "multiple dashes",
|
||||
input: "my-super-cool-app",
|
||||
expected: "MySuperCoolApp",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := toFormulaClass(tc.input)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHomebrewPublisher_BuildChecksumMap_Good(t *testing.T) {
|
||||
t.Run("maps artifacts to checksums by platform", func(t *testing.T) {
|
||||
artifacts := []build.Artifact{
|
||||
{Path: "/dist/myapp-darwin-amd64.tar.gz", OS: "darwin", Arch: "amd64", Checksum: "abc123"},
|
||||
{Path: "/dist/myapp-darwin-arm64.tar.gz", OS: "darwin", Arch: "arm64", Checksum: "def456"},
|
||||
{Path: "/dist/myapp-linux-amd64.tar.gz", OS: "linux", Arch: "amd64", Checksum: "ghi789"},
|
||||
{Path: "/dist/myapp-linux-arm64.tar.gz", OS: "linux", Arch: "arm64", Checksum: "jkl012"},
|
||||
{Path: "/dist/myapp-windows-amd64.zip", OS: "windows", Arch: "amd64", Checksum: "mno345"},
|
||||
{Path: "/dist/myapp-windows-arm64.zip", OS: "windows", Arch: "arm64", Checksum: "pqr678"},
|
||||
}
|
||||
|
||||
checksums := buildChecksumMap(artifacts)
|
||||
|
||||
assert.Equal(t, "abc123", checksums.DarwinAmd64)
|
||||
assert.Equal(t, "def456", checksums.DarwinArm64)
|
||||
assert.Equal(t, "ghi789", checksums.LinuxAmd64)
|
||||
assert.Equal(t, "jkl012", checksums.LinuxArm64)
|
||||
assert.Equal(t, "mno345", checksums.WindowsAmd64)
|
||||
assert.Equal(t, "pqr678", checksums.WindowsArm64)
|
||||
})
|
||||
|
||||
t.Run("handles empty artifacts", func(t *testing.T) {
|
||||
checksums := buildChecksumMap([]build.Artifact{})
|
||||
|
||||
assert.Empty(t, checksums.DarwinAmd64)
|
||||
assert.Empty(t, checksums.DarwinArm64)
|
||||
assert.Empty(t, checksums.LinuxAmd64)
|
||||
assert.Empty(t, checksums.LinuxArm64)
|
||||
})
|
||||
|
||||
t.Run("handles partial platform coverage", func(t *testing.T) {
|
||||
artifacts := []build.Artifact{
|
||||
{Path: "/dist/myapp-darwin-arm64.tar.gz", Checksum: "def456"},
|
||||
{Path: "/dist/myapp-linux-amd64.tar.gz", Checksum: "ghi789"},
|
||||
}
|
||||
|
||||
checksums := buildChecksumMap(artifacts)
|
||||
|
||||
assert.Empty(t, checksums.DarwinAmd64)
|
||||
assert.Equal(t, "def456", checksums.DarwinArm64)
|
||||
assert.Equal(t, "ghi789", checksums.LinuxAmd64)
|
||||
assert.Empty(t, checksums.LinuxArm64)
|
||||
})
|
||||
}
|
||||
|
||||
// TestHomebrewPublisher_RenderTemplate_Good renders the embedded formula
// template and asserts the key fields appear in the output. The Contains
// assertions are pinned to the exact text of the embedded template.
func TestHomebrewPublisher_RenderTemplate_Good(t *testing.T) {
	p := NewHomebrewPublisher()

	t.Run("renders formula template with data", func(t *testing.T) {
		data := homebrewTemplateData{
			FormulaClass: "MyApp",
			Description:  "My awesome CLI",
			Repository:   "owner/myapp",
			Version:      "1.2.3",
			License:      "MIT",
			BinaryName:   "myapp",
			Checksums: ChecksumMap{
				DarwinAmd64: "abc123",
				DarwinArm64: "def456",
				LinuxAmd64:  "ghi789",
				LinuxArm64:  "jkl012",
			},
		}

		// io.Local lets renderTemplate probe for a .core/ override; none
		// exists in the test environment, so the embedded template is used.
		result, err := p.renderTemplate(io.Local, "templates/homebrew/formula.rb.tmpl", data)
		require.NoError(t, err)

		assert.Contains(t, result, "class MyApp < Formula")
		assert.Contains(t, result, `desc "My awesome CLI"`)
		assert.Contains(t, result, `version "1.2.3"`)
		assert.Contains(t, result, `license "MIT"`)
		assert.Contains(t, result, "owner/myapp")
		assert.Contains(t, result, "abc123")
		assert.Contains(t, result, "def456")
		assert.Contains(t, result, "ghi789")
		assert.Contains(t, result, "jkl012")
		assert.Contains(t, result, `bin.install "myapp"`)
	})
}
|
||||
|
||||
func TestHomebrewPublisher_RenderTemplate_Bad(t *testing.T) {
|
||||
p := NewHomebrewPublisher()
|
||||
|
||||
t.Run("returns error for non-existent template", func(t *testing.T) {
|
||||
data := homebrewTemplateData{}
|
||||
_, err := p.renderTemplate(io.Local, "templates/homebrew/nonexistent.tmpl", data)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to read template")
|
||||
})
|
||||
}
|
||||
|
||||
// TestHomebrewPublisher_DryRunPublish_Good captures stdout and verifies the
// dry-run report for three configurations: tap-only, official mode with an
// explicit output path, and official mode falling back to the default path.
func TestHomebrewPublisher_DryRunPublish_Good(t *testing.T) {
	p := NewHomebrewPublisher()

	t.Run("outputs expected dry run information", func(t *testing.T) {
		// Capture stdout: dryRunPublish prints via fmt directly, so swap
		// os.Stdout for a pipe and restore it after the call. The ordering
		// (swap -> call -> close writer -> drain reader -> restore) matters.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		data := homebrewTemplateData{
			FormulaClass: "MyApp",
			Description:  "My CLI",
			Repository:   "owner/repo",
			Version:      "1.0.0",
			License:      "MIT",
			BinaryName:   "myapp",
			Checksums:    ChecksumMap{},
		}
		cfg := HomebrewConfig{
			Tap: "owner/homebrew-tap",
		}

		err := p.dryRunPublish(io.Local, data, cfg)

		// Close the writer before draining so ReadFrom sees EOF.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: Homebrew Publish")
		assert.Contains(t, output, "Formula: MyApp")
		assert.Contains(t, output, "Version: 1.0.0")
		assert.Contains(t, output, "Tap: owner/homebrew-tap")
		assert.Contains(t, output, "Repository: owner/repo")
		assert.Contains(t, output, "Would commit to tap: owner/homebrew-tap")
		assert.Contains(t, output, "END DRY RUN")
	})

	t.Run("shows official output path when enabled", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		data := homebrewTemplateData{
			FormulaClass: "MyApp",
			Version:      "1.0.0",
			BinaryName:   "myapp",
			Checksums:    ChecksumMap{},
		}
		cfg := HomebrewConfig{
			Official: &OfficialConfig{
				Enabled: true,
				Output:  "custom/path",
			},
		}

		err := p.dryRunPublish(io.Local, data, cfg)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "Would write files for official PR to: custom/path")
	})

	t.Run("uses default official output path when not specified", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		data := homebrewTemplateData{
			FormulaClass: "MyApp",
			Version:      "1.0.0",
			BinaryName:   "myapp",
			Checksums:    ChecksumMap{},
		}
		cfg := HomebrewConfig{
			Official: &OfficialConfig{
				Enabled: true,
			},
		}

		err := p.dryRunPublish(io.Local, data, cfg)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		// Empty Official.Output falls back to "dist/homebrew" in dryRunPublish.
		assert.Contains(t, output, "Would write files for official PR to: dist/homebrew")
	})
}
|
||||
|
||||
func TestHomebrewPublisher_Publish_Bad(t *testing.T) {
|
||||
p := NewHomebrewPublisher()
|
||||
|
||||
t.Run("fails when tap not configured and not official mode", func(t *testing.T) {
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "homebrew"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "tap is required")
|
||||
})
|
||||
}
|
||||
|
||||
func TestHomebrewConfig_Defaults_Good(t *testing.T) {
|
||||
t.Run("has sensible defaults", func(t *testing.T) {
|
||||
p := NewHomebrewPublisher()
|
||||
pubCfg := PublisherConfig{Type: "homebrew"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Tap)
|
||||
assert.Empty(t, cfg.Formula)
|
||||
assert.Nil(t, cfg.Official)
|
||||
})
|
||||
}
|
||||
|
|
@ -1,989 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- GitHub Publisher Integration Tests ---
|
||||
|
||||
// TestGitHubPublisher_Integration_DryRunNoSideEffects_Good checks that a
// dry-run publish prints the planned gh CLI command, lists artifacts, and
// writes nothing to disk, and that buildCreateArgs produces the expected
// argument vector.
func TestGitHubPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	p := NewGitHubPublisher()

	t.Run("dry run creates no files on disk", func(t *testing.T) {
		tmpDir := t.TempDir()
		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "## v1.0.0\n\n- feat: initial release",
			ProjectDir: tmpDir,
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: filepath.Join(tmpDir, "app-linux-amd64.tar.gz")},
				{Path: filepath.Join(tmpDir, "app-darwin-arm64.tar.gz")},
				{Path: filepath.Join(tmpDir, "CHECKSUMS.txt")},
			},
		}
		pubCfg := PublisherConfig{
			Type:       "github",
			Draft:      true,
			Prerelease: true,
		}
		relCfg := &mockReleaseConfig{repository: "test-org/test-repo", projectName: "testapp"}

		// Capture stdout: Publish prints the dry-run report via fmt; the
		// swap/close/drain/restore ordering is load-bearing.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		// Verify dry run output contains expected information
		assert.Contains(t, output, "DRY RUN: GitHub Release")
		assert.Contains(t, output, "Repository: test-org/test-repo")
		assert.Contains(t, output, "Version: v1.0.0")
		assert.Contains(t, output, "Draft: true")
		assert.Contains(t, output, "Prerelease: true")
		assert.Contains(t, output, "Would upload artifacts:")
		assert.Contains(t, output, "app-linux-amd64.tar.gz")
		assert.Contains(t, output, "app-darwin-arm64.tar.gz")
		assert.Contains(t, output, "CHECKSUMS.txt")
		assert.Contains(t, output, "gh release create")
		assert.Contains(t, output, "--draft")
		assert.Contains(t, output, "--prerelease")

		// Verify no files were created in the temp directory
		entries, err := os.ReadDir(tmpDir)
		require.NoError(t, err)
		assert.Empty(t, entries, "dry run should not create any files")
	})

	t.Run("dry run builds correct gh CLI command for standard release", func(t *testing.T) {
		release := &Release{
			Version:    "v2.3.0",
			Changelog:  "## v2.3.0\n\n### Features\n\n- new feature",
			ProjectDir: "/tmp",
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/dist/app-linux-amd64.tar.gz"},
			},
		}
		pubCfg := PublisherConfig{
			Type:       "github",
			Draft:      false,
			Prerelease: false,
		}

		args := p.buildCreateArgs(release, pubCfg, "owner/repo")

		// Verify exact argument structure: "release create <version> ..."
		assert.Equal(t, "release", args[0])
		assert.Equal(t, "create", args[1])
		assert.Equal(t, "v2.3.0", args[2])

		// Should have --repo followed by the slug
		repoIdx := indexOf(args, "--repo")
		assert.Greater(t, repoIdx, -1)
		assert.Equal(t, "owner/repo", args[repoIdx+1])

		// Should have --title set to the version
		titleIdx := indexOf(args, "--title")
		assert.Greater(t, titleIdx, -1)
		assert.Equal(t, "v2.3.0", args[titleIdx+1])

		// Should have --notes (since changelog is non-empty)
		assert.Contains(t, args, "--notes")

		// Should NOT have --draft or --prerelease
		assert.NotContains(t, args, "--draft")
		assert.NotContains(t, args, "--prerelease")
	})

	t.Run("dry run uses generate-notes when changelog empty", func(t *testing.T) {
		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "",
			ProjectDir: "/tmp",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}

		args := p.buildCreateArgs(release, pubCfg, "owner/repo")

		// Empty changelog flips --notes to gh's --generate-notes.
		assert.Contains(t, args, "--generate-notes")
		assert.NotContains(t, args, "--notes")
	})
}
|
||||
|
||||
// TestGitHubPublisher_Integration_RepositoryDetection_Good covers the three
// repository-resolution paths: explicit config, git-remote detection, and
// the failure when neither is available.
func TestGitHubPublisher_Integration_RepositoryDetection_Good(t *testing.T) {
	p := NewGitHubPublisher()

	t.Run("uses relCfg repository when provided", func(t *testing.T) {
		// Capture stdout to inspect the dry-run report.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: "/tmp",
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}
		relCfg := &mockReleaseConfig{repository: "explicit/repo"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		assert.Contains(t, buf.String(), "Repository: explicit/repo")
	})

	t.Run("detects repository from git remote when relCfg empty", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Build a throwaway git repo with an origin remote for detection.
		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		cmd = exec.Command("git", "remote", "add", "origin", "https://github.com/detected/from-git.git")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}
		relCfg := &mockReleaseConfig{repository: ""}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		assert.Contains(t, buf.String(), "Repository: detected/from-git")
	})

	t.Run("fails when no repository available", func(t *testing.T) {
		// Fresh temp dir: no git remote and no configured repository.
		tmpDir := t.TempDir()

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}
		relCfg := &mockReleaseConfig{repository: ""}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "could not determine repository")
	})
}
|
||||
|
||||
// TestGitHubPublisher_Integration_ArtifactUpload_Good verifies that the
// dry-run report lists every artifact kind and that artifact paths are
// appended after the base gh arguments.
func TestGitHubPublisher_Integration_ArtifactUpload_Good(t *testing.T) {
	p := NewGitHubPublisher()

	t.Run("dry run lists all artifact types", func(t *testing.T) {
		// Capture stdout around dryRunPublish.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Release notes",
			ProjectDir: "/tmp",
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/dist/app-linux-amd64.tar.gz", Checksum: "abc123"},
				{Path: "/dist/app-darwin-arm64.tar.gz", Checksum: "def456"},
				{Path: "/dist/app-windows-amd64.zip", Checksum: "ghi789"},
				{Path: "/dist/CHECKSUMS.txt"},
				{Path: "/dist/app-linux-amd64.tar.gz.sig"},
			},
		}
		pubCfg := PublisherConfig{Type: "github"}

		err := p.dryRunPublish(release, pubCfg, "owner/repo")

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		// Archives, checksum file, and signature file all appear.
		assert.Contains(t, output, "Would upload artifacts:")
		assert.Contains(t, output, "app-linux-amd64.tar.gz")
		assert.Contains(t, output, "app-darwin-arm64.tar.gz")
		assert.Contains(t, output, "app-windows-amd64.zip")
		assert.Contains(t, output, "CHECKSUMS.txt")
		assert.Contains(t, output, "app-linux-amd64.tar.gz.sig")
	})

	t.Run("executePublish appends artifact paths to gh command", func(t *testing.T) {
		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: "/tmp",
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/dist/file1.tar.gz"},
				{Path: "/dist/file2.zip"},
			},
		}
		pubCfg := PublisherConfig{Type: "github"}

		args := p.buildCreateArgs(release, pubCfg, "owner/repo")

		// The executePublish method appends artifact paths after these base args;
		// replicate that here to check placement.
		for _, a := range release.Artifacts {
			args = append(args, a.Path)
		}

		// Verify artifacts are at end of args
		assert.Equal(t, "/dist/file1.tar.gz", args[len(args)-2])
		assert.Equal(t, "/dist/file2.zip", args[len(args)-1])
	})
}
|
||||
|
||||
// --- Docker Publisher Integration Tests ---
|
||||
|
||||
// TestDockerPublisher_Integration_DryRunNoSideEffects_Good checks the
// Docker publisher's dry-run report (tags, platforms, build args, buildx
// command) and the exact buildx argument vector. Skipped when the docker
// CLI is not installed.
func TestDockerPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	if err := validateDockerCli(); err != nil {
		t.Skip("skipping: docker CLI not available")
	}

	p := NewDockerPublisher()

	t.Run("dry run creates no images or containers", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Create a Dockerfile so the publisher has something to reference.
		err := os.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte("FROM alpine:latest\n"), 0644)
		require.NoError(t, err)

		// Capture stdout around Publish (dry-run prints via fmt).
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.2.3",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"registry":  "ghcr.io",
				"image":     "test-org/test-app",
				"platforms": []any{"linux/amd64", "linux/arm64"},
				"tags":      []any{"latest", "{{.Version}}", "stable"},
				"build_args": map[string]any{
					"APP_VERSION": "{{.Version}}",
					"GO_VERSION":  "1.21",
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "test-org/test-app"}

		err = p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		// Verify dry run output
		assert.Contains(t, output, "DRY RUN: Docker Build & Push")
		assert.Contains(t, output, "Version: v1.2.3")
		assert.Contains(t, output, "Registry: ghcr.io")
		assert.Contains(t, output, "Image: test-org/test-app")
		assert.Contains(t, output, "Platforms: linux/amd64, linux/arm64")

		// Verify resolved tags ({{.Version}} expanded to the release version)
		assert.Contains(t, output, "ghcr.io/test-org/test-app:latest")
		assert.Contains(t, output, "ghcr.io/test-org/test-app:v1.2.3")
		assert.Contains(t, output, "ghcr.io/test-org/test-app:stable")

		// Verify build args shown
		assert.Contains(t, output, "Build arguments:")
		assert.Contains(t, output, "GO_VERSION=1.21")

		// Verify command
		assert.Contains(t, output, "docker buildx build")
		assert.Contains(t, output, "END DRY RUN")
	})

	t.Run("dry run produces correct buildx command for multi-platform", func(t *testing.T) {
		cfg := DockerConfig{
			Registry:   "ghcr.io",
			Image:      "org/app",
			Dockerfile: "/project/Dockerfile",
			Platforms:  []string{"linux/amd64", "linux/arm64", "linux/arm/v7"},
			Tags:       []string{"latest", "{{.Version}}"},
			BuildArgs: map[string]string{
				"CUSTOM_ARG": "custom_value",
			},
		}
		tags := p.resolveTags(cfg.Tags, "v3.0.0")
		args := p.buildBuildxArgs(cfg, tags, "v3.0.0")

		// Verify multi-platform string: one --platform flag with a
		// comma-joined value.
		foundPlatform := false
		for i, arg := range args {
			if arg == "--platform" && i+1 < len(args) {
				foundPlatform = true
				assert.Equal(t, "linux/amd64,linux/arm64,linux/arm/v7", args[i+1])
			}
		}
		assert.True(t, foundPlatform, "should have --platform flag")

		// Verify tags
		assert.Contains(t, args, "ghcr.io/org/app:latest")
		assert.Contains(t, args, "ghcr.io/org/app:v3.0.0")

		// Verify build args: the configured one plus the auto-added VERSION.
		foundCustom := false
		foundVersion := false
		for i, arg := range args {
			if arg == "--build-arg" && i+1 < len(args) {
				if args[i+1] == "CUSTOM_ARG=custom_value" {
					foundCustom = true
				}
				if args[i+1] == "VERSION=v3.0.0" {
					foundVersion = true
				}
			}
		}
		assert.True(t, foundCustom, "CUSTOM_ARG build arg not found")
		assert.True(t, foundVersion, "auto-added VERSION build arg not found")

		// Verify push flag
		assert.Contains(t, args, "--push")
	})
}
|
||||
|
||||
// TestDockerPublisher_Integration_ConfigParsing_Good verifies that a full
// PublisherConfig round-trips into a DockerConfig: scalar fields are copied,
// a relative Dockerfile path is resolved against the project directory, and
// {{.Version}} placeholders in tags survive parsing and resolve later via
// resolveTags.
func TestDockerPublisher_Integration_ConfigParsing_Good(t *testing.T) {
	p := NewDockerPublisher()

	t.Run("full config round-trip from PublisherConfig to DockerConfig", func(t *testing.T) {
		pubCfg := PublisherConfig{
			Type: "docker",
			Extended: map[string]any{
				"registry":   "registry.example.com",
				"image":      "myteam/myservice",
				"dockerfile": "deploy/Dockerfile.prod",
				"platforms":  []any{"linux/amd64"},
				"tags":       []any{"{{.Version}}", "latest", "release-{{.Version}}"},
				"build_args": map[string]any{
					"BUILD_ENV": "production",
					"VERSION":   "{{.Version}}",
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "fallback/repo"}

		cfg := p.parseConfig(pubCfg, relCfg, "/myproject")

		assert.Equal(t, "registry.example.com", cfg.Registry)
		assert.Equal(t, "myteam/myservice", cfg.Image)
		// Relative dockerfile path is joined onto the project directory.
		assert.Equal(t, "/myproject/deploy/Dockerfile.prod", cfg.Dockerfile)
		assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms)
		// Tags keep their template form until resolveTags is called.
		assert.Equal(t, []string{"{{.Version}}", "latest", "release-{{.Version}}"}, cfg.Tags)
		assert.Equal(t, "production", cfg.BuildArgs["BUILD_ENV"])
		assert.Equal(t, "{{.Version}}", cfg.BuildArgs["VERSION"])

		// Verify tag resolution
		resolved := p.resolveTags(cfg.Tags, "v2.5.0")
		assert.Equal(t, []string{"v2.5.0", "latest", "release-v2.5.0"}, resolved)
	})
}
|
||||
|
||||
// --- Homebrew Publisher Integration Tests ---
|
||||
|
||||
// TestHomebrewPublisher_Integration_DryRunNoSideEffects_Good verifies that a
// dry-run Publish prints the generated Homebrew formula and the publish plan
// to stdout without writing any files to the project directory.
func TestHomebrewPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	p := NewHomebrewPublisher()

	t.Run("dry run generates formula without writing files", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Capture stdout: Publish writes the dry-run report with fmt.Print*.
		// NOTE(review): swapping os.Stdout is process-global, so this test
		// must not run in parallel with other stdout-capturing tests.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v2.1.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/dist/myapp-darwin-amd64.tar.gz", Checksum: "sha256_darwin_amd64"},
				{Path: "/dist/myapp-darwin-arm64.tar.gz", Checksum: "sha256_darwin_arm64"},
				{Path: "/dist/myapp-linux-amd64.tar.gz", Checksum: "sha256_linux_amd64"},
				{Path: "/dist/myapp-linux-arm64.tar.gz", Checksum: "sha256_linux_arm64"},
			},
		}
		pubCfg := PublisherConfig{
			Type: "homebrew",
			Extended: map[string]any{
				"tap":     "test-org/homebrew-tap",
				"formula": "my-cli",
			},
		}
		relCfg := &mockReleaseConfig{repository: "test-org/my-cli", projectName: "my-cli"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		// Close the write end first so ReadFrom sees EOF, then restore stdout.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		// Verify dry run output
		assert.Contains(t, output, "DRY RUN: Homebrew Publish")
		// "my-cli" is converted to the Ruby class name "MyCli".
		assert.Contains(t, output, "Formula: MyCli")
		// Version is printed with the leading "v" stripped.
		assert.Contains(t, output, "Version: 2.1.0")
		assert.Contains(t, output, "Tap: test-org/homebrew-tap")
		assert.Contains(t, output, "Repository: test-org/my-cli")

		// Verify generated formula content
		assert.Contains(t, output, "class MyCli < Formula")
		assert.Contains(t, output, `version "2.1.0"`)
		assert.Contains(t, output, "sha256_darwin_amd64")
		assert.Contains(t, output, "sha256_darwin_arm64")
		assert.Contains(t, output, "sha256_linux_amd64")
		assert.Contains(t, output, "sha256_linux_arm64")

		assert.Contains(t, output, "Would commit to tap: test-org/homebrew-tap")

		// Verify no files created
		entries, err := os.ReadDir(tmpDir)
		require.NoError(t, err)
		assert.Empty(t, entries, "dry run should not create any files")
	})

	t.Run("dry run with official config shows output path", func(t *testing.T) {
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: "/project",
			FS:         io.Local,
			Artifacts:  []build.Artifact{},
		}
		pubCfg := PublisherConfig{
			Type: "homebrew",
			Extended: map[string]any{
				// "official" mode writes formula files for a homebrew-core PR
				// instead of committing to a private tap.
				"official": map[string]any{
					"enabled": true,
					"output":  "dist/homebrew-official",
				},
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/repo", projectName: "repo"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()
		assert.Contains(t, output, "Would write files for official PR to: dist/homebrew-official")
	})
}
|
||||
|
||||
// TestHomebrewPublisher_Integration_FormulaGeneration_Good exercises the
// formula template rendering and the project-name-to-Ruby-class conversion
// used by the Homebrew publisher.
func TestHomebrewPublisher_Integration_FormulaGeneration_Good(t *testing.T) {
	p := NewHomebrewPublisher()

	t.Run("generated formula contains correct Ruby class structure", func(t *testing.T) {
		data := homebrewTemplateData{
			FormulaClass: "CoreCli",
			Description:  "Core CLI tool",
			Repository:   "host-uk/core-cli",
			Version:      "3.0.0",
			License:      "MIT",
			BinaryName:   "core",
			Checksums: ChecksumMap{
				DarwinAmd64: "a1b2c3d4e5f6",
				DarwinArm64: "f6e5d4c3b2a1",
				LinuxAmd64:  "112233445566",
				LinuxArm64:  "665544332211",
			},
		}

		// Render the formula template through the publisher's own renderer.
		formula, err := p.renderTemplate(io.Local, "templates/homebrew/formula.rb.tmpl", data)
		require.NoError(t, err)

		// Verify class definition
		assert.Contains(t, formula, "class CoreCli < Formula")

		// Verify metadata
		assert.Contains(t, formula, `desc "Core CLI tool"`)
		assert.Contains(t, formula, `version "3.0.0"`)
		assert.Contains(t, formula, `license "MIT"`)

		// Verify checksums for all platforms
		assert.Contains(t, formula, "a1b2c3d4e5f6")
		assert.Contains(t, formula, "f6e5d4c3b2a1")
		assert.Contains(t, formula, "112233445566")
		assert.Contains(t, formula, "665544332211")

		// Verify binary install
		assert.Contains(t, formula, `bin.install "core"`)
	})

	t.Run("toFormulaClass handles various naming patterns", func(t *testing.T) {
		tests := []struct {
			input    string
			expected string
		}{
			// Hyphen-separated names become CamelCase; existing upper case
			// is preserved.
			{"my-app", "MyApp"},
			{"core", "Core"},
			{"go-devops", "GoDevops"},
			{"a-b-c-d", "ABCD"},
			{"single", "Single"},
			{"UPPER", "UPPER"},
		}

		for _, tc := range tests {
			t.Run(tc.input, func(t *testing.T) {
				result := toFormulaClass(tc.input)
				assert.Equal(t, tc.expected, result)
			})
		}
	})
}
|
||||
|
||||
// --- Scoop Publisher Integration Tests ---
|
||||
|
||||
// TestScoopPublisher_Integration_DryRunNoSideEffects_Good verifies that a
// dry-run Scoop Publish prints the generated manifest and publish plan
// without writing anything to disk.
func TestScoopPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	p := NewScoopPublisher()

	t.Run("dry run generates manifest without writing files", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Capture stdout; Publish prints the dry-run report directly.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.5.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
			// Scoop targets Windows, so only windows artifacts are supplied.
			Artifacts: []build.Artifact{
				{Path: "/dist/myapp-windows-amd64.zip", Checksum: "win64hash"},
				{Path: "/dist/myapp-windows-arm64.zip", Checksum: "winarm64hash"},
			},
		}
		pubCfg := PublisherConfig{
			Type: "scoop",
			Extended: map[string]any{
				"bucket": "test-org/scoop-bucket",
			},
		}
		relCfg := &mockReleaseConfig{repository: "test-org/myapp", projectName: "myapp"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		// Close the write end so ReadFrom terminates, then restore stdout.
		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: Scoop Publish")
		assert.Contains(t, output, "Package: myapp")
		// Version is printed with the leading "v" stripped.
		assert.Contains(t, output, "Version: 1.5.0")
		assert.Contains(t, output, "Bucket: test-org/scoop-bucket")
		assert.Contains(t, output, "Generated manifest.json:")
		assert.Contains(t, output, `"version": "1.5.0"`)
		assert.Contains(t, output, "Would commit to bucket: test-org/scoop-bucket")

		// Verify no files created
		entries, err := os.ReadDir(tmpDir)
		require.NoError(t, err)
		assert.Empty(t, entries)
	})
}
|
||||
|
||||
// --- AUR Publisher Integration Tests ---
|
||||
|
||||
// TestAURPublisher_Integration_DryRunNoSideEffects_Good verifies that a
// dry-run AUR Publish prints the generated PKGBUILD and .SRCINFO without
// writing files or pushing to the AUR.
func TestAURPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	p := NewAURPublisher()

	t.Run("dry run generates PKGBUILD and SRCINFO without writing files", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Capture stdout; Publish prints the dry-run report directly.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v2.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
			// AUR binary packages are built from the Linux artifacts.
			Artifacts: []build.Artifact{
				{Path: "/dist/myapp-linux-amd64.tar.gz", Checksum: "amd64hash"},
				{Path: "/dist/myapp-linux-arm64.tar.gz", Checksum: "arm64hash"},
			},
		}
		pubCfg := PublisherConfig{
			Type: "aur",
			Extended: map[string]any{
				"maintainer": "Test User <test@example.com>",
			},
		}
		relCfg := &mockReleaseConfig{repository: "test-org/myapp", projectName: "myapp"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: AUR Publish")
		// Prebuilt-binary packages get the conventional "-bin" suffix.
		assert.Contains(t, output, "Package: myapp-bin")
		assert.Contains(t, output, "Version: 2.0.0")
		assert.Contains(t, output, "Maintainer: Test User <test@example.com>")
		assert.Contains(t, output, "Generated PKGBUILD:")
		assert.Contains(t, output, "pkgname=myapp-bin")
		assert.Contains(t, output, "pkgver=2.0.0")
		assert.Contains(t, output, "Generated .SRCINFO:")
		assert.Contains(t, output, "pkgbase = myapp-bin")
		assert.Contains(t, output, "Would push to AUR:")

		// Verify no files created
		entries, err := os.ReadDir(tmpDir)
		require.NoError(t, err)
		assert.Empty(t, entries)
	})
}
|
||||
|
||||
// --- Chocolatey Publisher Integration Tests ---
|
||||
|
||||
// TestChocolateyPublisher_Integration_DryRunNoSideEffects_Good verifies that
// a dry-run Chocolatey Publish prints the generated nuspec and install
// script without writing files or pushing a package.
func TestChocolateyPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	p := NewChocolateyPublisher()

	t.Run("dry run generates nuspec and install script without side effects", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Capture stdout; Publish prints the dry-run report directly.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
			Artifacts: []build.Artifact{
				{Path: "/dist/myapp-windows-amd64.zip", Checksum: "choco_hash"},
			},
		}
		pubCfg := PublisherConfig{
			Type: "chocolatey",
			Extended: map[string]any{
				"package": "my-cli-tool",
				// push=false: generate package files only, don't choco push.
				"push": false,
			},
		}
		relCfg := &mockReleaseConfig{repository: "owner/my-cli-tool", projectName: "my-cli-tool"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: Chocolatey Publish")
		assert.Contains(t, output, "Package: my-cli-tool")
		// Version is printed with the leading "v" stripped.
		assert.Contains(t, output, "Version: 1.0.0")
		assert.Contains(t, output, "Push: false")
		assert.Contains(t, output, "Generated package.nuspec:")
		assert.Contains(t, output, "<id>my-cli-tool</id>")
		assert.Contains(t, output, "Generated chocolateyinstall.ps1:")
		assert.Contains(t, output, "Would generate package files only")

		// Verify no files created
		entries, err := os.ReadDir(tmpDir)
		require.NoError(t, err)
		assert.Empty(t, entries)
	})
}
|
||||
|
||||
// --- npm Publisher Integration Tests ---
|
||||
|
||||
// TestNpmPublisher_Integration_DryRunNoSideEffects_Good verifies that a
// dry-run npm Publish prints the generated package.json and the npm command
// that would run, without writing files or publishing.
func TestNpmPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	p := NewNpmPublisher()

	t.Run("dry run generates package.json without writing files or publishing", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Capture stdout; Publish prints the dry-run report directly.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v3.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "npm",
			Extended: map[string]any{
				"package": "@test-org/my-cli",
				// Scoped packages default to restricted; "public" must be
				// passed through to npm publish.
				"access": "public",
			},
		}
		relCfg := &mockReleaseConfig{repository: "test-org/my-cli", projectName: "my-cli"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: npm Publish")
		assert.Contains(t, output, "Package: @test-org/my-cli")
		// Version is printed with the leading "v" stripped (npm semver).
		assert.Contains(t, output, "Version: 3.0.0")
		assert.Contains(t, output, "Access: public")
		assert.Contains(t, output, "Generated package.json:")
		assert.Contains(t, output, `"name": "@test-org/my-cli"`)
		assert.Contains(t, output, `"version": "3.0.0"`)
		assert.Contains(t, output, "Would run: npm publish --access public")

		// Verify no files created
		entries, err := os.ReadDir(tmpDir)
		require.NoError(t, err)
		assert.Empty(t, entries)
	})
}
|
||||
|
||||
// --- LinuxKit Publisher Integration Tests ---
|
||||
|
||||
// TestLinuxKitPublisher_Integration_DryRunNoSideEffects_Good verifies the
// dry-run plan lists every format/platform artifact combination and that no
// images are built. Skipped when the linuxkit CLI is not installed, since
// Publish validates the CLI up front.
func TestLinuxKitPublisher_Integration_DryRunNoSideEffects_Good(t *testing.T) {
	if err := validateLinuxKitCli(); err != nil {
		t.Skip("skipping: linuxkit CLI not available")
	}

	p := NewLinuxKitPublisher()

	t.Run("dry run with multiple formats and platforms", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Create config file at the default location Publish looks for.
		configDir := filepath.Join(tmpDir, ".core", "linuxkit")
		require.NoError(t, os.MkdirAll(configDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(configDir, "server.yml"), []byte("kernel:\n image: test\n"), 0644))

		// Capture stdout; Publish prints the dry-run report directly.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{
			Type: "linuxkit",
			Extended: map[string]any{
				"formats":   []any{"iso", "qcow2", "docker"},
				"platforms": []any{"linux/amd64", "linux/arm64"},
			},
		}
		relCfg := &mockReleaseConfig{repository: "test-org/my-os"}

		err := p.Publish(context.Background(), release, pubCfg, relCfg, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		output := buf.String()

		assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish")
		assert.Contains(t, output, "Formats: iso, qcow2, docker")
		assert.Contains(t, output, "Platforms: linux/amd64, linux/arm64")

		// Verify all combinations listed (2 platforms x 3 formats).
		assert.Contains(t, output, "linuxkit-1.0.0-amd64.iso")
		assert.Contains(t, output, "linuxkit-1.0.0-amd64.qcow2")
		assert.Contains(t, output, "linuxkit-1.0.0-amd64.docker.tar")
		assert.Contains(t, output, "linuxkit-1.0.0-arm64.iso")
		assert.Contains(t, output, "linuxkit-1.0.0-arm64.qcow2")
		assert.Contains(t, output, "linuxkit-1.0.0-arm64.docker.tar")

		// Verify docker usage hint
		assert.Contains(t, output, "docker load")

		// Verify no files created in dist
		distDir := filepath.Join(tmpDir, "dist")
		_, err = os.Stat(distDir)
		assert.True(t, os.IsNotExist(err), "dry run should not create dist directory")
	})
}
|
||||
|
||||
// --- Cross-Publisher Integration Tests ---
|
||||
|
||||
func TestAllPublishers_Integration_NameUniqueness_Good(t *testing.T) {
|
||||
t.Run("all publishers have unique names", func(t *testing.T) {
|
||||
publishers := []Publisher{
|
||||
NewGitHubPublisher(),
|
||||
NewDockerPublisher(),
|
||||
NewHomebrewPublisher(),
|
||||
NewNpmPublisher(),
|
||||
NewScoopPublisher(),
|
||||
NewAURPublisher(),
|
||||
NewChocolateyPublisher(),
|
||||
NewLinuxKitPublisher(),
|
||||
}
|
||||
|
||||
names := make(map[string]bool)
|
||||
for _, pub := range publishers {
|
||||
name := pub.Name()
|
||||
assert.False(t, names[name], "duplicate publisher name: %s", name)
|
||||
names[name] = true
|
||||
assert.NotEmpty(t, name, "publisher name should not be empty")
|
||||
}
|
||||
|
||||
assert.Len(t, names, 8, "should have 8 unique publishers")
|
||||
})
|
||||
}
|
||||
|
||||
// TestAllPublishers_Integration_NilRelCfg_Good verifies that a publisher can
// fall back to detecting the repository from the project's git remote when
// no release config is supplied (relCfg == nil).
func TestAllPublishers_Integration_NilRelCfg_Good(t *testing.T) {
	t.Run("github handles nil relCfg with git repo", func(t *testing.T) {
		tmpDir := t.TempDir()

		// Create a real git repository with an origin remote so repository
		// detection has something to parse. Requires git on PATH.
		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:niltest/repo.git")
		cmd.Dir = tmpDir
		require.NoError(t, cmd.Run())

		// Capture stdout; the dry-run report goes straight to os.Stdout.
		oldStdout := os.Stdout
		r, w, _ := os.Pipe()
		os.Stdout = w

		release := &Release{
			Version:    "v1.0.0",
			Changelog:  "Changes",
			ProjectDir: tmpDir,
			FS:         io.Local,
		}
		pubCfg := PublisherConfig{Type: "github"}

		err := NewGitHubPublisher().Publish(context.Background(), release, pubCfg, nil, true)

		_ = w.Close()
		var buf bytes.Buffer
		_, _ = buf.ReadFrom(r)
		os.Stdout = oldStdout

		require.NoError(t, err)
		// The owner/repo slug parsed from the SSH remote must appear in the plan.
		assert.Contains(t, buf.String(), "niltest/repo")
	})
}
|
||||
|
||||
func TestBuildChecksumMap_Integration_Good(t *testing.T) {
|
||||
t.Run("maps all platforms correctly from realistic artifacts", func(t *testing.T) {
|
||||
artifacts := []build.Artifact{
|
||||
{Path: "/dist/core-v1.0.0-darwin-amd64.tar.gz", Checksum: "da64"},
|
||||
{Path: "/dist/core-v1.0.0-darwin-arm64.tar.gz", Checksum: "da65"},
|
||||
{Path: "/dist/core-v1.0.0-linux-amd64.tar.gz", Checksum: "la64"},
|
||||
{Path: "/dist/core-v1.0.0-linux-arm64.tar.gz", Checksum: "la65"},
|
||||
{Path: "/dist/core-v1.0.0-windows-amd64.zip", Checksum: "wa64"},
|
||||
{Path: "/dist/core-v1.0.0-windows-arm64.zip", Checksum: "wa65"},
|
||||
{Path: "/dist/CHECKSUMS.txt"}, // No checksum for checksum file
|
||||
}
|
||||
|
||||
checksums := buildChecksumMap(artifacts)
|
||||
|
||||
assert.Equal(t, "da64", checksums.DarwinAmd64)
|
||||
assert.Equal(t, "da65", checksums.DarwinArm64)
|
||||
assert.Equal(t, "la64", checksums.LinuxAmd64)
|
||||
assert.Equal(t, "la65", checksums.LinuxArm64)
|
||||
assert.Equal(t, "wa64", checksums.WindowsAmd64)
|
||||
assert.Equal(t, "wa65", checksums.WindowsArm64)
|
||||
})
|
||||
}
|
||||
|
||||
// indexOf returns the index of the first occurrence of item in slice,
// or -1 when the item is absent (including for a nil slice).
func indexOf(slice []string, item string) int {
	for i := range slice {
		if slice[i] == item {
			return i
		}
	}
	return -1
}
|
||||
|
||||
// Compile-time check: all publishers implement Publisher interface
|
||||
var _ Publisher = (*GitHubPublisher)(nil)
|
||||
var _ Publisher = (*DockerPublisher)(nil)
|
||||
var _ Publisher = (*HomebrewPublisher)(nil)
|
||||
var _ Publisher = (*NpmPublisher)(nil)
|
||||
var _ Publisher = (*ScoopPublisher)(nil)
|
||||
var _ Publisher = (*AURPublisher)(nil)
|
||||
var _ Publisher = (*ChocolateyPublisher)(nil)
|
||||
var _ Publisher = (*LinuxKitPublisher)(nil)
|
||||
|
|
@ -1,304 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// LinuxKitConfig holds configuration for the LinuxKit publisher.
// Zero values are not useful on their own; parseConfig fills in defaults
// (server.yml under .core/linuxkit, iso format, linux/amd64).
type LinuxKitConfig struct {
	// Config is the path to the LinuxKit YAML configuration file.
	// parseConfig resolves relative paths against the project directory.
	Config string `yaml:"config"`
	// Formats are the output formats to build.
	// Supported: iso, iso-bios, iso-efi, raw, raw-bios, raw-efi,
	// qcow2, qcow2-bios, qcow2-efi, vmdk, vhd, gcp, aws,
	// docker (tarball for `docker load`), tar, kernel+initrd
	Formats []string `yaml:"formats"`
	// Platforms are the target platforms (linux/amd64, linux/arm64).
	// Only the arch segment after the slash is passed to linuxkit.
	Platforms []string `yaml:"platforms"`
}
|
||||
|
||||
// LinuxKitPublisher builds and publishes LinuxKit images.
// It is stateless; all configuration arrives via Publish parameters, so the
// zero value is as usable as the one from NewLinuxKitPublisher.
type LinuxKitPublisher struct{}
|
||||
|
||||
// NewLinuxKitPublisher creates a new LinuxKit publisher.
|
||||
func NewLinuxKitPublisher() *LinuxKitPublisher {
|
||||
return &LinuxKitPublisher{}
|
||||
}
|
||||
|
||||
// Name returns the publisher's identifier.
|
||||
func (p *LinuxKitPublisher) Name() string {
|
||||
return "linuxkit"
|
||||
}
|
||||
|
||||
// Publish builds LinuxKit images and uploads them to the GitHub release.
//
// Flow: verify the linuxkit CLI exists, resolve config (applying defaults),
// check the YAML config file is present, determine the upload repository
// (explicit relCfg first, then git-remote detection), then either print a
// dry-run plan or build and upload for real.
func (p *LinuxKitPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	// Validate linuxkit CLI is available
	if err := validateLinuxKitCli(); err != nil {
		return err
	}

	// Parse LinuxKit-specific config from publisher config
	lkCfg := p.parseConfig(pubCfg, release.ProjectDir)

	// Validate config file exists
	if release.FS == nil {
		return errors.New("linuxkit.Publish: release filesystem (FS) is nil")
	}
	if !release.FS.Exists(lkCfg.Config) {
		return fmt.Errorf("linuxkit.Publish: config file not found: %s", lkCfg.Config)
	}

	// Determine repository for artifact upload; relCfg is optional and may
	// be nil (see TestAllPublishers_Integration_NilRelCfg_Good).
	repo := ""
	if relCfg != nil {
		repo = relCfg.GetRepository()
	}
	if repo == "" {
		// Fall back to detecting owner/repo from the project's git remote.
		detectedRepo, err := detectRepository(release.ProjectDir)
		if err != nil {
			return fmt.Errorf("linuxkit.Publish: could not determine repository: %w", err)
		}
		repo = detectedRepo
	}

	if dryRun {
		return p.dryRunPublish(release, lkCfg, repo)
	}

	return p.executePublish(ctx, release, lkCfg, repo)
}
|
||||
|
||||
// parseConfig extracts LinuxKit-specific configuration.
|
||||
func (p *LinuxKitPublisher) parseConfig(pubCfg PublisherConfig, projectDir string) LinuxKitConfig {
|
||||
cfg := LinuxKitConfig{
|
||||
Config: filepath.Join(projectDir, ".core", "linuxkit", "server.yml"),
|
||||
Formats: []string{"iso"},
|
||||
Platforms: []string{"linux/amd64"},
|
||||
}
|
||||
|
||||
// Override from extended config if present
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if configPath, ok := ext["config"].(string); ok && configPath != "" {
|
||||
if filepath.IsAbs(configPath) {
|
||||
cfg.Config = configPath
|
||||
} else {
|
||||
cfg.Config = filepath.Join(projectDir, configPath)
|
||||
}
|
||||
}
|
||||
if formats, ok := ext["formats"].([]any); ok && len(formats) > 0 {
|
||||
cfg.Formats = make([]string, 0, len(formats))
|
||||
for _, f := range formats {
|
||||
if s, ok := f.(string); ok {
|
||||
cfg.Formats = append(cfg.Formats, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
if platforms, ok := ext["platforms"].([]any); ok && len(platforms) > 0 {
|
||||
cfg.Platforms = make([]string, 0, len(platforms))
|
||||
for _, p := range platforms {
|
||||
if s, ok := p.(string); ok {
|
||||
cfg.Platforms = append(cfg.Platforms, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// dryRunPublish shows what would be done without actually building.
//
// It prints the resolved configuration, the exact linuxkit commands that
// would run for each platform/format pair, and the artifacts that would be
// uploaded. It always returns nil: nothing past this point can fail.
func (p *LinuxKitPublisher) dryRunPublish(release *Release, cfg LinuxKitConfig, repo string) error {
	fmt.Println()
	fmt.Println("=== DRY RUN: LinuxKit Build & Publish ===")
	fmt.Println()
	fmt.Printf("Repository: %s\n", repo)
	fmt.Printf("Version: %s\n", release.Version)
	fmt.Printf("Config: %s\n", cfg.Config)
	fmt.Printf("Formats: %s\n", strings.Join(cfg.Formats, ", "))
	fmt.Printf("Platforms: %s\n", strings.Join(cfg.Platforms, ", "))
	fmt.Println()

	outputDir := filepath.Join(release.ProjectDir, "dist", "linuxkit")
	baseName := p.buildBaseName(release.Version)

	fmt.Println("Would execute commands:")
	for _, platform := range cfg.Platforms {
		// Platform strings look like "linux/amd64"; the arch is the segment
		// after the slash, defaulting to amd64 for unexpected forms.
		parts := strings.Split(platform, "/")
		arch := "amd64"
		if len(parts) == 2 {
			arch = parts[1]
		}

		for _, format := range cfg.Formats {
			outputName := fmt.Sprintf("%s-%s", baseName, arch)
			args := p.buildLinuxKitArgs(cfg.Config, format, outputName, outputDir, arch)
			fmt.Printf(" linuxkit %s\n", strings.Join(args, " "))
		}
	}
	fmt.Println()

	fmt.Println("Would upload artifacts to release:")
	for _, platform := range cfg.Platforms {
		// Same arch extraction as above, repeated for the upload listing.
		parts := strings.Split(platform, "/")
		arch := "amd64"
		if len(parts) == 2 {
			arch = parts[1]
		}

		for _, format := range cfg.Formats {
			outputName := fmt.Sprintf("%s-%s", baseName, arch)
			artifactPath := p.getArtifactPath(outputDir, outputName, format)
			fmt.Printf(" - %s\n", filepath.Base(artifactPath))
			// Docker tarballs get a usage hint since they are loaded into a
			// docker daemon rather than booted directly.
			if format == "docker" {
				fmt.Printf(" Usage: docker load < %s\n", filepath.Base(artifactPath))
			}
		}
	}

	fmt.Println()
	fmt.Println("=== END DRY RUN ===")

	return nil
}
|
||||
|
||||
// executePublish builds LinuxKit images and uploads them.
//
// For every platform/format combination it shells out to the linuxkit CLI
// (streaming its output to our stdio), collects the expected artifact paths,
// then uploads each artifact to the release identified by repo. It fails
// fast on the first build or upload error; artifacts built before the
// failure are left on disk but not uploaded.
func (p *LinuxKitPublisher) executePublish(ctx context.Context, release *Release, cfg LinuxKitConfig, repo string) error {
	outputDir := filepath.Join(release.ProjectDir, "dist", "linuxkit")

	// Create output directory
	if err := release.FS.EnsureDir(outputDir); err != nil {
		return fmt.Errorf("linuxkit.Publish: failed to create output directory: %w", err)
	}

	baseName := p.buildBaseName(release.Version)
	var artifacts []string

	// Build for each platform and format
	for _, platform := range cfg.Platforms {
		// Extract the arch segment of "linux/<arch>"; default to amd64.
		parts := strings.Split(platform, "/")
		arch := "amd64"
		if len(parts) == 2 {
			arch = parts[1]
		}

		for _, format := range cfg.Formats {
			outputName := fmt.Sprintf("%s-%s", baseName, arch)

			// Build the image
			args := p.buildLinuxKitArgs(cfg.Config, format, outputName, outputDir, arch)
			cmd := exec.CommandContext(ctx, "linuxkit", args...)
			cmd.Dir = release.ProjectDir
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr

			fmt.Printf("Building LinuxKit image: %s (%s)\n", outputName, format)
			if err := cmd.Run(); err != nil {
				return fmt.Errorf("linuxkit.Publish: build failed for %s/%s: %w", platform, format, err)
			}

			// Track artifact for upload
			artifactPath := p.getArtifactPath(outputDir, outputName, format)
			artifacts = append(artifacts, artifactPath)
		}
	}

	// Upload artifacts to GitHub release
	for _, artifactPath := range artifacts {
		// Guard against a mismatch between what linuxkit actually produced
		// and the path getArtifactPath predicted.
		if !release.FS.Exists(artifactPath) {
			return fmt.Errorf("linuxkit.Publish: artifact not found after build: %s", artifactPath)
		}

		if err := UploadArtifact(ctx, repo, release.Version, artifactPath); err != nil {
			return fmt.Errorf("linuxkit.Publish: failed to upload %s: %w", filepath.Base(artifactPath), err)
		}

		// Print helpful usage info for docker format
		if strings.HasSuffix(artifactPath, ".docker.tar") {
			fmt.Printf(" Load with: docker load < %s\n", filepath.Base(artifactPath))
		}
	}

	return nil
}
|
||||
|
||||
// buildBaseName creates the base name for output files.
|
||||
func (p *LinuxKitPublisher) buildBaseName(version string) string {
|
||||
// Strip leading 'v' if present for cleaner filenames
|
||||
name := strings.TrimPrefix(version, "v")
|
||||
return fmt.Sprintf("linuxkit-%s", name)
|
||||
}
|
||||
|
||||
// buildLinuxKitArgs builds the arguments for linuxkit build command.
|
||||
func (p *LinuxKitPublisher) buildLinuxKitArgs(configPath, format, outputName, outputDir, arch string) []string {
|
||||
args := []string{"build"}
|
||||
|
||||
// Output format
|
||||
args = append(args, "--format", format)
|
||||
|
||||
// Output name
|
||||
args = append(args, "--name", outputName)
|
||||
|
||||
// Output directory
|
||||
args = append(args, "--dir", outputDir)
|
||||
|
||||
// Architecture (if not amd64)
|
||||
if arch != "amd64" {
|
||||
args = append(args, "--arch", arch)
|
||||
}
|
||||
|
||||
// Config file
|
||||
args = append(args, configPath)
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// getArtifactPath returns the expected path of the built artifact.
|
||||
func (p *LinuxKitPublisher) getArtifactPath(outputDir, outputName, format string) string {
|
||||
ext := p.getFormatExtension(format)
|
||||
return filepath.Join(outputDir, outputName+ext)
|
||||
}
|
||||
|
||||
// getFormatExtension returns the file extension for a LinuxKit output format.
|
||||
func (p *LinuxKitPublisher) getFormatExtension(format string) string {
|
||||
switch format {
|
||||
case "iso", "iso-bios", "iso-efi":
|
||||
return ".iso"
|
||||
case "raw", "raw-bios", "raw-efi":
|
||||
return ".raw"
|
||||
case "qcow2", "qcow2-bios", "qcow2-efi":
|
||||
return ".qcow2"
|
||||
case "vmdk":
|
||||
return ".vmdk"
|
||||
case "vhd":
|
||||
return ".vhd"
|
||||
case "gcp":
|
||||
return ".img.tar.gz"
|
||||
case "aws":
|
||||
return ".raw"
|
||||
case "docker":
|
||||
// Docker format outputs a tarball that can be loaded with `docker load`
|
||||
return ".docker.tar"
|
||||
case "tar":
|
||||
return ".tar"
|
||||
case "kernel+initrd":
|
||||
return "-initrd.img"
|
||||
default:
|
||||
return "." + format
|
||||
}
|
||||
}
|
||||
|
||||
// validateLinuxKitCli checks that the linuxkit CLI is available on PATH.
//
// It uses exec.LookPath rather than spawning `linuxkit version`, so the
// probe is cheap, has no side effects, and does not conflate "not
// installed" with "installed but exits non-zero".
func validateLinuxKitCli() error {
	if _, err := exec.LookPath("linuxkit"); err != nil {
		return errors.New("linuxkit: linuxkit CLI not found. Install it from https://github.com/linuxkit/linuxkit")
	}
	return nil
}
|
||||
|
|
@ -1,961 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLinuxKitPublisher_Name_Good(t *testing.T) {
|
||||
t.Run("returns linuxkit", func(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
assert.Equal(t, "linuxkit", p.Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_ParseConfig_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("uses defaults when no extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, "/project/.core/linuxkit/server.yml", cfg.Config)
|
||||
assert.Equal(t, []string{"iso"}, cfg.Formats)
|
||||
assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms)
|
||||
})
|
||||
|
||||
t.Run("parses extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"config": ".core/linuxkit/custom.yml",
|
||||
"formats": []any{"iso", "qcow2", "vmdk"},
|
||||
"platforms": []any{"linux/amd64", "linux/arm64"},
|
||||
},
|
||||
}
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, "/project/.core/linuxkit/custom.yml", cfg.Config)
|
||||
assert.Equal(t, []string{"iso", "qcow2", "vmdk"}, cfg.Formats)
|
||||
assert.Equal(t, []string{"linux/amd64", "linux/arm64"}, cfg.Platforms)
|
||||
})
|
||||
|
||||
t.Run("handles absolute config path", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"config": "/absolute/path/to/config.yml",
|
||||
},
|
||||
}
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, "/absolute/path/to/config.yml", cfg.Config)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_BuildLinuxKitArgs_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("builds basic args for amd64", func(t *testing.T) {
|
||||
args := p.buildLinuxKitArgs("/config/server.yml", "iso", "linuxkit-1.0.0-amd64", "/output", "amd64")
|
||||
|
||||
assert.Contains(t, args, "build")
|
||||
assert.Contains(t, args, "--format")
|
||||
assert.Contains(t, args, "iso")
|
||||
assert.Contains(t, args, "--name")
|
||||
assert.Contains(t, args, "linuxkit-1.0.0-amd64")
|
||||
assert.Contains(t, args, "--dir")
|
||||
assert.Contains(t, args, "/output")
|
||||
assert.Contains(t, args, "/config/server.yml")
|
||||
// Should not contain --arch for amd64 (default)
|
||||
assert.NotContains(t, args, "--arch")
|
||||
})
|
||||
|
||||
t.Run("builds args with arch for arm64", func(t *testing.T) {
|
||||
args := p.buildLinuxKitArgs("/config/server.yml", "qcow2", "linuxkit-1.0.0-arm64", "/output", "arm64")
|
||||
|
||||
assert.Contains(t, args, "--arch")
|
||||
assert.Contains(t, args, "arm64")
|
||||
assert.Contains(t, args, "qcow2")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_BuildBaseName_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("strips v prefix", func(t *testing.T) {
|
||||
name := p.buildBaseName("v1.2.3")
|
||||
assert.Equal(t, "linuxkit-1.2.3", name)
|
||||
})
|
||||
|
||||
t.Run("handles version without v prefix", func(t *testing.T) {
|
||||
name := p.buildBaseName("1.2.3")
|
||||
assert.Equal(t, "linuxkit-1.2.3", name)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_GetArtifactPath_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
outputDir string
|
||||
outputName string
|
||||
format string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "ISO format",
|
||||
outputDir: "/dist/linuxkit",
|
||||
outputName: "linuxkit-1.0.0-amd64",
|
||||
format: "iso",
|
||||
expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.iso",
|
||||
},
|
||||
{
|
||||
name: "raw format",
|
||||
outputDir: "/dist/linuxkit",
|
||||
outputName: "linuxkit-1.0.0-amd64",
|
||||
format: "raw",
|
||||
expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.raw",
|
||||
},
|
||||
{
|
||||
name: "qcow2 format",
|
||||
outputDir: "/dist/linuxkit",
|
||||
outputName: "linuxkit-1.0.0-arm64",
|
||||
format: "qcow2",
|
||||
expected: "/dist/linuxkit/linuxkit-1.0.0-arm64.qcow2",
|
||||
},
|
||||
{
|
||||
name: "vmdk format",
|
||||
outputDir: "/dist/linuxkit",
|
||||
outputName: "linuxkit-1.0.0-amd64",
|
||||
format: "vmdk",
|
||||
expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.vmdk",
|
||||
},
|
||||
{
|
||||
name: "gcp format",
|
||||
outputDir: "/dist/linuxkit",
|
||||
outputName: "linuxkit-1.0.0-amd64",
|
||||
format: "gcp",
|
||||
expected: "/dist/linuxkit/linuxkit-1.0.0-amd64.img.tar.gz",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
path := p.getArtifactPath(tc.outputDir, tc.outputName, tc.format)
|
||||
assert.Equal(t, tc.expected, path)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_GetFormatExtension_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
tests := []struct {
|
||||
format string
|
||||
expected string
|
||||
}{
|
||||
{"iso", ".iso"},
|
||||
{"raw", ".raw"},
|
||||
{"qcow2", ".qcow2"},
|
||||
{"vmdk", ".vmdk"},
|
||||
{"vhd", ".vhd"},
|
||||
{"gcp", ".img.tar.gz"},
|
||||
{"aws", ".raw"},
|
||||
{"unknown", ".unknown"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.format, func(t *testing.T) {
|
||||
ext := p.getFormatExtension(tc.format)
|
||||
assert.Equal(t, tc.expected, ext)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_Publish_Bad(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("fails when config file not found with linuxkit installed", func(t *testing.T) {
|
||||
if err := validateLinuxKitCli(); err != nil {
|
||||
t.Skip("skipping test: linuxkit CLI not available")
|
||||
}
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/nonexistent",
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"config": "/nonexistent/config.yml",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "config file not found")
|
||||
})
|
||||
|
||||
t.Run("fails when linuxkit CLI not available", func(t *testing.T) {
|
||||
if err := validateLinuxKitCli(); err == nil {
|
||||
t.Skip("skipping test: linuxkit CLI is available")
|
||||
}
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/tmp",
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "linuxkit CLI not found")
|
||||
})
|
||||
|
||||
t.Run("fails when repository cannot be detected and not provided", func(t *testing.T) {
|
||||
if err := validateLinuxKitCli(); err != nil {
|
||||
t.Skip("skipping test: linuxkit CLI not available")
|
||||
}
|
||||
|
||||
// Create temp directory that is NOT a git repo
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Create a config file
|
||||
configPath := filepath.Join(tmpDir, "config.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"config": "config.yml",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: ""} // Empty repository
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "could not determine repository")
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateLinuxKitCli_Good(t *testing.T) {
|
||||
t.Run("returns expected error when linuxkit not installed", func(t *testing.T) {
|
||||
err := validateLinuxKitCli()
|
||||
if err != nil {
|
||||
// LinuxKit is not installed
|
||||
assert.Contains(t, err.Error(), "linuxkit CLI not found")
|
||||
}
|
||||
// If err is nil, linuxkit is installed - that's OK
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_Publish_WithCLI_Good(t *testing.T) {
|
||||
// These tests run only when linuxkit CLI is available
|
||||
if err := validateLinuxKitCli(); err != nil {
|
||||
t.Skip("skipping test: linuxkit CLI not available")
|
||||
}
|
||||
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("succeeds with dry run and valid config", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Create config directory and file
|
||||
configDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(configDir, "server.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: linuxkit/kernel:5.10\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish")
|
||||
})
|
||||
|
||||
t.Run("fails with missing config file", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "config file not found")
|
||||
})
|
||||
|
||||
t.Run("uses relCfg repository", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
configDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(configDir, "server.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: "custom-owner/custom-repo"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "custom-owner/custom-repo")
|
||||
})
|
||||
|
||||
t.Run("detects repository when not provided", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Create config file
|
||||
configDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(configDir, "server.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:detected-owner/detected-repo.git")
|
||||
cmd.Dir = tmpDir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: ""} // Empty to trigger detection
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "detected-owner/detected-repo")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_Publish_NilRelCfg_Good(t *testing.T) {
|
||||
if err := validateLinuxKitCli(); err != nil {
|
||||
t.Skip("skipping test: linuxkit CLI not available")
|
||||
}
|
||||
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("handles nil relCfg by detecting repo", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
// Create config file
|
||||
configDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(configDir, "server.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "remote", "add", "origin", "git@github.com:nil-owner/nil-repo.git")
|
||||
cmd.Dir = tmpDir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, nil, true) // nil relCfg
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "nil-owner/nil-repo")
|
||||
})
|
||||
}
|
||||
|
||||
// mockReleaseConfig implements ReleaseConfig for testing.
type mockReleaseConfig struct {
	// repository is returned verbatim by GetRepository; tests pass an
	// empty string to exercise git-based repository detection.
	repository string
	// projectName is returned verbatim by GetProjectName.
	projectName string
}
|
||||
|
||||
// GetRepository returns the fixed repository value configured on the mock.
func (m *mockReleaseConfig) GetRepository() string {
	return m.repository
}
|
||||
|
||||
// GetProjectName returns the fixed project name configured on the mock.
func (m *mockReleaseConfig) GetProjectName() string {
	return m.projectName
}
|
||||
|
||||
func TestLinuxKitPublisher_DryRunPublish_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("outputs expected dry run information", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
cfg := LinuxKitConfig{
|
||||
Config: "/project/.core/linuxkit/server.yml",
|
||||
Formats: []string{"iso", "qcow2"},
|
||||
Platforms: []string{"linux/amd64", "linux/arm64"},
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(release, cfg, "owner/repo")
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish")
|
||||
assert.Contains(t, output, "Repository: owner/repo")
|
||||
assert.Contains(t, output, "Version: v1.0.0")
|
||||
assert.Contains(t, output, "Config: /project/.core/linuxkit/server.yml")
|
||||
assert.Contains(t, output, "Formats: iso, qcow2")
|
||||
assert.Contains(t, output, "Platforms: linux/amd64, linux/arm64")
|
||||
assert.Contains(t, output, "Would execute commands:")
|
||||
assert.Contains(t, output, "linuxkit build")
|
||||
assert.Contains(t, output, "Would upload artifacts to release:")
|
||||
assert.Contains(t, output, "linuxkit-1.0.0-amd64.iso")
|
||||
assert.Contains(t, output, "linuxkit-1.0.0-amd64.qcow2")
|
||||
assert.Contains(t, output, "linuxkit-1.0.0-arm64.iso")
|
||||
assert.Contains(t, output, "linuxkit-1.0.0-arm64.qcow2")
|
||||
assert.Contains(t, output, "END DRY RUN")
|
||||
})
|
||||
|
||||
t.Run("shows docker format usage hint", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
cfg := LinuxKitConfig{
|
||||
Config: "/config.yml",
|
||||
Formats: []string{"docker"},
|
||||
Platforms: []string{"linux/amd64"},
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(release, cfg, "owner/repo")
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "linuxkit-1.0.0-amd64.docker.tar")
|
||||
assert.Contains(t, output, "Usage: docker load <")
|
||||
})
|
||||
|
||||
t.Run("handles single platform and format", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v2.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
cfg := LinuxKitConfig{
|
||||
Config: "/config.yml",
|
||||
Formats: []string{"iso"},
|
||||
Platforms: []string{"linux/amd64"},
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(release, cfg, "owner/repo")
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-amd64.iso")
|
||||
assert.NotContains(t, output, "arm64")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_GetFormatExtension_AllFormats_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
tests := []struct {
|
||||
format string
|
||||
expected string
|
||||
}{
|
||||
{"iso", ".iso"},
|
||||
{"iso-bios", ".iso"},
|
||||
{"iso-efi", ".iso"},
|
||||
{"raw", ".raw"},
|
||||
{"raw-bios", ".raw"},
|
||||
{"raw-efi", ".raw"},
|
||||
{"qcow2", ".qcow2"},
|
||||
{"qcow2-bios", ".qcow2"},
|
||||
{"qcow2-efi", ".qcow2"},
|
||||
{"vmdk", ".vmdk"},
|
||||
{"vhd", ".vhd"},
|
||||
{"gcp", ".img.tar.gz"},
|
||||
{"aws", ".raw"},
|
||||
{"docker", ".docker.tar"},
|
||||
{"tar", ".tar"},
|
||||
{"kernel+initrd", "-initrd.img"},
|
||||
{"custom--format", ".custom--format"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.format, func(t *testing.T) {
|
||||
ext := p.getFormatExtension(tc.format)
|
||||
assert.Equal(t, tc.expected, ext)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_BuildLinuxKitArgs_AllArchitectures_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("amd64 does not include arch flag", func(t *testing.T) {
|
||||
args := p.buildLinuxKitArgs("/config.yml", "iso", "output--name", "/output", "amd64")
|
||||
|
||||
assert.Contains(t, args, "build")
|
||||
assert.Contains(t, args, "--format")
|
||||
assert.Contains(t, args, "iso")
|
||||
assert.Contains(t, args, "--name")
|
||||
assert.Contains(t, args, "output--name")
|
||||
assert.Contains(t, args, "--dir")
|
||||
assert.Contains(t, args, "/output")
|
||||
assert.Contains(t, args, "/config.yml")
|
||||
assert.NotContains(t, args, "--arch")
|
||||
})
|
||||
|
||||
t.Run("arm64 includes arch flag", func(t *testing.T) {
|
||||
args := p.buildLinuxKitArgs("/config.yml", "qcow2", "output--name", "/output", "arm64")
|
||||
|
||||
assert.Contains(t, args, "--arch")
|
||||
assert.Contains(t, args, "arm64")
|
||||
})
|
||||
|
||||
t.Run("other architectures include arch flag", func(t *testing.T) {
|
||||
args := p.buildLinuxKitArgs("/config.yml", "raw", "output--name", "/output", "riscv64")
|
||||
|
||||
assert.Contains(t, args, "--arch")
|
||||
assert.Contains(t, args, "riscv64")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_ParseConfig_EdgeCases_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("handles nil extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: nil,
|
||||
}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, "/project/.core/linuxkit/server.yml", cfg.Config)
|
||||
assert.Equal(t, []string{"iso"}, cfg.Formats)
|
||||
assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms)
|
||||
})
|
||||
|
||||
t.Run("handles empty extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{},
|
||||
}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, "/project/.core/linuxkit/server.yml", cfg.Config)
|
||||
assert.Equal(t, []string{"iso"}, cfg.Formats)
|
||||
assert.Equal(t, []string{"linux/amd64"}, cfg.Platforms)
|
||||
})
|
||||
|
||||
t.Run("handles mixed format types in extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"formats": []any{"iso", 123, "qcow2"}, // includes non-string
|
||||
},
|
||||
}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, []string{"iso", "qcow2"}, cfg.Formats)
|
||||
})
|
||||
|
||||
t.Run("handles mixed platform types in extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"platforms": []any{"linux/amd64", nil, "linux/arm64"},
|
||||
},
|
||||
}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, "/project")
|
||||
|
||||
assert.Equal(t, []string{"linux/amd64", "linux/arm64"}, cfg.Platforms)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_BuildBaseName_EdgeCases_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
version string
|
||||
expected string
|
||||
}{
|
||||
{"strips v prefix", "v1.2.3", "linuxkit-1.2.3"},
|
||||
{"no v prefix", "1.2.3", "linuxkit-1.2.3"},
|
||||
{"prerelease version", "v1.0.0-alpha.1", "linuxkit-1.0.0-alpha.1"},
|
||||
{"build metadata", "v1.0.0+build.123", "linuxkit-1.0.0+build.123"},
|
||||
{"only v", "v", "linuxkit-"},
|
||||
{"empty string", "", "linuxkit-"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
name := p.buildBaseName(tc.version)
|
||||
assert.Equal(t, tc.expected, name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_GetArtifactPath_AllFormats_Good(t *testing.T) {
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
outputDir string
|
||||
outputName string
|
||||
format string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "ISO format",
|
||||
outputDir: "/dist",
|
||||
outputName: "linuxkit-1.0.0-amd64",
|
||||
format: "iso",
|
||||
expected: "/dist/linuxkit-1.0.0-amd64.iso",
|
||||
},
|
||||
{
|
||||
name: "ISO-BIOS format",
|
||||
outputDir: "/dist",
|
||||
outputName: "linuxkit-1.0.0-amd64",
|
||||
format: "iso-bios",
|
||||
expected: "/dist/linuxkit-1.0.0-amd64.iso",
|
||||
},
|
||||
{
|
||||
name: "docker format",
|
||||
outputDir: "/output",
|
||||
outputName: "linuxkit-2.0.0-arm64",
|
||||
format: "docker",
|
||||
expected: "/output/linuxkit-2.0.0-arm64.docker.tar",
|
||||
},
|
||||
{
|
||||
name: "tar format",
|
||||
outputDir: "/output",
|
||||
outputName: "linuxkit-1.0.0",
|
||||
format: "tar",
|
||||
expected: "/output/linuxkit-1.0.0.tar",
|
||||
},
|
||||
{
|
||||
name: "kernel+initrd format",
|
||||
outputDir: "/output",
|
||||
outputName: "linuxkit-1.0.0",
|
||||
format: "kernel+initrd",
|
||||
expected: "/output/linuxkit-1.0.0-initrd.img",
|
||||
},
|
||||
{
|
||||
name: "GCP format",
|
||||
outputDir: "/output",
|
||||
outputName: "linuxkit-1.0.0",
|
||||
format: "gcp",
|
||||
expected: "/output/linuxkit-1.0.0.img.tar.gz",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
path := p.getArtifactPath(tc.outputDir, tc.outputName, tc.format)
|
||||
assert.Equal(t, tc.expected, path)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_Publish_NilFS_Bad(t *testing.T) {
|
||||
if err := validateLinuxKitCli(); err != nil {
|
||||
t.Skip("skipping test: linuxkit CLI not available")
|
||||
}
|
||||
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("returns error when release FS is nil", func(t *testing.T) {
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/tmp",
|
||||
FS: nil, // nil FS should be guarded
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "release filesystem (FS) is nil")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLinuxKitPublisher_Publish_DryRun_Good(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
// Skip if linuxkit CLI is not available
|
||||
if err := validateLinuxKitCli(); err != nil {
|
||||
t.Skip("skipping test: linuxkit CLI not available")
|
||||
}
|
||||
|
||||
p := NewLinuxKitPublisher()
|
||||
|
||||
t.Run("dry run succeeds with valid config file", func(t *testing.T) {
|
||||
// Create temp directory with config file
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
configDir := filepath.Join(tmpDir, ".core", "linuxkit")
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
configPath := filepath.Join(configDir, "server.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: linuxkit/kernel:5.10\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "linuxkit"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "DRY RUN: LinuxKit Build & Publish")
|
||||
})
|
||||
|
||||
t.Run("dry run uses custom config path", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
customConfigPath := filepath.Join(tmpDir, "custom-config.yml")
|
||||
err = os.WriteFile(customConfigPath, []byte("kernel:\n image: custom\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"config": customConfigPath,
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "custom-config.yml")
|
||||
})
|
||||
|
||||
t.Run("dry run with multiple formats and platforms", func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "linuxkit-test")
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = os.RemoveAll(tmpDir) }()
|
||||
|
||||
configPath := filepath.Join(tmpDir, "config.yml")
|
||||
err = os.WriteFile(configPath, []byte("kernel:\n image: test\n"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
release := &Release{
|
||||
Version: "v2.0.0",
|
||||
ProjectDir: tmpDir,
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "linuxkit",
|
||||
Extended: map[string]any{
|
||||
"config": "config.yml",
|
||||
"formats": []any{"iso", "qcow2", "vmdk"},
|
||||
"platforms": []any{"linux/amd64", "linux/arm64"},
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err = p.Publish(context.TODO(), release, pubCfg, relCfg, true)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
// Check all format/platform combinations are listed
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-amd64.iso")
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-amd64.qcow2")
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-amd64.vmdk")
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-arm64.iso")
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-arm64.qcow2")
|
||||
assert.Contains(t, output, "linuxkit-2.0.0-arm64.vmdk")
|
||||
})
|
||||
}
|
||||
|
|
@ -1,266 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// npmTemplates embeds the *.tmpl files under templates/npm/ used to
// generate the npm binary-wrapper package.
//
//go:embed templates/npm/*.tmpl
var npmTemplates embed.FS
|
||||
|
||||
// NpmConfig holds npm-specific configuration parsed from the publisher's
// release config (Publish errors if Package is empty).
type NpmConfig struct {
	// Package is the npm package name (e.g., "@host-uk/core").
	Package string
	// Access is the npm access level: "public" or "restricted".
	Access string
}
|
||||
|
||||
// NpmPublisher publishes releases to npm using the binary wrapper pattern.
|
||||
type NpmPublisher struct{}
|
||||
|
||||
// NewNpmPublisher creates a new npm publisher.
|
||||
func NewNpmPublisher() *NpmPublisher {
|
||||
return &NpmPublisher{}
|
||||
}
|
||||
|
||||
// Name returns the publisher's identifier.
|
||||
func (p *NpmPublisher) Name() string {
|
||||
return "npm"
|
||||
}
|
||||
|
||||
// Publish publishes the release to npm.
// It generates a binary wrapper package that downloads the correct platform binary on postinstall.
//
// Configuration comes from pubCfg.Extended ("package", "access"); the
// repository and project name come from relCfg when set, otherwise they
// are detected from the project directory / inferred from the package
// name. When dryRun is true nothing is published; the generated
// package.json and intended npm command are printed instead.
func (p *NpmPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	// Parse npm config
	npmCfg := p.parseConfig(pubCfg, relCfg)

	// Validate configuration
	if npmCfg.Package == "" {
		return errors.New("npm.Publish: package name is required (set publish.npm.package in config)")
	}

	// Get repository
	repo := ""
	if relCfg != nil {
		repo = relCfg.GetRepository()
	}
	if repo == "" {
		// Fall back to detecting the repository from the project checkout.
		detectedRepo, err := detectRepository(release.ProjectDir)
		if err != nil {
			return fmt.Errorf("npm.Publish: could not determine repository: %w", err)
		}
		repo = detectedRepo
	}

	// Get project name (binary name)
	projectName := ""
	if relCfg != nil {
		projectName = relCfg.GetProjectName()
	}
	if projectName == "" {
		// Try to infer from package name: last segment after "/"
		// (e.g. "@myorg/mycli" -> "mycli").
		parts := strings.Split(npmCfg.Package, "/")
		projectName = parts[len(parts)-1]
	}

	// Strip leading 'v' from version for npm (npm versions are bare semver)
	version := strings.TrimPrefix(release.Version, "v")

	// Template data
	data := npmTemplateData{
		Package:     npmCfg.Package,
		Version:     version,
		Description: fmt.Sprintf("%s CLI", projectName),
		License:     "MIT",
		Repository:  repo,
		BinaryName:  projectName,
		ProjectName: projectName,
		Access:      npmCfg.Access,
	}

	if dryRun {
		return p.dryRunPublish(release.FS, data, &npmCfg)
	}

	return p.executePublish(ctx, release.FS, data, &npmCfg)
}
|
||||
|
||||
// parseConfig extracts npm-specific configuration from the publisher config.
|
||||
func (p *NpmPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) NpmConfig {
|
||||
cfg := NpmConfig{
|
||||
Package: "",
|
||||
Access: "public",
|
||||
}
|
||||
|
||||
// Override from extended config if present
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if pkg, ok := ext["package"].(string); ok && pkg != "" {
|
||||
cfg.Package = pkg
|
||||
}
|
||||
if access, ok := ext["access"].(string); ok && access != "" {
|
||||
cfg.Access = access
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// npmTemplateData holds data for npm templates.
// It is passed to the embedded package.json / install.js / run.js templates.
type npmTemplateData struct {
	// Package is the npm package name (e.g. "@myorg/mycli").
	Package string
	// Version is the bare semver string (leading "v" already stripped).
	Version string
	// Description becomes the package.json description field.
	Description string
	// License is the SPDX license identifier.
	License string
	// Repository is the "owner/repo" slug of the source repository.
	Repository string
	// BinaryName is the name of the installed CLI binary.
	BinaryName string
	// ProjectName is the project identifier used by the templates.
	ProjectName string
	// Access is the npm access level passed to `npm publish --access`.
	Access string
}
|
||||
|
||||
// dryRunPublish shows what would be done without actually publishing.
// It renders the package.json template and prints it along with the npm
// command that executePublish would run. cfg is not referenced in this
// body; it is kept so the signature mirrors executePublish.
func (p *NpmPublisher) dryRunPublish(m io.Medium, data npmTemplateData, cfg *NpmConfig) error {
	fmt.Println()
	fmt.Println("=== DRY RUN: npm Publish ===")
	fmt.Println()
	fmt.Printf("Package: %s\n", data.Package)
	fmt.Printf("Version: %s\n", data.Version)
	fmt.Printf("Access: %s\n", data.Access)
	fmt.Printf("Repository: %s\n", data.Repository)
	fmt.Printf("Binary: %s\n", data.BinaryName)
	fmt.Println()

	// Generate and show package.json
	pkgJSON, err := p.renderTemplate(m, "templates/npm/package.json.tmpl", data)
	if err != nil {
		return fmt.Errorf("npm.dryRunPublish: %w", err)
	}
	fmt.Println("Generated package.json:")
	fmt.Println("---")
	fmt.Println(pkgJSON)
	fmt.Println("---")
	fmt.Println()

	fmt.Println("Would run: npm publish --access", data.Access)
	fmt.Println()
	fmt.Println("=== END DRY RUN ===")

	return nil
}
|
||||
|
||||
// executePublish actually creates and publishes the npm package.
// It stages a binary-wrapper package (package.json, install.js,
// bin/run.js, and a token-bearing .npmrc) in a temp directory and runs
// `npm publish` there. Requires NPM_TOKEN in the environment and the
// npm CLI on PATH. cfg is not referenced in this body; it is kept so
// the signature mirrors dryRunPublish.
func (p *NpmPublisher) executePublish(ctx context.Context, m io.Medium, data npmTemplateData, cfg *NpmConfig) error {
	// Check for NPM_TOKEN
	if os.Getenv("NPM_TOKEN") == "" {
		return errors.New("npm.Publish: NPM_TOKEN environment variable is required")
	}

	// Create temp directory for package
	tmpDir, err := os.MkdirTemp("", "npm-publish-*")
	if err != nil {
		return fmt.Errorf("npm.Publish: failed to create temp directory: %w", err)
	}
	defer func() { _ = os.RemoveAll(tmpDir) }()

	// Create bin directory
	binDir := filepath.Join(tmpDir, "bin")
	if err := os.MkdirAll(binDir, 0755); err != nil {
		return fmt.Errorf("npm.Publish: failed to create bin directory: %w", err)
	}

	// Generate package.json
	pkgJSON, err := p.renderTemplate(m, "templates/npm/package.json.tmpl", data)
	if err != nil {
		return fmt.Errorf("npm.Publish: failed to render package.json: %w", err)
	}
	if err := os.WriteFile(filepath.Join(tmpDir, "package.json"), []byte(pkgJSON), 0644); err != nil {
		return fmt.Errorf("npm.Publish: failed to write package.json: %w", err)
	}

	// Generate install.js
	installJS, err := p.renderTemplate(m, "templates/npm/install.js.tmpl", data)
	if err != nil {
		return fmt.Errorf("npm.Publish: failed to render install.js: %w", err)
	}
	if err := os.WriteFile(filepath.Join(tmpDir, "install.js"), []byte(installJS), 0644); err != nil {
		return fmt.Errorf("npm.Publish: failed to write install.js: %w", err)
	}

	// Generate run.js
	runJS, err := p.renderTemplate(m, "templates/npm/run.js.tmpl", data)
	if err != nil {
		return fmt.Errorf("npm.Publish: failed to render run.js: %w", err)
	}
	// 0755: run.js is the executable entry point exposed via "bin".
	if err := os.WriteFile(filepath.Join(binDir, "run.js"), []byte(runJS), 0755); err != nil {
		return fmt.Errorf("npm.Publish: failed to write run.js: %w", err)
	}

	// Create .npmrc with token.
	// npm expands ${NPM_TOKEN} from the environment at publish time,
	// so the token itself is never written to disk. 0600 keeps the
	// auth config private regardless.
	npmrc := "//registry.npmjs.org/:_authToken=${NPM_TOKEN}\n"
	if err := os.WriteFile(filepath.Join(tmpDir, ".npmrc"), []byte(npmrc), 0600); err != nil {
		return fmt.Errorf("npm.Publish: failed to write .npmrc: %w", err)
	}

	// Run npm publish
	cmd := exec.CommandContext(ctx, "npm", "publish", "--access", data.Access)
	cmd.Dir = tmpDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Env = append(os.Environ(), "NPM_TOKEN="+os.Getenv("NPM_TOKEN"))

	fmt.Printf("Publishing %s@%s to npm...\n", data.Package, data.Version)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("npm.Publish: npm publish failed: %w", err)
	}

	fmt.Printf("Published %s@%s to npm\n", data.Package, data.Version)
	fmt.Printf(" https://www.npmjs.com/package/%s\n", data.Package)

	return nil
}
|
||||
|
||||
// renderTemplate renders an embedded template with the given data.
|
||||
func (p *NpmPublisher) renderTemplate(m io.Medium, name string, data npmTemplateData) (string, error) {
|
||||
var content []byte
|
||||
var err error
|
||||
|
||||
// Try custom template from medium
|
||||
customPath := filepath.Join(".core", name)
|
||||
if m != nil && m.IsFile(customPath) {
|
||||
customContent, err := m.Read(customPath)
|
||||
if err == nil {
|
||||
content = []byte(customContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to embedded template
|
||||
if content == nil {
|
||||
content, err = npmTemplates.ReadFile(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read template %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
tmpl, err := template.New(filepath.Base(name)).Parse(string(content))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, data); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template %s: %w", name, err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
|
@ -1,303 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNpmPublisher_Name_Good(t *testing.T) {
|
||||
t.Run("returns npm", func(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
assert.Equal(t, "npm", p.Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmPublisher_ParseConfig_Good(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
|
||||
t.Run("uses defaults when no extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{Type: "npm"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Package)
|
||||
assert.Equal(t, "public", cfg.Access)
|
||||
})
|
||||
|
||||
t.Run("parses package and access from extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "npm",
|
||||
Extended: map[string]any{
|
||||
"package": "@myorg/mypackage",
|
||||
"access": "restricted",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Equal(t, "@myorg/mypackage", cfg.Package)
|
||||
assert.Equal(t, "restricted", cfg.Access)
|
||||
})
|
||||
|
||||
t.Run("keeps default access when not specified", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "npm",
|
||||
Extended: map[string]any{
|
||||
"package": "@myorg/mypackage",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Equal(t, "@myorg/mypackage", cfg.Package)
|
||||
assert.Equal(t, "public", cfg.Access)
|
||||
})
|
||||
|
||||
t.Run("handles nil extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "npm",
|
||||
Extended: nil,
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Package)
|
||||
assert.Equal(t, "public", cfg.Access)
|
||||
})
|
||||
|
||||
t.Run("handles empty strings in config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "npm",
|
||||
Extended: map[string]any{
|
||||
"package": "",
|
||||
"access": "",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Package)
|
||||
assert.Equal(t, "public", cfg.Access)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmPublisher_RenderTemplate_Good(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
|
||||
t.Run("renders package.json template with data", func(t *testing.T) {
|
||||
data := npmTemplateData{
|
||||
Package: "@myorg/mycli",
|
||||
Version: "1.2.3",
|
||||
Description: "My awesome CLI",
|
||||
License: "MIT",
|
||||
Repository: "owner/myapp",
|
||||
BinaryName: "myapp",
|
||||
ProjectName: "myapp",
|
||||
Access: "public",
|
||||
}
|
||||
|
||||
result, err := p.renderTemplate(io.Local, "templates/npm/package.json.tmpl", data)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, result, `"name": "@myorg/mycli"`)
|
||||
assert.Contains(t, result, `"version": "1.2.3"`)
|
||||
assert.Contains(t, result, `"description": "My awesome CLI"`)
|
||||
assert.Contains(t, result, `"license": "MIT"`)
|
||||
assert.Contains(t, result, "owner/myapp")
|
||||
assert.Contains(t, result, `"myapp": "./bin/run.js"`)
|
||||
assert.Contains(t, result, `"access": "public"`)
|
||||
})
|
||||
|
||||
t.Run("renders restricted access correctly", func(t *testing.T) {
|
||||
data := npmTemplateData{
|
||||
Package: "@private/cli",
|
||||
Version: "1.0.0",
|
||||
Description: "Private CLI",
|
||||
License: "MIT",
|
||||
Repository: "org/repo",
|
||||
BinaryName: "cli",
|
||||
ProjectName: "cli",
|
||||
Access: "restricted",
|
||||
}
|
||||
|
||||
result, err := p.renderTemplate(io.Local, "templates/npm/package.json.tmpl", data)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, result, `"access": "restricted"`)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmPublisher_RenderTemplate_Bad(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
|
||||
t.Run("returns error for non-existent template", func(t *testing.T) {
|
||||
data := npmTemplateData{}
|
||||
_, err := p.renderTemplate(io.Local, "templates/npm/nonexistent.tmpl", data)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to read template")
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmPublisher_DryRunPublish_Good(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
|
||||
t.Run("outputs expected dry run information", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
data := npmTemplateData{
|
||||
Package: "@myorg/mycli",
|
||||
Version: "1.0.0",
|
||||
Access: "public",
|
||||
Repository: "owner/repo",
|
||||
BinaryName: "mycli",
|
||||
Description: "My CLI",
|
||||
}
|
||||
cfg := &NpmConfig{
|
||||
Package: "@myorg/mycli",
|
||||
Access: "public",
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(io.Local, data, cfg)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "DRY RUN: npm Publish")
|
||||
assert.Contains(t, output, "Package: @myorg/mycli")
|
||||
assert.Contains(t, output, "Version: 1.0.0")
|
||||
assert.Contains(t, output, "Access: public")
|
||||
assert.Contains(t, output, "Repository: owner/repo")
|
||||
assert.Contains(t, output, "Binary: mycli")
|
||||
assert.Contains(t, output, "Generated package.json:")
|
||||
assert.Contains(t, output, "Would run: npm publish --access public")
|
||||
assert.Contains(t, output, "END DRY RUN")
|
||||
})
|
||||
|
||||
t.Run("shows restricted access correctly", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
data := npmTemplateData{
|
||||
Package: "@private/cli",
|
||||
Version: "2.0.0",
|
||||
Access: "restricted",
|
||||
Repository: "org/repo",
|
||||
BinaryName: "cli",
|
||||
}
|
||||
cfg := &NpmConfig{
|
||||
Package: "@private/cli",
|
||||
Access: "restricted",
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(io.Local, data, cfg)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "Access: restricted")
|
||||
assert.Contains(t, output, "Would run: npm publish --access restricted")
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmPublisher_Publish_Bad(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
|
||||
t.Run("fails when package name not configured", func(t *testing.T) {
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "npm"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "package name is required")
|
||||
})
|
||||
|
||||
t.Run("fails when NPM_TOKEN not set in non-dry-run", func(t *testing.T) {
|
||||
// Ensure NPM_TOKEN is not set
|
||||
oldToken := os.Getenv("NPM_TOKEN")
|
||||
_ = os.Unsetenv("NPM_TOKEN")
|
||||
defer func() {
|
||||
if oldToken != "" {
|
||||
_ = os.Setenv("NPM_TOKEN", oldToken)
|
||||
}
|
||||
}()
|
||||
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "npm",
|
||||
Extended: map[string]any{
|
||||
"package": "@test/package",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "NPM_TOKEN environment variable is required")
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmConfig_Defaults_Good(t *testing.T) {
|
||||
t.Run("has sensible defaults", func(t *testing.T) {
|
||||
p := NewNpmPublisher()
|
||||
pubCfg := PublisherConfig{Type: "npm"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Package)
|
||||
assert.Equal(t, "public", cfg.Access)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNpmTemplateData_Good(t *testing.T) {
|
||||
t.Run("struct has all expected fields", func(t *testing.T) {
|
||||
data := npmTemplateData{
|
||||
Package: "@myorg/package",
|
||||
Version: "1.0.0",
|
||||
Description: "description",
|
||||
License: "MIT",
|
||||
Repository: "org/repo",
|
||||
BinaryName: "cli",
|
||||
ProjectName: "cli",
|
||||
Access: "public",
|
||||
}
|
||||
|
||||
assert.Equal(t, "@myorg/package", data.Package)
|
||||
assert.Equal(t, "1.0.0", data.Version)
|
||||
assert.Equal(t, "description", data.Description)
|
||||
assert.Equal(t, "MIT", data.License)
|
||||
assert.Equal(t, "org/repo", data.Repository)
|
||||
assert.Equal(t, "cli", data.BinaryName)
|
||||
assert.Equal(t, "cli", data.ProjectName)
|
||||
assert.Equal(t, "public", data.Access)
|
||||
})
|
||||
}
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Release represents a release to be published.
// It bundles everything a Publisher needs: the version, the built
// artifacts, the changelog text, and file-system access.
type Release struct {
	// Version is the semantic version string (e.g., "v1.2.3").
	Version string
	// Artifacts are the built release artifacts.
	Artifacts []build.Artifact
	// Changelog is the generated markdown changelog.
	Changelog string
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// FS is the medium for file operations.
	FS io.Medium
}
|
||||
|
||||
// PublisherConfig holds configuration for a publisher.
|
||||
type PublisherConfig struct {
|
||||
// Type is the publisher type (e.g., "github", "linuxkit", "docker").
|
||||
Type string
|
||||
// Prerelease marks the release as a prerelease.
|
||||
Prerelease bool
|
||||
// Draft creates the release as a draft.
|
||||
Draft bool
|
||||
// Extended holds publisher-specific configuration.
|
||||
Extended any
|
||||
}
|
||||
|
||||
// ReleaseConfig holds release configuration needed by publishers.
|
||||
type ReleaseConfig interface {
|
||||
GetRepository() string
|
||||
GetProjectName() string
|
||||
}
|
||||
|
||||
// Publisher defines the interface for release publishers.
|
||||
type Publisher interface {
|
||||
// Name returns the publisher's identifier.
|
||||
Name() string
|
||||
// Publish publishes the release to the target.
|
||||
// If dryRun is true, it prints what would be done without executing.
|
||||
Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error
|
||||
}
|
||||
|
||||
// NewRelease creates a Release from the release package's Release type.
|
||||
// This is a helper to convert between packages.
|
||||
func NewRelease(version string, artifacts []build.Artifact, changelog, projectDir string, fs io.Medium) *Release {
|
||||
return &Release{
|
||||
Version: version,
|
||||
Artifacts: artifacts,
|
||||
Changelog: changelog,
|
||||
ProjectDir: projectDir,
|
||||
FS: fs,
|
||||
}
|
||||
}
|
||||
|
||||
// NewPublisherConfig creates a PublisherConfig.
|
||||
func NewPublisherConfig(pubType string, prerelease, draft bool, extended any) PublisherConfig {
|
||||
return PublisherConfig{
|
||||
Type: pubType,
|
||||
Prerelease: prerelease,
|
||||
Draft: draft,
|
||||
Extended: extended,
|
||||
}
|
||||
}
|
||||
|
|
@ -1,285 +0,0 @@
|
|||
// Package publishers provides release publishing implementations.
|
||||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
//go:embed templates/scoop/*.tmpl
|
||||
var scoopTemplates embed.FS
|
||||
|
||||
// ScoopConfig holds Scoop-specific configuration.
// At least one of Bucket or an enabled Official config must be set for
// Publish to proceed.
type ScoopConfig struct {
	// Bucket is the Scoop bucket repository (e.g., "host-uk/scoop-bucket").
	Bucket string
	// Official config for generating files for official repo PRs.
	// Nil when no "official" section is present in the extended config.
	Official *OfficialConfig
}
|
||||
|
||||
// ScoopPublisher publishes releases to Scoop.
|
||||
type ScoopPublisher struct{}
|
||||
|
||||
// NewScoopPublisher creates a new Scoop publisher.
|
||||
func NewScoopPublisher() *ScoopPublisher {
|
||||
return &ScoopPublisher{}
|
||||
}
|
||||
|
||||
// Name returns the publisher's identifier.
|
||||
func (p *ScoopPublisher) Name() string {
|
||||
return "scoop"
|
||||
}
|
||||
|
||||
// Publish publishes the release to Scoop.
//
// At least one destination must be configured: a bucket repository to
// commit the manifest to, or an enabled "official" output directory for
// PR files. The repository and project name come from relCfg when set,
// otherwise they are detected/inferred. When dryRun is true the rendered
// manifest is printed instead of being published.
func (p *ScoopPublisher) Publish(ctx context.Context, release *Release, pubCfg PublisherConfig, relCfg ReleaseConfig, dryRun bool) error {
	cfg := p.parseConfig(pubCfg, relCfg)

	if cfg.Bucket == "" && (cfg.Official == nil || !cfg.Official.Enabled) {
		return errors.New("scoop.Publish: bucket is required (set publish.scoop.bucket in config)")
	}

	repo := ""
	if relCfg != nil {
		repo = relCfg.GetRepository()
	}
	if repo == "" {
		// Fall back to detecting the repository from the project checkout.
		detectedRepo, err := detectRepository(release.ProjectDir)
		if err != nil {
			return fmt.Errorf("scoop.Publish: could not determine repository: %w", err)
		}
		repo = detectedRepo
	}

	projectName := ""
	if relCfg != nil {
		projectName = relCfg.GetProjectName()
	}
	if projectName == "" {
		// Default to the last path segment of the "owner/repo" slug.
		parts := strings.Split(repo, "/")
		projectName = parts[len(parts)-1]
	}

	// Scoop manifests use bare versions without the leading "v".
	version := strings.TrimPrefix(release.Version, "v")
	checksums := buildChecksumMap(release.Artifacts)

	data := scoopTemplateData{
		PackageName: projectName,
		Description: fmt.Sprintf("%s CLI", projectName),
		Repository:  repo,
		Version:     version,
		License:     "MIT",
		BinaryName:  projectName,
		Checksums:   checksums,
	}

	if dryRun {
		return p.dryRunPublish(release.FS, data, cfg)
	}

	return p.executePublish(ctx, release.ProjectDir, data, cfg, release)
}
|
||||
|
||||
type scoopTemplateData struct {
|
||||
PackageName string
|
||||
Description string
|
||||
Repository string
|
||||
Version string
|
||||
License string
|
||||
BinaryName string
|
||||
Checksums ChecksumMap
|
||||
}
|
||||
|
||||
func (p *ScoopPublisher) parseConfig(pubCfg PublisherConfig, relCfg ReleaseConfig) ScoopConfig {
|
||||
cfg := ScoopConfig{}
|
||||
|
||||
if ext, ok := pubCfg.Extended.(map[string]any); ok {
|
||||
if bucket, ok := ext["bucket"].(string); ok && bucket != "" {
|
||||
cfg.Bucket = bucket
|
||||
}
|
||||
if official, ok := ext["official"].(map[string]any); ok {
|
||||
cfg.Official = &OfficialConfig{}
|
||||
if enabled, ok := official["enabled"].(bool); ok {
|
||||
cfg.Official.Enabled = enabled
|
||||
}
|
||||
if output, ok := official["output"].(string); ok {
|
||||
cfg.Official.Output = output
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (p *ScoopPublisher) dryRunPublish(m io.Medium, data scoopTemplateData, cfg ScoopConfig) error {
|
||||
fmt.Println()
|
||||
fmt.Println("=== DRY RUN: Scoop Publish ===")
|
||||
fmt.Println()
|
||||
fmt.Printf("Package: %s\n", data.PackageName)
|
||||
fmt.Printf("Version: %s\n", data.Version)
|
||||
fmt.Printf("Bucket: %s\n", cfg.Bucket)
|
||||
fmt.Printf("Repository: %s\n", data.Repository)
|
||||
fmt.Println()
|
||||
|
||||
manifest, err := p.renderTemplate(m, "templates/scoop/manifest.json.tmpl", data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("scoop.dryRunPublish: %w", err)
|
||||
}
|
||||
fmt.Println("Generated manifest.json:")
|
||||
fmt.Println("---")
|
||||
fmt.Println(manifest)
|
||||
fmt.Println("---")
|
||||
fmt.Println()
|
||||
|
||||
if cfg.Bucket != "" {
|
||||
fmt.Printf("Would commit to bucket: %s\n", cfg.Bucket)
|
||||
}
|
||||
if cfg.Official != nil && cfg.Official.Enabled {
|
||||
output := cfg.Official.Output
|
||||
if output == "" {
|
||||
output = "dist/scoop"
|
||||
}
|
||||
fmt.Printf("Would write files for official PR to: %s\n", output)
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("=== END DRY RUN ===")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ScoopPublisher) executePublish(ctx context.Context, projectDir string, data scoopTemplateData, cfg ScoopConfig, release *Release) error {
|
||||
manifest, err := p.renderTemplate(release.FS, "templates/scoop/manifest.json.tmpl", data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("scoop.Publish: failed to render manifest: %w", err)
|
||||
}
|
||||
|
||||
// If official config is enabled, write to output directory
|
||||
if cfg.Official != nil && cfg.Official.Enabled {
|
||||
output := cfg.Official.Output
|
||||
if output == "" {
|
||||
output = filepath.Join(projectDir, "dist", "scoop")
|
||||
} else if !filepath.IsAbs(output) {
|
||||
output = filepath.Join(projectDir, output)
|
||||
}
|
||||
|
||||
if err := release.FS.EnsureDir(output); err != nil {
|
||||
return fmt.Errorf("scoop.Publish: failed to create output directory: %w", err)
|
||||
}
|
||||
|
||||
manifestPath := filepath.Join(output, fmt.Sprintf("%s.json", data.PackageName))
|
||||
if err := release.FS.Write(manifestPath, manifest); err != nil {
|
||||
return fmt.Errorf("scoop.Publish: failed to write manifest: %w", err)
|
||||
}
|
||||
fmt.Printf("Wrote Scoop manifest for official PR: %s\n", manifestPath)
|
||||
}
|
||||
|
||||
// If bucket is configured, commit to it
|
||||
if cfg.Bucket != "" {
|
||||
if err := p.commitToBucket(ctx, cfg.Bucket, data, manifest); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// commitToBucket clones the Scoop bucket repository, writes the manifest
// for this package, and commits and pushes the change.
//
// Requires the `gh` and `git` CLIs to be installed and authenticated.
// NOTE(review): `git commit` fails when nothing changed (e.g. the same
// manifest is republished) — confirm whether that case should be
// tolerated rather than returned as an error.
func (p *ScoopPublisher) commitToBucket(ctx context.Context, bucket string, data scoopTemplateData, manifest string) error {
	tmpDir, err := os.MkdirTemp("", "scoop-bucket-*")
	if err != nil {
		return fmt.Errorf("scoop.Publish: failed to create temp directory: %w", err)
	}
	defer func() { _ = os.RemoveAll(tmpDir) }()

	fmt.Printf("Cloning bucket %s...\n", bucket)
	// Shallow clone: only the latest commit is needed to add a manifest.
	cmd := exec.CommandContext(ctx, "gh", "repo", "clone", bucket, tmpDir, "--", "--depth=1")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("scoop.Publish: failed to clone bucket: %w", err)
	}

	// Ensure bucket directory exists
	bucketDir := filepath.Join(tmpDir, "bucket")
	if _, err := os.Stat(bucketDir); os.IsNotExist(err) {
		bucketDir = tmpDir // Some repos put manifests in root
	}

	manifestPath := filepath.Join(bucketDir, fmt.Sprintf("%s.json", data.PackageName))
	if err := os.WriteFile(manifestPath, []byte(manifest), 0644); err != nil {
		return fmt.Errorf("scoop.Publish: failed to write manifest: %w", err)
	}

	commitMsg := fmt.Sprintf("Update %s to %s", data.PackageName, data.Version)

	cmd = exec.CommandContext(ctx, "git", "add", ".")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("scoop.Publish: git add failed: %w", err)
	}

	cmd = exec.CommandContext(ctx, "git", "commit", "-m", commitMsg)
	cmd.Dir = tmpDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("scoop.Publish: git commit failed: %w", err)
	}

	cmd = exec.CommandContext(ctx, "git", "push")
	cmd.Dir = tmpDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("scoop.Publish: git push failed: %w", err)
	}

	fmt.Printf("Updated Scoop bucket: %s\n", bucket)
	return nil
}
|
||||
|
||||
func (p *ScoopPublisher) renderTemplate(m io.Medium, name string, data scoopTemplateData) (string, error) {
|
||||
var content []byte
|
||||
var err error
|
||||
|
||||
// Try custom template from medium
|
||||
customPath := filepath.Join(".core", name)
|
||||
if m != nil && m.IsFile(customPath) {
|
||||
customContent, err := m.Read(customPath)
|
||||
if err == nil {
|
||||
content = []byte(customContent)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to embedded template
|
||||
if content == nil {
|
||||
content, err = scoopTemplates.ReadFile(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read template %s: %w", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
tmpl, err := template.New(filepath.Base(name)).Parse(string(content))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to parse template %s: %w", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, data); err != nil {
|
||||
return "", fmt.Errorf("failed to execute template %s: %w", name, err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// Ensure build package is used
|
||||
var _ = build.Artifact{}
|
||||
|
|
@ -1,311 +0,0 @@
|
|||
package publishers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-io"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestScoopPublisher_Name_Good(t *testing.T) {
|
||||
t.Run("returns scoop", func(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
assert.Equal(t, "scoop", p.Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopPublisher_ParseConfig_Good(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
|
||||
t.Run("uses defaults when no extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{Type: "scoop"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Bucket)
|
||||
assert.Nil(t, cfg.Official)
|
||||
})
|
||||
|
||||
t.Run("parses bucket from extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "scoop",
|
||||
Extended: map[string]any{
|
||||
"bucket": "host-uk/scoop-bucket",
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Equal(t, "host-uk/scoop-bucket", cfg.Bucket)
|
||||
})
|
||||
|
||||
t.Run("parses official config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "scoop",
|
||||
Extended: map[string]any{
|
||||
"official": map[string]any{
|
||||
"enabled": true,
|
||||
"output": "dist/scoop-manifest",
|
||||
},
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
require.NotNil(t, cfg.Official)
|
||||
assert.True(t, cfg.Official.Enabled)
|
||||
assert.Equal(t, "dist/scoop-manifest", cfg.Official.Output)
|
||||
})
|
||||
|
||||
t.Run("handles missing official fields", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "scoop",
|
||||
Extended: map[string]any{
|
||||
"official": map[string]any{},
|
||||
},
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
require.NotNil(t, cfg.Official)
|
||||
assert.False(t, cfg.Official.Enabled)
|
||||
assert.Empty(t, cfg.Official.Output)
|
||||
})
|
||||
|
||||
t.Run("handles nil extended config", func(t *testing.T) {
|
||||
pubCfg := PublisherConfig{
|
||||
Type: "scoop",
|
||||
Extended: nil,
|
||||
}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Bucket)
|
||||
assert.Nil(t, cfg.Official)
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopPublisher_RenderTemplate_Good(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
|
||||
t.Run("renders manifest template with data", func(t *testing.T) {
|
||||
data := scoopTemplateData{
|
||||
PackageName: "myapp",
|
||||
Description: "My awesome CLI",
|
||||
Repository: "owner/myapp",
|
||||
Version: "1.2.3",
|
||||
License: "MIT",
|
||||
BinaryName: "myapp",
|
||||
Checksums: ChecksumMap{
|
||||
WindowsAmd64: "abc123",
|
||||
WindowsArm64: "def456",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := p.renderTemplate(io.Local, "templates/scoop/manifest.json.tmpl", data)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, result, `"version": "1.2.3"`)
|
||||
assert.Contains(t, result, `"description": "My awesome CLI"`)
|
||||
assert.Contains(t, result, `"homepage": "https://github.com/owner/myapp"`)
|
||||
assert.Contains(t, result, `"license": "MIT"`)
|
||||
assert.Contains(t, result, `"64bit"`)
|
||||
assert.Contains(t, result, `"arm64"`)
|
||||
assert.Contains(t, result, "myapp-windows-amd64.zip")
|
||||
assert.Contains(t, result, "myapp-windows-arm64.zip")
|
||||
assert.Contains(t, result, `"hash": "abc123"`)
|
||||
assert.Contains(t, result, `"hash": "def456"`)
|
||||
assert.Contains(t, result, `"bin": "myapp.exe"`)
|
||||
})
|
||||
|
||||
t.Run("includes autoupdate configuration", func(t *testing.T) {
|
||||
data := scoopTemplateData{
|
||||
PackageName: "tool",
|
||||
Description: "A tool",
|
||||
Repository: "org/tool",
|
||||
Version: "2.0.0",
|
||||
License: "Apache-2.0",
|
||||
BinaryName: "tool",
|
||||
Checksums: ChecksumMap{},
|
||||
}
|
||||
|
||||
result, err := p.renderTemplate(io.Local, "templates/scoop/manifest.json.tmpl", data)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Contains(t, result, `"checkver"`)
|
||||
assert.Contains(t, result, `"github": "https://github.com/org/tool"`)
|
||||
assert.Contains(t, result, `"autoupdate"`)
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopPublisher_RenderTemplate_Bad(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
|
||||
t.Run("returns error for non-existent template", func(t *testing.T) {
|
||||
data := scoopTemplateData{}
|
||||
_, err := p.renderTemplate(io.Local, "templates/scoop/nonexistent.tmpl", data)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to read template")
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopPublisher_DryRunPublish_Good(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
|
||||
t.Run("outputs expected dry run information", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
data := scoopTemplateData{
|
||||
PackageName: "myapp",
|
||||
Version: "1.0.0",
|
||||
Repository: "owner/repo",
|
||||
BinaryName: "myapp",
|
||||
Checksums: ChecksumMap{},
|
||||
}
|
||||
cfg := ScoopConfig{
|
||||
Bucket: "owner/scoop-bucket",
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(io.Local, data, cfg)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
|
||||
assert.Contains(t, output, "DRY RUN: Scoop Publish")
|
||||
assert.Contains(t, output, "Package: myapp")
|
||||
assert.Contains(t, output, "Version: 1.0.0")
|
||||
assert.Contains(t, output, "Bucket: owner/scoop-bucket")
|
||||
assert.Contains(t, output, "Repository: owner/repo")
|
||||
assert.Contains(t, output, "Generated manifest.json:")
|
||||
assert.Contains(t, output, "Would commit to bucket: owner/scoop-bucket")
|
||||
assert.Contains(t, output, "END DRY RUN")
|
||||
})
|
||||
|
||||
t.Run("shows official output path when enabled", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
data := scoopTemplateData{
|
||||
PackageName: "myapp",
|
||||
Version: "1.0.0",
|
||||
BinaryName: "myapp",
|
||||
Checksums: ChecksumMap{},
|
||||
}
|
||||
cfg := ScoopConfig{
|
||||
Official: &OfficialConfig{
|
||||
Enabled: true,
|
||||
Output: "custom/scoop/path",
|
||||
},
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(io.Local, data, cfg)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "Would write files for official PR to: custom/scoop/path")
|
||||
})
|
||||
|
||||
t.Run("uses default official output path when not specified", func(t *testing.T) {
|
||||
oldStdout := os.Stdout
|
||||
r, w, _ := os.Pipe()
|
||||
os.Stdout = w
|
||||
|
||||
data := scoopTemplateData{
|
||||
PackageName: "myapp",
|
||||
Version: "1.0.0",
|
||||
BinaryName: "myapp",
|
||||
Checksums: ChecksumMap{},
|
||||
}
|
||||
cfg := ScoopConfig{
|
||||
Official: &OfficialConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
err := p.dryRunPublish(io.Local, data, cfg)
|
||||
|
||||
_ = w.Close()
|
||||
var buf bytes.Buffer
|
||||
_, _ = buf.ReadFrom(r)
|
||||
os.Stdout = oldStdout
|
||||
|
||||
require.NoError(t, err)
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "Would write files for official PR to: dist/scoop")
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopPublisher_Publish_Bad(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
|
||||
t.Run("fails when bucket not configured and not official mode", func(t *testing.T) {
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
ProjectDir: "/project",
|
||||
FS: io.Local,
|
||||
}
|
||||
pubCfg := PublisherConfig{Type: "scoop"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
err := p.Publish(context.TODO(), release, pubCfg, relCfg, false)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "bucket is required")
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopConfig_Defaults_Good(t *testing.T) {
|
||||
t.Run("has sensible defaults", func(t *testing.T) {
|
||||
p := NewScoopPublisher()
|
||||
pubCfg := PublisherConfig{Type: "scoop"}
|
||||
relCfg := &mockReleaseConfig{repository: "owner/repo"}
|
||||
|
||||
cfg := p.parseConfig(pubCfg, relCfg)
|
||||
|
||||
assert.Empty(t, cfg.Bucket)
|
||||
assert.Nil(t, cfg.Official)
|
||||
})
|
||||
}
|
||||
|
||||
func TestScoopTemplateData_Good(t *testing.T) {
|
||||
t.Run("struct has all expected fields", func(t *testing.T) {
|
||||
data := scoopTemplateData{
|
||||
PackageName: "myapp",
|
||||
Description: "description",
|
||||
Repository: "org/repo",
|
||||
Version: "1.0.0",
|
||||
License: "MIT",
|
||||
BinaryName: "myapp",
|
||||
Checksums: ChecksumMap{
|
||||
WindowsAmd64: "hash1",
|
||||
WindowsArm64: "hash2",
|
||||
},
|
||||
}
|
||||
|
||||
assert.Equal(t, "myapp", data.PackageName)
|
||||
assert.Equal(t, "description", data.Description)
|
||||
assert.Equal(t, "org/repo", data.Repository)
|
||||
assert.Equal(t, "1.0.0", data.Version)
|
||||
assert.Equal(t, "MIT", data.License)
|
||||
assert.Equal(t, "myapp", data.BinaryName)
|
||||
assert.Equal(t, "hash1", data.Checksums.WindowsAmd64)
|
||||
assert.Equal(t, "hash2", data.Checksums.WindowsArm64)
|
||||
})
|
||||
}
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
pkgbase = {{.PackageName}}-bin
|
||||
pkgdesc = {{.Description}}
|
||||
pkgver = {{.Version}}
|
||||
pkgrel = 1
|
||||
url = https://github.com/{{.Repository}}
|
||||
arch = x86_64
|
||||
arch = aarch64
|
||||
license = {{.License}}
|
||||
provides = {{.PackageName}}
|
||||
conflicts = {{.PackageName}}
|
||||
source_x86_64 = {{.PackageName}}-bin-{{.Version}}-x86_64.tar.gz::https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-amd64.tar.gz
|
||||
sha256sums_x86_64 = {{.Checksums.LinuxAmd64}}
|
||||
source_aarch64 = {{.PackageName}}-bin-{{.Version}}-aarch64.tar.gz::https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-arm64.tar.gz
|
||||
sha256sums_aarch64 = {{.Checksums.LinuxArm64}}
|
||||
|
||||
pkgname = {{.PackageName}}-bin
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
# Maintainer: {{.Maintainer}}
# Arch Linux PKGBUILD template for a prebuilt (-bin) package.
# The {{.Field}} markers are Go text/template placeholders filled in at
# release time with package metadata and per-arch sha256 checksums.
pkgname={{.PackageName}}-bin
pkgver={{.Version}}
pkgrel=1
pkgdesc="{{.Description}}"
arch=('x86_64' 'aarch64')
url="https://github.com/{{.Repository}}"
license=('{{.License}}')
# The -bin package stands in for (and conflicts with) a from-source package
# of the same base name.
provides=('{{.PackageName}}')
conflicts=('{{.PackageName}}')

# Per-architecture release tarballs downloaded from GitHub releases.
source_x86_64=("${pkgname}-${pkgver}-x86_64.tar.gz::https://github.com/{{.Repository}}/releases/download/v${pkgver}/{{.BinaryName}}-linux-amd64.tar.gz")
source_aarch64=("${pkgname}-${pkgver}-aarch64.tar.gz::https://github.com/{{.Repository}}/releases/download/v${pkgver}/{{.BinaryName}}-linux-arm64.tar.gz")

sha256sums_x86_64=('{{.Checksums.LinuxAmd64}}')
sha256sums_aarch64=('{{.Checksums.LinuxArm64}}')

# Install the single release binary into /usr/bin with mode 755.
package() {
	install -Dm755 {{.BinaryName}} "${pkgdir}/usr/bin/{{.BinaryName}}"
}
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<package xmlns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd">
|
||||
<metadata>
|
||||
<id>{{.PackageName}}</id>
|
||||
<version>{{.Version}}</version>
|
||||
<title>{{.Title}}</title>
|
||||
<authors>{{.Authors}}</authors>
|
||||
<projectUrl>https://github.com/{{.Repository}}</projectUrl>
|
||||
<licenseUrl>https://github.com/{{.Repository}}/blob/main/LICENSE</licenseUrl>
|
||||
<requireLicenseAcceptance>false</requireLicenseAcceptance>
|
||||
<description>{{.Description}}</description>
|
||||
<tags>{{.Tags}}</tags>
|
||||
<releaseNotes>https://github.com/{{.Repository}}/releases/tag/v{{.Version}}</releaseNotes>
|
||||
</metadata>
|
||||
<files>
|
||||
<file src="tools\**" target="tools" />
|
||||
</files>
|
||||
</package>
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
# Chocolatey install script template. The {{.Field}} markers are Go
# text/template placeholders filled in at release time.
$ErrorActionPreference = 'Stop'
# Directory containing this script; the release zip is extracted here.
$toolsDir = "$(Split-Path -parent $MyInvocation.MyCommand.Definition)"
# 64-bit Windows release asset for this version.
$url64 = 'https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-windows-amd64.zip'

$packageArgs = @{
  packageName = '{{.PackageName}}'
  unzipLocation = $toolsDir
  url64bit = $url64
  checksum64 = '{{.Checksums.WindowsAmd64}}'
  checksumType64 = 'sha256'
}

# Download, verify the sha256 checksum, and unzip into the tools directory.
Install-ChocolateyZipPackage @packageArgs
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
# typed: false
# frozen_string_literal: true

# Homebrew formula template. The {{.Field}} markers are Go text/template
# placeholders filled in at release time with package metadata and
# per-platform sha256 checksums.
class {{.FormulaClass}} < Formula
  desc "{{.Description}}"
  homepage "https://github.com/{{.Repository}}"
  version "{{.Version}}"
  license "{{.License}}"

  # Select the release tarball matching the host OS and CPU architecture.
  on_macos do
    if Hardware::CPU.arm?
      url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-darwin-arm64.tar.gz"
      sha256 "{{.Checksums.DarwinArm64}}"
    else
      url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-darwin-amd64.tar.gz"
      sha256 "{{.Checksums.DarwinAmd64}}"
    end
  end

  on_linux do
    if Hardware::CPU.arm?
      url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-arm64.tar.gz"
      sha256 "{{.Checksums.LinuxArm64}}"
    else
      url "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-linux-amd64.tar.gz"
      sha256 "{{.Checksums.LinuxAmd64}}"
    end
  end

  # The release archive contains a single prebuilt binary; install it as-is.
  def install
    bin.install "{{.BinaryName}}"
  end

  test do
    system "#{bin}/{{.BinaryName}}", "--version"
  end
end
|
||||
|
|
@ -1,176 +0,0 @@
|
|||
#!/usr/bin/env node
|
||||
/**
|
||||
* Binary installer for {{.Package}}
|
||||
* Downloads the correct binary for the current platform from GitHub releases.
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const https = require('https');
|
||||
const { spawnSync } = require('child_process');
|
||||
const crypto = require('crypto');
|
||||
|
||||
const PACKAGE_VERSION = '{{.Version}}';
|
||||
const GITHUB_REPO = '{{.Repository}}';
|
||||
const BINARY_NAME = '{{.BinaryName}}';
|
||||
|
||||
// Platform/arch mapping
|
||||
const PLATFORM_MAP = {
|
||||
darwin: 'darwin',
|
||||
linux: 'linux',
|
||||
win32: 'windows',
|
||||
};
|
||||
|
||||
const ARCH_MAP = {
|
||||
x64: 'amd64',
|
||||
arm64: 'arm64',
|
||||
};
|
||||
|
||||
/**
 * Maps the current Node platform/arch onto the Go-style identifiers used
 * in release asset names. Exits the process with status 1 when the
 * combination is not in PLATFORM_MAP/ARCH_MAP.
 * @returns {{platform: string, arch: string}}
 */
function getPlatformInfo() {
  const platform = PLATFORM_MAP[process.platform];
  const arch = ARCH_MAP[process.arch];

  if (platform && arch) {
    return { platform, arch };
  }

  console.error(`Unsupported platform: ${process.platform}/${process.arch}`);
  process.exit(1);
}
|
||||
|
||||
/**
 * Builds the GitHub release download URL for the archive matching the
 * given platform/arch (zip on Windows, tar.gz elsewhere).
 * @param {string} platform normalized platform name
 * @param {string} arch normalized architecture name
 * @returns {string}
 */
function getDownloadUrl(platform, arch) {
  const suffix = platform === 'windows' ? '.zip' : '.tar.gz';
  const asset = [BINARY_NAME, platform, arch].join('-') + suffix;
  return `https://github.com/${GITHUB_REPO}/releases/download/v${PACKAGE_VERSION}/${asset}`;
}
|
||||
|
||||
/**
 * URL of the checksums.txt asset attached to this release.
 * @returns {string}
 */
function getChecksumsUrl() {
  return 'https://github.com/' + GITHUB_REPO + '/releases/download/v' + PACKAGE_VERSION + '/checksums.txt';
}
|
||||
|
||||
/**
 * GETs a URL over https and resolves with the complete response body.
 * Follows 3xx redirects by recursing on the Location header; rejects on
 * any non-200 final status or on a network error.
 * @param {string} url
 * @returns {Promise<Buffer>}
 */
function download(url) {
  return new Promise((resolve, reject) => {
    const request = (url) => {
      https.get(url, (res) => {
        if (res.statusCode >= 300 && res.statusCode < 400 && res.headers.location) {
          // Follow redirect
          request(res.headers.location);
          return;
        }

        if (res.statusCode !== 200) {
          reject(new Error(`Failed to download ${url}: HTTP ${res.statusCode}`));
          return;
        }

        // Accumulate the whole body in memory before resolving.
        const chunks = [];
        res.on('data', (chunk) => chunks.push(chunk));
        res.on('end', () => resolve(Buffer.concat(chunks)));
        res.on('error', reject);
      }).on('error', reject);
    };
    request(url);
  });
}
|
||||
|
||||
/**
 * Downloads checksums.txt for this release and parses it into a
 * { filename: sha256hex } map. Returns null (after a warning) when the
 * file cannot be fetched, so the caller can skip verification.
 * @returns {Promise<Object<string,string>|null>}
 */
async function fetchChecksums() {
  try {
    const body = await download(getChecksumsUrl());
    const result = {};
    for (const line of body.toString().split('\n')) {
      const fields = line.trim().split(/\s+/);
      if (fields.length === 2) {
        const [hash, file] = fields;
        result[file] = hash;
      }
    }
    return result;
  } catch (err) {
    console.warn('Warning: Could not fetch checksums, skipping verification');
    return null;
  }
}
|
||||
|
||||
function verifyChecksum(data, expectedHash) {
|
||||
const actualHash = crypto.createHash('sha256').update(data).digest('hex');
|
||||
return actualHash === expectedHash;
|
||||
}
|
||||
|
||||
/**
 * Unpacks a downloaded archive into destDir.
 * The buffer is written to a temp file first because both extraction tools
 * (PowerShell Expand-Archive on Windows, tar elsewhere) operate on paths,
 * not streams. The temp file is removed even when extraction fails.
 * @param {Buffer} data archive contents (zip on Windows, tar.gz elsewhere)
 * @param {string} destDir directory to extract into
 * @param {string} platform normalized platform name ('windows', 'linux', 'darwin')
 */
function extract(data, destDir, platform) {
  const tempFile = path.join(destDir, platform === 'windows' ? 'temp.zip' : 'temp.tar.gz');
  fs.writeFileSync(tempFile, data);

  try {
    if (platform === 'windows') {
      // Use PowerShell to extract zip
      const result = spawnSync('powershell', [
        '-command',
        `Expand-Archive -Path '${tempFile}' -DestinationPath '${destDir}' -Force`
      ], { stdio: 'ignore' });
      if (result.status !== 0) {
        throw new Error('Failed to extract zip');
      }
    } else {
      // Rely on the system tar; output is suppressed, only the exit code matters.
      const result = spawnSync('tar', ['-xzf', tempFile, '-C', destDir], { stdio: 'ignore' });
      if (result.status !== 0) {
        throw new Error('Failed to extract tar.gz');
      }
    }
  } finally {
    fs.unlinkSync(tempFile);
  }
}
|
||||
|
||||
/**
 * Entry point: downloads, verifies and installs the platform binary into
 * ./bin next to this script. Idempotent — returns early when the binary
 * already exists. Checksum verification is best-effort: when checksums.txt
 * cannot be fetched, fetchChecksums returns null and the install proceeds
 * without verification.
 */
async function main() {
  const { platform, arch } = getPlatformInfo();
  const binDir = path.join(__dirname, 'bin');
  const binaryPath = path.join(binDir, platform === 'windows' ? `${BINARY_NAME}.exe` : BINARY_NAME);

  // Skip if binary already exists
  if (fs.existsSync(binaryPath)) {
    console.log(`${BINARY_NAME} binary already installed`);
    return;
  }

  console.log(`Installing ${BINARY_NAME} v${PACKAGE_VERSION} for ${platform}/${arch}...`);

  // Ensure bin directory exists
  if (!fs.existsSync(binDir)) {
    fs.mkdirSync(binDir, { recursive: true });
  }

  // Fetch checksums (may be null — see function doc)
  const checksums = await fetchChecksums();

  // Download binary
  const url = getDownloadUrl(platform, arch);
  console.log(`Downloading from ${url}`);

  const data = await download(url);

  // Verify checksum if available; a mismatch aborts the install.
  // NOTE(review): "Checksum verified" is also printed when this archive had
  // no entry in checksums.txt (expectedHash undefined) — confirm intended.
  if (checksums) {
    const ext = platform === 'windows' ? '.zip' : '.tar.gz';
    const filename = `${BINARY_NAME}-${platform}-${arch}${ext}`;
    const expectedHash = checksums[filename];
    if (expectedHash && !verifyChecksum(data, expectedHash)) {
      console.error('Checksum verification failed!');
      process.exit(1);
    }
    console.log('Checksum verified');
  }

  // Extract
  extract(data, binDir, platform);

  // Make executable on Unix
  if (platform !== 'windows') {
    fs.chmodSync(binaryPath, 0o755);
  }

  console.log(`${BINARY_NAME} installed successfully`);
}
|
||||
|
||||
// Kick off the installation; any failure aborts npm install with exit code 1.
main().catch((err) => {
  console.error(`Installation failed: ${err.message}`);
  process.exit(1);
});
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
{
|
||||
"name": "{{.Package}}",
|
||||
"version": "{{.Version}}",
|
||||
"description": "{{.Description}}",
|
||||
"license": "{{.License}}",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/{{.Repository}}.git"
|
||||
},
|
||||
"homepage": "https://github.com/{{.Repository}}",
|
||||
"bugs": {
|
||||
"url": "https://github.com/{{.Repository}}/issues"
|
||||
},
|
||||
"bin": {
|
||||
"{{.BinaryName}}": "./bin/run.js"
|
||||
},
|
||||
"scripts": {
|
||||
"postinstall": "node ./install.js"
|
||||
},
|
||||
"files": [
|
||||
"bin/",
|
||||
"install.js"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=14.0.0"
|
||||
},
|
||||
"keywords": [
|
||||
"cli",
|
||||
"{{.ProjectName}}"
|
||||
],
|
||||
"publishConfig": {
|
||||
"access": "{{.Access}}"
|
||||
}
|
||||
}
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
#!/usr/bin/env node
|
||||
/**
|
||||
* Binary wrapper for {{.Package}}
|
||||
* Executes the platform-specific binary.
|
||||
*/
|
||||
|
||||
const { spawn } = require('child_process');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
|
||||
const BINARY_NAME = '{{.BinaryName}}';
|
||||
|
||||
/**
 * Absolute path to the platform binary shipped next to this wrapper
 * script (with a .exe suffix on Windows).
 * @returns {string}
 */
function getBinaryPath() {
  const exe = process.platform === 'win32' ? `${BINARY_NAME}.exe` : BINARY_NAME;
  return path.join(__dirname, exe);
}
|
||||
|
||||
/**
 * Spawns the platform binary with this process's arguments and inherited
 * stdio, then mirrors the child's exit: re-raises a terminating signal on
 * this process, or forwards the child's exit code.
 */
function main() {
  const binaryPath = getBinaryPath();

  if (!fs.existsSync(binaryPath)) {
    console.error(`Binary not found at ${binaryPath}`);
    console.error('Try reinstalling the package: npm install -g {{.Package}}');
    process.exit(1);
  }

  const child = spawn(binaryPath, process.argv.slice(2), {
    stdio: 'inherit',
    windowsHide: true,
  });

  // Spawn failure (e.g. missing exec permission) — report and exit non-zero.
  child.on('error', (err) => {
    console.error(`Failed to start ${BINARY_NAME}: ${err.message}`);
    process.exit(1);
  });

  child.on('exit', (code, signal) => {
    if (signal) {
      // Child died from a signal: kill ourselves with the same signal so
      // the parent shell observes the correct termination status.
      process.kill(process.pid, signal);
    } else {
      process.exit(code ?? 0);
    }
  });
}
|
||||
|
||||
main();
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
{
|
||||
"version": "{{.Version}}",
|
||||
"description": "{{.Description}}",
|
||||
"homepage": "https://github.com/{{.Repository}}",
|
||||
"license": "{{.License}}",
|
||||
"architecture": {
|
||||
"64bit": {
|
||||
"url": "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-windows-amd64.zip",
|
||||
"hash": "{{.Checksums.WindowsAmd64}}"
|
||||
},
|
||||
"arm64": {
|
||||
"url": "https://github.com/{{.Repository}}/releases/download/v{{.Version}}/{{.BinaryName}}-windows-arm64.zip",
|
||||
"hash": "{{.Checksums.WindowsArm64}}"
|
||||
}
|
||||
},
|
||||
"bin": "{{.BinaryName}}.exe",
|
||||
"checkver": {
|
||||
"github": "https://github.com/{{.Repository}}"
|
||||
},
|
||||
"autoupdate": {
|
||||
"architecture": {
|
||||
"64bit": {
|
||||
"url": "https://github.com/{{.Repository}}/releases/download/v$version/{{.BinaryName}}-windows-amd64.zip"
|
||||
},
|
||||
"arm64": {
|
||||
"url": "https://github.com/{{.Repository}}/releases/download/v$version/{{.BinaryName}}-windows-arm64.zip"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,440 +0,0 @@
|
|||
// Package release provides release automation with changelog generation and publishing.
|
||||
// It orchestrates the build system, changelog generation, and publishing to targets
|
||||
// like GitHub Releases.
|
||||
package release
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-devops/build/builders"
|
||||
"forge.lthn.ai/core/go-devops/release/publishers"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// Release represents a release with its version, artifacts, and changelog.
// It is assembled by Run (build + publish) or Publish (publish pre-built
// artifacts) and handed to the configured publishers.
type Release struct {
	// Version is the semantic version string (e.g., "v1.2.3").
	Version string
	// Artifacts are the built release artifacts (archives with checksums).
	Artifacts []build.Artifact
	// Changelog is the generated markdown changelog.
	Changelog string
	// ProjectDir is the root directory of the project.
	ProjectDir string
	// FS is the medium for file operations.
	FS io.Medium
}
|
||||
|
||||
// Publish publishes pre-built artifacts from dist/ to configured targets.
|
||||
// Use this after `core build` to separate build and publish concerns.
|
||||
// If dryRun is true, it will show what would be done without actually publishing.
|
||||
func Publish(ctx context.Context, cfg *Config, dryRun bool) (*Release, error) {
|
||||
if cfg == nil {
|
||||
return nil, errors.New("release.Publish: config is nil")
|
||||
}
|
||||
|
||||
m := io.Local
|
||||
|
||||
projectDir := cfg.projectDir
|
||||
if projectDir == "" {
|
||||
projectDir = "."
|
||||
}
|
||||
|
||||
// Resolve to absolute path
|
||||
absProjectDir, err := filepath.Abs(projectDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.Publish: failed to resolve project directory: %w", err)
|
||||
}
|
||||
|
||||
// Step 1: Determine version
|
||||
version := cfg.version
|
||||
if version == "" {
|
||||
version, err = DetermineVersion(absProjectDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.Publish: failed to determine version: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Find pre-built artifacts in dist/
|
||||
distDir := filepath.Join(absProjectDir, "dist")
|
||||
artifacts, err := findArtifacts(m, distDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.Publish: %w", err)
|
||||
}
|
||||
|
||||
if len(artifacts) == 0 {
|
||||
return nil, errors.New("release.Publish: no artifacts found in dist/\nRun 'core build' first to create artifacts")
|
||||
}
|
||||
|
||||
// Step 3: Generate changelog
|
||||
changelog, err := Generate(absProjectDir, "", version)
|
||||
if err != nil {
|
||||
// Non-fatal: continue with empty changelog
|
||||
changelog = fmt.Sprintf("Release %s", version)
|
||||
}
|
||||
|
||||
release := &Release{
|
||||
Version: version,
|
||||
Artifacts: artifacts,
|
||||
Changelog: changelog,
|
||||
ProjectDir: absProjectDir,
|
||||
FS: m,
|
||||
}
|
||||
|
||||
// Step 4: Publish to configured targets
|
||||
if len(cfg.Publishers) > 0 {
|
||||
pubRelease := publishers.NewRelease(release.Version, release.Artifacts, release.Changelog, release.ProjectDir, release.FS)
|
||||
|
||||
for _, pubCfg := range cfg.Publishers {
|
||||
publisher, err := getPublisher(pubCfg.Type)
|
||||
if err != nil {
|
||||
return release, fmt.Errorf("release.Publish: %w", err)
|
||||
}
|
||||
|
||||
extendedCfg := buildExtendedConfig(pubCfg)
|
||||
publisherCfg := publishers.NewPublisherConfig(pubCfg.Type, pubCfg.Prerelease, pubCfg.Draft, extendedCfg)
|
||||
if err := publisher.Publish(ctx, pubRelease, publisherCfg, cfg, dryRun); err != nil {
|
||||
return release, fmt.Errorf("release.Publish: publish to %s failed: %w", pubCfg.Type, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return release, nil
|
||||
}
|
||||
|
||||
// findArtifacts discovers pre-built artifacts in the dist directory.
|
||||
func findArtifacts(m io.Medium, distDir string) ([]build.Artifact, error) {
|
||||
if !m.IsDir(distDir) {
|
||||
return nil, errors.New("dist/ directory not found")
|
||||
}
|
||||
|
||||
var artifacts []build.Artifact
|
||||
|
||||
entries, err := m.List(distDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read dist/: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
name := entry.Name()
|
||||
path := filepath.Join(distDir, name)
|
||||
|
||||
// Include archives and checksums
|
||||
if strings.HasSuffix(name, ".tar.gz") ||
|
||||
strings.HasSuffix(name, ".zip") ||
|
||||
strings.HasSuffix(name, ".txt") ||
|
||||
strings.HasSuffix(name, ".sig") {
|
||||
artifacts = append(artifacts, build.Artifact{Path: path})
|
||||
}
|
||||
}
|
||||
|
||||
return artifacts, nil
|
||||
}
|
||||
|
||||
// Run executes the full release process: determine version, build artifacts,
|
||||
// generate changelog, and publish to configured targets.
|
||||
// For separated concerns, prefer using `core build` then `core ci` (Publish).
|
||||
// If dryRun is true, it will show what would be done without actually publishing.
|
||||
func Run(ctx context.Context, cfg *Config, dryRun bool) (*Release, error) {
|
||||
if cfg == nil {
|
||||
return nil, errors.New("release.Run: config is nil")
|
||||
}
|
||||
|
||||
m := io.Local
|
||||
|
||||
projectDir := cfg.projectDir
|
||||
if projectDir == "" {
|
||||
projectDir = "."
|
||||
}
|
||||
|
||||
// Resolve to absolute path
|
||||
absProjectDir, err := filepath.Abs(projectDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.Run: failed to resolve project directory: %w", err)
|
||||
}
|
||||
|
||||
// Step 1: Determine version
|
||||
version := cfg.version
|
||||
if version == "" {
|
||||
version, err = DetermineVersion(absProjectDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.Run: failed to determine version: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Generate changelog
|
||||
changelog, err := Generate(absProjectDir, "", version)
|
||||
if err != nil {
|
||||
// Non-fatal: continue with empty changelog
|
||||
changelog = fmt.Sprintf("Release %s", version)
|
||||
}
|
||||
|
||||
// Step 3: Build artifacts
|
||||
artifacts, err := buildArtifacts(ctx, m, cfg, absProjectDir, version)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.Run: build failed: %w", err)
|
||||
}
|
||||
|
||||
release := &Release{
|
||||
Version: version,
|
||||
Artifacts: artifacts,
|
||||
Changelog: changelog,
|
||||
ProjectDir: absProjectDir,
|
||||
FS: m,
|
||||
}
|
||||
|
||||
// Step 4: Publish to configured targets
|
||||
if len(cfg.Publishers) > 0 {
|
||||
// Convert to publisher types
|
||||
pubRelease := publishers.NewRelease(release.Version, release.Artifacts, release.Changelog, release.ProjectDir, release.FS)
|
||||
|
||||
for _, pubCfg := range cfg.Publishers {
|
||||
publisher, err := getPublisher(pubCfg.Type)
|
||||
if err != nil {
|
||||
return release, fmt.Errorf("release.Run: %w", err)
|
||||
}
|
||||
|
||||
// Build extended config for publisher-specific settings
|
||||
extendedCfg := buildExtendedConfig(pubCfg)
|
||||
publisherCfg := publishers.NewPublisherConfig(pubCfg.Type, pubCfg.Prerelease, pubCfg.Draft, extendedCfg)
|
||||
if err := publisher.Publish(ctx, pubRelease, publisherCfg, cfg, dryRun); err != nil {
|
||||
return release, fmt.Errorf("release.Run: publish to %s failed: %w", pubCfg.Type, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return release, nil
|
||||
}
|
||||
|
||||
// buildArtifacts builds all artifacts for the release.
// It loads the project's build configuration, cross-compiles for each
// target, archives the binaries, computes checksums, and writes
// CHECKSUMS.txt into the output directory. The returned slice contains
// the checksummed archives plus the CHECKSUMS.txt artifact itself.
func buildArtifacts(ctx context.Context, fs io.Medium, cfg *Config, projectDir, version string) ([]build.Artifact, error) {
	// Load build configuration
	buildCfg, err := build.LoadConfig(fs, projectDir)
	if err != nil {
		return nil, fmt.Errorf("failed to load build config: %w", err)
	}

	// Determine targets, in precedence order: release config, build config,
	// then a default linux/darwin/windows matrix.
	var targets []build.Target
	if len(cfg.Build.Targets) > 0 {
		for _, t := range cfg.Build.Targets {
			targets = append(targets, build.Target{OS: t.OS, Arch: t.Arch})
		}
	} else if len(buildCfg.Targets) > 0 {
		targets = buildCfg.ToTargets()
	} else {
		// Default targets
		targets = []build.Target{
			{OS: "linux", Arch: "amd64"},
			{OS: "linux", Arch: "arm64"},
			{OS: "darwin", Arch: "arm64"},
			{OS: "windows", Arch: "amd64"},
		}
	}

	// Determine binary name, in precedence order: release project name,
	// build config binary, build config project name, project dir basename.
	binaryName := cfg.Project.Name
	if binaryName == "" {
		binaryName = buildCfg.Project.Binary
	}
	if binaryName == "" {
		binaryName = buildCfg.Project.Name
	}
	if binaryName == "" {
		binaryName = filepath.Base(projectDir)
	}

	// Determine output directory
	outputDir := filepath.Join(projectDir, "dist")

	// Get builder (detect project type)
	projectType, err := build.PrimaryType(fs, projectDir)
	if err != nil {
		return nil, fmt.Errorf("failed to detect project type: %w", err)
	}

	builder, err := getBuilder(projectType)
	if err != nil {
		return nil, err
	}

	// Build configuration
	buildConfig := &build.Config{
		FS:         fs,
		ProjectDir: projectDir,
		OutputDir:  outputDir,
		Name:       binaryName,
		Version:    version,
		LDFlags:    buildCfg.Build.LDFlags,
	}

	// Build
	artifacts, err := builder.Build(ctx, buildConfig, targets)
	if err != nil {
		return nil, fmt.Errorf("build failed: %w", err)
	}

	// Archive artifacts
	archivedArtifacts, err := build.ArchiveAll(fs, artifacts)
	if err != nil {
		return nil, fmt.Errorf("archive failed: %w", err)
	}

	// Compute checksums
	checksummedArtifacts, err := build.ChecksumAll(fs, archivedArtifacts)
	if err != nil {
		return nil, fmt.Errorf("checksum failed: %w", err)
	}

	// Write CHECKSUMS.txt
	checksumPath := filepath.Join(outputDir, "CHECKSUMS.txt")
	if err := build.WriteChecksumFile(fs, checksummedArtifacts, checksumPath); err != nil {
		return nil, fmt.Errorf("failed to write checksums file: %w", err)
	}

	// Add CHECKSUMS.txt as an artifact so it is uploaded alongside the archives
	checksumArtifact := build.Artifact{
		Path: checksumPath,
	}
	checksummedArtifacts = append(checksummedArtifacts, checksumArtifact)

	return checksummedArtifacts, nil
}
|
||||
|
||||
// getBuilder returns the appropriate builder for the project type.
|
||||
func getBuilder(projectType build.ProjectType) (build.Builder, error) {
|
||||
switch projectType {
|
||||
case build.ProjectTypeWails:
|
||||
return builders.NewWailsBuilder(), nil
|
||||
case build.ProjectTypeGo:
|
||||
return builders.NewGoBuilder(), nil
|
||||
case build.ProjectTypeNode:
|
||||
return nil, errors.New("node.js builder not yet implemented")
|
||||
case build.ProjectTypePHP:
|
||||
return nil, errors.New("PHP builder not yet implemented")
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported project type: %s", projectType)
|
||||
}
|
||||
}
|
||||
|
||||
// getPublisher returns the publisher for the given type.
|
||||
func getPublisher(pubType string) (publishers.Publisher, error) {
|
||||
switch pubType {
|
||||
case "github":
|
||||
return publishers.NewGitHubPublisher(), nil
|
||||
case "linuxkit":
|
||||
return publishers.NewLinuxKitPublisher(), nil
|
||||
case "docker":
|
||||
return publishers.NewDockerPublisher(), nil
|
||||
case "npm":
|
||||
return publishers.NewNpmPublisher(), nil
|
||||
case "homebrew":
|
||||
return publishers.NewHomebrewPublisher(), nil
|
||||
case "scoop":
|
||||
return publishers.NewScoopPublisher(), nil
|
||||
case "aur":
|
||||
return publishers.NewAURPublisher(), nil
|
||||
case "chocolatey":
|
||||
return publishers.NewChocolateyPublisher(), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported publisher type: %s", pubType)
|
||||
}
|
||||
}
|
||||
|
||||
// buildExtendedConfig builds a map of extended configuration for a publisher.
|
||||
func buildExtendedConfig(pubCfg PublisherConfig) map[string]any {
|
||||
ext := make(map[string]any)
|
||||
|
||||
// LinuxKit-specific config
|
||||
if pubCfg.Config != "" {
|
||||
ext["config"] = pubCfg.Config
|
||||
}
|
||||
if len(pubCfg.Formats) > 0 {
|
||||
ext["formats"] = toAnySlice(pubCfg.Formats)
|
||||
}
|
||||
if len(pubCfg.Platforms) > 0 {
|
||||
ext["platforms"] = toAnySlice(pubCfg.Platforms)
|
||||
}
|
||||
|
||||
// Docker-specific config
|
||||
if pubCfg.Registry != "" {
|
||||
ext["registry"] = pubCfg.Registry
|
||||
}
|
||||
if pubCfg.Image != "" {
|
||||
ext["image"] = pubCfg.Image
|
||||
}
|
||||
if pubCfg.Dockerfile != "" {
|
||||
ext["dockerfile"] = pubCfg.Dockerfile
|
||||
}
|
||||
if len(pubCfg.Tags) > 0 {
|
||||
ext["tags"] = toAnySlice(pubCfg.Tags)
|
||||
}
|
||||
if len(pubCfg.BuildArgs) > 0 {
|
||||
args := make(map[string]any)
|
||||
for k, v := range pubCfg.BuildArgs {
|
||||
args[k] = v
|
||||
}
|
||||
ext["build_args"] = args
|
||||
}
|
||||
|
||||
// npm-specific config
|
||||
if pubCfg.Package != "" {
|
||||
ext["package"] = pubCfg.Package
|
||||
}
|
||||
if pubCfg.Access != "" {
|
||||
ext["access"] = pubCfg.Access
|
||||
}
|
||||
|
||||
// Homebrew-specific config
|
||||
if pubCfg.Tap != "" {
|
||||
ext["tap"] = pubCfg.Tap
|
||||
}
|
||||
if pubCfg.Formula != "" {
|
||||
ext["formula"] = pubCfg.Formula
|
||||
}
|
||||
|
||||
// Scoop-specific config
|
||||
if pubCfg.Bucket != "" {
|
||||
ext["bucket"] = pubCfg.Bucket
|
||||
}
|
||||
|
||||
// AUR-specific config
|
||||
if pubCfg.Maintainer != "" {
|
||||
ext["maintainer"] = pubCfg.Maintainer
|
||||
}
|
||||
|
||||
// Chocolatey-specific config
|
||||
if pubCfg.Push {
|
||||
ext["push"] = pubCfg.Push
|
||||
}
|
||||
|
||||
// Official repo config (shared by multiple publishers)
|
||||
if pubCfg.Official != nil {
|
||||
official := make(map[string]any)
|
||||
official["enabled"] = pubCfg.Official.Enabled
|
||||
if pubCfg.Official.Output != "" {
|
||||
official["output"] = pubCfg.Official.Output
|
||||
}
|
||||
ext["official"] = official
|
||||
}
|
||||
|
||||
return ext
|
||||
}
|
||||
|
||||
// toAnySlice converts a string slice into an equally-sized []any, boxing
// each element. The result is always non-nil, even for empty input.
func toAnySlice(s []string) []any {
	out := make([]any, 0, len(s))
	for _, v := range s {
		out = append(out, v)
	}
	return out
}
|
||||
|
|
@ -1,704 +0,0 @@
|
|||
package release
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/build"
|
||||
"forge.lthn.ai/core/go-io"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestFindArtifacts_Good verifies that findArtifacts discovers release
// artifacts (tar.gz, zip, CHECKSUMS.txt, and .sig files) at the top level
// of a dist/ directory while ignoring unrelated files and subdirectories.
func TestFindArtifacts_Good(t *testing.T) {
	t.Run("finds tar.gz artifacts", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		// Create test artifact files
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-linux-amd64.tar.gz"), []byte("test"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-darwin-arm64.tar.gz"), []byte("test"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Len(t, artifacts, 2)
	})

	t.Run("finds zip artifacts", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-windows-amd64.zip"), []byte("test"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Len(t, artifacts, 1)
		assert.Contains(t, artifacts[0].Path, "app-windows-amd64.zip")
	})

	t.Run("finds checksum files", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		require.NoError(t, os.WriteFile(filepath.Join(distDir, "CHECKSUMS.txt"), []byte("checksums"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Len(t, artifacts, 1)
		assert.Contains(t, artifacts[0].Path, "CHECKSUMS.txt")
	})

	t.Run("finds signature files", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz.sig"), []byte("signature"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Len(t, artifacts, 1)
	})

	t.Run("finds mixed artifact types", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		// One of each supported artifact kind; all four should be found.
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-linux.tar.gz"), []byte("test"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-windows.zip"), []byte("test"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "CHECKSUMS.txt"), []byte("checksums"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.sig"), []byte("sig"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Len(t, artifacts, 4)
	})

	t.Run("ignores non-artifact files", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		// README.md and app.exe are not recognised artifact extensions.
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "README.md"), []byte("readme"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.exe"), []byte("binary"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("artifact"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Len(t, artifacts, 1)
		assert.Contains(t, artifacts[0].Path, "app.tar.gz")
	})

	t.Run("ignores subdirectories", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.MkdirAll(filepath.Join(distDir, "subdir"), 0755))

		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("artifact"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "subdir", "nested.tar.gz"), []byte("nested"), 0644))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		// Should only find the top-level artifact
		assert.Len(t, artifacts, 1)
	})

	t.Run("returns empty slice for empty dist directory", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		artifacts, err := findArtifacts(io.Local, distDir)
		require.NoError(t, err)

		assert.Empty(t, artifacts)
	})
}
|
||||
|
||||
// TestFindArtifacts_Bad covers the error paths of findArtifacts: a missing
// dist/ directory and a dist/ directory that cannot be read.
func TestFindArtifacts_Bad(t *testing.T) {
	t.Run("returns error when dist directory does not exist", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")

		_, err := findArtifacts(io.Local, distDir)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "dist/ directory not found")
	})

	t.Run("returns error when dist directory is unreadable", func(t *testing.T) {
		// Permission bits do not restrict root, so the chmod-0000 trick
		// below cannot produce a read failure when running as root.
		if os.Geteuid() == 0 {
			t.Skip("root can read any directory")
		}
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		// Create a file that looks like dist but will cause ReadDir to fail
		// by making the directory unreadable
		require.NoError(t, os.Chmod(distDir, 0000))
		// Restore permissions so t.TempDir cleanup can remove the tree.
		defer func() { _ = os.Chmod(distDir, 0755) }()

		_, err := findArtifacts(io.Local, distDir)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "failed to read dist/")
	})
}
|
||||
|
||||
func TestGetBuilder_Good(t *testing.T) {
|
||||
t.Run("returns Go builder for go project type", func(t *testing.T) {
|
||||
builder, err := getBuilder(build.ProjectTypeGo)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, builder)
|
||||
assert.Equal(t, "go", builder.Name())
|
||||
})
|
||||
|
||||
t.Run("returns Wails builder for wails project type", func(t *testing.T) {
|
||||
builder, err := getBuilder(build.ProjectTypeWails)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, builder)
|
||||
assert.Equal(t, "wails", builder.Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetBuilder_Bad(t *testing.T) {
|
||||
t.Run("returns error for Node project type", func(t *testing.T) {
|
||||
_, err := getBuilder(build.ProjectTypeNode)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "node.js builder not yet implemented")
|
||||
})
|
||||
|
||||
t.Run("returns error for PHP project type", func(t *testing.T) {
|
||||
_, err := getBuilder(build.ProjectTypePHP)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "PHP builder not yet implemented")
|
||||
})
|
||||
|
||||
t.Run("returns error for unsupported project type", func(t *testing.T) {
|
||||
_, err := getBuilder(build.ProjectType("unknown"))
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unsupported project type")
|
||||
})
|
||||
}
|
||||
|
||||
// TestGetPublisher_Good checks that every supported publisher type string
// resolves to a publisher reporting the matching Name().
func TestGetPublisher_Good(t *testing.T) {
	tests := []struct {
		pubType      string
		expectedName string
	}{
		{"github", "github"},
		{"linuxkit", "linuxkit"},
		{"docker", "docker"},
		{"npm", "npm"},
		{"homebrew", "homebrew"},
		{"scoop", "scoop"},
		{"aur", "aur"},
		{"chocolatey", "chocolatey"},
	}

	for _, tc := range tests {
		t.Run(tc.pubType, func(t *testing.T) {
			publisher, err := getPublisher(tc.pubType)
			require.NoError(t, err)
			assert.NotNil(t, publisher)
			assert.Equal(t, tc.expectedName, publisher.Name())
		})
	}
}
|
||||
|
||||
func TestGetPublisher_Bad(t *testing.T) {
|
||||
t.Run("returns error for unsupported publisher type", func(t *testing.T) {
|
||||
_, err := getPublisher("unsupported")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unsupported publisher type: unsupported")
|
||||
})
|
||||
|
||||
t.Run("returns error for empty publisher type", func(t *testing.T) {
|
||||
_, err := getPublisher("")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unsupported publisher type")
|
||||
})
|
||||
}
|
||||
|
||||
// TestBuildExtendedConfig_Good verifies that buildExtendedConfig maps each
// publisher-specific PublisherConfig field into the expected key of the
// extended-config map, and that unset fields are omitted entirely.
func TestBuildExtendedConfig_Good(t *testing.T) {
	t.Run("returns empty map for minimal config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type: "github",
		}

		ext := buildExtendedConfig(cfg)
		assert.Empty(t, ext)
	})

	t.Run("includes LinuxKit config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type:      "linuxkit",
			Config:    "linuxkit.yaml",
			Formats:   []string{"iso", "qcow2"},
			Platforms: []string{"linux/amd64", "linux/arm64"},
		}

		ext := buildExtendedConfig(cfg)

		assert.Equal(t, "linuxkit.yaml", ext["config"])
		assert.Equal(t, []any{"iso", "qcow2"}, ext["formats"])
		assert.Equal(t, []any{"linux/amd64", "linux/arm64"}, ext["platforms"])
	})

	t.Run("includes Docker config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type:       "docker",
			Registry:   "ghcr.io",
			Image:      "owner/repo",
			Dockerfile: "Dockerfile.prod",
			Tags:       []string{"latest", "v1.0.0"},
			BuildArgs:  map[string]string{"VERSION": "1.0.0"},
		}

		ext := buildExtendedConfig(cfg)

		assert.Equal(t, "ghcr.io", ext["registry"])
		assert.Equal(t, "owner/repo", ext["image"])
		assert.Equal(t, "Dockerfile.prod", ext["dockerfile"])
		assert.Equal(t, []any{"latest", "v1.0.0"}, ext["tags"])
		// BuildArgs is converted from map[string]string to map[string]any.
		buildArgs := ext["build_args"].(map[string]any)
		assert.Equal(t, "1.0.0", buildArgs["VERSION"])
	})

	t.Run("includes npm config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type:    "npm",
			Package: "@host-uk/core",
			Access:  "public",
		}

		ext := buildExtendedConfig(cfg)

		assert.Equal(t, "@host-uk/core", ext["package"])
		assert.Equal(t, "public", ext["access"])
	})

	t.Run("includes Homebrew config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type:    "homebrew",
			Tap:     "host-uk/tap",
			Formula: "core",
		}

		ext := buildExtendedConfig(cfg)

		assert.Equal(t, "host-uk/tap", ext["tap"])
		assert.Equal(t, "core", ext["formula"])
	})

	t.Run("includes Scoop config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type:   "scoop",
			Bucket: "host-uk/bucket",
		}

		ext := buildExtendedConfig(cfg)

		assert.Equal(t, "host-uk/bucket", ext["bucket"])
	})

	t.Run("includes AUR config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type:       "aur",
			Maintainer: "John Doe <john@example.com>",
		}

		ext := buildExtendedConfig(cfg)

		assert.Equal(t, "John Doe <john@example.com>", ext["maintainer"])
	})

	t.Run("includes Chocolatey config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type: "chocolatey",
			Push: true,
		}

		ext := buildExtendedConfig(cfg)

		assert.True(t, ext["push"].(bool))
	})

	t.Run("includes Official config", func(t *testing.T) {
		cfg := PublisherConfig{
			Type: "homebrew",
			Official: &OfficialConfig{
				Enabled: true,
				Output:  "/path/to/output",
			},
		}

		ext := buildExtendedConfig(cfg)

		official := ext["official"].(map[string]any)
		assert.True(t, official["enabled"].(bool))
		assert.Equal(t, "/path/to/output", official["output"])
	})

	t.Run("Official config without output", func(t *testing.T) {
		cfg := PublisherConfig{
			Type: "scoop",
			Official: &OfficialConfig{
				Enabled: true,
			},
		}

		ext := buildExtendedConfig(cfg)

		// "output" must be absent (not empty-string) when unset.
		official := ext["official"].(map[string]any)
		assert.True(t, official["enabled"].(bool))
		_, hasOutput := official["output"]
		assert.False(t, hasOutput)
	})
}
|
||||
|
||||
func TestToAnySlice_Good(t *testing.T) {
|
||||
t.Run("converts string slice to any slice", func(t *testing.T) {
|
||||
input := []string{"a", "b", "c"}
|
||||
|
||||
result := toAnySlice(input)
|
||||
|
||||
assert.Len(t, result, 3)
|
||||
assert.Equal(t, "a", result[0])
|
||||
assert.Equal(t, "b", result[1])
|
||||
assert.Equal(t, "c", result[2])
|
||||
})
|
||||
|
||||
t.Run("handles empty slice", func(t *testing.T) {
|
||||
input := []string{}
|
||||
|
||||
result := toAnySlice(input)
|
||||
|
||||
assert.Empty(t, result)
|
||||
})
|
||||
|
||||
t.Run("handles single element", func(t *testing.T) {
|
||||
input := []string{"only"}
|
||||
|
||||
result := toAnySlice(input)
|
||||
|
||||
assert.Len(t, result, 1)
|
||||
assert.Equal(t, "only", result[0])
|
||||
})
|
||||
}
|
||||
|
||||
// TestPublish_Good exercises the happy path of Publish against a prepared
// dist/ directory, with publishers disabled to avoid network calls.
func TestPublish_Good(t *testing.T) {
	t.Run("returns release with version from config", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")
		cfg.Publishers = nil // No publishers to avoid network calls

		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)

		assert.Equal(t, "v1.0.0", release.Version)
		assert.Len(t, release.Artifacts, 1)
	})

	t.Run("finds artifacts in dist directory", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		// Two archives plus the checksum file: all three should be listed.
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-linux.tar.gz"), []byte("test"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app-darwin.tar.gz"), []byte("test"), 0644))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "CHECKSUMS.txt"), []byte("checksums"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")
		cfg.Publishers = nil

		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)

		assert.Len(t, release.Artifacts, 3)
	})
}
|
||||
|
||||
// TestPublish_Bad covers Publish failure modes: nil config, missing dist/,
// empty dist/, and an unknown publisher type. The final subtest documents
// that a missing version in a non-git directory falls back to v0.0.1
// rather than failing.
func TestPublish_Bad(t *testing.T) {
	t.Run("returns error when config is nil", func(t *testing.T) {
		_, err := Publish(context.Background(), nil, true)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "config is nil")
	})

	t.Run("returns error when dist directory missing", func(t *testing.T) {
		dir := t.TempDir()

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")

		_, err := Publish(context.Background(), cfg, true)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "dist/ directory not found")
	})

	t.Run("returns error when no artifacts found", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")

		_, err := Publish(context.Background(), cfg, true)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "no artifacts found")
	})

	t.Run("returns error for unsupported publisher", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")
		cfg.Publishers = []PublisherConfig{
			{Type: "unsupported"},
		}

		_, err := Publish(context.Background(), cfg, true)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "unsupported publisher type")
	})

	t.Run("returns error when version determination fails in non-git dir", func(t *testing.T) {
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		// Don't set version - let it try to determine from git
		cfg.Publishers = nil

		// In a non-git directory, DetermineVersion returns v0.0.1 as default
		// so we verify that the publish proceeds without error
		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)
		assert.Equal(t, "v0.0.1", release.Version)
	})
}
|
||||
|
||||
// TestRun_Good drives Run against a throwaway Go module. Because Run
// performs a real build, the test tolerates a build failure in constrained
// CI environments and only asserts the version when the build succeeds.
func TestRun_Good(t *testing.T) {
	t.Run("returns release with version from config", func(t *testing.T) {
		// Create a minimal Go project for testing
		dir := t.TempDir()

		// Create go.mod
		goMod := `module testapp

go 1.21
`
		require.NoError(t, os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644))

		// Create main.go
		mainGo := `package main

func main() {}
`
		require.NoError(t, os.WriteFile(filepath.Join(dir, "main.go"), []byte(mainGo), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")
		cfg.Project.Name = "testapp"
		cfg.Build.Targets = []TargetConfig{} // Empty targets to use defaults
		cfg.Publishers = nil                 // No publishers to avoid network calls

		// Note: This test will actually try to build, which may fail in CI
		// So we just test that the function accepts the config properly
		release, err := Run(context.Background(), cfg, true)
		if err != nil {
			// Build might fail in test environment, but we still verify the error message
			assert.Contains(t, err.Error(), "build")
		} else {
			assert.Equal(t, "v1.0.0", release.Version)
		}
	})
}
|
||||
|
||||
func TestRun_Bad(t *testing.T) {
|
||||
t.Run("returns error when config is nil", func(t *testing.T) {
|
||||
_, err := Run(context.Background(), nil, true)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "config is nil")
|
||||
})
|
||||
}
|
||||
|
||||
func TestRelease_Structure(t *testing.T) {
|
||||
t.Run("Release struct holds expected fields", func(t *testing.T) {
|
||||
release := &Release{
|
||||
Version: "v1.0.0",
|
||||
Artifacts: []build.Artifact{{Path: "/path/to/artifact"}},
|
||||
Changelog: "## v1.0.0\n\nChanges",
|
||||
ProjectDir: "/project",
|
||||
}
|
||||
|
||||
assert.Equal(t, "v1.0.0", release.Version)
|
||||
assert.Len(t, release.Artifacts, 1)
|
||||
assert.Contains(t, release.Changelog, "v1.0.0")
|
||||
assert.Equal(t, "/project", release.ProjectDir)
|
||||
})
|
||||
}
|
||||
|
||||
// TestPublish_VersionFromGit verifies that Publish picks up the version
// from the latest git tag when no explicit version is configured.
func TestPublish_VersionFromGit(t *testing.T) {
	t.Run("determines version from git when not set", func(t *testing.T) {
		dir := setupPublishGitRepo(t)
		createPublishCommit(t, dir, "feat: initial commit")
		createPublishTag(t, dir, "v1.2.3")

		// Create dist directory with artifact
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		// Don't set version - let it be determined from git
		cfg.Publishers = nil

		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)

		assert.Equal(t, "v1.2.3", release.Version)
	})
}
|
||||
|
||||
// TestPublish_ChangelogGeneration verifies that Publish builds a changelog
// from git history when the project is a git repo, and falls back to a
// minimal "Release <version>" changelog otherwise.
func TestPublish_ChangelogGeneration(t *testing.T) {
	t.Run("generates changelog from git commits when available", func(t *testing.T) {
		dir := setupPublishGitRepo(t)
		createPublishCommit(t, dir, "feat: add feature")
		createPublishTag(t, dir, "v1.0.0")
		createPublishCommit(t, dir, "fix: fix bug")
		createPublishTag(t, dir, "v1.0.1")

		// Create dist directory with artifact
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.1")
		cfg.Publishers = nil

		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)

		// Changelog should contain either the commit message or the version
		assert.Contains(t, release.Changelog, "v1.0.1")
	})

	t.Run("uses fallback changelog on error", func(t *testing.T) {
		dir := t.TempDir() // Not a git repo
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")
		cfg.Publishers = nil

		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)

		// Should use fallback changelog
		assert.Contains(t, release.Changelog, "Release v1.0.0")
	})
}
|
||||
|
||||
// TestPublish_DefaultProjectDir verifies that the Release reports a
// non-empty ProjectDir after publishing.
func TestPublish_DefaultProjectDir(t *testing.T) {
	t.Run("uses current directory when projectDir is empty", func(t *testing.T) {
		// Create artifacts in current directory's dist folder
		dir := t.TempDir()
		distDir := filepath.Join(dir, "dist")
		require.NoError(t, os.MkdirAll(distDir, 0755))
		require.NoError(t, os.WriteFile(filepath.Join(distDir, "app.tar.gz"), []byte("test"), 0644))

		cfg := DefaultConfig()
		cfg.SetProjectDir(dir)
		cfg.SetVersion("v1.0.0")
		cfg.Publishers = nil

		release, err := Publish(context.Background(), cfg, true)
		require.NoError(t, err)

		assert.NotEmpty(t, release.ProjectDir)
	})
}
|
||||
|
||||
// Helper functions for publish tests
|
||||
func setupPublishGitRepo(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "config", "user.email", "test@example.com")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "config", "user.name", "Test User")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func createPublishCommit(t *testing.T, dir, message string) {
|
||||
t.Helper()
|
||||
|
||||
filePath := filepath.Join(dir, "publish_test.txt")
|
||||
content, _ := os.ReadFile(filePath)
|
||||
content = append(content, []byte(message+"\n")...)
|
||||
require.NoError(t, os.WriteFile(filePath, content, 0644))
|
||||
|
||||
cmd := exec.Command("git", "add", ".")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "commit", "-m", message)
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
|
||||
func createPublishTag(t *testing.T, dir, tag string) {
|
||||
t.Helper()
|
||||
cmd := exec.Command("git", "tag", tag)
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
134
release/sdk.go
134
release/sdk.go
|
|
@ -1,134 +0,0 @@
|
|||
// Package release provides release automation with changelog generation and publishing.
|
||||
package release
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/sdk"
|
||||
)
|
||||
|
||||
// SDKRelease holds the result of an SDK release.
type SDKRelease struct {
	// Version is the SDK version that was (or would be) generated.
	Version string
	// Languages lists the target languages the SDK was generated for.
	Languages []string
	// Output is the directory the generated SDKs are written to
	// (defaults to "sdk" when not configured).
	Output string
}
|
||||
|
||||
// RunSDK executes SDK-only release: diff check + generate.
|
||||
// If dryRun is true, it shows what would be done without generating.
|
||||
func RunSDK(ctx context.Context, cfg *Config, dryRun bool) (*SDKRelease, error) {
|
||||
if cfg == nil {
|
||||
return nil, errors.New("release.RunSDK: config is nil")
|
||||
}
|
||||
if cfg.SDK == nil {
|
||||
return nil, errors.New("release.RunSDK: sdk not configured in .core/release.yaml")
|
||||
}
|
||||
|
||||
projectDir := cfg.projectDir
|
||||
if projectDir == "" {
|
||||
projectDir = "."
|
||||
}
|
||||
|
||||
// Determine version
|
||||
version := cfg.version
|
||||
if version == "" {
|
||||
var err error
|
||||
version, err = DetermineVersion(projectDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("release.RunSDK: failed to determine version: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run diff check if enabled
|
||||
if cfg.SDK.Diff.Enabled {
|
||||
breaking, err := checkBreakingChanges(projectDir, cfg.SDK)
|
||||
if err != nil {
|
||||
// Non-fatal: warn and continue
|
||||
fmt.Printf("Warning: diff check failed: %v\n", err)
|
||||
} else if breaking {
|
||||
if cfg.SDK.Diff.FailOnBreaking {
|
||||
return nil, errors.New("release.RunSDK: breaking API changes detected")
|
||||
}
|
||||
fmt.Printf("Warning: breaking API changes detected\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare result
|
||||
output := cfg.SDK.Output
|
||||
if output == "" {
|
||||
output = "sdk"
|
||||
}
|
||||
|
||||
result := &SDKRelease{
|
||||
Version: version,
|
||||
Languages: cfg.SDK.Languages,
|
||||
Output: output,
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Generate SDKs
|
||||
sdkCfg := toSDKConfig(cfg.SDK)
|
||||
s := sdk.New(projectDir, sdkCfg)
|
||||
s.SetVersion(version)
|
||||
|
||||
if err := s.Generate(ctx); err != nil {
|
||||
return nil, fmt.Errorf("release.RunSDK: generation failed: %w", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// checkBreakingChanges runs oasdiff to detect breaking changes.
|
||||
func checkBreakingChanges(projectDir string, cfg *SDKConfig) (bool, error) {
|
||||
// Get previous tag for comparison (uses getPreviousTag from changelog.go)
|
||||
prevTag, err := getPreviousTag(projectDir, "HEAD")
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("no previous tag found: %w", err)
|
||||
}
|
||||
|
||||
// Detect spec path
|
||||
specPath := cfg.Spec
|
||||
if specPath == "" {
|
||||
s := sdk.New(projectDir, nil)
|
||||
specPath, err = s.DetectSpec()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// Run diff
|
||||
result, err := sdk.Diff(prevTag, specPath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result.Breaking, nil
|
||||
}
|
||||
|
||||
// toSDKConfig converts release.SDKConfig to sdk.Config.
|
||||
func toSDKConfig(cfg *SDKConfig) *sdk.Config {
|
||||
if cfg == nil {
|
||||
return nil
|
||||
}
|
||||
return &sdk.Config{
|
||||
Spec: cfg.Spec,
|
||||
Languages: cfg.Languages,
|
||||
Output: cfg.Output,
|
||||
Package: sdk.PackageConfig{
|
||||
Name: cfg.Package.Name,
|
||||
Version: cfg.Package.Version,
|
||||
},
|
||||
Diff: sdk.DiffConfig{
|
||||
Enabled: cfg.Diff.Enabled,
|
||||
FailOnBreaking: cfg.Diff.FailOnBreaking,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -1,229 +0,0 @@
|
|||
package release
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRunSDK_Bad_NilConfig(t *testing.T) {
|
||||
_, err := RunSDK(context.Background(), nil, true)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "config is nil")
|
||||
}
|
||||
|
||||
func TestRunSDK_Bad_NoSDKConfig(t *testing.T) {
|
||||
cfg := &Config{
|
||||
SDK: nil,
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
|
||||
_, err := RunSDK(context.Background(), cfg, true)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "sdk not configured")
|
||||
}
|
||||
|
||||
func TestRunSDK_Good_DryRun(t *testing.T) {
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Languages: []string{"typescript", "python"},
|
||||
Output: "sdk",
|
||||
},
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
cfg.version = "v1.0.0"
|
||||
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "v1.0.0", result.Version)
|
||||
assert.Len(t, result.Languages, 2)
|
||||
assert.Contains(t, result.Languages, "typescript")
|
||||
assert.Contains(t, result.Languages, "python")
|
||||
assert.Equal(t, "sdk", result.Output)
|
||||
}
|
||||
|
||||
func TestRunSDK_Good_DryRunDefaultOutput(t *testing.T) {
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Languages: []string{"go"},
|
||||
Output: "", // Empty output, should default to "sdk"
|
||||
},
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
cfg.version = "v2.0.0"
|
||||
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "sdk", result.Output)
|
||||
}
|
||||
|
||||
func TestRunSDK_Good_DryRunDefaultProjectDir(t *testing.T) {
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Languages: []string{"typescript"},
|
||||
Output: "out",
|
||||
},
|
||||
}
|
||||
// projectDir is empty, should default to "."
|
||||
cfg.version = "v1.0.0"
|
||||
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "v1.0.0", result.Version)
|
||||
}
|
||||
|
||||
func TestRunSDK_Bad_BreakingChangesFailOnBreaking(t *testing.T) {
|
||||
// This test verifies that when diff.FailOnBreaking is true and breaking changes
|
||||
// are detected, RunSDK returns an error. However, since we can't easily mock
|
||||
// the diff check, this test verifies the config is correctly processed.
|
||||
// The actual breaking change detection is tested in pkg/sdk/diff_test.go.
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Languages: []string{"typescript"},
|
||||
Output: "sdk",
|
||||
Diff: SDKDiffConfig{
|
||||
Enabled: true,
|
||||
FailOnBreaking: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
cfg.version = "v1.0.0"
|
||||
|
||||
// In dry run mode with no git repo, diff check will fail gracefully
|
||||
// (non-fatal warning), so this should succeed
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", result.Version)
|
||||
}
|
||||
|
||||
func TestToSDKConfig_Good(t *testing.T) {
|
||||
sdkCfg := &SDKConfig{
|
||||
Spec: "api/openapi.yaml",
|
||||
Languages: []string{"typescript", "go"},
|
||||
Output: "sdk",
|
||||
Package: SDKPackageConfig{
|
||||
Name: "myapi",
|
||||
Version: "v1.0.0",
|
||||
},
|
||||
Diff: SDKDiffConfig{
|
||||
Enabled: true,
|
||||
FailOnBreaking: true,
|
||||
},
|
||||
}
|
||||
|
||||
result := toSDKConfig(sdkCfg)
|
||||
|
||||
assert.Equal(t, "api/openapi.yaml", result.Spec)
|
||||
assert.Equal(t, []string{"typescript", "go"}, result.Languages)
|
||||
assert.Equal(t, "sdk", result.Output)
|
||||
assert.Equal(t, "myapi", result.Package.Name)
|
||||
assert.Equal(t, "v1.0.0", result.Package.Version)
|
||||
assert.True(t, result.Diff.Enabled)
|
||||
assert.True(t, result.Diff.FailOnBreaking)
|
||||
}
|
||||
|
||||
func TestToSDKConfig_Good_NilInput(t *testing.T) {
|
||||
result := toSDKConfig(nil)
|
||||
assert.Nil(t, result)
|
||||
}
|
||||
|
||||
func TestRunSDK_Good_WithDiffEnabledNoFailOnBreaking(t *testing.T) {
|
||||
// Tests diff enabled but FailOnBreaking=false (should warn but not fail)
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Languages: []string{"typescript"},
|
||||
Output: "sdk",
|
||||
Diff: SDKDiffConfig{
|
||||
Enabled: true,
|
||||
FailOnBreaking: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
cfg.version = "v1.0.0"
|
||||
|
||||
// Dry run should succeed even without git repo (diff check fails gracefully)
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", result.Version)
|
||||
assert.Contains(t, result.Languages, "typescript")
|
||||
}
|
||||
|
||||
func TestRunSDK_Good_MultipleLanguages(t *testing.T) {
|
||||
// Tests multiple language support
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Languages: []string{"typescript", "python", "go", "java"},
|
||||
Output: "multi-sdk",
|
||||
},
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
cfg.version = "v3.0.0"
|
||||
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "v3.0.0", result.Version)
|
||||
assert.Len(t, result.Languages, 4)
|
||||
assert.Equal(t, "multi-sdk", result.Output)
|
||||
}
|
||||
|
||||
func TestRunSDK_Good_WithPackageConfig(t *testing.T) {
|
||||
// Tests that package config is properly handled
|
||||
cfg := &Config{
|
||||
SDK: &SDKConfig{
|
||||
Spec: "openapi.yaml",
|
||||
Languages: []string{"typescript"},
|
||||
Output: "sdk",
|
||||
Package: SDKPackageConfig{
|
||||
Name: "my-custom-sdk",
|
||||
Version: "v2.5.0",
|
||||
},
|
||||
},
|
||||
}
|
||||
cfg.projectDir = "/tmp"
|
||||
cfg.version = "v1.0.0"
|
||||
|
||||
result, err := RunSDK(context.Background(), cfg, true)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", result.Version)
|
||||
}
|
||||
|
||||
func TestToSDKConfig_Good_EmptyPackageConfig(t *testing.T) {
|
||||
// Tests conversion with empty package config
|
||||
sdkCfg := &SDKConfig{
|
||||
Languages: []string{"go"},
|
||||
Output: "sdk",
|
||||
// Package is empty struct
|
||||
}
|
||||
|
||||
result := toSDKConfig(sdkCfg)
|
||||
|
||||
assert.Equal(t, []string{"go"}, result.Languages)
|
||||
assert.Equal(t, "sdk", result.Output)
|
||||
assert.Empty(t, result.Package.Name)
|
||||
assert.Empty(t, result.Package.Version)
|
||||
}
|
||||
|
||||
func TestToSDKConfig_Good_DiffDisabled(t *testing.T) {
|
||||
// Tests conversion with diff disabled
|
||||
sdkCfg := &SDKConfig{
|
||||
Languages: []string{"typescript"},
|
||||
Output: "sdk",
|
||||
Diff: SDKDiffConfig{
|
||||
Enabled: false,
|
||||
FailOnBreaking: false,
|
||||
},
|
||||
}
|
||||
|
||||
result := toSDKConfig(sdkCfg)
|
||||
|
||||
assert.False(t, result.Diff.Enabled)
|
||||
assert.False(t, result.Diff.FailOnBreaking)
|
||||
}
|
||||
35
release/testdata/.core/release.yaml
vendored
35
release/testdata/.core/release.yaml
vendored
|
|
@ -1,35 +0,0 @@
|
|||
version: 1
|
||||
|
||||
project:
|
||||
name: myapp
|
||||
repository: owner/repo
|
||||
|
||||
build:
|
||||
targets:
|
||||
- os: linux
|
||||
arch: amd64
|
||||
- os: linux
|
||||
arch: arm64
|
||||
- os: darwin
|
||||
arch: amd64
|
||||
- os: darwin
|
||||
arch: arm64
|
||||
- os: windows
|
||||
arch: amd64
|
||||
|
||||
publishers:
|
||||
- type: github
|
||||
prerelease: false
|
||||
draft: false
|
||||
|
||||
changelog:
|
||||
include:
|
||||
- feat
|
||||
- fix
|
||||
- perf
|
||||
exclude:
|
||||
- chore
|
||||
- docs
|
||||
- style
|
||||
- test
|
||||
- ci
|
||||
|
|
@ -1,195 +0,0 @@
|
|||
// Package release provides release automation with changelog generation and publishing.
|
||||
package release
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// semverRegex matches semantic version strings with or without a 'v' prefix.
// Capture groups: major, minor, patch, optional prerelease, optional build
// metadata.
var semverRegex = regexp.MustCompile(`^v?(\d+)\.(\d+)\.(\d+)(?:-([a-zA-Z0-9.-]+))?(?:\+([a-zA-Z0-9.-]+))?$`)

// DetermineVersion determines the version for a release.
// It checks in order:
//  1. Git tag on HEAD
//  2. Most recent tag + increment patch
//  3. Default to v0.0.1 if no tags exist
func DetermineVersion(dir string) (string, error) {
	// Check if HEAD has a tag.
	headTag, err := getTagOnHead(dir)
	if err == nil && headTag != "" {
		return normalizeVersion(headTag), nil
	}

	// Get the most recent tag anywhere in history.
	latestTag, err := getLatestTag(dir)
	if err != nil || latestTag == "" {
		// No tags exist (or git failed, e.g. empty repo): return default.
		return "v0.0.1", nil
	}

	// Commits exist after the latest tag: bump the patch component.
	return IncrementVersion(latestTag), nil
}

// IncrementVersion increments the patch version of a semver string.
// Examples:
//   - "v1.2.3" -> "v1.2.4"
//   - "1.2.3" -> "v1.2.4"
//   - "v1.2.3-alpha" -> "v1.2.4" (strips prerelease)
func IncrementVersion(current string) string {
	matches := semverRegex.FindStringSubmatch(current)
	if matches == nil {
		// Not a valid semver, return as-is with increment suffix.
		return current + ".1"
	}

	major, _ := strconv.Atoi(matches[1])
	minor, _ := strconv.Atoi(matches[2])
	patch, _ := strconv.Atoi(matches[3])

	// Increment patch; prerelease/build metadata are dropped.
	patch++

	return fmt.Sprintf("v%d.%d.%d", major, minor, patch)
}

// IncrementMinor increments the minor version of a semver string and resets
// the patch component.
// Examples:
//   - "v1.2.3" -> "v1.3.0"
//   - "1.2.3" -> "v1.3.0"
func IncrementMinor(current string) string {
	matches := semverRegex.FindStringSubmatch(current)
	if matches == nil {
		// Fallback for invalid input, mirroring IncrementVersion.
		return current + ".1"
	}

	major, _ := strconv.Atoi(matches[1])
	minor, _ := strconv.Atoi(matches[2])

	// Increment minor, reset patch.
	minor++

	return fmt.Sprintf("v%d.%d.0", major, minor)
}

// IncrementMajor increments the major version of a semver string and resets
// the minor and patch components.
// Examples:
//   - "v1.2.3" -> "v2.0.0"
//   - "1.2.3" -> "v2.0.0"
func IncrementMajor(current string) string {
	matches := semverRegex.FindStringSubmatch(current)
	if matches == nil {
		// Fallback for invalid input, mirroring IncrementVersion.
		return current + ".1"
	}

	major, _ := strconv.Atoi(matches[1])

	// Increment major, reset minor and patch.
	major++

	return fmt.Sprintf("v%d.0.0", major)
}

// ParseVersion parses a semver string into its components.
// Returns (major, minor, patch, prerelease, build, error).
func ParseVersion(version string) (int, int, int, string, string, error) {
	matches := semverRegex.FindStringSubmatch(version)
	if matches == nil {
		return 0, 0, 0, "", "", fmt.Errorf("invalid semver: %s", version)
	}

	major, _ := strconv.Atoi(matches[1])
	minor, _ := strconv.Atoi(matches[2])
	patch, _ := strconv.Atoi(matches[3])
	prerelease := matches[4]
	build := matches[5]

	return major, minor, patch, prerelease, build, nil
}

// ValidateVersion checks if a string is a valid semver.
func ValidateVersion(version string) bool {
	return semverRegex.MatchString(version)
}

// normalizeVersion ensures the version starts with 'v'.
func normalizeVersion(version string) string {
	if !strings.HasPrefix(version, "v") {
		return "v" + version
	}
	return version
}

// getTagOnHead returns the tag on HEAD, if any. Errors when HEAD carries no
// exact tag (git describe --exact-match fails).
func getTagOnHead(dir string) (string, error) {
	cmd := exec.Command("git", "describe", "--tags", "--exact-match", "HEAD")
	cmd.Dir = dir
	output, err := cmd.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(output)), nil
}

// getLatestTag returns the most recent tag in the repository. Errors when no
// tags exist.
func getLatestTag(dir string) (string, error) {
	cmd := exec.Command("git", "describe", "--tags", "--abbrev=0")
	cmd.Dir = dir
	output, err := cmd.Output()
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(output)), nil
}

// CompareVersions compares two semver strings.
// Returns:
//
//	-1 if a < b
//	 0 if a == b
//	 1 if a > b
//
// Prerelease identifiers are compared per semver precedence (semver.org §11):
// a prerelease version ranks below the corresponding release, numeric
// identifiers compare numerically and rank below alphanumeric ones. Build
// metadata is ignored, per the spec.
func CompareVersions(a, b string) int {
	aMajor, aMinor, aPatch, aPre, _, errA := ParseVersion(a)
	bMajor, bMinor, bPatch, bPre, _, errB := ParseVersion(b)

	// Invalid versions are considered less than valid ones; two invalid
	// versions fall back to plain string comparison.
	if errA != nil && errB != nil {
		return strings.Compare(a, b)
	}
	if errA != nil {
		return -1
	}
	if errB != nil {
		return 1
	}

	if c := compareInt(aMajor, bMajor); c != 0 {
		return c
	}
	if c := compareInt(aMinor, bMinor); c != 0 {
		return c
	}
	if c := compareInt(aPatch, bPatch); c != 0 {
		return c
	}
	// Core versions equal: precedence is decided by the prerelease part.
	return comparePrerelease(aPre, bPre)
}

// compareInt returns -1, 0 or 1 for a < b, a == b, a > b.
func compareInt(a, b int) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}

// comparePrerelease compares two prerelease strings per semver §11.
// An empty string (no prerelease) has higher precedence than any prerelease.
func comparePrerelease(a, b string) int {
	switch {
	case a == b:
		return 0
	case a == "":
		return 1 // release outranks any prerelease
	case b == "":
		return -1
	}
	as := strings.Split(a, ".")
	bs := strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		if as[i] == bs[i] {
			continue
		}
		an, aErr := strconv.Atoi(as[i])
		bn, bErr := strconv.Atoi(bs[i])
		switch {
		case aErr == nil && bErr == nil:
			return compareInt(an, bn) // both numeric: numeric order
		case aErr == nil:
			return -1 // numeric identifiers rank below alphanumeric
		case bErr == nil:
			return 1
		default:
			return strings.Compare(as[i], bs[i]) // ASCII lexical order
		}
	}
	// All shared identifiers equal: the longer identifier set wins.
	return compareInt(len(as), len(bs))
}
|
||||
|
|
@ -1,520 +0,0 @@
|
|||
package release
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setupGitRepo creates a temporary directory with an initialized git repository.
|
||||
func setupGitRepo(t *testing.T) string {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
// Configure git user for commits
|
||||
cmd = exec.Command("git", "config", "user.email", "test@example.com")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "config", "user.name", "Test User")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
// createCommit creates a commit in the given directory.
|
||||
func createCommit(t *testing.T, dir, message string) {
|
||||
t.Helper()
|
||||
|
||||
// Create or modify a file
|
||||
filePath := filepath.Join(dir, "test.txt")
|
||||
content, _ := os.ReadFile(filePath)
|
||||
content = append(content, []byte(message+"\n")...)
|
||||
require.NoError(t, os.WriteFile(filePath, content, 0644))
|
||||
|
||||
// Stage and commit
|
||||
cmd := exec.Command("git", "add", ".")
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
|
||||
cmd = exec.Command("git", "commit", "-m", message)
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
|
||||
// createTag creates a tag in the given directory.
|
||||
func createTag(t *testing.T, dir, tag string) {
|
||||
t.Helper()
|
||||
cmd := exec.Command("git", "tag", tag)
|
||||
cmd.Dir = dir
|
||||
require.NoError(t, cmd.Run())
|
||||
}
|
||||
|
||||
func TestDetermineVersion_Good(t *testing.T) {
|
||||
t.Run("returns tag when HEAD has tag", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
|
||||
version, err := DetermineVersion(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", version)
|
||||
})
|
||||
|
||||
t.Run("normalizes tag without v prefix", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "1.0.0")
|
||||
|
||||
version, err := DetermineVersion(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", version)
|
||||
})
|
||||
|
||||
t.Run("increments patch when commits after tag", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
createCommit(t, dir, "feat: new feature")
|
||||
|
||||
version, err := DetermineVersion(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.1", version)
|
||||
})
|
||||
|
||||
t.Run("returns v0.0.1 when no tags exist", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
|
||||
version, err := DetermineVersion(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v0.0.1", version)
|
||||
})
|
||||
|
||||
t.Run("handles multiple tags with increments", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: first")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
createCommit(t, dir, "feat: second")
|
||||
createTag(t, dir, "v1.0.1")
|
||||
createCommit(t, dir, "feat: third")
|
||||
|
||||
version, err := DetermineVersion(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.2", version)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDetermineVersion_Bad(t *testing.T) {
|
||||
t.Run("returns v0.0.1 for empty repo", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
|
||||
// No commits, git describe will fail
|
||||
version, err := DetermineVersion(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v0.0.1", version)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetTagOnHead_Good(t *testing.T) {
|
||||
t.Run("returns tag when HEAD has tag", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "v1.2.3")
|
||||
|
||||
tag, err := getTagOnHead(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.2.3", tag)
|
||||
})
|
||||
|
||||
t.Run("returns latest tag when multiple tags on HEAD", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
createTag(t, dir, "v1.0.0-beta")
|
||||
|
||||
tag, err := getTagOnHead(dir)
|
||||
require.NoError(t, err)
|
||||
// Git returns one of the tags
|
||||
assert.Contains(t, []string{"v1.0.0", "v1.0.0-beta"}, tag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetTagOnHead_Bad(t *testing.T) {
|
||||
t.Run("returns error when HEAD has no tag", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
|
||||
_, err := getTagOnHead(dir)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("returns error when commits after tag", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
createCommit(t, dir, "feat: new feature")
|
||||
|
||||
_, err := getTagOnHead(dir)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetLatestTag_Good(t *testing.T) {
|
||||
t.Run("returns latest tag", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
|
||||
tag, err := getLatestTag(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.0.0", tag)
|
||||
})
|
||||
|
||||
t.Run("returns most recent tag after multiple commits", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: first")
|
||||
createTag(t, dir, "v1.0.0")
|
||||
createCommit(t, dir, "feat: second")
|
||||
createTag(t, dir, "v1.1.0")
|
||||
createCommit(t, dir, "feat: third")
|
||||
|
||||
tag, err := getLatestTag(dir)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "v1.1.0", tag)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetLatestTag_Bad(t *testing.T) {
|
||||
t.Run("returns error when no tags exist", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
createCommit(t, dir, "feat: initial commit")
|
||||
|
||||
_, err := getLatestTag(dir)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("returns error for empty repo", func(t *testing.T) {
|
||||
dir := setupGitRepo(t)
|
||||
|
||||
_, err := getLatestTag(dir)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIncrementMinor_Bad(t *testing.T) {
|
||||
t.Run("returns fallback for invalid version", func(t *testing.T) {
|
||||
result := IncrementMinor("not-valid")
|
||||
assert.Equal(t, "not-valid.1", result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIncrementMajor_Bad(t *testing.T) {
|
||||
t.Run("returns fallback for invalid version", func(t *testing.T) {
|
||||
result := IncrementMajor("not-valid")
|
||||
assert.Equal(t, "not-valid.1", result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCompareVersions_Ugly(t *testing.T) {
|
||||
t.Run("handles both invalid versions", func(t *testing.T) {
|
||||
result := CompareVersions("invalid-a", "invalid-b")
|
||||
// Should do string comparison for invalid versions
|
||||
assert.Equal(t, -1, result) // "invalid-a" < "invalid-b"
|
||||
})
|
||||
|
||||
t.Run("invalid a returns -1", func(t *testing.T) {
|
||||
result := CompareVersions("invalid", "v1.0.0")
|
||||
assert.Equal(t, -1, result)
|
||||
})
|
||||
|
||||
t.Run("invalid b returns 1", func(t *testing.T) {
|
||||
result := CompareVersions("v1.0.0", "invalid")
|
||||
assert.Equal(t, 1, result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIncrementVersion_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "increment patch with v prefix",
|
||||
input: "v1.2.3",
|
||||
expected: "v1.2.4",
|
||||
},
|
||||
{
|
||||
name: "increment patch without v prefix",
|
||||
input: "1.2.3",
|
||||
expected: "v1.2.4",
|
||||
},
|
||||
{
|
||||
name: "increment from zero",
|
||||
input: "v0.0.0",
|
||||
expected: "v0.0.1",
|
||||
},
|
||||
{
|
||||
name: "strips prerelease",
|
||||
input: "v1.2.3-alpha",
|
||||
expected: "v1.2.4",
|
||||
},
|
||||
{
|
||||
name: "strips build metadata",
|
||||
input: "v1.2.3+build123",
|
||||
expected: "v1.2.4",
|
||||
},
|
||||
{
|
||||
name: "strips prerelease and build",
|
||||
input: "v1.2.3-beta.1+build456",
|
||||
expected: "v1.2.4",
|
||||
},
|
||||
{
|
||||
name: "handles large numbers",
|
||||
input: "v10.20.99",
|
||||
expected: "v10.20.100",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := IncrementVersion(tc.input)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementVersion_Bad(t *testing.T) {
|
||||
t.Run("invalid semver returns original with suffix", func(t *testing.T) {
|
||||
result := IncrementVersion("not-a-version")
|
||||
assert.Equal(t, "not-a-version.1", result)
|
||||
})
|
||||
}
|
||||
|
||||
func TestIncrementMinor_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "increment minor resets patch",
|
||||
input: "v1.2.3",
|
||||
expected: "v1.3.0",
|
||||
},
|
||||
{
|
||||
name: "increment minor from zero",
|
||||
input: "v1.0.5",
|
||||
expected: "v1.1.0",
|
||||
},
|
||||
{
|
||||
name: "handles large numbers",
|
||||
input: "v5.99.50",
|
||||
expected: "v5.100.0",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := IncrementMinor(tc.input)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIncrementMajor_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "increment major resets minor and patch",
|
||||
input: "v1.2.3",
|
||||
expected: "v2.0.0",
|
||||
},
|
||||
{
|
||||
name: "increment major from zero",
|
||||
input: "v0.5.10",
|
||||
expected: "v1.0.0",
|
||||
},
|
||||
{
|
||||
name: "handles large numbers",
|
||||
input: "v99.50.25",
|
||||
expected: "v100.0.0",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := IncrementMajor(tc.input)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseVersion_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
major int
|
||||
minor int
|
||||
patch int
|
||||
prerelease string
|
||||
build string
|
||||
}{
|
||||
{
|
||||
name: "simple version with v",
|
||||
input: "v1.2.3",
|
||||
major: 1, minor: 2, patch: 3,
|
||||
},
|
||||
{
|
||||
name: "simple version without v",
|
||||
input: "1.2.3",
|
||||
major: 1, minor: 2, patch: 3,
|
||||
},
|
||||
{
|
||||
name: "with prerelease",
|
||||
input: "v1.2.3-alpha",
|
||||
major: 1, minor: 2, patch: 3,
|
||||
prerelease: "alpha",
|
||||
},
|
||||
{
|
||||
name: "with prerelease and build",
|
||||
input: "v1.2.3-beta.1+build.456",
|
||||
major: 1, minor: 2, patch: 3,
|
||||
prerelease: "beta.1",
|
||||
build: "build.456",
|
||||
},
|
||||
{
|
||||
name: "with build only",
|
||||
input: "v1.2.3+sha.abc123",
|
||||
major: 1, minor: 2, patch: 3,
|
||||
build: "sha.abc123",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
major, minor, patch, prerelease, build, err := ParseVersion(tc.input)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.major, major)
|
||||
assert.Equal(t, tc.minor, minor)
|
||||
assert.Equal(t, tc.patch, patch)
|
||||
assert.Equal(t, tc.prerelease, prerelease)
|
||||
assert.Equal(t, tc.build, build)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseVersion_Bad(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
}{
|
||||
{"empty string", ""},
|
||||
{"not a version", "not-a-version"},
|
||||
{"missing minor", "v1"},
|
||||
{"missing patch", "v1.2"},
|
||||
{"letters in version", "v1.2.x"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, _, _, _, _, err := ParseVersion(tc.input)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateVersion_Good(t *testing.T) {
|
||||
validVersions := []string{
|
||||
"v1.0.0",
|
||||
"1.0.0",
|
||||
"v0.0.1",
|
||||
"v10.20.30",
|
||||
"v1.2.3-alpha",
|
||||
"v1.2.3+build",
|
||||
"v1.2.3-alpha.1+build.123",
|
||||
}
|
||||
|
||||
for _, v := range validVersions {
|
||||
t.Run(v, func(t *testing.T) {
|
||||
assert.True(t, ValidateVersion(v))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateVersion_Bad(t *testing.T) {
|
||||
invalidVersions := []string{
|
||||
"",
|
||||
"v1",
|
||||
"v1.2",
|
||||
"1.2",
|
||||
"not-a-version",
|
||||
"v1.2.x",
|
||||
"version1.0.0",
|
||||
}
|
||||
|
||||
for _, v := range invalidVersions {
|
||||
t.Run(v, func(t *testing.T) {
|
||||
assert.False(t, ValidateVersion(v))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareVersions_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
a string
|
||||
b string
|
||||
expected int
|
||||
}{
|
||||
{"equal versions", "v1.0.0", "v1.0.0", 0},
|
||||
{"a less than b major", "v1.0.0", "v2.0.0", -1},
|
||||
{"a greater than b major", "v2.0.0", "v1.0.0", 1},
|
||||
{"a less than b minor", "v1.1.0", "v1.2.0", -1},
|
||||
{"a greater than b minor", "v1.2.0", "v1.1.0", 1},
|
||||
{"a less than b patch", "v1.0.1", "v1.0.2", -1},
|
||||
{"a greater than b patch", "v1.0.2", "v1.0.1", 1},
|
||||
{"with and without v prefix", "v1.0.0", "1.0.0", 0},
|
||||
{"different scales", "v1.10.0", "v1.9.0", 1},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := CompareVersions(tc.a, tc.b)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeVersion_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"1.0.0", "v1.0.0"},
|
||||
{"v1.0.0", "v1.0.0"},
|
||||
{"0.0.1", "v0.0.1"},
|
||||
{"v10.20.30", "v10.20.30"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.input, func(t *testing.T) {
|
||||
result := normalizeVersion(tc.input)
|
||||
assert.Equal(t, tc.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -1,630 +0,0 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- Breaking Change Detection Tests (oasdiff integration) ---
|
||||
|
||||
func TestDiff_Good_AddEndpoint_NonBreaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.1.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/status:
|
||||
get:
|
||||
operationId: getStatus
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Breaking, "adding endpoints should not be breaking")
|
||||
assert.Empty(t, result.Changes)
|
||||
assert.Equal(t, "No breaking changes", result.Summary)
|
||||
}
|
||||
|
||||
func TestDiff_Good_RemoveEndpoint_Breaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/orders:
|
||||
get:
|
||||
operationId: listOrders
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Breaking, "removing endpoints should be breaking")
|
||||
assert.NotEmpty(t, result.Changes)
|
||||
assert.Contains(t, result.Summary, "breaking change")
|
||||
}
|
||||
|
||||
func TestDiff_Good_AddRequiredParam_Breaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.1.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
parameters:
|
||||
- name: tenant_id
|
||||
in: query
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Breaking, "adding required parameter should be breaking")
|
||||
assert.NotEmpty(t, result.Changes)
|
||||
}
|
||||
|
||||
func TestDiff_Good_AddOptionalParam_NonBreaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.1.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
parameters:
|
||||
- name: page
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
type: integer
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Breaking, "adding optional parameter should not be breaking")
|
||||
}
|
||||
|
||||
func TestDiff_Good_ChangeResponseType_Breaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: integer
|
||||
name:
|
||||
type: string
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Breaking, "changing response schema type should be breaking")
|
||||
}
|
||||
|
||||
func TestDiff_Good_RemoveHTTPMethod_Breaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
post:
|
||||
operationId: createUser
|
||||
responses:
|
||||
"201":
|
||||
description: Created
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, result.Breaking, "removing HTTP method should be breaking")
|
||||
assert.NotEmpty(t, result.Changes)
|
||||
}
|
||||
|
||||
func TestDiff_Good_IdenticalSpecs_NonBreaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
spec := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
post:
|
||||
operationId: createUser
|
||||
responses:
|
||||
"201":
|
||||
description: Created
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(spec), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(spec), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Breaking, "identical specs should not be breaking")
|
||||
assert.Empty(t, result.Changes)
|
||||
assert.Equal(t, "No breaking changes", result.Summary)
|
||||
}
|
||||
|
||||
// --- Error Handling Tests ---
|
||||
|
||||
func TestDiff_Bad_NonExistentBase(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(`openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths: {}
|
||||
`), 0644))
|
||||
|
||||
_, err := Diff(filepath.Join(tmpDir, "nonexistent.yaml"), revPath)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to load base spec")
|
||||
}
|
||||
|
||||
func TestDiff_Bad_NonExistentRevision(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(`openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths: {}
|
||||
`), 0644))
|
||||
|
||||
_, err := Diff(basePath, filepath.Join(tmpDir, "nonexistent.yaml"))
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to load revision spec")
|
||||
}
|
||||
|
||||
func TestDiff_Bad_InvalidYAML(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte("not: valid: openapi: spec: {{{{"), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(`openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths: {}
|
||||
`), 0644))
|
||||
|
||||
_, err := Diff(basePath, revPath)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// --- DiffExitCode Tests ---
|
||||
|
||||
func TestDiffExitCode_Good(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
result *DiffResult
|
||||
err error
|
||||
expected int
|
||||
}{
|
||||
{
|
||||
name: "no breaking changes returns 0",
|
||||
result: &DiffResult{Breaking: false},
|
||||
err: nil,
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
name: "breaking changes returns 1",
|
||||
result: &DiffResult{Breaking: true, Changes: []string{"removed endpoint"}},
|
||||
err: nil,
|
||||
expected: 1,
|
||||
},
|
||||
{
|
||||
name: "error returns 2",
|
||||
result: nil,
|
||||
err: assert.AnError,
|
||||
expected: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
code := DiffExitCode(tc.result, tc.err)
|
||||
assert.Equal(t, tc.expected, code)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- DiffResult Structure Tests ---
|
||||
|
||||
func TestDiffResult_Good_Summary(t *testing.T) {
|
||||
t.Run("breaking result has count in summary", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create specs with 2 removed endpoints
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/orders:
|
||||
get:
|
||||
operationId: listOrders
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Breaking)
|
||||
assert.Contains(t, result.Summary, "breaking change")
|
||||
// Should have at least 2 changes (removed /users and /orders)
|
||||
assert.GreaterOrEqual(t, len(result.Changes), 2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDiffResult_Good_ChangesAreHumanReadable(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/removed-endpoint:
|
||||
get:
|
||||
operationId: removedEndpoint
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths: {}
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Breaking)
|
||||
// Changes should contain human-readable descriptions from oasdiff
|
||||
for _, change := range result.Changes {
|
||||
assert.NotEmpty(t, change, "each change should have a description")
|
||||
}
|
||||
}
|
||||
|
||||
// --- Multiple Changes Detection Tests ---
|
||||
|
||||
func TestDiff_Good_MultipleBreakingChanges(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
base := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
post:
|
||||
operationId: createUser
|
||||
responses:
|
||||
"201":
|
||||
description: Created
|
||||
delete:
|
||||
operationId: deleteAllUsers
|
||||
responses:
|
||||
"204":
|
||||
description: No Content
|
||||
`
|
||||
revision := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths:
|
||||
/users:
|
||||
get:
|
||||
operationId: listUsers
|
||||
parameters:
|
||||
- name: required_filter
|
||||
in: query
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(base), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revision), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, result.Breaking)
|
||||
// Should detect: removed POST, removed DELETE, and possibly added required param
|
||||
assert.GreaterOrEqual(t, len(result.Changes), 2,
|
||||
"should detect multiple breaking changes, got: %v", result.Changes)
|
||||
}
|
||||
|
||||
// --- JSON Spec Support Tests ---
|
||||
|
||||
func TestDiff_Good_JSONSpecs(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
baseJSON := `{
|
||||
"openapi": "3.0.0",
|
||||
"info": {"title": "Test API", "version": "1.0.0"},
|
||||
"paths": {
|
||||
"/health": {
|
||||
"get": {
|
||||
"operationId": "getHealth",
|
||||
"responses": {"200": {"description": "OK"}}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
revJSON := `{
|
||||
"openapi": "3.0.0",
|
||||
"info": {"title": "Test API", "version": "1.1.0"},
|
||||
"paths": {
|
||||
"/health": {
|
||||
"get": {
|
||||
"operationId": "getHealth",
|
||||
"responses": {"200": {"description": "OK"}}
|
||||
}
|
||||
},
|
||||
"/status": {
|
||||
"get": {
|
||||
"operationId": "getStatus",
|
||||
"responses": {"200": {"description": "OK"}}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
basePath := filepath.Join(tmpDir, "base.json")
|
||||
revPath := filepath.Join(tmpDir, "rev.json")
|
||||
require.NoError(t, os.WriteFile(basePath, []byte(baseJSON), 0644))
|
||||
require.NoError(t, os.WriteFile(revPath, []byte(revJSON), 0644))
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, result.Breaking, "adding endpoint in JSON format should not be breaking")
|
||||
}
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// commonSpecPaths are checked in order when no spec is configured.
// DetectSpec returns the first entry that exists on disk, so order matters:
// conventional api/ locations first, then repo-root files, then docs/ and
// legacy Swagger names.
var commonSpecPaths = []string{
	"api/openapi.yaml",
	"api/openapi.json",
	"openapi.yaml",
	"openapi.json",
	"docs/api.yaml",
	"docs/api.json",
	"swagger.yaml",
	"swagger.json",
}
|
||||
|
||||
// DetectSpec finds the OpenAPI spec file.
|
||||
// Priority: config path -> common paths -> Laravel Scramble.
|
||||
func (s *SDK) DetectSpec() (string, error) {
|
||||
// 1. Check configured path
|
||||
if s.config.Spec != "" {
|
||||
specPath := filepath.Join(s.projectDir, s.config.Spec)
|
||||
if coreio.Local.IsFile(specPath) {
|
||||
return specPath, nil
|
||||
}
|
||||
return "", fmt.Errorf("sdk.DetectSpec: configured spec not found: %s", s.config.Spec)
|
||||
}
|
||||
|
||||
// 2. Check common paths
|
||||
for _, p := range commonSpecPaths {
|
||||
specPath := filepath.Join(s.projectDir, p)
|
||||
if coreio.Local.IsFile(specPath) {
|
||||
return specPath, nil
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Try Laravel Scramble detection
|
||||
specPath, err := s.detectScramble()
|
||||
if err == nil {
|
||||
return specPath, nil
|
||||
}
|
||||
|
||||
return "", errors.New("sdk.DetectSpec: no OpenAPI spec found (checked config, common paths, Scramble)")
|
||||
}
|
||||
|
||||
// detectScramble checks for Laravel Scramble and exports the spec.
|
||||
func (s *SDK) detectScramble() (string, error) {
|
||||
composerPath := filepath.Join(s.projectDir, "composer.json")
|
||||
if !coreio.Local.IsFile(composerPath) {
|
||||
return "", errors.New("no composer.json")
|
||||
}
|
||||
|
||||
// Check for scramble in composer.json
|
||||
data, err := coreio.Local.Read(composerPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Simple check for scramble package
|
||||
if !containsScramble(data) {
|
||||
return "", errors.New("scramble not found in composer.json")
|
||||
}
|
||||
|
||||
// TODO: Run php artisan scramble:export
|
||||
return "", errors.New("scramble export not implemented")
|
||||
}
|
||||
|
||||
// containsScramble reports whether a composer.json document references the
// Scramble package, either by its full vendor name ("dedoc/scramble") or as
// a bare quoted "scramble" key.
func containsScramble(content string) bool {
	for _, marker := range []string{"dedoc/scramble", `"scramble"`} {
		if strings.Contains(content, marker) {
			return true
		}
	}
	return false
}
|
||||
|
|
@ -1,87 +0,0 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDetectSpec_Good_ConfigPath(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
specPath := filepath.Join(tmpDir, "api", "spec.yaml")
|
||||
err := os.MkdirAll(filepath.Dir(specPath), 0755)
|
||||
require.NoError(t, err)
|
||||
err = os.WriteFile(specPath, []byte("openapi: 3.0.0"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
sdk := New(tmpDir, &Config{Spec: "api/spec.yaml"})
|
||||
got, err := sdk.DetectSpec()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, specPath, got)
|
||||
}
|
||||
|
||||
func TestDetectSpec_Good_CommonPath(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
specPath := filepath.Join(tmpDir, "openapi.yaml")
|
||||
err := os.WriteFile(specPath, []byte("openapi: 3.0.0"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
sdk := New(tmpDir, nil)
|
||||
got, err := sdk.DetectSpec()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, specPath, got)
|
||||
}
|
||||
|
||||
func TestDetectSpec_Bad_NotFound(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
sdk := New(tmpDir, nil)
|
||||
_, err := sdk.DetectSpec()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no OpenAPI spec found")
|
||||
}
|
||||
|
||||
func TestDetectSpec_Bad_ConfigNotFound(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
sdk := New(tmpDir, &Config{Spec: "non-existent.yaml"})
|
||||
_, err := sdk.DetectSpec()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "configured spec not found")
|
||||
}
|
||||
|
||||
func TestContainsScramble(t *testing.T) {
|
||||
tests := []struct {
|
||||
data string
|
||||
expected bool
|
||||
}{
|
||||
{`{"require": {"dedoc/scramble": "^0.1"}}`, true},
|
||||
{`{"require": {"scramble": "^0.1"}}`, true},
|
||||
{`{"require": {"laravel/framework": "^11.0"}}`, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
assert.Equal(t, tt.expected, containsScramble(tt.data))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectScramble_Bad(t *testing.T) {
|
||||
t.Run("no composer.json", func(t *testing.T) {
|
||||
sdk := New(t.TempDir(), nil)
|
||||
_, err := sdk.detectScramble()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no composer.json")
|
||||
})
|
||||
|
||||
t.Run("no scramble in composer.json", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "composer.json"), []byte(`{}`), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
sdk := New(tmpDir, nil)
|
||||
_, err = sdk.detectScramble()
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "scramble not found")
|
||||
})
|
||||
}
|
||||
83
sdk/diff.go
83
sdk/diff.go
|
|
@ -1,83 +0,0 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/getkin/kin-openapi/openapi3"
|
||||
"github.com/oasdiff/oasdiff/checker"
|
||||
"github.com/oasdiff/oasdiff/diff"
|
||||
"github.com/oasdiff/oasdiff/load"
|
||||
)
|
||||
|
||||
// DiffResult holds the result of comparing two OpenAPI specs.
type DiffResult struct {
	// Breaking is true if breaking changes were detected.
	Breaking bool
	// Changes is the list of breaking changes. Each entry is a
	// human-readable, uncolorized description produced by oasdiff.
	Changes []string
	// Summary is a human-readable summary: either
	// "N breaking change(s) detected" or "No breaking changes".
	Summary string
}
|
||||
|
||||
// Diff compares two OpenAPI specs and detects breaking changes.
|
||||
func Diff(basePath, revisionPath string) (*DiffResult, error) {
|
||||
loader := openapi3.NewLoader()
|
||||
loader.IsExternalRefsAllowed = true
|
||||
|
||||
// Load specs
|
||||
baseSpec, err := load.NewSpecInfo(loader, load.NewSource(basePath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sdk.Diff: failed to load base spec: %w", err)
|
||||
}
|
||||
|
||||
revSpec, err := load.NewSpecInfo(loader, load.NewSource(revisionPath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sdk.Diff: failed to load revision spec: %w", err)
|
||||
}
|
||||
|
||||
// Compute diff with operations sources map for better error reporting
|
||||
diffResult, operationsSources, err := diff.GetWithOperationsSourcesMap(diff.NewConfig(), baseSpec, revSpec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sdk.Diff: failed to compute diff: %w", err)
|
||||
}
|
||||
|
||||
// Check for breaking changes
|
||||
config := checker.NewConfig(checker.GetAllChecks())
|
||||
breaks := checker.CheckBackwardCompatibilityUntilLevel(
|
||||
config,
|
||||
diffResult,
|
||||
operationsSources,
|
||||
checker.ERR, // Only errors (breaking changes)
|
||||
)
|
||||
|
||||
// Build result
|
||||
result := &DiffResult{
|
||||
Breaking: len(breaks) > 0,
|
||||
Changes: make([]string, 0, len(breaks)),
|
||||
}
|
||||
|
||||
localizer := checker.NewDefaultLocalizer()
|
||||
for _, b := range breaks {
|
||||
result.Changes = append(result.Changes, b.GetUncolorizedText(localizer))
|
||||
}
|
||||
|
||||
if result.Breaking {
|
||||
result.Summary = fmt.Sprintf("%d breaking change(s) detected", len(breaks))
|
||||
} else {
|
||||
result.Summary = "No breaking changes"
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// DiffExitCode returns the exit code for CI integration.
|
||||
// 0 = no breaking changes, 1 = breaking changes, 2 = error
|
||||
func DiffExitCode(result *DiffResult, err error) int {
|
||||
if err != nil {
|
||||
return 2
|
||||
}
|
||||
if result.Breaking {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
101
sdk/diff_test.go
101
sdk/diff_test.go
|
|
@ -1,101 +0,0 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDiff_Good_NoBreaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
baseSpec := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revSpec := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.1.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/status:
|
||||
get:
|
||||
operationId: getStatus
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
_ = os.WriteFile(basePath, []byte(baseSpec), 0644)
|
||||
_ = os.WriteFile(revPath, []byte(revSpec), 0644)
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Diff failed: %v", err)
|
||||
}
|
||||
if result.Breaking {
|
||||
t.Error("expected no breaking changes for adding endpoint")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiff_Good_Breaking(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
baseSpec := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "1.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
/users:
|
||||
get:
|
||||
operationId: getUsers
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
revSpec := `openapi: "3.0.0"
|
||||
info:
|
||||
title: Test API
|
||||
version: "2.0.0"
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
operationId: getHealth
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
`
|
||||
basePath := filepath.Join(tmpDir, "base.yaml")
|
||||
revPath := filepath.Join(tmpDir, "rev.yaml")
|
||||
_ = os.WriteFile(basePath, []byte(baseSpec), 0644)
|
||||
_ = os.WriteFile(revPath, []byte(revSpec), 0644)
|
||||
|
||||
result, err := Diff(basePath, revPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Diff failed: %v", err)
|
||||
}
|
||||
if !result.Breaking {
|
||||
t.Error("expected breaking change for removed endpoint")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,337 +0,0 @@
|
|||
package sdk
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"forge.lthn.ai/core/go-devops/sdk/generators"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// --- SDK Generation Orchestration Tests ---
|
||||
|
||||
func TestSDK_Generate_Good_AllLanguages(t *testing.T) {
|
||||
t.Run("Generate iterates all configured languages", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create a minimal OpenAPI spec
|
||||
specPath := filepath.Join(tmpDir, "openapi.yaml")
|
||||
err := os.WriteFile(specPath, []byte(minimalSpec), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &Config{
|
||||
Spec: "openapi.yaml",
|
||||
Languages: []string{"nonexistent-lang"},
|
||||
Output: "sdk",
|
||||
Package: PackageConfig{
|
||||
Name: "testclient",
|
||||
Version: "1.0.0",
|
||||
},
|
||||
}
|
||||
s := New(tmpDir, cfg)
|
||||
s.SetVersion("v1.0.0")
|
||||
|
||||
// Generate should fail on unknown language
|
||||
err = s.Generate(context.Background())
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown language")
|
||||
})
|
||||
}
|
||||
|
||||
func TestSDK_GenerateLanguage_Good_OutputDir(t *testing.T) {
|
||||
t.Run("output directory uses language subdirectory", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
specPath := filepath.Join(tmpDir, "openapi.yaml")
|
||||
err := os.WriteFile(specPath, []byte(minimalSpec), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &Config{
|
||||
Spec: "openapi.yaml",
|
||||
Languages: []string{"typescript"},
|
||||
Output: "custom-sdk",
|
||||
Package: PackageConfig{
|
||||
Name: "my-client",
|
||||
Version: "2.0.0",
|
||||
},
|
||||
}
|
||||
s := New(tmpDir, cfg)
|
||||
s.SetVersion("v2.0.0")
|
||||
|
||||
// This will fail because generators aren't installed, but we can verify
|
||||
// the spec detection works correctly
|
||||
specResult, err := s.DetectSpec()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, specPath, specResult)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSDK_GenerateLanguage_Bad_NoSpec(t *testing.T) {
|
||||
t.Run("fails when no OpenAPI spec exists", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
s := New(tmpDir, &Config{
|
||||
Languages: []string{"typescript"},
|
||||
Output: "sdk",
|
||||
})
|
||||
|
||||
err := s.GenerateLanguage(context.Background(), "typescript")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no OpenAPI spec found")
|
||||
})
|
||||
}
|
||||
|
||||
func TestSDK_GenerateLanguage_Bad_UnknownLanguage(t *testing.T) {
|
||||
t.Run("fails for unregistered language", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
specPath := filepath.Join(tmpDir, "openapi.yaml")
|
||||
err := os.WriteFile(specPath, []byte(minimalSpec), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := New(tmpDir, nil)
|
||||
err = s.GenerateLanguage(context.Background(), "cobol")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown language: cobol")
|
||||
})
|
||||
}
|
||||
|
||||
// --- Generator Registry Tests ---
|
||||
|
||||
func TestRegistry_Good_RegisterAndGet(t *testing.T) {
|
||||
t.Run("register and retrieve all generators", func(t *testing.T) {
|
||||
registry := generators.NewRegistry()
|
||||
registry.Register(generators.NewTypeScriptGenerator())
|
||||
registry.Register(generators.NewPythonGenerator())
|
||||
registry.Register(generators.NewGoGenerator())
|
||||
registry.Register(generators.NewPHPGenerator())
|
||||
|
||||
// Verify all languages are registered
|
||||
languages := registry.Languages()
|
||||
assert.Len(t, languages, 4)
|
||||
assert.Contains(t, languages, "typescript")
|
||||
assert.Contains(t, languages, "python")
|
||||
assert.Contains(t, languages, "go")
|
||||
assert.Contains(t, languages, "php")
|
||||
|
||||
// Verify retrieval
|
||||
for _, lang := range []string{"typescript", "python", "go", "php"} {
|
||||
gen, ok := registry.Get(lang)
|
||||
assert.True(t, ok, "should find generator for %s", lang)
|
||||
assert.Equal(t, lang, gen.Language())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Get returns false for unregistered language", func(t *testing.T) {
|
||||
registry := generators.NewRegistry()
|
||||
gen, ok := registry.Get("rust")
|
||||
assert.False(t, ok)
|
||||
assert.Nil(t, gen)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRegistry_Good_OverwritesDuplicateLanguage(t *testing.T) {
|
||||
registry := generators.NewRegistry()
|
||||
registry.Register(generators.NewTypeScriptGenerator())
|
||||
registry.Register(generators.NewTypeScriptGenerator()) // register again
|
||||
|
||||
languages := registry.Languages()
|
||||
count := 0
|
||||
for _, lang := range languages {
|
||||
if lang == "typescript" {
|
||||
count++
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 1, count, "should have exactly one typescript entry")
|
||||
}
|
||||
|
||||
// --- Generator Interface Compliance Tests ---
|
||||
|
||||
func TestGenerators_Good_LanguageIdentifiers(t *testing.T) {
|
||||
tests := []struct {
|
||||
generator generators.Generator
|
||||
expected string
|
||||
}{
|
||||
{generators.NewTypeScriptGenerator(), "typescript"},
|
||||
{generators.NewPythonGenerator(), "python"},
|
||||
{generators.NewGoGenerator(), "go"},
|
||||
{generators.NewPHPGenerator(), "php"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.expected, func(t *testing.T) {
|
||||
assert.Equal(t, tc.expected, tc.generator.Language())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerators_Good_InstallInstructions(t *testing.T) {
|
||||
tests := []struct {
|
||||
language string
|
||||
gen generators.Generator
|
||||
contains string
|
||||
}{
|
||||
{"typescript", generators.NewTypeScriptGenerator(), "npm install"},
|
||||
{"python", generators.NewPythonGenerator(), "pip install"},
|
||||
{"go", generators.NewGoGenerator(), "go install"},
|
||||
{"php", generators.NewPHPGenerator(), "Docker"},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.language, func(t *testing.T) {
|
||||
instructions := tc.gen.Install()
|
||||
assert.NotEmpty(t, instructions)
|
||||
assert.Contains(t, instructions, tc.contains)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerators_Good_AvailableDoesNotPanic(t *testing.T) {
|
||||
// Available() should never panic regardless of system state
|
||||
gens := []generators.Generator{
|
||||
generators.NewTypeScriptGenerator(),
|
||||
generators.NewPythonGenerator(),
|
||||
generators.NewGoGenerator(),
|
||||
generators.NewPHPGenerator(),
|
||||
}
|
||||
|
||||
for _, gen := range gens {
|
||||
t.Run(gen.Language(), func(t *testing.T) {
|
||||
// Should not panic — result depends on system
|
||||
_ = gen.Available()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- SDK Config Tests ---
|
||||
|
||||
func TestSDKConfig_Good_DefaultConfig(t *testing.T) {
|
||||
t.Run("default config has all four languages", func(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.Contains(t, cfg.Languages, "typescript")
|
||||
assert.Contains(t, cfg.Languages, "python")
|
||||
assert.Contains(t, cfg.Languages, "go")
|
||||
assert.Contains(t, cfg.Languages, "php")
|
||||
assert.Len(t, cfg.Languages, 4)
|
||||
})
|
||||
|
||||
t.Run("default config enables diff", func(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.True(t, cfg.Diff.Enabled)
|
||||
assert.False(t, cfg.Diff.FailOnBreaking)
|
||||
})
|
||||
|
||||
t.Run("default config uses sdk/ output", func(t *testing.T) {
|
||||
cfg := DefaultConfig()
|
||||
assert.Equal(t, "sdk", cfg.Output)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSDKConfig_Good_SetVersion(t *testing.T) {
|
||||
t.Run("SetVersion updates both fields", func(t *testing.T) {
|
||||
s := New("/tmp", &Config{
|
||||
Package: PackageConfig{
|
||||
Name: "test",
|
||||
Version: "old",
|
||||
},
|
||||
})
|
||||
s.SetVersion("v3.0.0")
|
||||
|
||||
assert.Equal(t, "v3.0.0", s.version)
|
||||
assert.Equal(t, "v3.0.0", s.config.Package.Version)
|
||||
})
|
||||
|
||||
t.Run("SetVersion on nil config is safe", func(t *testing.T) {
|
||||
s := &SDK{}
|
||||
// Should not panic
|
||||
s.SetVersion("v1.0.0")
|
||||
assert.Equal(t, "v1.0.0", s.version)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSDKConfig_Good_NewWithNilConfig(t *testing.T) {
|
||||
s := New("/project", nil)
|
||||
assert.NotNil(t, s.config)
|
||||
assert.Equal(t, "sdk", s.config.Output)
|
||||
assert.True(t, s.config.Diff.Enabled)
|
||||
}
|
||||
|
||||
// --- Spec Detection Integration Tests ---
|
||||
|
||||
func TestSpecDetection_Good_Priority(t *testing.T) {
|
||||
t.Run("configured spec takes priority over common paths", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create both a common path spec and a configured spec
|
||||
commonSpec := filepath.Join(tmpDir, "openapi.yaml")
|
||||
err := os.WriteFile(commonSpec, []byte(minimalSpec), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
configuredSpec := filepath.Join(tmpDir, "custom", "api.yaml")
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(configuredSpec), 0755))
|
||||
err = os.WriteFile(configuredSpec, []byte(minimalSpec), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := New(tmpDir, &Config{Spec: "custom/api.yaml"})
|
||||
specPath, err := s.DetectSpec()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, configuredSpec, specPath)
|
||||
})
|
||||
|
||||
t.Run("common paths checked in order", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create the second common path only (api/openapi.yaml is first)
|
||||
apiDir := filepath.Join(tmpDir, "api")
|
||||
require.NoError(t, os.MkdirAll(apiDir, 0755))
|
||||
apiSpec := filepath.Join(apiDir, "openapi.json")
|
||||
err := os.WriteFile(apiSpec, []byte(`{"openapi":"3.0.0"}`), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := New(tmpDir, nil)
|
||||
specPath, err := s.DetectSpec()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, apiSpec, specPath)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSpecDetection_Good_AllCommonPaths(t *testing.T) {
|
||||
for _, commonPath := range commonSpecPaths {
|
||||
t.Run(commonPath, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
specPath := filepath.Join(tmpDir, commonPath)
|
||||
require.NoError(t, os.MkdirAll(filepath.Dir(specPath), 0755))
|
||||
err := os.WriteFile(specPath, []byte(minimalSpec), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := New(tmpDir, nil)
|
||||
detected, err := s.DetectSpec()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, specPath, detected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --- Compile-time interface checks ---
|
||||
|
||||
var _ generators.Generator = (*generators.TypeScriptGenerator)(nil)
|
||||
var _ generators.Generator = (*generators.PythonGenerator)(nil)
|
||||
var _ generators.Generator = (*generators.GoGenerator)(nil)
|
||||
var _ generators.Generator = (*generators.PHPGenerator)(nil)
|
||||
|
||||
// minimalSpec is a valid OpenAPI 3.0 spec used across tests.
// It declares a single GET /health operation (operationId getHealth) —
// the smallest real input that spec detection and generation can consume.
const minimalSpec = `openapi: "3.0.0"
info:
  title: Test API
  version: "1.0.0"
paths:
  /health:
    get:
      operationId: getHealth
      responses:
        "200":
          description: OK
`
|
||||
|
|
@ -1,90 +0,0 @@
|
|||
// Package generators provides SDK code generators for different languages.
|
||||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"iter"
|
||||
"maps"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// Options holds common generation options shared by every SDK generator.
type Options struct {
	// SpecPath is the path to the OpenAPI spec file to generate from.
	SpecPath string
	// OutputDir is the directory where the generated SDK is written.
	OutputDir string
	// PackageName is the package/module name used in the generated code.
	PackageName string
	// Version is the SDK version stamped into the generated package.
	Version string
}
|
||||
|
||||
// Generator defines the interface implemented by every SDK generator
// (TypeScript, Python, Go, PHP).
type Generator interface {
	// Language returns the generator's target language identifier
	// (e.g. "go", "php"); used as the registry key.
	Language() string

	// Generate creates an SDK from the OpenAPI spec described by opts.
	Generate(ctx context.Context, opts Options) error

	// Available reports whether the generator's dependencies are installed
	// on this machine.
	Available() bool

	// Install returns human-readable instructions for installing the
	// generator's dependencies.
	Install() string
}
|
||||
|
||||
// Registry holds available generators.
|
||||
type Registry struct {
|
||||
generators map[string]Generator
|
||||
}
|
||||
|
||||
// NewRegistry creates a registry with all available generators.
|
||||
func NewRegistry() *Registry {
|
||||
r := &Registry{
|
||||
generators: make(map[string]Generator),
|
||||
}
|
||||
// Generators will be registered in subsequent tasks
|
||||
return r
|
||||
}
|
||||
|
||||
// Get returns a generator by language.
|
||||
func (r *Registry) Get(lang string) (Generator, bool) {
|
||||
g, ok := r.generators[lang]
|
||||
return g, ok
|
||||
}
|
||||
|
||||
// Register adds a generator to the registry.
|
||||
func (r *Registry) Register(g Generator) {
|
||||
r.generators[g.Language()] = g
|
||||
}
|
||||
|
||||
// Languages returns all registered language identifiers.
|
||||
func (r *Registry) Languages() []string {
|
||||
return slices.Collect(r.LanguagesIter())
|
||||
}
|
||||
|
||||
// LanguagesIter returns an iterator for all registered language identifiers.
|
||||
func (r *Registry) LanguagesIter() iter.Seq[string] {
|
||||
return func(yield func(string) bool) {
|
||||
// Sort keys for deterministic iteration
|
||||
for _, lang := range slices.Sorted(maps.Keys(r.generators)) {
|
||||
if !yield(lang) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dockerUserArgs returns Docker --user arguments mapping the container
// user to the current Unix uid:gid, so files the container writes are
// not root-owned. On Windows, Docker handles permissions differently,
// so no args are returned.
func dockerUserArgs() []string {
	if runtime.GOOS == "windows" {
		return nil
	}
	uid, gid := os.Getuid(), os.Getgid()
	return []string{"--user", fmt.Sprintf("%d:%d", uid, gid)}
}
|
||||
|
|
@ -1,90 +0,0 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
"forge.lthn.ai/core/go-log"
|
||||
)
|
||||
|
||||
// GoGenerator generates Go SDKs from OpenAPI specs, preferring the
// native oapi-codegen binary when it is installed.
type GoGenerator struct{}

// NewGoGenerator creates a new Go generator.
func NewGoGenerator() *GoGenerator { return &GoGenerator{} }

// Language returns the generator's target language identifier.
func (g *GoGenerator) Language() string { return "go" }

// Available reports whether oapi-codegen is on PATH.
func (g *GoGenerator) Available() bool {
	if _, err := exec.LookPath("oapi-codegen"); err != nil {
		return false
	}
	return true
}

// Install returns instructions for installing the generator.
func (g *GoGenerator) Install() string {
	return "go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@latest"
}
|
||||
|
||||
// Generate creates SDK from OpenAPI spec.
|
||||
func (g *GoGenerator) Generate(ctx context.Context, opts Options) error {
|
||||
if err := coreio.Local.EnsureDir(opts.OutputDir); err != nil {
|
||||
return log.E("go.Generate", "failed to create output dir", err)
|
||||
}
|
||||
|
||||
if g.Available() {
|
||||
return g.generateNative(ctx, opts)
|
||||
}
|
||||
return g.generateDocker(ctx, opts)
|
||||
}
|
||||
|
||||
func (g *GoGenerator) generateNative(ctx context.Context, opts Options) error {
|
||||
outputFile := filepath.Join(opts.OutputDir, "client.go")
|
||||
|
||||
cmd := exec.CommandContext(ctx, "oapi-codegen",
|
||||
"-package", opts.PackageName,
|
||||
"-generate", "types,client",
|
||||
"-o", outputFile,
|
||||
opts.SpecPath,
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return log.E("go.generateNative", "oapi-codegen failed", err)
|
||||
}
|
||||
|
||||
goMod := fmt.Sprintf("module %s\n\ngo 1.21\n", opts.PackageName)
|
||||
return coreio.Local.Write(filepath.Join(opts.OutputDir, "go.mod"), goMod)
|
||||
}
|
||||
|
||||
func (g *GoGenerator) generateDocker(ctx context.Context, opts Options) error {
|
||||
specDir := filepath.Dir(opts.SpecPath)
|
||||
specName := filepath.Base(opts.SpecPath)
|
||||
|
||||
args := []string{"run", "--rm"}
|
||||
args = append(args, dockerUserArgs()...)
|
||||
args = append(args,
|
||||
"-v", specDir+":/spec",
|
||||
"-v", opts.OutputDir+":/out",
|
||||
"openapitools/openapi-generator-cli", "generate",
|
||||
"-i", "/spec/"+specName,
|
||||
"-g", "go",
|
||||
"-o", "/out",
|
||||
"--additional-properties=packageName="+opts.PackageName,
|
||||
)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGoGenerator_Good_Available(t *testing.T) {
|
||||
g := NewGoGenerator()
|
||||
|
||||
// These should not panic
|
||||
lang := g.Language()
|
||||
if lang != "go" {
|
||||
t.Errorf("expected language 'go', got '%s'", lang)
|
||||
}
|
||||
|
||||
_ = g.Available()
|
||||
|
||||
install := g.Install()
|
||||
if install == "" {
|
||||
t.Error("expected non-empty install instructions")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoGenerator_Good_Generate(t *testing.T) {
|
||||
g := NewGoGenerator()
|
||||
if !g.Available() && !dockerAvailable() {
|
||||
t.Skip("no Go generator available (neither native nor docker)")
|
||||
}
|
||||
|
||||
// Create temp directories
|
||||
tmpDir := t.TempDir()
|
||||
specPath := createTestSpec(t, tmpDir)
|
||||
outputDir := filepath.Join(tmpDir, "output")
|
||||
|
||||
opts := Options{
|
||||
SpecPath: specPath,
|
||||
OutputDir: outputDir,
|
||||
PackageName: "testclient",
|
||||
Version: "1.0.0",
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
err := g.Generate(ctx, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Generate failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify output directory was created
|
||||
if _, err := os.Stat(outputDir); os.IsNotExist(err) {
|
||||
t.Error("output directory was not created")
|
||||
}
|
||||
}
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
package generators
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
coreio "forge.lthn.ai/core/go-io"
|
||||
)
|
||||
|
||||
// PHPGenerator generates PHP SDKs from OpenAPI specs.
// PHP generation is Docker-only: it shells out to openapi-generator-cli.
type PHPGenerator struct{}

// NewPHPGenerator creates a new PHP generator.
func NewPHPGenerator() *PHPGenerator { return &PHPGenerator{} }

// Language returns the generator's target language identifier.
func (g *PHPGenerator) Language() string { return "php" }

// Available reports whether docker is on PATH.
func (g *PHPGenerator) Available() bool {
	if _, err := exec.LookPath("docker"); err != nil {
		return false
	}
	return true
}

// Install returns instructions for installing the generator.
func (g *PHPGenerator) Install() string {
	return "Docker is required for PHP SDK generation"
}
|
||||
|
||||
// Generate creates SDK from OpenAPI spec.
|
||||
func (g *PHPGenerator) Generate(ctx context.Context, opts Options) error {
|
||||
if !g.Available() {
|
||||
return errors.New("php.Generate: Docker is required but not available")
|
||||
}
|
||||
|
||||
if err := coreio.Local.EnsureDir(opts.OutputDir); err != nil {
|
||||
return fmt.Errorf("php.Generate: failed to create output dir: %w", err)
|
||||
}
|
||||
|
||||
specDir := filepath.Dir(opts.SpecPath)
|
||||
specName := filepath.Base(opts.SpecPath)
|
||||
|
||||
args := []string{"run", "--rm"}
|
||||
args = append(args, dockerUserArgs()...)
|
||||
args = append(args,
|
||||
"-v", specDir+":/spec",
|
||||
"-v", opts.OutputDir+":/out",
|
||||
"openapitools/openapi-generator-cli", "generate",
|
||||
"-i", "/spec/"+specName,
|
||||
"-g", "php",
|
||||
"-o", "/out",
|
||||
"--additional-properties=invokerPackage="+opts.PackageName,
|
||||
)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("php.Generate: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue