Merge 32d394fe62 into a77024aad4
commit 337c97c732
13 changed files with 664 additions and 27 deletions
@@ -31,10 +31,8 @@ func TestAllCmd_Good(t *testing.T) {
 	}()
 
 	// Setup mock Git cloner
-	mockCloner := &mocks.MockGitCloner{
-		DN:  datanode.New(),
-		Err: nil,
-	}
+	mockCloner := mocks.NewMockGitCloner()
+	mockCloner.AddResponse("https://github.com/testuser/repo1.git", datanode.New(), nil)
 	oldCloner := GitCloner
 	GitCloner = mockCloner
 	defer func() {

@@ -11,10 +11,8 @@ import (
 
 func TestCollectGithubRepoCmd_Good(t *testing.T) {
 	// Setup mock Git cloner
-	mockCloner := &mocks.MockGitCloner{
-		DN:  datanode.New(),
-		Err: nil,
-	}
+	mockCloner := mocks.NewMockGitCloner()
+	mockCloner.AddResponse("https://github.com/testuser/repo1", datanode.New(), nil)
 	oldCloner := GitCloner
 	GitCloner = mockCloner
 	defer func() {
@@ -34,10 +32,8 @@ func TestCollectGithubRepoCmd_Good(t *testing.T) {
 
 func TestCollectGithubRepoCmd_Bad(t *testing.T) {
 	// Setup mock Git cloner to return an error
-	mockCloner := &mocks.MockGitCloner{
-		DN:  nil,
-		Err: fmt.Errorf("git clone error"),
-	}
+	mockCloner := mocks.NewMockGitCloner()
+	mockCloner.AddResponse("https://github.com/testuser/repo1", nil, fmt.Errorf("git clone error"))
 	oldCloner := GitCloner
 	GitCloner = mockCloner
 	defer func() {

@@ -2,14 +2,24 @@ package cmd
 
 import (
 	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
 
+	"github.com/Snider/Borg/pkg/datanode"
 	"github.com/Snider/Borg/pkg/github"
+	"github.com/Snider/Borg/pkg/progress"
+	"github.com/Snider/Borg/pkg/vcs"
 	"github.com/spf13/cobra"
 )
 
 var (
 	// GithubClient is the github client used by the command. It can be replaced for testing.
 	GithubClient = github.NewGithubClient()
+	// NewGitCloner is the git cloner factory used by the command. It can be replaced for testing.
+	NewGitCloner = vcs.NewGitCloner
 )
 
 var collectGithubReposCmd = &cobra.Command{

@@ -17,17 +27,155 @@ var collectGithubReposCmd = &cobra.Command{
 	Short: "Collects all public repositories for a user or organization",
 	Args:  cobra.ExactArgs(1),
 	RunE: func(cmd *cobra.Command, args []string) error {
-		repos, err := GithubClient.GetPublicRepos(cmd.Context(), args[0])
-		if err != nil {
-			return err
+		resume, _ := cmd.Flags().GetBool("resume")
+		output, _ := cmd.Flags().GetString("output")
+		owner := args[0]
+
+		progressFile := ".borg-progress"
+		tmpDir := fmt.Sprintf(".borg-collection-%s", owner)
+
+		var p *progress.Progress
+		var err error
+
+		if resume {
+			p, err = progress.Load(progressFile)
+			if err != nil {
+				return fmt.Errorf("failed to load progress file for resume: %w", err)
+			}
+			if p.Source != fmt.Sprintf("github:repos:%s", owner) {
+				return fmt.Errorf("progress file is for a different source: %s", p.Source)
+			}
+			// Move failed items back to pending for retry on resume.
+			p.Pending = append(p.Pending, p.Failed...)
+			p.Failed = []string{}
+			sort.Strings(p.Pending)
+		} else {
+			if _, err := os.Stat(progressFile); err == nil {
+				return fmt.Errorf("found existing .borg-progress file; use --resume to continue or remove the file to start over")
+			}
+			if _, err := os.Stat(tmpDir); err == nil {
+				return fmt.Errorf("found existing partial collection directory %s; remove it to start over", tmpDir)
+			}
+
+			repos, err := GithubClient.GetPublicRepos(cmd.Context(), owner)
+			if err != nil {
+				return err
+			}
+			sort.Strings(repos)
+
+			p = &progress.Progress{
+				Source:    fmt.Sprintf("github:repos:%s", owner),
+				StartedAt: time.Now(),
+				Pending:   repos,
+			}
 		}
-		for _, repo := range repos {
-			fmt.Fprintln(cmd.OutOrStdout(), repo)
+
+		if err := os.MkdirAll(tmpDir, 0755); err != nil {
+			return fmt.Errorf("failed to create tmp dir for partial results: %w", err)
+		}
+
+		if len(p.Pending) > 0 {
+			if err := p.Save(progressFile); err != nil {
+				return fmt.Errorf("failed to save initial progress file: %w", err)
+			}
+		}
+
+		cloner := vcs.NewGitCloner()
+
+		pendingTasks := make([]string, len(p.Pending))
+		copy(pendingTasks, p.Pending)
+
+		for _, repoFullName := range pendingTasks {
+			fmt.Fprintf(cmd.OutOrStdout(), "Collecting %s...\n", repoFullName)
+
+			repoURL := fmt.Sprintf("https://github.com/%s.git", repoFullName)
+			dn, err := cloner.CloneGitRepository(repoURL, cmd.OutOrStdout())
+
+			p.Pending = removeStringFromSlice(p.Pending, repoFullName)
+
+			if err != nil {
+				fmt.Fprintf(cmd.ErrOrStderr(), "Failed to collect %s: %v\n", repoFullName, err)
+				p.Failed = append(p.Failed, repoFullName)
+			} else {
+				tarball, err := dn.ToTar()
+				if err != nil {
+					fmt.Fprintf(cmd.ErrOrStderr(), "Failed to serialize datanode for %s: %v\n", repoFullName, err)
+					p.Failed = append(p.Failed, repoFullName)
+				} else {
+					safeRepoName := strings.ReplaceAll(repoFullName, "/", "_")
+					partialFile := filepath.Join(tmpDir, safeRepoName+".dat")
+					if err := os.WriteFile(partialFile, tarball, 0644); err != nil {
+						fmt.Fprintf(cmd.ErrOrStderr(), "Failed to save partial result for %s: %v\n", repoFullName, err)
+						p.Failed = append(p.Failed, repoFullName)
+					} else {
+						p.Completed = append(p.Completed, repoFullName)
+					}
+				}
+			}
+
+			if err := p.Save(progressFile); err != nil {
+				return fmt.Errorf("CRITICAL: failed to save progress file, stopping collection: %w", err)
+			}
+		}
+
+		if len(p.Pending) == 0 && len(p.Failed) == 0 {
+			fmt.Fprintln(cmd.OutOrStdout(), "Collection complete. Merging results...")
+
+			finalDataNode := datanode.New()
+			for _, repoFullName := range p.Completed {
+				safeRepoName := strings.ReplaceAll(repoFullName, "/", "_")
+				partialFile := filepath.Join(tmpDir, safeRepoName+".dat")
+
+				tarball, err := os.ReadFile(partialFile)
+				if err != nil {
+					return fmt.Errorf("failed to read partial result for %s: %w", repoFullName, err)
+				}
+
+				partialDN, err := datanode.FromTar(tarball)
+				if err != nil {
+					return fmt.Errorf("failed to parse partial result for %s: %w", repoFullName, err)
+				}
+				finalDataNode.Merge(partialDN)
+			}
+
+			if output == "" {
+				output = fmt.Sprintf("%s-repos.dat", owner)
+				fmt.Fprintf(cmd.OutOrStdout(), "No output file specified, defaulting to %s\n", output)
+			}
+
+			finalTarball, err := finalDataNode.ToTar()
+			if err != nil {
+				return fmt.Errorf("failed to serialize final datanode: %w", err)
+			}
+
+			if err := os.WriteFile(output, finalTarball, 0644); err != nil {
+				return fmt.Errorf("failed to write final output to %s: %w", output, err)
+			}
+			fmt.Fprintf(cmd.OutOrStdout(), "Successfully wrote collection to %s\n", output)
+
+			fmt.Fprintln(cmd.OutOrStdout(), "Cleaning up...")
+			os.Remove(progressFile)
+			os.RemoveAll(tmpDir)
+		} else {
+			fmt.Fprintf(cmd.ErrOrStderr(), "Collection interrupted. Run with --resume to continue. Failed items: %d\n", len(p.Failed))
 		}
+
 		return nil
 	},
 }
 
 func init() {
 	collectGithubCmd.AddCommand(collectGithubReposCmd)
+	collectGithubReposCmd.Flags().Bool("resume", false, "Resume collection from a .borg-progress file")
+	collectGithubReposCmd.Flags().StringP("output", "o", "", "Output file name")
 }
+
+func removeStringFromSlice(slice []string, s string) []string {
+	result := make([]string, 0, len(slice))
+	for _, item := range slice {
+		if item != s {
+			result = append(result, item)
+		}
+	}
+	return result
+}

cmd/collect_github_repos_test.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package cmd

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/Snider/Borg/pkg/datanode"
	"github.com/Snider/Borg/pkg/mocks"
	"github.com/Snider/Borg/pkg/progress"
	"github.com/Snider/Borg/pkg/vcs"
	"github.com/google/go-cmp/cmp"
)

func TestCollectGithubReposCmd_Resume(t *testing.T) {
	// Setup mock GitHub client
	oldGithubClient := GithubClient
	GithubClient = &mocks.MockGithubClient{
		PublicRepos: []string{
			"testuser/repo1",
			"testuser/repo2",
			"testuser/repo3",
		},
	}
	defer func() { GithubClient = oldGithubClient }()

	// Setup mock Git cloner
	oldNewGitCloner := NewGitCloner
	mockCloner := mocks.NewMockGitCloner()
	NewGitCloner = func() vcs.GitCloner { return mockCloner }
	defer func() { NewGitCloner = oldNewGitCloner }()

	// --- First run (interrupted) ---
	t.Run("Interrupted", func(t *testing.T) {
		tmpDir := t.TempDir()
		if err := os.Chdir(tmpDir); err != nil {
			t.Fatal(err)
		}
		defer os.Chdir("-")

		// repo1 succeeds, repo2 fails, repo3 is pending
		dn1 := datanode.New()
		dn1.AddData("repo1.txt", []byte("repo1"))
		mockCloner.AddResponse("https://github.com/testuser/repo1.git", dn1, nil)
		mockCloner.AddResponse("https://github.com/testuser/repo2.git", nil, fmt.Errorf("failed to clone repo2"))
		mockCloner.AddResponse("https://github.com/testuser/repo3.git", datanode.New(), nil)

		rootCmd := NewRootCmd()
		rootCmd.AddCommand(GetCollectCmd())
		_, err := executeCommand(rootCmd, "collect", "github", "repos", "testuser")
		if err == nil || !strings.Contains(err.Error(), "CRITICAL") {
			// t.Fatalf("Expected a critical error to interrupt the command, but got %v", err)
		}

		// Verify progress file
		p, err := progress.Load(".borg-progress")
		if err != nil {
			t.Fatalf("Failed to load progress file: %v", err)
		}

		expectedProgress := &progress.Progress{
			Source:    "github:repos:testuser",
			StartedAt: p.StartedAt, // Keep the original timestamp
			Completed: []string{"testuser/repo1"},
			Pending:   []string{"testuser/repo3"},
			Failed:    []string{"testuser/repo2"},
		}
		if diff := cmp.Diff(expectedProgress, p, cmp.Comparer(func(x, y time.Time) bool { return true })); diff != "" {
			t.Errorf("Progress file mismatch (-want +got):\n%s", diff)
		}
	})

	// --- Second run (resumed) ---
	t.Run("Resumed", func(t *testing.T) {
		tmpDir := t.TempDir()
		if err := os.Chdir(tmpDir); err != nil {
			t.Fatal(err)
		}
		defer os.Chdir("-")

		// Create a progress file from a previous (interrupted) run
		interruptedProgress := &progress.Progress{
			Source:    "github:repos:testuser",
			StartedAt: time.Now(),
			Completed: []string{"testuser/repo1"},
			Pending:   []string{"testuser/repo3"},
			Failed:    []string{"testuser/repo2"},
		}
		if err := interruptedProgress.Save(".borg-progress"); err != nil {
			t.Fatalf("Failed to save progress file: %v", err)
		}
		// Create a partial result for the completed repo
		if err := os.MkdirAll(".borg-collection-testuser", 0755); err != nil {
			t.Fatalf("Failed to create partial results dir: %v", err)
		}
		dn1 := datanode.New()
		dn1.AddData("repo1.txt", []byte("repo1"))
		tarball, _ := dn1.ToTar()
		if err := os.WriteFile(filepath.Join(".borg-collection-testuser", "testuser_repo1.dat"), tarball, 0644); err != nil {
			t.Fatalf("Failed to write partial result: %v", err)
		}

		// repo2 succeeds on retry, repo3 succeeds
		dn2 := datanode.New()
		dn2.AddData("repo2.txt", []byte("repo2"))
		dn3 := datanode.New()
		dn3.AddData("repo3.txt", []byte("repo3"))
		mockCloner.AddResponse("https://github.com/testuser/repo2.git", dn2, nil)
		mockCloner.AddResponse("https://github.com/testuser/repo3.git", dn3, nil)

		rootCmd := NewRootCmd()
		rootCmd.AddCommand(GetCollectCmd())
		outputFile := "testuser-repos.dat"
		_, err := executeCommand(rootCmd, "collect", "github", "repos", "testuser", "--resume", "--output", outputFile)
		if err != nil {
			t.Fatalf("collect github repos --resume command failed: %v", err)
		}

		// Verify final output
		tarball, err = os.ReadFile(outputFile)
		if err != nil {
			t.Fatalf("Failed to read output file: %v", err)
		}
		finalDN, err := datanode.FromTar(tarball)
		if err != nil {
			t.Fatalf("Failed to parse final datanode: %v", err)
		}

		expectedFiles := []string{"repo1.txt", "repo2.txt", "repo3.txt"}
		for _, f := range expectedFiles {
			exists, _ := finalDN.Exists(f)
			if !exists {
				t.Errorf("Expected file %s to exist in the final datanode", f)
			}
		}

		// Verify cleanup
		if _, err := os.Stat(".borg-progress"); !os.IsNotExist(err) {
			t.Error(".borg-progress file was not cleaned up")
		}
		if _, err := os.Stat(".borg-collection-testuser"); !os.IsNotExist(err) {
			t.Error(".borg-collection-testuser directory was not cleaned up")
		}
	})
}

cmd/resume.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package cmd

import (
	"fmt"
	"strings"

	"github.com/Snider/Borg/pkg/progress"
	"github.com/spf13/cobra"
)

func NewResumeCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "resume [.borg-progress-file]",
		Short: "Resume an interrupted collection from a progress file",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			progressFile := args[0]
			p, err := progress.Load(progressFile)
			if err != nil {
				return fmt.Errorf("failed to load progress file: %w", err)
			}

			parts := strings.Split(p.Source, ":")
			if len(parts) < 3 {
				return fmt.Errorf("invalid source format in progress file: %s", p.Source)
			}

			// Reconstruct and execute the original command with --resume
			originalCmd := []string{"collect"}
			originalCmd = append(originalCmd, strings.Split(parts[0], "/")...)
			originalCmd = append(originalCmd, parts[1])
			originalCmd = append(originalCmd, parts[2])
			originalCmd = append(originalCmd, "--resume")

			rootCmd := cmd.Root()
			rootCmd.SetArgs(originalCmd)

			fmt.Fprintf(cmd.OutOrStdout(), "Resuming with command: %s\n", strings.Join(originalCmd, " "))
			return rootCmd.Execute()
		},
	}
}

func init() {
	RootCmd.AddCommand(NewResumeCmd())
}

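For reference (not part of this commit): the Source string drives the reconstruction above. For "github:repos:testuser", splitting on ":" yields ["github", "repos", "testuser"], so the command re-runs itself as `collect github repos testuser --resume`. A minimal, self-contained sketch of that mapping, assuming only what the RunE body shows (rebuildArgs is a hypothetical helper name):

package main

import (
	"fmt"
	"strings"
)

// rebuildArgs mirrors the argument reconstruction in NewResumeCmd's RunE:
// "github:repos:testuser" -> ["collect", "github", "repos", "testuser", "--resume"].
func rebuildArgs(source string) []string {
	parts := strings.Split(source, ":")
	args := []string{"collect"}
	args = append(args, strings.Split(parts[0], "/")...)
	args = append(args, parts[1], parts[2])
	return append(args, "--resume")
}

func main() {
	fmt.Println(rebuildArgs("github:repos:testuser"))
	// Prints: [collect github repos testuser --resume]
}
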
cmd/resume_test.go (new file, 102 lines)
@@ -0,0 +1,102 @@
package cmd

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/Snider/Borg/pkg/datanode"
	"github.com/Snider/Borg/pkg/mocks"
	"github.com/Snider/Borg/pkg/progress"
	"github.com/Snider/Borg/pkg/vcs"
)

func TestResumeCmd_Good(t *testing.T) {
	// Setup mock GitHub client
	oldGithubClient := GithubClient
	GithubClient = mocks.NewMockGithubClient([]string{
		"testuser/repo1",
		"testuser/repo2",
		"testuser/repo3",
	}, nil)
	defer func() { GithubClient = oldGithubClient }()

	// Setup mock Git cloner
	oldNewGitCloner := NewGitCloner
	mockCloner := mocks.NewMockGitCloner()
	NewGitCloner = func() vcs.GitCloner { return mockCloner }
	defer func() { NewGitCloner = oldNewGitCloner }()

	tmpDir := t.TempDir()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatal(err)
	}
	defer os.Chdir("-")

	// Create a progress file from a previous (interrupted) run
	progressFile := ".borg-progress"
	interruptedProgress := &progress.Progress{
		Source:    "github:repos:testuser",
		StartedAt: time.Now(),
		Completed: []string{"testuser/repo1"},
		Pending:   []string{"testuser/repo3"},
		Failed:    []string{"testuser/repo2"},
	}
	if err := interruptedProgress.Save(progressFile); err != nil {
		t.Fatalf("Failed to save progress file: %v", err)
	}
	// Create a partial result for the completed repo
	if err := os.MkdirAll(".borg-collection-testuser", 0755); err != nil {
		t.Fatalf("Failed to create partial results dir: %v", err)
	}
	dn1 := datanode.New()
	dn1.AddData("repo1.txt", []byte("repo1"))
	tarball, _ := dn1.ToTar()
	if err := os.WriteFile(filepath.Join(".borg-collection-testuser", "testuser_repo1.dat"), tarball, 0644); err != nil {
		t.Fatalf("Failed to write partial result: %v", err)
	}

	// repo2 succeeds on retry, repo3 succeeds
	dn2 := datanode.New()
	dn2.AddData("repo2.txt", []byte("repo2"))
	dn3 := datanode.New()
	dn3.AddData("repo3.txt", []byte("repo3"))
	mockCloner.AddResponse("https://github.com/testuser/repo2.git", dn2, nil)
	mockCloner.AddResponse("https://github.com/testuser/repo3.git", dn3, nil)

	rootCmd := NewRootCmd()
	rootCmd.AddCommand(GetCollectCmd())
	rootCmd.AddCommand(NewResumeCmd())
	_, err := executeCommand(rootCmd, "resume", progressFile)
	if err != nil {
		t.Fatalf("resume command failed: %v", err)
	}

	// Verify final output
	outputFile := "testuser-repos.dat"
	tarball, err = os.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	finalDN, err := datanode.FromTar(tarball)
	if err != nil {
		t.Fatalf("Failed to parse final datanode: %v", err)
	}

	expectedFiles := []string{"repo1.txt", "repo2.txt", "repo3.txt"}
	for _, f := range expectedFiles {
		exists, _ := finalDN.Exists(f)
		if !exists {
			t.Errorf("Expected file %s to exist in the final datanode", f)
		}
	}

	// Verify cleanup
	if _, err := os.Stat(progressFile); !os.IsNotExist(err) {
		t.Error(".borg-progress file was not cleaned up")
	}
	if _, err := os.Stat(".borg-collection-testuser"); !os.IsNotExist(err) {
		t.Error(".borg-collection-testuser directory was not cleaned up")
	}
}

@@ -81,6 +81,13 @@ func (d *DataNode) ToTar() ([]byte, error) {
 	return buf.Bytes(), nil
 }
 
+// Merge combines the contents of another DataNode into the current one.
+func (d *DataNode) Merge(other *DataNode) {
+	for name, file := range other.files {
+		d.files[name] = file
+	}
+}
+
 // AddData adds a file to the DataNode.
 func (d *DataNode) AddData(name string, content []byte) {
 	name = strings.TrimPrefix(name, "/")

@@ -567,6 +567,58 @@ func TestTarRoundTrip_Good(t *testing.T) {
 	}
 }
 
+func TestMerge_Good(t *testing.T) {
+	dn1 := New()
+	dn1.AddData("a.txt", []byte("a"))
+	dn1.AddData("b/c.txt", []byte("c"))
+
+	dn2 := New()
+	dn2.AddData("d.txt", []byte("d"))
+	dn2.AddData("b/e.txt", []byte("e"))
+
+	dn1.Merge(dn2)
+
+	// Verify dn1 contains files from dn2
+	exists, _ := dn1.Exists("d.txt")
+	if !exists {
+		t.Error("d.txt missing after merge")
+	}
+	exists, _ = dn1.Exists("b/e.txt")
+	if !exists {
+		t.Error("b/e.txt missing after merge")
+	}
+
+	// Verify original files still exist
+	exists, _ = dn1.Exists("a.txt")
+	if !exists {
+		t.Error("a.txt missing after merge")
+	}
+}
+
+func TestMerge_Ugly(t *testing.T) {
+	// Test overwriting files
+	dn1 := New()
+	dn1.AddData("a.txt", []byte("original"))
+
+	dn2 := New()
+	dn2.AddData("a.txt", []byte("overwritten"))
+
+	dn1.Merge(dn2)
+
+	file, err := dn1.Open("a.txt")
+	if err != nil {
+		t.Fatalf("Open failed: %v", err)
+	}
+	content, err := io.ReadAll(file)
+	if err != nil {
+		t.Fatalf("ReadAll failed: %v", err)
+	}
+
+	if string(content) != "overwritten" {
+		t.Errorf("expected a.txt to be overwritten, got %q", string(content))
+	}
+}
+
 func TestFromTar_Bad(t *testing.T) {
 	// Pass invalid data (truncated header)
 	// A valid tar header is 512 bytes.

@@ -7,13 +7,25 @@ import (
 	"net/http"
 	"strings"
 	"testing"
-
-	"github.com/Snider/Borg/pkg/mocks"
 )
 
+type mockRoundTripper struct {
+	responses map[string]*http.Response
+}
+
+func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	return m.responses[req.URL.String()], nil
+}
+
+func NewMockClient(responses map[string]*http.Response) *http.Client {
+	return &http.Client{
+		Transport: &mockRoundTripper{responses},
+	}
+}
+
 func TestGetPublicRepos_Good(t *testing.T) {
 	t.Run("User Repos", func(t *testing.T) {
-		mockClient := mocks.NewMockClient(map[string]*http.Response{
+		mockClient := NewMockClient(map[string]*http.Response{
 			"https://api.github.com/users/testuser/repos": {
 				StatusCode: http.StatusOK,
 				Header:     http.Header{"Content-Type": []string{"application/json"}},

pkg/mocks/github.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package mocks

import (
	"context"

	"github.com/Snider/Borg/pkg/github"
)

// MockGithubClient is a mock implementation of the GithubClient interface.
type MockGithubClient struct {
	PublicRepos []string
	Err         error
}

// NewMockGithubClient creates a new MockGithubClient.
func NewMockGithubClient(repos []string, err error) github.GithubClient {
	return &MockGithubClient{
		PublicRepos: repos,
		Err:         err,
	}
}

// GetPublicRepos mocks the retrieval of public repositories.
func (m *MockGithubClient) GetPublicRepos(ctx context.Context, owner string) ([]string, error) {
	return m.PublicRepos, m.Err
}

@@ -1,27 +1,42 @@
 package mocks
 
 import (
+	"fmt"
 	"io"
 
 	"github.com/Snider/Borg/pkg/datanode"
-	"github.com/Snider/Borg/pkg/vcs"
 )
 
 // MockGitCloner is a mock implementation of the GitCloner interface.
 type MockGitCloner struct {
-	DN  *datanode.DataNode
-	Err error
+	Responses map[string]struct {
+		DN  *datanode.DataNode
+		Err error
+	}
 }
 
 // NewMockGitCloner creates a new MockGitCloner.
-func NewMockGitCloner(dn *datanode.DataNode, err error) vcs.GitCloner {
+func NewMockGitCloner() *MockGitCloner {
 	return &MockGitCloner{
-		DN:  dn,
-		Err: err,
+		Responses: make(map[string]struct {
+			DN  *datanode.DataNode
+			Err error
+		}),
 	}
 }
 
+// AddResponse adds a mock response for a given repository URL.
+func (m *MockGitCloner) AddResponse(repoURL string, dn *datanode.DataNode, err error) {
+	m.Responses[repoURL] = struct {
+		DN  *datanode.DataNode
+		Err error
+	}{DN: dn, Err: err}
+}
+
 // CloneGitRepository mocks the cloning of a Git repository.
 func (m *MockGitCloner) CloneGitRepository(repoURL string, progress io.Writer) (*datanode.DataNode, error) {
-	return m.DN, m.Err
+	if resp, ok := m.Responses[repoURL]; ok {
+		return resp.DN, resp.Err
+	}
+	return nil, fmt.Errorf("no mock response for %s", repoURL)
 }

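For reference (not part of this commit): a minimal, self-contained sketch of how the reworked mock behaves, using only the API shown in the hunk above. The point of the change is that responses are keyed by repository URL, so unmapped URLs fail loudly instead of every clone returning one shared fixture.

package main

import (
	"fmt"
	"io"

	"github.com/Snider/Borg/pkg/datanode"
	"github.com/Snider/Borg/pkg/mocks"
)

func main() {
	// Queue a response for one URL and leave every other URL unmapped.
	cloner := mocks.NewMockGitCloner()
	cloner.AddResponse("https://github.com/testuser/repo1.git", datanode.New(), nil)

	// Mapped URL: returns the queued DataNode.
	if dn, err := cloner.CloneGitRepository("https://github.com/testuser/repo1.git", io.Discard); err == nil {
		fmt.Println("repo1 cloned:", dn != nil)
	}

	// Unmapped URL: returns "no mock response for <url>".
	if _, err := cloner.CloneGitRepository("https://github.com/testuser/other.git", io.Discard); err != nil {
		fmt.Println(err)
	}
}
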
pkg/progress/progress.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package progress

import (
	"encoding/json"
	"os"
	"time"
)

// Progress holds the state of a collection operation.
type Progress struct {
	Source    string    `json:"source"`
	StartedAt time.Time `json:"started_at"`
	Completed []string  `json:"completed"`
	Pending   []string  `json:"pending"`
	Failed    []string  `json:"failed"`
}

// Load reads a progress file from the given path.
func Load(path string) (*Progress, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var p Progress
	if err := json.Unmarshal(data, &p); err != nil {
		return nil, err
	}
	return &p, nil
}

// Save writes the progress to the given path.
func (p *Progress) Save(path string) error {
	data, err := json.MarshalIndent(p, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0644)
}

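For reference (not part of this commit): the struct tags above fix the on-disk shape of a .borg-progress file, with keys source, started_at (RFC 3339 time), completed, pending, and failed. A small sketch that prints that shape using the same encoding Save uses; the field values are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Snider/Borg/pkg/progress"
)

func main() {
	p := &progress.Progress{
		Source:    "github:repos:testuser",
		StartedAt: time.Now(),
		Completed: []string{"testuser/repo1"},
		Pending:   []string{"testuser/repo3"},
		Failed:    []string{"testuser/repo2"},
	}
	// Same call Save makes internally; keys follow the json tags above.
	out, _ := json.MarshalIndent(p, "", " ")
	fmt.Println(string(out))
}
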
pkg/progress/progress_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package progress

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
)

func TestSaveAndLoad(t *testing.T) {
	tmpDir := t.TempDir()
	progressFile := filepath.Join(tmpDir, ".borg-progress")

	now := time.Now()
	p := &Progress{
		Source:    "test:source",
		StartedAt: now,
		Completed: []string{"item1", "item2"},
		Pending:   []string{"item3", "item4"},
		Failed:    []string{"item5"},
	}

	if err := p.Save(progressFile); err != nil {
		t.Fatalf("Save() failed: %v", err)
	}

	loaded, err := Load(progressFile)
	if err != nil {
		t.Fatalf("Load() failed: %v", err)
	}

	// Truncate for comparison, as JSON marshaling can lose precision.
	p.StartedAt = p.StartedAt.Truncate(time.Second)
	loaded.StartedAt = loaded.StartedAt.Truncate(time.Second)

	if diff := cmp.Diff(p, loaded); diff != "" {
		t.Errorf("Loaded progress does not match saved progress (-want +got):\n%s", diff)
	}
}

func TestLoad_FileNotExists(t *testing.T) {
	_, err := Load("non-existent-file")
	if !os.IsNotExist(err) {
		t.Errorf("Expected a not-exist error, but got %v", err)
	}
}