feat: S3/cloud storage backend
This commit introduces a new cloud storage backend with support for S3 and S3-compatible services. Key changes: - Created a `storage` package with a `Storage` interface. - Implemented an S3 backend with multipart uploads and custom endpoint support. - Added `push`, `pull`, `ls`, and `remote add` commands. - Integrated cloud storage with the `collect` command, enabling data streaming. - Added unit and integration tests for the new functionality. Note: The `MockStorage` test helper is duplicated across several test files. An attempt to centralize it was blocked by technical issues I encountered during the refactoring process. This refactoring is left for a future commit. Co-authored-by: Snider <631881+Snider@users.noreply.github.com>
This commit is contained in:
parent
cf2af53ed3
commit
3ac7b365ef
17 changed files with 883 additions and 20 deletions
|
|
@ -3,9 +3,11 @@ package cmd
|
|||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/Snider/Borg/pkg/compress"
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
"github.com/Snider/Borg/pkg/tim"
|
||||
"github.com/Snider/Borg/pkg/trix"
|
||||
"github.com/Snider/Borg/pkg/ui"
|
||||
|
|
@ -93,11 +95,6 @@ func NewCollectGithubRepoCmd() *cobra.Command {
|
|||
}
|
||||
}
|
||||
|
||||
compressedData, err := compress.Compress(data, compression)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error compressing data: %w", err)
|
||||
}
|
||||
|
||||
if outputFile == "" {
|
||||
outputFile = "repo." + format
|
||||
if compression != "none" {
|
||||
|
|
@ -105,9 +102,48 @@ func NewCollectGithubRepoCmd() *cobra.Command {
|
|||
}
|
||||
}
|
||||
|
||||
err = os.WriteFile(outputFile, compressedData, defaultFilePermission)
|
||||
u, err := url.Parse(outputFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing DataNode to file: %w", err)
|
||||
return fmt.Errorf("invalid output URL: %w", err)
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
|
||||
go func() {
|
||||
defer pw.Close()
|
||||
compressWriter, err := compress.NewWriter(pw, compression)
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("error creating compress writer: %w", err))
|
||||
return
|
||||
}
|
||||
defer compressWriter.Close()
|
||||
_, err = compressWriter.Write(data)
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("error writing compressed data: %w", err))
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "" || u.Scheme == "file" {
|
||||
f, err := os.Create(outputFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(f, pr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to file: %w", err)
|
||||
}
|
||||
} else {
|
||||
s, err := storage.NewStorage(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.Write(u.Path, pr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error uploading file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(cmd.OutOrStdout(), "Repository saved to", outputFile)
|
||||
|
|
|
|||
45
cmd/ls.go
Normal file
45
cmd/ls.go
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewLsCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "ls [remote-url]",
|
||||
Short: "List the contents of a remote storage path",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
remoteURL := args[0]
|
||||
|
||||
u, err := url.Parse(remoteURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid remote URL: %w", err)
|
||||
}
|
||||
|
||||
s, err := storage.NewStorage(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
paths, err := s.List(u.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing contents: %w", err)
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
fmt.Fprintln(cmd.OutOrStdout(), path)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(NewLsCmd())
|
||||
}
|
||||
36
cmd/ls_test.go
Normal file
36
cmd/ls_test.go
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
)
|
||||
|
||||
func TestLsCmd(t *testing.T) {
|
||||
// Mock the storage backend
|
||||
storage.NewStorage = func(u *url.URL) (storage.Storage, error) {
|
||||
return &MockStorage{
|
||||
ListFunc: func(path string) ([]string, error) {
|
||||
if path != "/remote/path" {
|
||||
t.Errorf("expected path '/remote/path', got '%s'", path)
|
||||
}
|
||||
return []string{"file1.txt", "file2.txt"}, nil
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Execute the ls command
|
||||
root := NewRootCmd()
|
||||
root.AddCommand(NewLsCmd())
|
||||
output, err := executeCommand(root, "ls", "mock://bucket/remote/path")
|
||||
if err != nil {
|
||||
t.Fatalf("ls command failed: %v", err)
|
||||
}
|
||||
|
||||
// Assertions
|
||||
expectedOutput := "file1.txt\nfile2.txt\n"
|
||||
if output != expectedOutput {
|
||||
t.Errorf("expected output '%s', got '%s'", expectedOutput, output)
|
||||
}
|
||||
}
|
||||
51
cmd/main_test.go
Normal file
51
cmd/main_test.go
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
)
|
||||
|
||||
// MockStorage is a mock implementation of the Storage interface for testing.
|
||||
type MockStorage struct {
|
||||
WriteFunc func(path string, data io.Reader) error
|
||||
ReadFunc func(path string) (io.ReadCloser, error)
|
||||
ListFunc func(path string) ([]string, error)
|
||||
}
|
||||
|
||||
func (m *MockStorage) Write(path string, data io.Reader) error {
|
||||
if m.WriteFunc != nil {
|
||||
return m.WriteFunc(path, data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockStorage) Read(path string) (io.ReadCloser, error) {
|
||||
if m.ReadFunc != nil {
|
||||
return m.ReadFunc(path)
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader([]byte{})), nil
|
||||
}
|
||||
|
||||
func (m *MockStorage) List(path string) ([]string, error) {
|
||||
if m.ListFunc != nil {
|
||||
return m.ListFunc(path)
|
||||
}
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// Mock the storage backend for all tests in this package
|
||||
originalNewStorage := storage.NewStorage
|
||||
storage.NewStorage = func(u *url.URL) (storage.Storage, error) {
|
||||
if u.Scheme == "mock" {
|
||||
return &MockStorage{}, nil
|
||||
}
|
||||
return originalNewStorage(u)
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
57
cmd/pull.go
Normal file
57
cmd/pull.go
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewPullCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "pull [remote-url] [local-path]",
|
||||
Short: "Pull a remote file from a remote storage URL",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
remoteURL := args[0]
|
||||
localPath := args[1]
|
||||
|
||||
u, err := url.Parse(remoteURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid remote URL: %w", err)
|
||||
}
|
||||
|
||||
s, err := storage.NewStorage(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := s.Read(u.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error downloading file: %w", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
f, err := os.Create(localPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating local file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = io.Copy(f, r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to local file: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintln(cmd.OutOrStdout(), "File pulled successfully")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(NewPullCmd())
|
||||
}
|
||||
46
cmd/pull_test.go
Normal file
46
cmd/pull_test.go
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
)
|
||||
|
||||
func TestPullCmd(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
localPath := filepath.Join(tmpDir, "testfile.txt")
|
||||
|
||||
// Mock the storage backend
|
||||
storage.NewStorage = func(u *url.URL) (storage.Storage, error) {
|
||||
return &MockStorage{
|
||||
ReadFunc: func(path string) (io.ReadCloser, error) {
|
||||
if path != "/remote/path" {
|
||||
t.Errorf("expected path '/remote/path', got '%s'", path)
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader([]byte("pull test"))), nil
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Execute the pull command
|
||||
root := NewRootCmd()
|
||||
root.AddCommand(NewPullCmd())
|
||||
_, err := executeCommand(root, "pull", "mock://bucket/remote/path", localPath)
|
||||
if err != nil {
|
||||
t.Fatalf("pull command failed: %v", err)
|
||||
}
|
||||
|
||||
// Assertions
|
||||
data, err := os.ReadFile(localPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read pulled file: %v", err)
|
||||
}
|
||||
if string(data) != "pull test" {
|
||||
t.Errorf("expected data 'pull test', got '%s'", string(data))
|
||||
}
|
||||
}
|
||||
50
cmd/push.go
Normal file
50
cmd/push.go
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewPushCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "push [local-path] [remote-url]",
|
||||
Short: "Push a local file to a remote storage URL",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
localPath := args[0]
|
||||
remoteURL := args[1]
|
||||
|
||||
u, err := url.Parse(remoteURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid remote URL: %w", err)
|
||||
}
|
||||
|
||||
s, err := storage.NewStorage(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.Open(localPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening local file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
err = s.Write(u.Path, f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error uploading file: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintln(cmd.OutOrStdout(), "File pushed successfully")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(NewPushCmd())
|
||||
}
|
||||
52
cmd/push_test.go
Normal file
52
cmd/push_test.go
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/Snider/Borg/pkg/storage"
|
||||
)
|
||||
|
||||
func TestPushCmd(t *testing.T) {
|
||||
// Create a temporary file to push
|
||||
tmpDir := t.TempDir()
|
||||
localPath := filepath.Join(tmpDir, "testfile.txt")
|
||||
err := os.WriteFile(localPath, []byte("push test"), 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test file: %v", err)
|
||||
}
|
||||
|
||||
var writtenPath string
|
||||
var writtenData []byte
|
||||
|
||||
// Mock the storage backend
|
||||
storage.NewStorage = func(u *url.URL) (storage.Storage, error) {
|
||||
return &MockStorage{
|
||||
WriteFunc: func(path string, data io.Reader) error {
|
||||
writtenPath = path
|
||||
writtenData, _ = io.ReadAll(data)
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Execute the push command
|
||||
root := NewRootCmd()
|
||||
root.AddCommand(NewPushCmd())
|
||||
_, err = executeCommand(root, "push", localPath, "mock://bucket/remote/path")
|
||||
if err != nil {
|
||||
t.Fatalf("push command failed: %v", err)
|
||||
}
|
||||
|
||||
// Assertions
|
||||
if writtenPath != "/remote/path" {
|
||||
t.Errorf("expected path '/remote/path', got '%s'", writtenPath)
|
||||
}
|
||||
if string(writtenData) != "push test" {
|
||||
t.Errorf("expected data 'push test', got '%s'", string(writtenData))
|
||||
}
|
||||
}
|
||||
21
cmd/remote.go
Normal file
21
cmd/remote.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
package cmd
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
var remoteCmd = NewRemoteCmd()
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(GetRemoteCmd())
|
||||
}
|
||||
|
||||
func NewRemoteCmd() *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "remote",
|
||||
Short: "Manage remote storage configurations",
|
||||
Long: `Add, remove, and list remote storage configurations for S3, R2, B2, etc.`,
|
||||
}
|
||||
}
|
||||
|
||||
func GetRemoteCmd() *cobra.Command {
|
||||
return remoteCmd
|
||||
}
|
||||
48
cmd/remote_add.go
Normal file
48
cmd/remote_add.go
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/Snider/Borg/pkg/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewRemoteAddCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "add [name] [url]",
|
||||
Short: "Add a new remote storage configuration",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
name := args[0]
|
||||
url := args[1]
|
||||
accessKey, _ := cmd.Flags().GetString("access-key")
|
||||
secretKey, _ := cmd.Flags().GetString("secret-key")
|
||||
|
||||
endpoint, _ := cmd.Flags().GetString("endpoint")
|
||||
remote := config.Remote{
|
||||
Name: name,
|
||||
URL: url,
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
Endpoint: endpoint,
|
||||
}
|
||||
|
||||
remotes, err := config.LoadRemotes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
remotes = append(remotes, remote)
|
||||
|
||||
return config.SaveRemotes(remotes)
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().String("access-key", "", "Access key for the remote")
|
||||
cmd.Flags().String("secret-key", "", "Secret key for the remote")
|
||||
cmd.Flags().String("endpoint", "", "Custom endpoint for S3-compatible storage")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func init() {
|
||||
remoteCmd.AddCommand(NewRemoteAddCmd())
|
||||
}
|
||||
19
go.mod
19
go.mod
|
|
@ -22,6 +22,25 @@ require (
|
|||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/bep/debounce v1.2.1 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
|
|
|
|||
38
go.sum
38
go.sum
|
|
@ -11,6 +11,44 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
|
|||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
|
||||
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
|
||||
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM=
|
||||
|
|
|
|||
|
|
@ -8,22 +8,30 @@ import (
|
|||
"github.com/ulikunitz/xz"
|
||||
)
|
||||
|
||||
// NewWriter returns a new compression writer.
|
||||
func NewWriter(w io.Writer, format string) (io.WriteCloser, error) {
|
||||
switch format {
|
||||
case "gz":
|
||||
return gzip.NewWriter(w), nil
|
||||
case "xz":
|
||||
return xz.NewWriter(w)
|
||||
default:
|
||||
return &nopCloser{w}, nil
|
||||
}
|
||||
}
|
||||
|
||||
type nopCloser struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (nopCloser) Close() error { return nil }
|
||||
|
||||
// Compress compresses data using the specified format.
|
||||
func Compress(data []byte, format string) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
var writer io.WriteCloser
|
||||
var err error
|
||||
|
||||
switch format {
|
||||
case "gz":
|
||||
writer = gzip.NewWriter(&buf)
|
||||
case "xz":
|
||||
writer, err = xz.NewWriter(&buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return data, nil
|
||||
writer, err := NewWriter(&buf, format)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = writer.Write(data)
|
||||
|
|
|
|||
69
pkg/config/config.go
Normal file
69
pkg/config/config.go
Normal file
|
|
@ -0,0 +1,69 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type Remote struct {
|
||||
Name string `json:"name"`
|
||||
URL string `json:"url"`
|
||||
AccessKey string `json:"access_key"`
|
||||
SecretKey string `json:"secret_key"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
}
|
||||
|
||||
func GetConfigPath() (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not get user home directory: %w", err)
|
||||
}
|
||||
return filepath.Join(home, ".borg", "config.json"), nil
|
||||
}
|
||||
|
||||
func LoadRemotes() ([]Remote, error) {
|
||||
path, err := GetConfigPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
return []Remote{}, nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read config file: %w", err)
|
||||
}
|
||||
|
||||
var remotes []Remote
|
||||
err = json.Unmarshal(data, &remotes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse config file: %w", err)
|
||||
}
|
||||
|
||||
return remotes, nil
|
||||
}
|
||||
|
||||
func SaveRemotes(remotes []Remote) error {
|
||||
path, err := GetConfigPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := json.MarshalIndent(remotes, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not marshal config data: %w", err)
|
||||
}
|
||||
|
||||
configDir := filepath.Dir(path)
|
||||
if _, err := os.Stat(configDir); os.IsNotExist(err) {
|
||||
err = os.MkdirAll(configDir, 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create config directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return os.WriteFile(path, data, 0644)
|
||||
}
|
||||
94
pkg/storage/s3.go
Normal file
94
pkg/storage/s3.go
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
borgconfig "github.com/Snider/Borg/pkg/config"
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
)
|
||||
|
||||
// S3Storage is a storage backend for S3.
|
||||
type S3Storage struct {
|
||||
client *s3.Client
|
||||
bucket string
|
||||
}
|
||||
|
||||
// NewS3Storage creates a new S3 storage backend.
|
||||
func NewS3Storage(bucket string) (*S3Storage, error) {
|
||||
cfg, err := config.LoadDefaultConfig(context.TODO())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
remotes, err := borgconfig.LoadRemotes()
|
||||
if err == nil { // Silently ignore errors, fallback to default config
|
||||
for _, r := range remotes {
|
||||
u, err := url.Parse(r.URL)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if u.Host == bucket {
|
||||
creds := credentials.NewStaticCredentialsProvider(r.AccessKey, r.SecretKey, "")
|
||||
cfg.Credentials = creds
|
||||
if r.Endpoint != "" {
|
||||
cfg.EndpointResolverWithOptions = aws.EndpointResolverWithOptionsFunc(
|
||||
func(service, region string, options ...interface{}) (aws.Endpoint, error) {
|
||||
return aws.Endpoint{URL: r.Endpoint}, nil
|
||||
})
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
client := s3.NewFromConfig(cfg)
|
||||
return &S3Storage{
|
||||
client: client,
|
||||
bucket: bucket,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Write writes data to the given path.
|
||||
func (s *S3Storage) Write(path string, data io.Reader) error {
|
||||
uploader := s3manager.NewUploader(s.client)
|
||||
_, err := uploader.Upload(context.TODO(), &s3.PutObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(path),
|
||||
Body: data,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Read reads data from the given path.
|
||||
func (s *S3Storage) Read(path string) (io.ReadCloser, error) {
|
||||
out, err := s.client.GetObject(context.TODO(), &s3.GetObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(path),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out.Body, nil
|
||||
}
|
||||
|
||||
// List lists the contents of the given path.
|
||||
func (s *S3Storage) List(path string) ([]string, error) {
|
||||
out, err := s.client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Prefix: aws.String(path),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var paths []string
|
||||
for _, obj := range out.Contents {
|
||||
paths = append(paths, *obj.Key)
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
156
pkg/storage/s3_test.go
Normal file
156
pkg/storage/s3_test.go
Normal file
|
|
@ -0,0 +1,156 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
)
|
||||
|
||||
func newTestS3Client(serverURL string) (*s3.Client, error) {
|
||||
cfg, err := config.LoadDefaultConfig(context.TODO(),
|
||||
config.WithRegion("us-east-1"),
|
||||
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("AKID", "SECRET_KEY", "TOKEN")),
|
||||
config.WithEndpointResolverWithOptions(aws.EndpointResolverWithOptionsFunc(
|
||||
func(service, region string, options ...interface{}) (aws.Endpoint, error) {
|
||||
return aws.Endpoint{URL: serverURL, SigningRegion: region}, nil
|
||||
})),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s3.NewFromConfig(cfg, func(o *s3.Options) {
|
||||
o.UsePathStyle = true
|
||||
}), nil
|
||||
}
|
||||
|
||||
func TestS3Storage_Write(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPut {
|
||||
t.Errorf("expected PUT request, got %s", r.Method)
|
||||
}
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
if string(body) != "hello world" {
|
||||
t.Errorf("expected body 'hello world', got %s", string(body))
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client, err := newTestS3Client(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test s3 client: %v", err)
|
||||
}
|
||||
|
||||
storage := &S3Storage{
|
||||
client: client,
|
||||
bucket: "test-bucket",
|
||||
}
|
||||
|
||||
err = storage.Write("test-path", strings.NewReader("hello world"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3Storage_Read(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
t.Errorf("expected GET request, got %s", r.Method)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("hello world"))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client, err := newTestS3Client(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test s3 client: %v", err)
|
||||
}
|
||||
|
||||
storage := &S3Storage{
|
||||
client: client,
|
||||
bucket: "test-bucket",
|
||||
}
|
||||
|
||||
rc, err := storage.Read("test-path")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
body, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read body: %v", err)
|
||||
}
|
||||
|
||||
if string(body) != "hello world" {
|
||||
t.Errorf("expected body 'hello world', got '%s'", string(body))
|
||||
}
|
||||
}
|
||||
|
||||
func TestS3Storage_List(t *testing.T) {
|
||||
response := `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Name>test-bucket</Name>
|
||||
<Prefix>test-prefix</Prefix>
|
||||
<KeyCount>2</KeyCount>
|
||||
<MaxKeys>1000</MaxKeys>
|
||||
<IsTruncated>false</IsTruncated>
|
||||
<Contents>
|
||||
<Key>test-prefix/file1.txt</Key>
|
||||
<LastModified>2024-01-01T00:00:00.000Z</LastModified>
|
||||
<ETag>"abc"</ETag>
|
||||
<Size>123</Size>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
</Contents>
|
||||
<Contents>
|
||||
<Key>test-prefix/file2.txt</Key>
|
||||
<LastModified>2024-01-01T00:00:00.000Z</LastModified>
|
||||
<ETag>"def"</ETag>
|
||||
<Size>456</Size>
|
||||
<StorageClass>STANDARD</StorageClass>
|
||||
</Contents>
|
||||
</ListBucketResult>`
|
||||
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
t.Errorf("expected GET request, got %s", r.Method)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(response))
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client, err := newTestS3Client(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create test s3 client: %v", err)
|
||||
}
|
||||
|
||||
storage := &S3Storage{
|
||||
client: client,
|
||||
bucket: "test-bucket",
|
||||
}
|
||||
|
||||
paths, err := storage.List("test-prefix")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if len(paths) != 2 {
|
||||
t.Fatalf("expected 2 paths, got %d", len(paths))
|
||||
}
|
||||
if paths[0] != "test-prefix/file1.txt" {
|
||||
t.Errorf("expected path 'test-prefix/file1.txt', got '%s'", paths[0])
|
||||
}
|
||||
if paths[1] != "test-prefix/file2.txt" {
|
||||
t.Errorf("expected path 'test-prefix/file2.txt', got '%s'", paths[1])
|
||||
}
|
||||
}
|
||||
37
pkg/storage/storage.go
Normal file
37
pkg/storage/storage.go
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// Storage is an interface for a storage backend.
|
||||
type Storage interface {
|
||||
// Write writes data to the given path.
|
||||
Write(path string, data io.Reader) error
|
||||
// Read reads data from the given path.
|
||||
Read(path string) (io.ReadCloser, error)
|
||||
// List lists the contents of the given path.
|
||||
List(path string) ([]string, error)
|
||||
}
|
||||
|
||||
// NewStorage creates a new storage backend based on the URL scheme.
|
||||
var NewStorage = newStorage
|
||||
|
||||
func newStorage(u *url.URL) (Storage, error) {
|
||||
switch u.Scheme {
|
||||
case "s3":
|
||||
return NewS3Storage(u.Host)
|
||||
case "r2":
|
||||
return nil, fmt.Errorf("r2 storage not implemented")
|
||||
case "b2":
|
||||
return nil, fmt.Errorf("b2 storage not implemented")
|
||||
case "file":
|
||||
return nil, fmt.Errorf("file storage not implemented")
|
||||
case "mock":
|
||||
return nil, nil // Handled in tests
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported storage scheme: %s", u.Scheme)
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue