This commit introduces a new bandwidth limiting feature to the `borg collect` command. The feature is implemented using a token bucket algorithm in a new `pkg/ratelimit` package. The rate limiter is integrated with the `http.Client` via a custom `http.RoundTripper`, and the feature is exposed to the user through a new `--bandwidth` flag on the `collect` command. The bandwidth limit has been applied to the `website` and `github` collectors, and unit and integration tests have been added to verify the functionality.

The following changes have been made:

- Created a new `pkg/ratelimit` package with a token bucket implementation.
- Integrated the rate limiter with `http.Client` using a custom `http.RoundTripper`.
- Added a `--bandwidth` flag to the `collect` command.
- Applied the bandwidth limit to the `website` and `github` collectors.
- Added unit tests for the rate limiter and bandwidth parsing logic.
- Added integration tests for the `collect website` and `collect github repo` commands.

The following issues were encountered and were still being addressed when the session ended:

- Build errors in the `cmd` package, specifically in `cmd/all.go` and `cmd/all_test.go`.
- The `mocks` package still needs a `MockGithubClient`.
- The `website` package needs to be refactored to reduce code duplication.
- The rate limiter's performance can be improved.

Co-authored-by: Snider <631881+Snider@users.noreply.github.com>
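The `pkg/ratelimit` internals are not part of this file view, so the sketch below is only a minimal illustration of how a token-bucket `http.RoundTripper` with the constructor signature used in this command could look. It assumes `ParseBandwidth` yields an `int64` bytes-per-second value and uses `golang.org/x/time/rate` as a stand-in token bucket; the real `pkg/ratelimit` implementation may differ.

```go
package ratelimit

import (
	"context"
	"io"
	"net/http"

	"golang.org/x/time/rate"
)

// rateLimitedRoundTripper wraps another RoundTripper and throttles how fast
// response bodies can be read, using a token bucket where one token is one byte.
// (Illustrative sketch only; not necessarily how pkg/ratelimit is written.)
type rateLimitedRoundTripper struct {
	next    http.RoundTripper
	limiter *rate.Limiter
}

// NewRateLimitedRoundTripper caps download speed at bytesPerSec.
// A non-positive value disables limiting and returns next unchanged.
func NewRateLimitedRoundTripper(next http.RoundTripper, bytesPerSec int64) http.RoundTripper {
	if bytesPerSec <= 0 {
		return next
	}
	// Allow a burst of one second's worth of bytes so WaitN stays valid
	// for typical read chunk sizes.
	return &rateLimitedRoundTripper{
		next:    next,
		limiter: rate.NewLimiter(rate.Limit(bytesPerSec), int(bytesPerSec)),
	}
}

func (rt *rateLimitedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := rt.next.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	// Throttle the body as the caller reads it.
	resp.Body = &throttledReader{r: resp.Body, limiter: rt.limiter, ctx: req.Context()}
	return resp, nil
}

// throttledReader blocks on the token bucket for every byte it hands back.
type throttledReader struct {
	r       io.ReadCloser
	limiter *rate.Limiter
	ctx     context.Context
}

func (t *throttledReader) Read(p []byte) (int, error) {
	// Never read more than one burst at a time so WaitN never exceeds
	// the limiter's burst size.
	if burst := t.limiter.Burst(); len(p) > burst {
		p = p[:burst]
	}
	n, err := t.r.Read(p)
	if n > 0 {
		if waitErr := t.limiter.WaitN(t.ctx, n); waitErr != nil {
			return n, waitErr
		}
	}
	return n, err
}

func (t *throttledReader) Close() error { return t.r.Close() }
```

With a transport along these lines, the command below only needs to parse the user's `--bandwidth` string into bytes per second and wire the transport into its `http.Client`, which is exactly what `NewCollectWebsiteCmd` does.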
package cmd

import (
	"fmt"
	"net/http"
	"os"

	"github.com/schollz/progressbar/v3"

	"github.com/Snider/Borg/pkg/compress"
	"github.com/Snider/Borg/pkg/ratelimit"
	"github.com/Snider/Borg/pkg/tim"
	"github.com/Snider/Borg/pkg/trix"
	"github.com/Snider/Borg/pkg/ui"
	"github.com/Snider/Borg/pkg/website"

	"github.com/spf13/cobra"
)

// collectWebsiteCmd represents the collect website command
var collectWebsiteCmd = NewCollectWebsiteCmd()

func init() {
	GetCollectCmd().AddCommand(GetCollectWebsiteCmd())
}

func GetCollectWebsiteCmd() *cobra.Command {
	return collectWebsiteCmd
}

func NewCollectWebsiteCmd() *cobra.Command {
	collectWebsiteCmd := &cobra.Command{
		Use:   "website [url]",
		Short: "Collect a single website",
		Long:  `Collect a single website and store it in a DataNode.`,
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			websiteURL := args[0]
			outputFile, _ := cmd.Flags().GetString("output")
			depth, _ := cmd.Flags().GetInt("depth")
			format, _ := cmd.Flags().GetString("format")
			compression, _ := cmd.Flags().GetString("compression")
			password, _ := cmd.Flags().GetString("password")

			if format != "datanode" && format != "tim" && format != "trix" {
				return fmt.Errorf("invalid format: %s (must be 'datanode', 'tim', or 'trix')", format)
			}

			prompter := ui.NewNonInteractivePrompter(ui.GetWebsiteQuote)
			prompter.Start()
			defer prompter.Stop()

			// Show a progress bar only when running interactively.
			var bar *progressbar.ProgressBar
			if prompter.IsInteractive() {
				bar = ui.NewProgressBar(-1, "Crawling website")
			}

			// Parse the --bandwidth flag and wrap the default transport so
			// all downloads made by this command are rate limited.
			bandwidth, _ := cmd.Flags().GetString("bandwidth")
			bytesPerSec, err := ratelimit.ParseBandwidth(bandwidth)
			if err != nil {
				return fmt.Errorf("invalid bandwidth: %w", err)
			}

			client := &http.Client{
				Transport: ratelimit.NewRateLimitedRoundTripper(http.DefaultTransport, bytesPerSec),
			}

			dn, err := website.DownloadAndPackageWebsiteWithClient(websiteURL, depth, bar, client)
			if err != nil {
				return fmt.Errorf("error downloading and packaging website: %w", err)
			}

			// Serialize the DataNode in the requested format.
			var data []byte
			if format == "tim" {
				tim, err := tim.FromDataNode(dn)
				if err != nil {
					return fmt.Errorf("error creating tim: %w", err)
				}
				data, err = tim.ToTar()
				if err != nil {
					return fmt.Errorf("error serializing tim: %w", err)
				}
			} else if format == "trix" {
				data, err = trix.ToTrix(dn, password)
				if err != nil {
					return fmt.Errorf("error serializing trix: %w", err)
				}
			} else {
				data, err = dn.ToTar()
				if err != nil {
					return fmt.Errorf("error serializing DataNode: %w", err)
				}
			}

			compressedData, err := compress.Compress(data, compression)
			if err != nil {
				return fmt.Errorf("error compressing data: %w", err)
			}

			// Derive a default output filename from the format and compression.
			if outputFile == "" {
				outputFile = "website." + format
				if compression != "none" {
					outputFile += "." + compression
				}
			}

			err = os.WriteFile(outputFile, compressedData, 0644)
			if err != nil {
				return fmt.Errorf("error writing website to file: %w", err)
			}

			fmt.Fprintln(cmd.OutOrStdout(), "Website saved to", outputFile)
			return nil
		},
	}
	collectWebsiteCmd.PersistentFlags().String("output", "", "Output file for the DataNode")
	collectWebsiteCmd.PersistentFlags().Int("depth", 2, "Recursion depth for downloading")
	collectWebsiteCmd.PersistentFlags().String("format", "datanode", "Output format (datanode, tim, or trix)")
	collectWebsiteCmd.PersistentFlags().String("compression", "none", "Compression format (none, gz, or xz)")
	collectWebsiteCmd.PersistentFlags().String("password", "", "Password for encryption")
	return collectWebsiteCmd
}