simplecloud

package module
v0.0.7

Published: Jan 9, 2026 License: MIT Imports: 21 Imported by: 2

README

simplecloud

A tiny Go package for reading and writing objects across different storage backends with a unified interface.

Installation

go get github.com/mtgban/simplecloud

Supported Backends

Backend                Read   Write   Constructor
Local filesystem        ✓      ✓      &FileBucket{}
HTTP/HTTPS              ✓      –      NewHTTPBucket(client, baseURL)
Backblaze B2            ✓      ✓      NewB2Client(ctx, accessKey, secretKey, bucket)
Google Cloud Storage    ✓      ✓      NewGCSClient(ctx, serviceAccountFile, bucket)
Amazon S3               ✓      ✓      NewS3Client(ctx, accessKey, secretKey, bucket, endpoint, region)
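
The local and HTTP backends need no credentials. A minimal sketch (the base URL is a placeholder):

// Local filesystem: the zero value is ready to use; object paths are
// ordinary file paths.
local := &simplecloud.FileBucket{}

// HTTP/HTTPS: read-only; object paths are resolved against the base URL.
remote, err := simplecloud.NewHTTPBucket(http.DefaultClient, "https://example.com/data/")
if err != nil {
    log.Fatal(err)
}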

Usage

All backends implement the same interface:

type Reader interface {
    NewReader(context.Context, string) (io.ReadCloser, error)
}

type Writer interface {
    NewWriter(context.Context, string) (io.WriteCloser, error)
}
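
Because the interfaces are this small, helpers can stay backend-agnostic. A minimal sketch (readString is a hypothetical helper, not part of the package):

// readString reads an entire object from any backend that satisfies Reader.
func readString(ctx context.Context, bucket simplecloud.Reader, path string) (string, error) {
    r, err := bucket.NewReader(ctx, path)
    if err != nil {
        return "", err
    }
    defer r.Close()
    data, err := io.ReadAll(r)
    return string(data), err
}
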
Reading from GCS
bucket, err := simplecloud.NewGCSClient(ctx, "service-account.json", "my-bucket")
if err != nil {
    log.Fatal(err)
}

reader, err := bucket.NewReader(ctx, "path/to/file.txt")
if err != nil {
    log.Fatal(err)
}
defer reader.Close()

data, err := io.ReadAll(reader)
if err != nil {
    log.Fatal(err)
}
Writing to B2
bucket, err := simplecloud.NewB2Client(ctx, accessKey, secretKey, "my-bucket")
if err != nil {
    log.Fatal(err)
}

writer, err := bucket.NewWriter(ctx, "path/to/file.txt")
if err != nil {
    log.Fatal(err)
}

_, err = writer.Write([]byte("hello world"))
if err != nil {
    writer.Close()
    log.Fatal(err)
}

if err := writer.Close(); err != nil {
    log.Fatal(err)  // important: Close() flushes to cloud storage
}

Transparent Compression

Use InitReader and InitWriter to automatically handle compressed files based on extension:

Extension   Compression
.gz         gzip
.bz2        bzip2
.xz         xz/lzma

// Automatically decompresses .gz file
reader, err := simplecloud.InitReader(ctx, bucket, "data.json.gz")
if err != nil {
    log.Fatal(err)
}
defer reader.Close()
// reader yields decompressed data

// Automatically compresses to .xz
writer, err := simplecloud.InitWriter(ctx, bucket, "output.json.xz")
if err != nil {
    log.Fatal(err)
}
// writes are compressed before storage
// ... write data ...
if err := writer.Close(); err != nil {
    log.Fatal(err) // as before, Close flushes the compressed stream to storage
}

Copying Between Backends

Copy files between any two backends, with automatic compression and decompression driven by the source and destination file extensions:

src, _ := simplecloud.NewGCSClient(ctx, "sa.json", "source-bucket")
dst, _ := simplecloud.NewB2Client(ctx, key, secret, "dest-bucket")

// Copy and transcode: decompress gzip, recompress as xz
n, err := simplecloud.Copy(ctx, src, dst, "input.json.gz", "output.json.xz")
if err != nil {
    log.Fatal(err)
}
fmt.Printf("copied %d bytes\n", n)

Limitations

This is a lightweight helper, and some operations are not covered:

  • No List or Delete API
  • No retry logic or exponential backoff (a caller-side sketch follows this list)
  • No ACL or permission management
  • No multipart upload configuration
  • Context cancellation doesn't interrupt local file operations
  • Cloud clients aren't exposed for cleanup (create short-lived buckets, or manage the underlying clients yourself)
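
Retries, for example, can be layered on by the caller, since every backend sits behind the same two interfaces. A minimal sketch with a fixed delay (retryReader is a hypothetical helper, not part of the package):

// retryReader retries bucket.NewReader with a fixed one-second delay.
// Production code would classify errors and back off exponentially.
func retryReader(ctx context.Context, bucket simplecloud.Reader, path string, attempts int) (io.ReadCloser, error) {
    var err error
    for i := 0; i < attempts; i++ {
        var r io.ReadCloser
        if r, err = bucket.NewReader(ctx, path); err == nil {
            return r, nil
        }
        select {
        case <-ctx.Done():
            return nil, ctx.Err()
        case <-time.After(time.Second):
        }
    }
    return nil, err
}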

For advanced use cases, use the underlying SDKs directly.

License

MIT

Documentation

Constants

This section is empty.

Variables

This section is empty.

Functions

func Copy added in v0.0.5

func Copy(ctx context.Context, src Reader, dst Writer, srcPath, dstPath string) (int64, error)

func InitReader

func InitReader(ctx context.Context, bucket Reader, path string) (io.ReadCloser, error)

func InitWriter

func InitWriter(ctx context.Context, bucket Writer, path string) (io.WriteCloser, error)

Types

type B2Bucket

type B2Bucket struct {
	Bucket *b2.Bucket

	ConcurrentDownloads int
}
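
ConcurrentDownloads can be set after construction; judging by the name, it tunes how many parallel streams a read uses (an assumption, the field is otherwise undocumented):

bucket, err := simplecloud.NewB2Client(ctx, accessKey, secretKey, "my-bucket")
if err != nil {
    log.Fatal(err)
}
// Assumption: number of parallel download streams used by NewReader.
bucket.ConcurrentDownloads = 4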

func NewB2Client

func NewB2Client(ctx context.Context, accessKey, secretKey, bucketName string) (*B2Bucket, error)

func (*B2Bucket) NewReader

func (b *B2Bucket) NewReader(ctx context.Context, path string) (io.ReadCloser, error)

func (*B2Bucket) NewWriter

func (b *B2Bucket) NewWriter(ctx context.Context, path string) (io.WriteCloser, error)

type FileBucket

type FileBucket struct{}

func (*FileBucket) NewReader

func (f *FileBucket) NewReader(ctx context.Context, path string) (io.ReadCloser, error)

func (*FileBucket) NewWriter added in v0.0.2

func (f *FileBucket) NewWriter(ctx context.Context, path string) (io.WriteCloser, error)

type GCSBucket

type GCSBucket struct {
	Bucket *storage.BucketHandle
}

func NewGCSClient

func NewGCSClient(ctx context.Context, serviceAccountFile, bucketName string) (*GCSBucket, error)

func (*GCSBucket) NewReader

func (g *GCSBucket) NewReader(ctx context.Context, path string) (io.ReadCloser, error)

func (*GCSBucket) NewWriter

func (g *GCSBucket) NewWriter(ctx context.Context, path string) (io.WriteCloser, error)

type HTTPBucket

type HTTPBucket struct {
	Client *http.Client
	URL    *url.URL
}

func NewHTTPBucket

func NewHTTPBucket(client *http.Client, path string) (*HTTPBucket, error)

func (*HTTPBucket) NewReader

func (h *HTTPBucket) NewReader(ctx context.Context, path string) (io.ReadCloser, error)

type MultiCloser

type MultiCloser struct {
	io.Reader
	io.Writer
	// contains filtered or unexported fields
}

func (*MultiCloser) Close

func (m *MultiCloser) Close() error
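
Presumably the concrete type returned by InitReader and InitWriter when a compression codec wraps the underlying stream; Close would then close both layers.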

type ReadWriter

type ReadWriter interface {
	Reader
	Writer
}
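
Of the provided backends, FileBucket, B2Bucket, GCSBucket, and S3Bucket satisfy ReadWriter; HTTPBucket implements only Reader.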

type Reader

type Reader interface {
	NewReader(context.Context, string) (io.ReadCloser, error)
}

type S3Bucket added in v0.0.6

type S3Bucket struct {
	Bucket string
	// contains filtered or unexported fields
}

func NewS3Client added in v0.0.6

func NewS3Client(ctx context.Context, accessKey, secretKey, bucketName, endpoint, region string) (*S3Bucket, error)

func (*S3Bucket) NewReader added in v0.0.6

func (s *S3Bucket) NewReader(ctx context.Context, path string) (io.ReadCloser, error)

func (*S3Bucket) NewWriter added in v0.0.6

func (s *S3Bucket) NewWriter(ctx context.Context, path string) (io.WriteCloser, error)

type Writer

type Writer interface {
	NewWriter(context.Context, string) (io.WriteCloser, error)
}
