From 1fc015f92f47599433e863e3dae69f0feae5bc19 Mon Sep 17 00:00:00 2001 From: Manuel Wassermann Date: Sun, 16 Oct 2016 15:34:31 +0200 Subject: [PATCH 1/1] Initial commit of rclone --- Makefile | 1 + abi_used_libs | 2 + files/rclone.patch | 5914 ++++++++++++++++++++++++++++++++++++++++++++++++++++ package.yml | 31 + pspec_x86_64.xml | 35 + 5 files changed, 5983 insertions(+) create mode 100644 Makefile create mode 100644 abi_used_libs create mode 100644 files/rclone.patch create mode 100644 package.yml create mode 100644 pspec_x86_64.xml diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..0a42375 --- /dev/null +++ b/Makefile @@ -0,0 +1 @@ +include ../Makefile.common diff --git a/abi_used_libs b/abi_used_libs new file mode 100644 index 0000000..3fcd837 --- /dev/null +++ b/abi_used_libs @@ -0,0 +1,2 @@ +libc.so.6 +libpthread.so.0 diff --git a/files/rclone.patch b/files/rclone.patch new file mode 100644 index 0000000..28d7980 --- /dev/null +++ b/files/rclone.patch @@ -0,0 +1,5914 @@ +diff --git a/rclone-1.33/.gitignore b/rclone-1.33/.gitignore +deleted file mode 100644 +index 32e2394..0000000 +--- a/rclone-1.33/.gitignore ++++ /dev/null +@@ -1,6 +0,0 @@ +-*~ +-_junk/ +-rclone +-rclonetest/rclonetest +-build +-docs/public +diff --git a/rclone-1.33/.travis.yml b/rclone-1.33/.travis.yml +deleted file mode 100644 +index 0fe4142..0000000 +--- a/rclone-1.33/.travis.yml ++++ /dev/null +@@ -1,21 +0,0 @@ +-language: go +-sudo: false +-osx_image: xcode7.3 +- +-os: +- - linux +- - osx +- +-go: +- - 1.5.4 +- - 1.6.3 +- - 1.7 +- +-# - tip +- +-install: +- - make build_dep +- +-script: +- - make check +- - make quicktest +diff --git a/rclone-1.33/CONTRIBUTING.md b/rclone/CONTRIBUTING.md +index 3c23696..1935082 100644 +--- a/rclone-1.33/CONTRIBUTING.md ++++ b/rclone/CONTRIBUTING.md +@@ -159,4 +159,4 @@ Add your fs to the docs + * `docs/content/docs.md` - list of remotes in config section + * `docs/content/about.md` - front page of rclone.org + * `docs/layouts/chrome/navbar.html` - add it to the website navigation +- * `make_manual.py` - add the page to the `docs` constant ++ * `bin/make_manual.py` - add the page to the `docs` constant +diff --git a/rclone-1.33/ISSUE_TEMPLATE.md b/rclone/ISSUE_TEMPLATE.md +index 538232e..fe48793 100644 +--- a/rclone-1.33/ISSUE_TEMPLATE.md ++++ b/rclone/ISSUE_TEMPLATE.md +@@ -1,6 +1,6 @@ + When filing an issue, please include the following information if +-possible as well as a description of the problem. Make sure you are +-using the [latest version of rclone](http://rclone.org/downloads/). ++possible as well as a description of the problem. Make sure you test ++with the [latest beta of rclone](http://rclone.org/downloads/). + + > What is your rclone version (eg output from `rclone -V`) + +diff --git a/rclone-1.33/MANUAL.html b/rclone/MANUAL.html +index 71b2650..24e8523 100644 +--- a/rclone-1.33/MANUAL.html ++++ b/rclone/MANUAL.html +@@ -35,10 +35,12 @@ +
+ <li>MD5/SHA1 hashes checked at all times for file integrity</li>
+ <li>Timestamps preserved on files</li>
+ <li>Partial syncs supported on a whole file basis</li>
+-<li>Copy mode to just copy new/changed files</li>
+-<li>Sync (one way) mode to make a directory identical</li>
+-<li>Check mode to check for file hash equality</li>
++<li><a href="http://rclone.org/commands/rclone_copy/">Copy</a> mode to just copy new/changed files</li>
++<li><a href="http://rclone.org/commands/rclone_sync/">Sync</a> (one way) mode to make a directory identical</li>
++<li><a href="http://rclone.org/commands/rclone_check/">Check</a> mode to check for file hash equality</li>
+ <li>Can sync to and from network, eg two different cloud accounts</li>
++<li>Optional encryption (<a href="http://rclone.org/crypt/">Crypt</a>)</li>
++<li>Optional FUSE mount (<a href="http://rclone.org/commands/rclone_mount/">rclone mount</a>)</li>
+ </ul>
+ <h2 id="links">Links</h2>
+ <ul>
    +diff --git a/rclone-1.33/MANUAL.md b/rclone/MANUAL.md +index 17b9786..1080107 100644 +--- a/rclone-1.33/MANUAL.md ++++ b/rclone/MANUAL.md +@@ -26,10 +26,12 @@ Features + * MD5/SHA1 hashes checked at all times for file integrity + * Timestamps preserved on files + * Partial syncs supported on a whole file basis +- * Copy mode to just copy new/changed files +- * Sync (one way) mode to make a directory identical +- * Check mode to check for file hash equality ++ * [Copy](http://rclone.org/commands/rclone_copy/) mode to just copy new/changed files ++ * [Sync](http://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical ++ * [Check](http://rclone.org/commands/rclone_check/) mode to check for file hash equality + * Can sync to and from network, eg two different cloud accounts ++ * Optional encryption ([Crypt](http://rclone.org/crypt/)) ++ * Optional FUSE mount ([rclone mount](http://rclone.org/commands/rclone_mount/)) + + Links + +@@ -115,6 +117,7 @@ See the following for detailed instructions for + * [Hubic](http://rclone.org/hubic/) + * [Microsoft One Drive](http://rclone.org/onedrive/) + * [Yandex Disk](http://rclone.org/yandex/) ++ * [Crypt](http://rclone.org/crypt/) - to encrypt other remotes + + Usage + ----- +diff --git a/rclone-1.33/MANUAL.txt b/rclone/MANUAL.txt +index 5f0f40c..255e452 100644 +--- a/rclone-1.33/MANUAL.txt ++++ b/rclone/MANUAL.txt +@@ -33,6 +33,8 @@ Features + - Sync (one way) mode to make a directory identical + - Check mode to check for file hash equality + - Can sync to and from network, eg two different cloud accounts ++- Optional encryption (Crypt) ++- Optional FUSE mount (rclone mount) + + Links + +@@ -115,6 +117,7 @@ See the following for detailed instructions for + - Hubic + - Microsoft One Drive + - Yandex Disk ++- Crypt - to encrypt other remotes + + + Usage +diff --git a/rclone-1.33/Makefile b/rclone/Makefile +index 330e158..ff04d7b 100644 +--- a/rclone-1.33/Makefile ++++ b/rclone/Makefile +@@ -1,12 +1,23 @@ + SHELL = /bin/bash +-TAG := $(shell git describe --tags) ++TAG := $(shell echo `git describe --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//') + LAST_TAG := $(shell git describe --tags --abbrev=0) + NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)') ++GO_VERSION := $(shell go version) ++GO_LATEST := $(findstring go1.7,$(GO_VERSION)) ++BETA_URL := http://beta.rclone.org/$(TAG)/ + + rclone: +- @go version + go install -v ./... + ++vars: ++ @echo SHELL="'$(SHELL)'" ++ @echo TAG="'$(TAG)'" ++ @echo LAST_TAG="'$(LAST_TAG)'" ++ @echo NEW_TAG="'$(NEW_TAG)'" ++ @echo GO_VERSION="'$(GO_VERSION)'" ++ @echo GO_LATEST="'$(GO_LATEST)'" ++ @echo BETA_URL="'$(BETA_URL)'" ++ + # Full suite of integration tests + test: rclone + go test ./... +@@ -19,17 +30,25 @@ quicktest: + + # Do source code quality checks + check: rclone ++ifdef GO_LATEST + go vet ./... + errcheck ./... + goimports -d . | grep . ; test $$? -eq 1 + golint ./... | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1 ++else ++ @echo Skipping tests as not on Go stable ++endif + + # Get the build dependencies + build_dep: + go get -t ./... 
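The new TAG pipeline at the top of this Makefile hunk packs a lot into one sed call: it appends the current branch to `git describe`, zero-pads a single-digit commit count so versions sort correctly, and drops the suffix when the branch is master or a detached HEAD. A rough Go sketch of the same normalization (the function name and sample inputs are invented here for illustration, not part of the patch):

```
package main

import (
	"fmt"
	"regexp"
)

// normalizeTag mimics the sed pipeline: zero-pad a single-digit commit
// count and strip a trailing -master or -HEAD branch suffix.
// (ReplaceAllString is global where sed's s/// replaces once - the same
// result for inputs like these with a single match.)
func normalizeTag(describe, branch string) string {
	tag := describe + "-" + branch
	tag = regexp.MustCompile(`-([0-9])-`).ReplaceAllString(tag, "-0$1-")
	return regexp.MustCompile(`-(HEAD|master)$`).ReplaceAllString(tag, "")
}

func main() {
	fmt.Println(normalizeTag("v1.33", "master"))         // v1.33
	fmt.Println(normalizeTag("v1.33-3-g1fc015f", "fix")) // v1.33-03-g1fc015f-fix
}
```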
++ifdef GO_LATEST + go get -u github.com/kisielk/errcheck + go get -u golang.org/x/tools/cmd/goimports + go get -u github.com/golang/lint/golint ++ go get -u github.com/mitchellh/gox ++ go get -u github.com/inconshreveable/mousetrap ++endif + + # Update dependencies + update: +@@ -40,8 +59,8 @@ doc: rclone.1 MANUAL.html MANUAL.txt + rclone.1: MANUAL.md + pandoc -s --from markdown --to man MANUAL.md -o rclone.1 + +-MANUAL.md: make_manual.py docs/content/*.md commanddocs +- ./make_manual.py ++MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs ++ ./bin/make_manual.py + + MANUAL.html: MANUAL.md + pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html +@@ -72,17 +91,23 @@ upload: + rclone -v copy build/ memstore:downloads-rclone-org + + upload_github: +- ./upload-github $(TAG) ++ ./bin/upload-github $(TAG) + + cross: doc +- ./cross-compile $(TAG) ++ ./bin/cross-compile $(TAG) + + beta: +- ./cross-compile $(TAG)β ++ ./bin/cross-compile $(TAG)β + rm build/*-current-* + rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β + @echo Beta release ready at http://pub.rclone.org/$(TAG)%CE%B2/ + ++travis_beta: ++ ./bin/cross-compile $(TAG)β ++ rm build/*-current-* ++ rclone --config bin/travis.rclone.conf -v copy build/ memstore:beta-rclone-org/$(TAG) ++ @echo Beta release ready at $(BETA_URL) ++ + serve: website + cd docs && hugo server -v -w + +diff --git a/rclone-1.33/README.md b/rclone/README.md +index d21170b..43a21c0 100644 +--- a/rclone-1.33/README.md ++++ b/rclone/README.md +@@ -33,6 +33,8 @@ Features + * Sync (one way) mode to make a directory identical + * Check mode to check for file hash equality + * Can sync to and from network, eg two different cloud accounts ++ * Optional encryption (Crypt) ++ * Optional FUSE mount + + See the home page for installation, usage, documentation, changelog + and configuration walkthroughs. +diff --git a/rclone-1.33/amazonclouddrive/amazonclouddrive.go b/rclone/amazonclouddrive/amazonclouddrive.go +index 12bf394..2b1e806 100644 +--- a/rclone-1.33/amazonclouddrive/amazonclouddrive.go ++++ b/rclone/amazonclouddrive/amazonclouddrive.go +@@ -172,7 +172,6 @@ func NewFs(name, root string) (fs.Fs, error) { + } + + c := acd.NewClient(oAuthClient) +- c.UserAgent = fs.UserAgent + f := &Fs{ + name: name, + root: root, +@@ -479,11 +478,15 @@ func (f *Fs) List(out fs.ListOpts, dir string) { + // At the end of large uploads. The speculation is that the timeout + // is waiting for the sha1 hashing to complete and the file may well + // be properly uploaded. 
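The checkUpload hunk that continues below adds an early out so this wait-and-see retry only fires for timeout-flavoured HTTP statuses. A standalone sketch of that gate (the helper name is illustrative, not rclone's API):

```
package main

import (
	"fmt"
	"net/http"
)

// isFixableUploadStatus reports whether an Amazon Cloud Drive upload
// error is worth waiting out: only 408, 500 and 504, matching the
// condition added in the hunk below.
func isFixableUploadStatus(resp *http.Response) bool {
	if resp == nil {
		return false
	}
	return resp.StatusCode == 408 || resp.StatusCode == 500 || resp.StatusCode == 504
}

func main() {
	fmt.Println(isFixableUploadStatus(&http.Response{StatusCode: 504})) // true
	fmt.Println(isFixableUploadStatus(&http.Response{StatusCode: 404})) // false
}
```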
+-func (f *Fs) checkUpload(in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error) (fixedError bool, info *acd.File, err error) { ++func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error) (fixedError bool, info *acd.File, err error) { + // Return if no error - all is well + if inErr == nil { + return false, inInfo, inErr + } ++ // If not one of the errors we can fix return ++ if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 { ++ return false, inInfo, inErr ++ } + const sleepTime = 5 * time.Second // sleep between tries + retries := int(*uploadWaitTime / sleepTime) // number of retries + if retries <= 0 { +@@ -562,7 +565,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) { + } + f.stopUpload() + var ok bool +- ok, info, err = f.checkUpload(in, src, info, err) ++ ok, info, err = f.checkUpload(resp, in, src, info, err) + if ok { + return false, nil + } +@@ -783,18 +786,19 @@ func (o *Object) Storable() bool { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { + bigObject := o.Size() >= int64(tempLinkThreshold) + if bigObject { + fs.Debug(o, "Dowloading large object via tempLink") + } + file := acd.File{Node: o.info} + var resp *http.Response ++ headers := fs.OpenOptionHeaders(options) + err = o.fs.pacer.Call(func() (bool, error) { + if !bigObject { +- in, resp, err = file.Open() ++ in, resp, err = file.OpenHeaders(headers) + } else { +- in, resp, err = file.OpenTempURL(o.fs.noAuthClient) ++ in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers) + } + return o.fs.shouldRetry(resp, err) + }) +@@ -819,7 +823,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + } + o.fs.stopUpload() + var ok bool +- ok, info, err = o.fs.checkUpload(in, src, info, err) ++ ok, info, err = o.fs.checkUpload(resp, in, src, info, err) + if ok { + return false, nil + } +@@ -843,6 +847,14 @@ func (o *Object) Remove() error { + return err + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ if o.info.ContentProperties.ContentType != nil { ++ return *o.info.ContentProperties.ContentType ++ } ++ return "" ++} ++ + // Check the interfaces are satisfied + var ( + _ fs.Fs = (*Fs)(nil) +@@ -850,5 +862,6 @@ var ( + // _ fs.Copier = (*Fs)(nil) + // _ fs.Mover = (*Fs)(nil) + // _ fs.DirMover = (*Fs)(nil) +- _ fs.Object = (*Object)(nil) ++ _ fs.Object = (*Object)(nil) ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/amazonclouddrive/amazonclouddrive_test.go b/rclone/amazonclouddrive/amazonclouddrive_test.go +index 39d4d9c..44dfca0 100644 +--- a/rclone-1.33/amazonclouddrive/amazonclouddrive_test.go ++++ b/rclone/amazonclouddrive/amazonclouddrive_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func 
TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/b2/b2.go b/rclone/b2/b2.go +index 05cf0e8..f28630b 100644 +--- a/rclone-1.33/b2/b2.go ++++ b/rclone/b2/b2.go +@@ -35,6 +35,7 @@ const ( + timeHeader = headerPrefix + timeKey + sha1Key = "large_file_sha1" + sha1Header = "X-Bz-Content-Sha1" ++ sha1InfoHeader = headerPrefix + sha1Key + testModeHeader = "X-Bz-Test-Mode" + retryAfterHeader = "Retry-After" + minSleep = 10 * time.Millisecond +@@ -97,12 +98,13 @@ type Fs struct { + + // Object describes a b2 object + type Object struct { +- fs *Fs // what this object is part of +- remote string // The remote path +- id string // b2 id of the file +- modTime time.Time // The modified time of the object if known +- sha1 string // SHA-1 hash if known +- size int64 // Size of the object ++ fs *Fs // what this object is part of ++ remote string // The remote path ++ id string // b2 id of the file ++ modTime time.Time // The modified time of the object if known ++ sha1 string // SHA-1 hash if known ++ size int64 // Size of the object ++ mimeType string // Content-Type of the object + } + + // ------------------------------------------------------------ +@@ -695,7 +697,16 @@ func (f *Fs) Mkdir() error { + if err != nil { + if apiErr, ok := err.(*api.Error); ok { + if apiErr.Code == "duplicate_bucket_name" { +- return nil ++ // Check this is our bucket - buckets are globally unique and this ++ // might be someone elses. ++ _, getBucketErr := f.getBucketID() ++ if getBucketErr == nil { ++ // found so it is our bucket ++ return nil ++ } ++ if getBucketErr != fs.ErrorDirNotFound { ++ fs.Debug(f, "Error checking bucket exists: %v", getBucketErr) ++ } + } + } + return errors.Wrap(err, "failed to create bucket") +@@ -789,9 +800,9 @@ func (f *Fs) purge(oldOnly bool) error { + go func() { + defer wg.Done() + for object := range toBeDeleted { +- fs.Stats.Transferring(object.Name) ++ fs.Stats.Checking(object.Name) + checkErr(f.deleteByID(object.ID, object.Name)) +- fs.Stats.DoneTransferring(object.Name) ++ fs.Stats.DoneChecking(object.Name) + } + }() + } +@@ -886,9 +897,10 @@ func (o *Object) Size() int64 { + // o.modTime + // o.size + // o.sha1 +-func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string) (err error) { ++func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) { + o.id = ID + o.sha1 = SHA1 ++ o.mimeType = mimeType + // Read SHA1 from metadata if it exists and isn't set + if o.sha1 == "" || o.sha1 == "none" { + o.sha1 = Info[sha1Key] +@@ -907,7 +919,7 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp + // o.size + // o.sha1 + func (o *Object) decodeMetaData(info *api.File) (err error) { +- return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info) ++ return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType) + } + + // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo +@@ -918,7 +930,7 @@ func (o *Object) decodeMetaData(info *api.File) (err error) { + // o.size + // o.sha1 + func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) { +- return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info) ++ 
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType) + } + + // readMetaData gets the metadata if it hasn't already been fetched +@@ -1057,7 +1069,7 @@ func (file *openFile) Close() (err error) { + } + + // Check the SHA1 +- receivedSHA1 := file.resp.Header.Get(sha1Header) ++ receivedSHA1 := file.o.sha1 + calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedSHA1 != calculatedSHA1 { + return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1) +@@ -1070,11 +1082,12 @@ func (file *openFile) Close() (err error) { + var _ io.ReadCloser = &openFile{} + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { + opts := rest.Opts{ + Method: "GET", + Absolute: true, + Path: o.fs.info.DownloadURL, ++ Options: options, + } + // Download by id if set otherwise by name + if o.id != "" { +@@ -1097,8 +1110,20 @@ func (o *Object) Open() (in io.ReadCloser, err error) { + _ = resp.Body.Close() + return nil, err + } ++ // Read sha1 from header if it isn't set + if o.sha1 == "" { + o.sha1 = resp.Header.Get(sha1Header) ++ fs.Debug(o, "Reading sha1 from header - %q", o.sha1) ++ // if sha1 header is "none" (in big files), then need ++ // to read it from the metadata ++ if o.sha1 == "none" { ++ o.sha1 = resp.Header.Get(sha1InfoHeader) ++ fs.Debug(o, "Reading sha1 from info - %q", o.sha1) ++ } ++ } ++ // Don't check length or hash on partial content ++ if resp.StatusCode == http.StatusPartialContent { ++ return resp.Body, nil + } + return newOpenFile(o, resp), nil + } +@@ -1267,7 +1292,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) { + ExtraHeaders: map[string]string{ + "Authorization": upload.AuthorizationToken, + "X-Bz-File-Name": urlEncode(o.fs.root + o.remote), +- "Content-Type": fs.MimeType(o), ++ "Content-Type": fs.MimeType(src), + sha1Header: calculatedSha1, + timeHeader: timeString(modTime), + }, +@@ -1319,10 +1344,16 @@ func (o *Object) Remove() error { + return nil + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( + _ fs.Fs = &Fs{} + _ fs.Purger = &Fs{} + _ fs.CleanUpper = &Fs{} + _ fs.Object = &Object{} ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/b2/b2_test.go b/rclone/b2/b2_test.go +index 64c4553..519f99c 100644 +--- a/rclone-1.33/b2/b2_test.go ++++ b/rclone/b2/b2_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/cross-compile b/rclone/bin/cross-compile +similarity index 93% 
+rename from rclone-1.33/cross-compile +rename to rclone/bin/cross-compile +index e633d37..8a02d5f 100755 +--- a/rclone-1.33/cross-compile ++++ b/rclone/bin/cross-compile +@@ -17,7 +17,8 @@ rm -rf build + export CGO_ENABLED=0 + + # Arch pairs we build for +-# gox -osarch-list for definitive list ++# gox -osarch-list - for definitive list ++# go tool dist list - or this + + OSARCH="\ + windows/386 +@@ -27,6 +28,7 @@ darwin/amd64 + linux/386 + linux/amd64 + linux/arm ++linux/arm64 + freebsd/386 + freebsd/amd64 + freebsd/arm +diff --git a/rclone-1.33/make_manual.py b/rclone/bin/make_manual.py +similarity index 100% +rename from rclone-1.33/make_manual.py +rename to rclone/bin/make_manual.py +diff --git a/rclone/bin/travis.rclone.conf b/rclone/bin/travis.rclone.conf +new file mode 100644 +index 0000000..597253c +--- /dev/null ++++ b/rclone/bin/travis.rclone.conf +@@ -0,0 +1,4 @@ ++# Encrypted rclone configuration File ++ ++RCLONE_ENCRYPT_V0: ++XIkAr3p+y+zai82cHFH8UoW1y1XTe6dpTzo/g4uSwqI2pfsnSSJ4JbAsRZ9nGVpx3NzROKEewlusVHNokiA4/nD4NbT+2DJrpMLg/OtLREICfuRk3tVWPKLGsmA+TLKU+IfQMO4LfrrCe2DF/lW0qA5Xu16E0Vn++jNhbwW2oB+JTkaGka8Ae3CyisM/3NUGnCOG/yb5wLH7ybUstNYPHsNFCiU1brFXQ4DNIbUFMmca+5S44vrOWvhp9QijQXlG7/JjwrkqbB/LK2gMJPTuhY2OW+4tRw1IoCXbWmwJXv5xmhPqanW92A== +\ No newline at end of file +diff --git a/rclone-1.33/upload-github b/rclone/bin/upload-github +similarity index 100% +rename from rclone-1.33/upload-github +rename to rclone/bin/upload-github +diff --git a/rclone-1.33/cmd/all/all.go b/rclone/cmd/all/all.go +index 51f427f..320d5f2 100644 +--- a/rclone-1.33/cmd/all/all.go ++++ b/rclone/cmd/all/all.go +@@ -14,6 +14,7 @@ import ( + _ "github.com/ncw/rclone/cmd/delete" + _ "github.com/ncw/rclone/cmd/genautocomplete" + _ "github.com/ncw/rclone/cmd/gendocs" ++ _ "github.com/ncw/rclone/cmd/listremotes" + _ "github.com/ncw/rclone/cmd/ls" + _ "github.com/ncw/rclone/cmd/lsd" + _ "github.com/ncw/rclone/cmd/lsl" +diff --git a/rclone-1.33/cmd/cmd.go b/rclone/cmd/cmd.go +index 0e23d23..64a6eaf 100644 +--- a/rclone-1.33/cmd/cmd.go ++++ b/rclone/cmd/cmd.go +@@ -161,20 +161,23 @@ func Run(Retry bool, cmd *cobra.Command, f func() error) { + for try := 1; try <= *retries; try++ { + err = f() + if !Retry || (err == nil && !fs.Stats.Errored()) { ++ if try > 1 { ++ fs.ErrorLog(nil, "Attempt %d/%d succeeded", try, *retries) ++ } + break + } + if fs.IsFatalError(err) { +- fs.Log(nil, "Fatal error received - not attempting retries") ++ fs.ErrorLog(nil, "Fatal error received - not attempting retries") + break + } + if fs.IsNoRetryError(err) { +- fs.Log(nil, "Can't retry this error - not attempting retries") ++ fs.ErrorLog(nil, "Can't retry this error - not attempting retries") + break + } + if err != nil { +- fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err) ++ fs.ErrorLog(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err) + } else { +- fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors()) ++ fs.ErrorLog(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors()) + } + if try < *retries { + fs.Stats.ResetErrors() +diff --git a/rclone/cmd/listremotes/listremotes.go b/rclone/cmd/listremotes/listremotes.go +new file mode 100644 +index 0000000..ab06ba2 +--- /dev/null ++++ b/rclone/cmd/listremotes/listremotes.go +@@ -0,0 +1,49 @@ ++package ls ++ ++import ( ++ "fmt" ++ "sort" ++ ++ "github.com/ncw/rclone/cmd" ++ "github.com/ncw/rclone/fs" ++ "github.com/spf13/cobra" ++) ++ ++// 
Globals ++var ( ++ listLong bool ++) ++ ++func init() { ++ cmd.Root.AddCommand(listremotesCmd) ++ listremotesCmd.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.") ++} ++ ++var listremotesCmd = &cobra.Command{ ++ Use: "listremotes", ++ Short: `List all the remotes in the config file.`, ++ Long: ` ++rclone listremotes lists all the available remotes from the config file. ++ ++When uses with the -l flag it lists the types too. ++`, ++ Run: func(command *cobra.Command, args []string) { ++ cmd.CheckArgs(0, 0, command, args) ++ remotes := fs.ConfigFile.GetSectionList() ++ sort.Strings(remotes) ++ maxlen := 1 ++ for _, remote := range remotes { ++ if len(remote) > maxlen { ++ maxlen = len(remote) ++ } ++ } ++ for _, remote := range remotes { ++ if listLong { ++ remoteType := fs.ConfigFile.MustValue(remote, "type", "UNKNOWN") ++ fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType) ++ } else { ++ fmt.Printf("%s:\n", remote) ++ } ++ } ++ }, ++} +diff --git a/rclone-1.33/cmd/mount/dir.go b/rclone/cmd/mount/dir.go +index e99c71c..0ec53e8 100644 +--- a/rclone-1.33/cmd/mount/dir.go ++++ b/rclone/cmd/mount/dir.go +@@ -130,6 +130,8 @@ var _ fusefs.Node = (*Dir)(nil) + // Attr updates the attribes of a directory + func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) error { + fs.Debug(d.path, "Dir.Attr") ++ a.Gid = gid ++ a.Uid = uid + a.Mode = os.ModeDir | dirPerms + // FIXME include Valid so get some caching? Also mtime + return nil +diff --git a/rclone-1.33/cmd/mount/file.go b/rclone/cmd/mount/file.go +index c287be6..965f15a 100644 +--- a/rclone-1.33/cmd/mount/file.go ++++ b/rclone/cmd/mount/file.go +@@ -46,6 +46,8 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) error { + f.mu.Lock() + defer f.mu.Unlock() + fs.Debug(f.o, "File.Attr") ++ a.Gid = gid ++ a.Uid = uid + a.Mode = filePerms + // if o is nil it isn't valid yet, so return the size so far + if f.o == nil { +@@ -110,13 +112,14 @@ func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenR + + fs.Debug(o, "File.Open") + +- // Files aren't seekable +- resp.Flags |= fuse.OpenNonSeekable +- + switch { + case req.Flags.IsReadOnly(): ++ if noSeek { ++ resp.Flags |= fuse.OpenNonSeekable ++ } + return newReadFileHandle(o) + case req.Flags.IsWriteOnly(): ++ resp.Flags |= fuse.OpenNonSeekable + src := newCreateInfo(f.d.f, o.Remote()) + fh, err := newWriteFileHandle(f.d, f, src) + if err != nil { +diff --git a/rclone-1.33/cmd/mount/fs.go b/rclone/cmd/mount/fs.go +index f3b1fb4..71f1a05 100644 +--- a/rclone-1.33/cmd/mount/fs.go ++++ b/rclone/cmd/mount/fs.go +@@ -10,12 +10,6 @@ import ( + "github.com/ncw/rclone/fs" + ) + +-// Default permissions +-const ( +- dirPerms = 0755 +- filePerms = 0644 +-) +- + // FS represents the top level filing system + type FS struct { + f fs.Fs +@@ -30,6 +24,43 @@ func (f *FS) Root() (fusefs.Node, error) { + return newDir(f.f, ""), nil + } + ++// mountOptions configures the options from the command line flags ++func mountOptions(device string) (options []fuse.MountOption) { ++ options = []fuse.MountOption{ ++ fuse.MaxReadahead(uint32(maxReadAhead)), ++ fuse.Subtype("rclone"), ++ fuse.FSName(device), fuse.VolumeName(device), ++ fuse.NoAppleDouble(), ++ fuse.NoAppleXattr(), ++ ++ // Options from benchmarking in the fuse module ++ //fuse.MaxReadahead(64 * 1024 * 1024), ++ //fuse.AsyncRead(), - FIXME this causes ++ // ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor ++ // which is probably related to errors 
people are having ++ //fuse.WritebackCache(), ++ } ++ if allowNonEmpty { ++ options = append(options, fuse.AllowNonEmptyMount()) ++ } ++ if allowOther { ++ options = append(options, fuse.AllowOther()) ++ } ++ if allowRoot { ++ options = append(options, fuse.AllowRoot()) ++ } ++ if defaultPermissions { ++ options = append(options, fuse.DefaultPermissions()) ++ } ++ if readOnly { ++ options = append(options, fuse.ReadOnly()) ++ } ++ if writebackCache { ++ options = append(options, fuse.WritebackCache()) ++ } ++ return options ++} ++ + // mount the file system + // + // The mount point will be ready when this returns. +@@ -37,7 +68,7 @@ func (f *FS) Root() (fusefs.Node, error) { + // returns an error, and an error channel for the serve process to + // report an error when fusermount is called. + func mount(f fs.Fs, mountpoint string) (<-chan error, error) { +- c, err := fuse.Mount(mountpoint) ++ c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...) + if err != nil { + return nil, err + } +diff --git a/rclone-1.33/cmd/mount/mount.go b/rclone/cmd/mount/mount.go +index e8a08e0..737c68d 100644 +--- a/rclone-1.33/cmd/mount/mount.go ++++ b/rclone/cmd/mount/mount.go +@@ -5,23 +5,58 @@ + package mount + + import ( ++ "log" ++ "os" ++ + "bazil.org/fuse" + "github.com/ncw/rclone/cmd" + "github.com/ncw/rclone/fs" + "github.com/pkg/errors" + "github.com/spf13/cobra" ++ "golang.org/x/sys/unix" + ) + + // Globals + var ( + noModTime = false + debugFUSE = false ++ noSeek = false ++ // mount options ++ readOnly = false ++ allowNonEmpty = false ++ allowRoot = false ++ allowOther = false ++ defaultPermissions = false ++ writebackCache = false ++ maxReadAhead fs.SizeSuffix = 128 * 1024 ++ umask = 0 ++ uid = uint32(unix.Geteuid()) ++ gid = uint32(unix.Getegid()) ++ // foreground = false ++ // default permissions for directories - modified by umask in Mount ++ dirPerms = os.FileMode(0777) ++ filePerms = os.FileMode(0666) + ) + + func init() { ++ umask = unix.Umask(0) // read the umask ++ unix.Umask(umask) // set it back to what it was + cmd.Root.AddCommand(mountCmd) +- mountCmd.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).") +- mountCmd.Flags().BoolVarP(&debugFUSE, "debug-fuse", "", false, "Debug the FUSE internals - needs -v.") ++ mountCmd.Flags().BoolVarP(&noModTime, "no-modtime", "", noModTime, "Don't read the modification time (can speed things up).") ++ mountCmd.Flags().BoolVarP(&debugFUSE, "debug-fuse", "", debugFUSE, "Debug the FUSE internals - needs -v.") ++ mountCmd.Flags().BoolVarP(&noSeek, "no-seek", "", noSeek, "Don't allow seeking in files.") ++ // mount options ++ mountCmd.Flags().BoolVarP(&readOnly, "read-only", "", readOnly, "Mount read-only.") ++ mountCmd.Flags().BoolVarP(&allowNonEmpty, "allow-non-empty", "", allowNonEmpty, "Allow mounting over a non-empty directory.") ++ mountCmd.Flags().BoolVarP(&allowRoot, "allow-root", "", allowRoot, "Allow access to root user.") ++ mountCmd.Flags().BoolVarP(&allowOther, "allow-other", "", allowOther, "Allow access to other users.") ++ mountCmd.Flags().BoolVarP(&defaultPermissions, "default-permissions", "", defaultPermissions, "Makes kernel enforce access control based on the file mode.") ++ mountCmd.Flags().BoolVarP(&writebackCache, "write-back-cache", "", writebackCache, "Makes kernel buffer writes before sending them to rclone. 
Without this, writethrough caching is used.") ++ mountCmd.Flags().VarP(&maxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.") ++ mountCmd.Flags().IntVarP(&umask, "umask", "", umask, "Override the permission bits set by the filesystem.") ++ mountCmd.Flags().Uint32VarP(&uid, "uid", "", uid, "Override the uid field set by the filesystem.") ++ mountCmd.Flags().Uint32VarP(&gid, "gid", "", gid, "Override the gid field set by the filesystem.") ++ //mountCmd.Flags().BoolVarP(&foreground, "foreground", "", foreground, "Do not detach.") + } + + var mountCmd = &cobra.Command{ +@@ -49,10 +84,9 @@ Or with OS X + + ### Limitations ### + +-This can only read files seqentially, or write files sequentially. It +-can't read and write or seek in files. ++This can only write files seqentially, it can only seek when reading. + +-rclonefs inherits rclone's directory handling. In rclone's world ++Rclone mount inherits rclone's directory handling. In rclone's world + directories don't really exist. This means that empty directories + will have a tendency to disappear once they fall out of the directory + cache. +@@ -85,10 +119,13 @@ mount won't do that, so will be less reliable than the rclone command. + * Preserve timestamps + * Move directories + `, +- RunE: func(command *cobra.Command, args []string) error { ++ Run: func(command *cobra.Command, args []string) { + cmd.CheckArgs(2, 2, command, args) + fdst := cmd.NewFsDst(args) +- return Mount(fdst, args[1]) ++ err := Mount(fdst, args[1]) ++ if err != nil { ++ log.Fatalf("Fatal error: %v", err) ++ } + }, + } + +@@ -102,6 +139,10 @@ func Mount(f fs.Fs, mountpoint string) error { + } + } + ++ // Set permissions ++ dirPerms = 0777 &^ os.FileMode(umask) ++ filePerms = 0666 &^ os.FileMode(umask) ++ + // Mount it + errChan, err := mount(f, mountpoint) + if err != nil { +diff --git a/rclone-1.33/cmd/mount/read.go b/rclone/cmd/mount/read.go +index 97f7aaa..4da3989 100644 +--- a/rclone-1.33/cmd/mount/read.go ++++ b/rclone/cmd/mount/read.go +@@ -19,6 +19,7 @@ type ReadFileHandle struct { + r io.ReadCloser + o fs.Object + readCalled bool // set if read has been called ++ offset int64 + } + + func newReadFileHandle(o fs.Object) (*ReadFileHandle, error) { +@@ -38,14 +39,38 @@ var _ fusefs.Handle = (*ReadFileHandle)(nil) + // Check interface satisfied + var _ fusefs.HandleReader = (*ReadFileHandle)(nil) + ++// seek to a new offset ++func (fh *ReadFileHandle) seek(offset int64) error { ++ fs.Debug(fh.o, "ReadFileHandle.seek from %d to %d", fh.offset, offset) ++ r, err := fh.o.Open(&fs.SeekOption{Offset: offset}) ++ if err != nil { ++ fs.Debug(fh.o, "ReadFileHandle.Read seek failed: %v", err) ++ return err ++ } ++ err = fh.r.Close() ++ if err != nil { ++ fs.Debug(fh.o, "ReadFileHandle.Read seek close old failed: %v", err) ++ } ++ fh.r = r ++ return nil ++} ++ + // Read from the file handle + func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { +- fs.Debug(fh.o, "ReadFileHandle.Open") ++ fs.Debug(fh.o, "ReadFileHandle.Read size %d offset %d", req.Size, req.Offset) + if fh.closed { + fs.ErrorLog(fh.o, "ReadFileHandle.Read error: %v", errClosedFileHandle) + return errClosedFileHandle + } +- fh.readCalled = true ++ if req.Offset != fh.offset { ++ err := fh.seek(req.Offset) ++ if err != nil { ++ return err ++ } ++ } ++ if req.Size > 0 { ++ fh.readCalled = true ++ } + // We don't actually enforce Offset to match where previous read + // ended. 
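A side note on the umask handling added to mount's init() above: unix.Umask both sets and returns the mask, so the only way to read it is to set it to zero and put the old value straight back; Mount() then clears those bits from 0777/0666 to derive the default permissions. A minimal sketch (output depends on your process umask):

```
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	umask := unix.Umask(0) // Umask returns the previous mask...
	unix.Umask(umask)      // ...so set it straight back, leaving it unchanged.
	dirPerms := os.FileMode(0777) &^ os.FileMode(umask)  // e.g. 0755 under umask 022
	filePerms := os.FileMode(0666) &^ os.FileMode(umask) // e.g. 0644 under umask 022
	fmt.Printf("umask=%03o dir=%v file=%v\n", umask, dirPerms, filePerms)
}
```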
Maybe we should, but that would mean'd we need to track + // it. The kernel *should* do it for us, based on the +@@ -60,10 +85,11 @@ func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp + err = nil + } + resp.Data = buf[:n] ++ fh.offset += int64(n) + if err != nil { +- fs.ErrorLog(fh.o, "ReadFileHandle.Open error: %v", err) ++ fs.ErrorLog(fh.o, "ReadFileHandle.Read error: %v", err) + } else { +- fs.Debug(fh.o, "ReadFileHandle.Open OK") ++ fs.Debug(fh.o, "ReadFileHandle.Read OK") + } + return err + } +@@ -90,17 +116,23 @@ func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) err + fh.mu.Lock() + defer fh.mu.Unlock() + fs.Debug(fh.o, "ReadFileHandle.Flush") +- // If Read hasn't been called then ignore the Flush - Release +- // will pick it up +- if !fh.readCalled { +- fs.Debug(fh.o, "ReadFileHandle.Flush ignoring flush on unread handle") +- return nil + +- } +- err := fh.close() +- if err != nil { +- fs.ErrorLog(fh.o, "ReadFileHandle.Flush error: %v", err) +- return err ++ // Ignore the Flush as there is nothing we can sensibly do and ++ // it seems quite common for Flush to be called from ++ // different threads each of which have read some data. ++ if false { ++ // If Read hasn't been called then ignore the Flush - Release ++ // will pick it up ++ if !fh.readCalled { ++ fs.Debug(fh.o, "ReadFileHandle.Flush ignoring flush on unread handle") ++ return nil ++ ++ } ++ err := fh.close() ++ if err != nil { ++ fs.ErrorLog(fh.o, "ReadFileHandle.Flush error: %v", err) ++ return err ++ } + } + fs.Debug(fh.o, "ReadFileHandle.Flush OK") + return nil +diff --git a/rclone-1.33/cmd/mount/read_test.go b/rclone/cmd/mount/read_test.go +index 7b385af..885d864 100644 +--- a/rclone-1.33/cmd/mount/read_test.go ++++ b/rclone/cmd/mount/read_test.go +@@ -4,6 +4,7 @@ package mount + + import ( + "io" ++ "io/ioutil" + "os" + "syscall" + "testing" +@@ -71,9 +72,40 @@ func TestReadFileDoubleClose(t *testing.T) { + assert.NoError(t, err) + assert.Equal(t, 1, n) + +- // close the dup - should produce an error ++ // close the dup - should not produce an error + err = syscall.Close(fd2) +- assert.Error(t, err, "input/output error") ++ assert.NoError(t, err, "input/output error") + + run.rm(t, "testdoubleclose") + } ++ ++// Test seeking ++func TestReadSeek(t *testing.T) { ++ run.skipIfNoFUSE(t) ++ ++ var data = []byte("helloHELLO") ++ run.createFile(t, "testfile", string(data)) ++ run.checkDir(t, "testfile 10") ++ ++ fd, err := os.Open(run.path("testfile")) ++ assert.NoError(t, err) ++ ++ _, err = fd.Seek(5, 0) ++ assert.NoError(t, err) ++ ++ buf, err := ioutil.ReadAll(fd) ++ assert.NoError(t, err) ++ assert.Equal(t, buf, []byte("HELLO")) ++ ++ _, err = fd.Seek(0, 0) ++ assert.NoError(t, err) ++ ++ buf, err = ioutil.ReadAll(fd) ++ assert.NoError(t, err) ++ assert.Equal(t, buf, []byte("helloHELLO")) ++ ++ err = fd.Close() ++ assert.NoError(t, err) ++ ++ run.rm(t, "testfile") ++} +diff --git a/rclone-1.33/cmd/redirect_stderr_unix.go b/rclone/cmd/redirect_stderr_unix.go +index f31c8e0..86e8364 100644 +--- a/rclone-1.33/cmd/redirect_stderr_unix.go ++++ b/rclone/cmd/redirect_stderr_unix.go +@@ -1,18 +1,19 @@ + // Log the panic under unix to the log file + +-// +build darwin dragonfly freebsd linux nacl netbsd openbsd ++// +build !windows,!solaris,!plan9 + + package cmd + + import ( + "log" + "os" +- "syscall" ++ ++ "golang.org/x/sys/unix" + ) + + // redirectStderr to the file passed in + func redirectStderr(f *os.File) { +- err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd())) 
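For context, the technique this change touches: duplicating a log file's descriptor onto stderr so that panic output lands in the file. A runnable sketch, assuming a Unix platform where unix.Dup2 exists (the new build tags above exclude windows, solaris and plan9) and an illustrative log path:

```
package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/panic.log") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	// Point fd 2 (stderr) at the log file.
	if err := unix.Dup2(int(f.Fd()), int(os.Stderr.Fd())); err != nil {
		log.Fatalf("Failed to redirect stderr to file: %v", err)
	}
	panic("this message now lands in /tmp/panic.log")
}
```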
++ err := unix.Dup2(int(f.Fd()), int(os.Stderr.Fd())) + if err != nil { + log.Fatalf("Failed to redirect stderr to file: %v", err) + } +diff --git a/rclone-1.33/cmd/versioncheck.go b/rclone-1.33/cmd/versioncheck.go +deleted file mode 100644 +index 9caa533..0000000 +--- a/rclone-1.33/cmd/versioncheck.go ++++ /dev/null +@@ -1,6 +0,0 @@ +-//+build !go1.5 +- +-package cmd +- +-// Upgrade to Go version 1.5 to compile rclone. +-func init() { Go_version_1_5_required_for_compilation() } +diff --git a/rclone-1.33/crypt/cipher.go b/rclone/crypt/cipher.go +index 4ac3de5..1021448 100644 +--- a/rclone-1.33/crypt/cipher.go ++++ b/rclone/crypt/cipher.go +@@ -355,9 +355,9 @@ func (n *nonce) fromBuf(buf []byte) { + } + } + +-// increment to add 1 to the nonce +-func (n *nonce) increment() { +- for i := 0; i < len(*n); i++ { ++// carry 1 up the nonce from position i ++func (n *nonce) carry(i int) { ++ for ; i < len(*n); i++ { + digit := (*n)[i] + newDigit := digit + 1 + (*n)[i] = newDigit +@@ -368,6 +368,27 @@ func (n *nonce) increment() { + } + } + ++// increment to add 1 to the nonce ++func (n *nonce) increment() { ++ n.carry(0) ++} ++ ++// add an uint64 to the nonce ++func (n *nonce) add(x uint64) { ++ carry := uint16(0) ++ for i := 0; i < 8; i++ { ++ digit := (*n)[i] ++ xDigit := byte(x) ++ x >>= 8 ++ carry += uint16(digit) + uint16(xDigit) ++ (*n)[i] = byte(carry) ++ carry >>= 8 ++ } ++ if carry != 0 { ++ n.carry(8) ++ } ++} ++ + // encrypter encrypts an io.Reader on the fly + type encrypter struct { + in io.Reader +@@ -528,6 +549,17 @@ func (fh *decrypter) Read(p []byte) (n int, err error) { + return n, nil + } + ++// seek the decryption forwards the amount given ++// ++// returns an offset for the underlying rc to be seeked and the number ++// of bytes to be discarded ++func (fh *decrypter) seek(offset int64) (underlyingOffset int64, discard int64) { ++ blocks, discard := offset/blockDataSize, offset%blockDataSize ++ underlyingOffset = int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize) ++ fh.nonce.add(uint64(blocks)) ++ return ++} ++ + // finish sets the final error and tidies up + func (fh *decrypter) finish(err error) error { + if fh.err != nil { +diff --git a/rclone-1.33/crypt/cipher_test.go b/rclone/crypt/cipher_test.go +index 6ec09cc..405c048 100644 +--- a/rclone-1.33/crypt/cipher_test.go ++++ b/rclone/crypt/cipher_test.go +@@ -464,6 +464,144 @@ func TestNonceIncrement(t *testing.T) { + } + } + ++func TestNonceAdd(t *testing.T) { ++ for _, test := range []struct { ++ add uint64 ++ in nonce ++ out nonce ++ }{ ++ { ++ 0x01, ++ nonce{0x00}, ++ nonce{0x01}, ++ }, ++ { ++ 0xFF, ++ nonce{0xFF}, ++ nonce{0xFE, 0x01}, ++ }, ++ { ++ 0xFFFF, ++ nonce{0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFe, 0xFF, 0xFF, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, ++ }, ++ { ++ 0xFFFFFFFFFFFFFFFF, ++ nonce{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, ++ nonce{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ++ }, ++ } { ++ x := test.in ++ x.add(test.add) ++ assert.Equal(t, test.out, x) ++ } ++} ++ + // randomSource can read or write a random sequence + type randomSource struct { + counter int64 +diff --git a/rclone-1.33/crypt/crypt.go b/rclone/crypt/crypt.go +index 7365b3e..57d6896 100644 +--- a/rclone-1.33/crypt/crypt.go ++++ b/rclone/crypt/crypt.go +@@ -4,6 +4,7 @@ package crypt + import ( + "fmt" + "io" ++ "io/ioutil" + "path" + "sync" + +@@ -19,7 +20,7 @@ func init() { + NewFs: NewFs, + Options: []fs.Option{{ + Name: "remote", +- Help: "Remote to encrypt/decrypt.", ++ Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", + }, { + Name: "filename_encryption", + Help: "How to encrypt the filenames.", +@@ -86,6 +87,8 @@ func NewFs(name, rpath string) (fs.Fs, error) { + Fs: wrappedFs, + cipher: cipher, + mode: mode, ++ name: name, ++ root: rpath, + } + return f, err + } +@@ -95,6 +98,18 @@ type Fs struct { + fs.Fs + cipher Cipher + mode NameEncryptionMode ++ name string ++ root string ++} ++ ++// Name of the remote (as passed into NewFs) ++func (f *Fs) Name() string { ++ return f.name ++} ++ ++// Root of the remote (as passed into NewFs) ++func (f *Fs) Root() string { ++ return f.root + } + + // String returns a description of the FS +@@ -283,12 +298,59 @@ func (o *Object) Hash(hash fs.HashType) (string, error) { + } + + // Open opens the file for read. Call Close() on the returned io.ReadCloser +-func (o *Object) Open() (io.ReadCloser, error) { ++func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) { ++ var offset int64 ++ for _, option := range options { ++ switch x := option.(type) { ++ case *fs.SeekOption: ++ offset = x.Offset ++ default: ++ if option.Mandatory() { ++ fs.Log(o, "Unsupported mandatory option: %v", option) ++ } ++ } ++ } + in, err := o.Object.Open() + if err != nil { +- return in, err ++ return nil, err + } +- return o.f.cipher.DecryptData(in) ++ ++ // This reads the header and checks it is OK ++ rc, err := o.f.cipher.DecryptData(in) ++ if err != nil { ++ return nil, err ++ } ++ ++ // If seeking required, then... 
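Before the seek branch below, the arithmetic decrypter.seek performs: map a plaintext offset to a block number, reopen the underlying file just before that block, and discard the remainder once decryption resumes (the real method also advances the nonce by the block count). A sketch with assumed format constants (the header and block sizes here are illustrative, not taken from this patch):

```
package main

import "fmt"

const (
	fileHeaderSize  = 32        // assumed: file magic + nonce
	blockHeaderSize = 16        // assumed: per-block authentication tag
	blockDataSize   = 64 * 1024 // assumed: plaintext bytes per block
)

// seekOffsets shows the offset arithmetic from decrypter.seek: translate
// a plaintext offset into an offset into the encrypted stream plus the
// number of decrypted bytes to throw away.
func seekOffsets(offset int64) (underlyingOffset, discard int64) {
	blocks := offset / blockDataSize
	discard = offset % blockDataSize
	underlyingOffset = fileHeaderSize + blocks*(blockHeaderSize+blockDataSize)
	return underlyingOffset, discard
}

func main() {
	fmt.Println(seekOffsets(70000)) // 65584 4464 -> block 1, 4464 bytes in
}
```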
++ if offset != 0 { ++ // FIXME could cache the unseeked decrypter as we re-read the header on every seek ++ decrypter := rc.(*decrypter) ++ ++ // Seek the decrypter and work out where to seek the ++ // underlying file and how many bytes to discard ++ underlyingOffset, discard := decrypter.seek(offset) ++ ++ // Re-open stream with a seek of underlyingOffset ++ err = in.Close() ++ if err != nil { ++ return nil, err ++ } ++ in, err := o.Object.Open(&fs.SeekOption{Offset: underlyingOffset}) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Update the stream ++ decrypter.rc = in ++ ++ // Discard the bytes ++ _, err = io.CopyN(ioutil.Discard, decrypter, discard) ++ if err != nil { ++ return nil, err ++ } ++ } ++ ++ return rc, err + } + + // Update in to the object with the modTime given of the given size +@@ -343,6 +405,12 @@ func (o *ObjectInfo) Size() int64 { + return o.f.cipher.EncryptedSize(o.ObjectInfo.Size()) + } + ++// Hash returns the selected checksum of the file ++// If no checksum is available it returns "" ++func (o *ObjectInfo) Hash(hash fs.HashType) (string, error) { ++ return "", nil ++} ++ + // ListOpts wraps a listopts decrypting the directory listing and + // replacing the Objects + type ListOpts struct { +diff --git a/rclone-1.33/crypt/crypt2_test.go b/rclone/crypt/crypt2_test.go +index 9fd7db8..ffd359b 100644 +--- a/rclone-1.33/crypt/crypt2_test.go ++++ b/rclone/crypt/crypt2_test.go +@@ -47,9 +47,11 @@ func TestObjectFs2(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote2(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes2(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime2(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType2(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime2(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize2(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen2(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek2(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate2(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable2(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile2(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/crypt/crypt_test.go b/rclone/crypt/crypt_test.go +index f994fa9..d7481f4 100644 +--- a/rclone-1.33/crypt/crypt_test.go ++++ b/rclone/crypt/crypt_test.go +@@ -47,9 +47,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/docs/content/about.md b/rclone/docs/content/about.md +index e40038a..4177c2e 100644 +--- a/rclone-1.33/docs/content/about.md ++++ b/rclone/docs/content/about.md +@@ -30,10 +30,12 @@ Features + * 
MD5/SHA1 hashes checked at all times for file integrity + * Timestamps preserved on files + * Partial syncs supported on a whole file basis +- * Copy mode to just copy new/changed files +- * Sync (one way) mode to make a directory identical +- * Check mode to check for file hash equality ++ * [Copy](/commands/rclone_copy/) mode to just copy new/changed files ++ * [Sync](/commands/rclone_sync/) (one way) mode to make a directory identical ++ * [Check](/commands/rclone_check/) mode to check for file hash equality + * Can sync to and from network, eg two different cloud accounts ++ * Optional encryption ([Crypt](/crypt/)) ++ * Optional FUSE mount ([rclone mount](/commands/rclone_mount/)) + + Links + +diff --git a/rclone-1.33/docs/content/amazonclouddrive.md b/rclone/docs/content/amazonclouddrive.md +index 077637b..584e393 100644 +--- a/rclone-1.33/docs/content/amazonclouddrive.md ++++ b/rclone/docs/content/amazonclouddrive.md +@@ -154,5 +154,5 @@ This means that larger files are likely to fail. + + Unfortunatly there is no way for rclone to see that this failure is + because of file size, so it will retry the operation, as any other +-failure. To avoid this problem, use `--max-size=50GB` option to limit ++failure. To avoid this problem, use `--max-size 50G` option to limit + the maximum size of uploaded files. +diff --git a/rclone-1.33/docs/content/authors.md b/rclone/docs/content/authors.md +index bb05bcc..e03722c 100644 +--- a/rclone-1.33/docs/content/authors.md ++++ b/rclone/docs/content/authors.md +@@ -37,3 +37,7 @@ Contributors + * Stefan G. Weichinger + * Per Cederberg + * Radek Šenfeld ++ * Fredrik Fornwall ++ * Asko Tamm ++ * xor-zz ++ +diff --git a/rclone-1.33/docs/content/commands/rclone.md b/rclone/docs/content/commands/rclone.md +index f79d325..557240c 100644 +--- a/rclone-1.33/docs/content/commands/rclone.md ++++ b/rclone/docs/content/commands/rclone.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone" + slug: rclone + url: /commands/rclone/ +diff --git a/rclone-1.33/docs/content/commands/rclone_authorize.md b/rclone/docs/content/commands/rclone_authorize.md +index 5decf4a..2cbcc1c 100644 +--- a/rclone-1.33/docs/content/commands/rclone_authorize.md ++++ b/rclone/docs/content/commands/rclone_authorize.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone authorize" + slug: rclone_authorize + url: /commands/rclone_authorize/ +diff --git a/rclone/docs/content/commands/rclone_cat.md b/rclone/docs/content/commands/rclone_cat.md +new file mode 100644 +index 0000000..6724acc +--- /dev/null ++++ b/rclone/docs/content/commands/rclone_cat.md +@@ -0,0 +1,104 @@ ++--- ++date: 2016-08-24T23:47:55+01:00 ++title: "rclone cat" ++slug: rclone_cat ++url: /commands/rclone_cat/ ++--- ++## rclone cat ++ ++Concatenates any files and sends them to stdout. ++ ++### Synopsis ++ ++ ++ ++rclone cat sends any files to standard output. ++ ++You can use it like this to output a single file ++ ++ rclone cat remote:path/to/file ++ ++Or like this to output any file in dir or subdirectories. ++ ++ rclone cat remote:path/to/dir ++ ++Or like this to output any .txt files in dir or subdirectories. ++ ++ rclone --include "*.txt" cat remote:path/to/dir ++ ++ ++``` ++rclone cat remote:path ++``` ++ ++### Options inherited from parent commands ++ ++``` ++ --acd-templink-threshold int Files >= this size will be downloaded via their tempLink. 
++ --acd-upload-wait-time duration Time to wait after a failed complete upload to see if it appears. (default 2m0s) ++ --ask-password Allow prompt for password for encrypted configuration. (default true) ++ --b2-chunk-size int Upload chunk size. Must fit in memory. ++ --b2-test-mode string A flag string for X-Bz-Test-Mode header. ++ --b2-upload-cutoff int Cutoff for switching to chunked upload ++ --b2-versions Include old versions in directory listings. ++ --bwlimit int Bandwidth limit in kBytes/s, or use suffix b|k|M|G ++ --checkers int Number of checkers to run in parallel. (default 8) ++ -c, --checksum Skip based on checksum & size, not mod-time & size ++ --config string Config file. (default "/home/ncw/.rclone.conf") ++ --contimeout duration Connect timeout (default 1m0s) ++ --cpuprofile string Write cpu profile to file ++ --delete-after When synchronizing, delete files on destination after transfering ++ --delete-before When synchronizing, delete files on destination before transfering ++ --delete-during When synchronizing, delete files during transfer (default) ++ --delete-excluded Delete files on dest excluded from sync ++ --drive-auth-owner-only Only consider files owned by the authenticated user. Requires drive-full-list. ++ --drive-chunk-size int Upload chunk size. Must a power of 2 >= 256k. ++ --drive-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg") ++ --drive-full-list Use a full listing for directory list. More data but usually quicker. (obsolete) ++ --drive-upload-cutoff int Cutoff for switching to chunked upload ++ --drive-use-trash Send files to the trash instead of deleting permanently. ++ --dropbox-chunk-size int Upload chunk size. Max 150M. ++ -n, --dry-run Do a trial run with no permanent changes ++ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info ++ --dump-filters Dump the filters to the output ++ --dump-headers Dump HTTP headers - may contain sensitive info ++ --exclude string Exclude files matching pattern ++ --exclude-from string Read exclude patterns from file ++ --files-from string Read list of source-file names from file ++ -f, --filter string Add a file-filtering rule ++ --filter-from string Read filtering patterns from a file ++ --ignore-existing Skip all files that exist on destination ++ --ignore-size Ignore size when skipping use mod-time or checksum. ++ -I, --ignore-times Don't skip files that match size and time - transfer all files ++ --include string Include files matching pattern ++ --include-from string Read include patterns from file ++ --log-file string Log everything to this file ++ --low-level-retries int Number of low level retries to do. (default 10) ++ --max-age string Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y ++ --max-depth int If set limits the recursion depth to this. (default -1) ++ --max-size int Don't transfer any file larger than this in k or suffix b|k|M|G ++ --memprofile string Write memory profile to file ++ --min-age string Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y ++ --min-size int Don't transfer any file smaller than this in k or suffix b|k|M|G ++ --modify-window duration Max time diff to be considered the same (default 1ns) ++ --no-check-certificate Do not verify the server SSL certificate. Insecure. ++ --no-gzip-encoding Don't set Accept-Encoding: gzip. ++ --no-traverse Don't traverse destination file system on copy. 
++ --no-update-modtime Don't update destination mod-time if files identical. ++ --onedrive-chunk-size int Above this size files will be chunked - must be multiple of 320k. ++ --onedrive-upload-cutoff int Cutoff for switching to chunked upload - must be <= 100MB ++ -q, --quiet Print as little stuff as possible ++ --retries int Retry operations this many times if they fail (default 3) ++ --size-only Skip based on size only, not mod-time or checksum ++ --stats duration Interval to print stats (0 to disable) (default 1m0s) ++ --swift-chunk-size int Above this size files will be chunked into a _segments container. ++ --timeout duration IO idle timeout (default 5m0s) ++ --transfers int Number of file transfers to run in parallel. (default 4) ++ -u, --update Skip files that are newer on the destination. ++ -v, --verbose Print lots more stuff ++``` ++ ++### SEE ALSO ++* [rclone](/commands/rclone/) - Sync files and directories to and from local and remote object stores - v1.33-DEV ++ ++###### Auto generated by spf13/cobra on 24-Aug-2016 +diff --git a/rclone-1.33/docs/content/commands/rclone_check.md b/rclone/docs/content/commands/rclone_check.md +index e3c9363..f7ef4ec 100644 +--- a/rclone-1.33/docs/content/commands/rclone_check.md ++++ b/rclone/docs/content/commands/rclone_check.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone check" + slug: rclone_check + url: /commands/rclone_check/ +diff --git a/rclone-1.33/docs/content/commands/rclone_cleanup.md b/rclone/docs/content/commands/rclone_cleanup.md +index 5653319..9cd4a4e 100644 +--- a/rclone-1.33/docs/content/commands/rclone_cleanup.md ++++ b/rclone/docs/content/commands/rclone_cleanup.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone cleanup" + slug: rclone_cleanup + url: /commands/rclone_cleanup/ +diff --git a/rclone-1.33/docs/content/commands/rclone_config.md b/rclone/docs/content/commands/rclone_config.md +index fd99455..15674f2 100644 +--- a/rclone-1.33/docs/content/commands/rclone_config.md ++++ b/rclone/docs/content/commands/rclone_config.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone config" + slug: rclone_config + url: /commands/rclone_config/ +diff --git a/rclone-1.33/docs/content/commands/rclone_copy.md b/rclone/docs/content/commands/rclone_copy.md +index 109add0..7d973ab 100644 +--- a/rclone-1.33/docs/content/commands/rclone_copy.md ++++ b/rclone/docs/content/commands/rclone_copy.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone copy" + slug: rclone_copy + url: /commands/rclone_copy/ +diff --git a/rclone-1.33/docs/content/commands/rclone_dedupe.md b/rclone/docs/content/commands/rclone_dedupe.md +index 077035e..2bb8e3b 100644 +--- a/rclone-1.33/docs/content/commands/rclone_dedupe.md ++++ b/rclone/docs/content/commands/rclone_dedupe.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone dedupe" + slug: rclone_dedupe + url: /commands/rclone_dedupe/ +diff --git a/rclone-1.33/docs/content/commands/rclone_delete.md b/rclone/docs/content/commands/rclone_delete.md +index 19acaea..142589b 100644 +--- a/rclone-1.33/docs/content/commands/rclone_delete.md ++++ b/rclone/docs/content/commands/rclone_delete.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone delete" + slug: rclone_delete + url: 
/commands/rclone_delete/ +diff --git a/rclone-1.33/docs/content/commands/rclone_genautocomplete.md b/rclone/docs/content/commands/rclone_genautocomplete.md +index 592edc0..c35915a 100644 +--- a/rclone-1.33/docs/content/commands/rclone_genautocomplete.md ++++ b/rclone/docs/content/commands/rclone_genautocomplete.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone genautocomplete" + slug: rclone_genautocomplete + url: /commands/rclone_genautocomplete/ +diff --git a/rclone-1.33/docs/content/commands/rclone_gendocs.md b/rclone/docs/content/commands/rclone_gendocs.md +index ba1084c..0f5f4c9 100644 +--- a/rclone-1.33/docs/content/commands/rclone_gendocs.md ++++ b/rclone/docs/content/commands/rclone_gendocs.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone gendocs" + slug: rclone_gendocs + url: /commands/rclone_gendocs/ +diff --git a/rclone-1.33/docs/content/commands/rclone_ls.md b/rclone/docs/content/commands/rclone_ls.md +index e9126f9..be8ade2 100644 +--- a/rclone-1.33/docs/content/commands/rclone_ls.md ++++ b/rclone/docs/content/commands/rclone_ls.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone ls" + slug: rclone_ls + url: /commands/rclone_ls/ +diff --git a/rclone-1.33/docs/content/commands/rclone_lsd.md b/rclone/docs/content/commands/rclone_lsd.md +index ccdfdc6..eb9ad08 100644 +--- a/rclone-1.33/docs/content/commands/rclone_lsd.md ++++ b/rclone/docs/content/commands/rclone_lsd.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone lsd" + slug: rclone_lsd + url: /commands/rclone_lsd/ +diff --git a/rclone-1.33/docs/content/commands/rclone_lsl.md b/rclone/docs/content/commands/rclone_lsl.md +index 68082cd..1266b2c 100644 +--- a/rclone-1.33/docs/content/commands/rclone_lsl.md ++++ b/rclone/docs/content/commands/rclone_lsl.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone lsl" + slug: rclone_lsl + url: /commands/rclone_lsl/ +diff --git a/rclone-1.33/docs/content/commands/rclone_md5sum.md b/rclone/docs/content/commands/rclone_md5sum.md +index 26201c6..706c988 100644 +--- a/rclone-1.33/docs/content/commands/rclone_md5sum.md ++++ b/rclone/docs/content/commands/rclone_md5sum.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone md5sum" + slug: rclone_md5sum + url: /commands/rclone_md5sum/ +diff --git a/rclone-1.33/docs/content/commands/rclone_mkdir.md b/rclone/docs/content/commands/rclone_mkdir.md +index 032c4ec..0bec433 100644 +--- a/rclone-1.33/docs/content/commands/rclone_mkdir.md ++++ b/rclone/docs/content/commands/rclone_mkdir.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone mkdir" + slug: rclone_mkdir + url: /commands/rclone_mkdir/ +diff --git a/rclone/docs/content/commands/rclone_mount.md b/rclone/docs/content/commands/rclone_mount.md +new file mode 100644 +index 0000000..37976c2 +--- /dev/null ++++ b/rclone/docs/content/commands/rclone_mount.md +@@ -0,0 +1,154 @@ ++--- ++date: 2016-08-24T23:47:55+01:00 ++title: "rclone mount" ++slug: rclone_mount ++url: /commands/rclone_mount/ ++--- ++## rclone mount ++ ++Mount the remote as a mountpoint. 
**EXPERIMENTAL**
++
++### Synopsis
++
++
++
++rclone mount allows Linux, FreeBSD and macOS to mount any of Rclone's
++cloud storage systems as a file system with FUSE.
++
++This is **EXPERIMENTAL** - use with care.
++
++First set up your remote using `rclone config`.  Check it works with `rclone ls` etc.
++
++Start the mount like this
++
++    rclone mount remote:path/to/files /path/to/local/mount &
++
++Stop the mount with
++
++    fusermount -u /path/to/local/mount
++
++Or with OS X
++
++    umount /path/to/local/mount
++
++### Limitations ###
++
++This can only read files sequentially, or write files sequentially.  It
++can't read and write or seek in files.
++
++rclonefs inherits rclone's directory handling.  In rclone's world
++directories don't really exist.  This means that empty directories
++will have a tendency to disappear once they fall out of the directory
++cache.
++
++The bucket based FSes (eg swift, s3, google cloud storage, b2) won't
++work from the root - you will need to specify a bucket, or a path
++within the bucket.  So `swift:` won't work whereas `swift:bucket` will,
++as will `swift:bucket/path`.
++
++Only supported on Linux, FreeBSD and OS X at the moment.
++
++### rclone mount vs rclone sync/copy ###
++
++File systems expect things to be 100% reliable, whereas cloud storage
++systems are a long way from 100% reliable. The rclone sync/copy
++commands cope with this with lots of retries.  However rclone mount
++can't use retries in the same way without making local copies of the
++uploads.  This might happen in the future, but for the moment rclone
++mount won't do that, so it will be less reliable than the rclone command.
++
++### Bugs ###
++
++  * All the remotes should work for read, but some may not for write
++    * those which need to know the size in advance won't - eg B2
++    * maybe should pass in size as -1 to mean work it out
++
++### TODO ###
++
++  * Check hashes on upload/download
++  * Preserve timestamps
++  * Move directories
++
++
++```
++rclone mount remote:path /path/to/mountpoint
++```
++
++### Options
++
++```
++      --debug-fuse    Debug the FUSE internals - needs -v.
++      --no-modtime    Don't read the modification time (can speed things up).
++```
++
++### Options inherited from parent commands
++
++```
++      --acd-templink-threshold int      Files >= this size will be downloaded via their tempLink.
++      --acd-upload-wait-time duration   Time to wait after a failed complete upload to see if it appears. (default 2m0s)
++      --ask-password                    Allow prompt for password for encrypted configuration. (default true)
++      --b2-chunk-size int               Upload chunk size. Must fit in memory.
++      --b2-test-mode string             A flag string for X-Bz-Test-Mode header.
++      --b2-upload-cutoff int            Cutoff for switching to chunked upload
++      --b2-versions                     Include old versions in directory listings.
++      --bwlimit int                     Bandwidth limit in kBytes/s, or use suffix b|k|M|G
++      --checkers int                    Number of checkers to run in parallel. (default 8)
++  -c, --checksum                        Skip based on checksum & size, not mod-time & size
++      --config string                   Config file.
(default "/home/ncw/.rclone.conf") ++ --contimeout duration Connect timeout (default 1m0s) ++ --cpuprofile string Write cpu profile to file ++ --delete-after When synchronizing, delete files on destination after transfering ++ --delete-before When synchronizing, delete files on destination before transfering ++ --delete-during When synchronizing, delete files during transfer (default) ++ --delete-excluded Delete files on dest excluded from sync ++ --drive-auth-owner-only Only consider files owned by the authenticated user. Requires drive-full-list. ++ --drive-chunk-size int Upload chunk size. Must a power of 2 >= 256k. ++ --drive-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg") ++ --drive-full-list Use a full listing for directory list. More data but usually quicker. (obsolete) ++ --drive-upload-cutoff int Cutoff for switching to chunked upload ++ --drive-use-trash Send files to the trash instead of deleting permanently. ++ --dropbox-chunk-size int Upload chunk size. Max 150M. ++ -n, --dry-run Do a trial run with no permanent changes ++ --dump-bodies Dump HTTP headers and bodies - may contain sensitive info ++ --dump-filters Dump the filters to the output ++ --dump-headers Dump HTTP headers - may contain sensitive info ++ --exclude string Exclude files matching pattern ++ --exclude-from string Read exclude patterns from file ++ --files-from string Read list of source-file names from file ++ -f, --filter string Add a file-filtering rule ++ --filter-from string Read filtering patterns from a file ++ --ignore-existing Skip all files that exist on destination ++ --ignore-size Ignore size when skipping use mod-time or checksum. ++ -I, --ignore-times Don't skip files that match size and time - transfer all files ++ --include string Include files matching pattern ++ --include-from string Read include patterns from file ++ --log-file string Log everything to this file ++ --low-level-retries int Number of low level retries to do. (default 10) ++ --max-age string Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y ++ --max-depth int If set limits the recursion depth to this. (default -1) ++ --max-size int Don't transfer any file larger than this in k or suffix b|k|M|G ++ --memprofile string Write memory profile to file ++ --min-age string Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y ++ --min-size int Don't transfer any file smaller than this in k or suffix b|k|M|G ++ --modify-window duration Max time diff to be considered the same (default 1ns) ++ --no-check-certificate Do not verify the server SSL certificate. Insecure. ++ --no-gzip-encoding Don't set Accept-Encoding: gzip. ++ --no-traverse Don't traverse destination file system on copy. ++ --no-update-modtime Don't update destination mod-time if files identical. ++ --onedrive-chunk-size int Above this size files will be chunked - must be multiple of 320k. ++ --onedrive-upload-cutoff int Cutoff for switching to chunked upload - must be <= 100MB ++ -q, --quiet Print as little stuff as possible ++ --retries int Retry operations this many times if they fail (default 3) ++ --size-only Skip based on size only, not mod-time or checksum ++ --stats duration Interval to print stats (0 to disable) (default 1m0s) ++ --swift-chunk-size int Above this size files will be chunked into a _segments container. ++ --timeout duration IO idle timeout (default 5m0s) ++ --transfers int Number of file transfers to run in parallel. 
(default 4) ++ -u, --update Skip files that are newer on the destination. ++ -v, --verbose Print lots more stuff ++``` ++ ++### SEE ALSO ++* [rclone](/commands/rclone/) - Sync files and directories to and from local and remote object stores - v1.33-DEV ++ ++###### Auto generated by spf13/cobra on 24-Aug-2016 +diff --git a/rclone-1.33/docs/content/commands/rclone_move.md b/rclone/docs/content/commands/rclone_move.md +index 2d102d1..e5452e4 100644 +--- a/rclone-1.33/docs/content/commands/rclone_move.md ++++ b/rclone/docs/content/commands/rclone_move.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone move" + slug: rclone_move + url: /commands/rclone_move/ +diff --git a/rclone-1.33/docs/content/commands/rclone_purge.md b/rclone/docs/content/commands/rclone_purge.md +index b565e26..50ba7f1 100644 +--- a/rclone-1.33/docs/content/commands/rclone_purge.md ++++ b/rclone/docs/content/commands/rclone_purge.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone purge" + slug: rclone_purge + url: /commands/rclone_purge/ +diff --git a/rclone-1.33/docs/content/commands/rclone_rmdir.md b/rclone/docs/content/commands/rclone_rmdir.md +index 1a99518..a6b22af 100644 +--- a/rclone-1.33/docs/content/commands/rclone_rmdir.md ++++ b/rclone/docs/content/commands/rclone_rmdir.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone rmdir" + slug: rclone_rmdir + url: /commands/rclone_rmdir/ +diff --git a/rclone-1.33/docs/content/commands/rclone_sha1sum.md b/rclone/docs/content/commands/rclone_sha1sum.md +index 040a1ea..b290718 100644 +--- a/rclone-1.33/docs/content/commands/rclone_sha1sum.md ++++ b/rclone/docs/content/commands/rclone_sha1sum.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone sha1sum" + slug: rclone_sha1sum + url: /commands/rclone_sha1sum/ +diff --git a/rclone-1.33/docs/content/commands/rclone_size.md b/rclone/docs/content/commands/rclone_size.md +index 37c530a..666c42f 100644 +--- a/rclone-1.33/docs/content/commands/rclone_size.md ++++ b/rclone/docs/content/commands/rclone_size.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone size" + slug: rclone_size + url: /commands/rclone_size/ +diff --git a/rclone-1.33/docs/content/commands/rclone_sync.md b/rclone/docs/content/commands/rclone_sync.md +index 2d2f7f8..f8ed4ba 100644 +--- a/rclone-1.33/docs/content/commands/rclone_sync.md ++++ b/rclone/docs/content/commands/rclone_sync.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone sync" + slug: rclone_sync + url: /commands/rclone_sync/ +diff --git a/rclone-1.33/docs/content/commands/rclone_version.md b/rclone/docs/content/commands/rclone_version.md +index 39cbe30..294ee9c 100644 +--- a/rclone-1.33/docs/content/commands/rclone_version.md ++++ b/rclone/docs/content/commands/rclone_version.md +@@ -1,5 +1,5 @@ + --- +-date: 2016-08-24T23:01:36+01:00 ++date: 2016-08-24T23:47:55+01:00 + title: "rclone version" + slug: rclone_version + url: /commands/rclone_version/ +diff --git a/rclone-1.33/docs/content/contact.md b/rclone/docs/content/contact.md +index 470c93c..c366664 100644 +--- a/rclone-1.33/docs/content/contact.md ++++ b/rclone/docs/content/contact.md +@@ -4,8 +4,15 @@ description: "Contact the rclone project" + date: "2014-04-26" + --- + +-Contact the rclone 
project
+---------------------------
++# Contact the rclone project #
++
++## Forum ##
++
++Forum for general discussions and questions:
++
++  * https://forum.rclone.org
++
++## GitHub project ##
+ 
+ The project website is at:
+ 
+@@ -14,8 +21,19 @@ The project website is at:
+ There you can file bug reports, ask for help or contribute pull
+ requests.
+ 
+-See also
++## Google+ ##
++
++Rclone has a Google+ page to which announcements are posted
++
++  * Google+ page for general comments
++
++## Twitter ##
++
++You can also follow me on twitter for rclone announcements
++
++  * [@njcw](https://twitter.com/njcw)
+ 
+- * Google+ page for general comments
++## Email ##
+ 
+-Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)
++Or if all else fails, or you want to ask something private or
++confidential, email [Nick Craig-Wood](mailto:nick@craig-wood.com)
+diff --git a/rclone-1.33/docs/content/crypt.md b/rclone/docs/content/crypt.md
+index ba69590..1901d3b 100644
+--- a/rclone-1.33/docs/content/crypt.md
++++ b/rclone/docs/content/crypt.md
+@@ -60,6 +60,8 @@ Choose a number from below, or type in your own value
+    \ "yandex"
+ Storage> 5
+ Remote to encrypt/decrypt.
++Normally should contain a ':' and a path, eg "myremote:path/to/dir",
++"myremote:bucket" or "myremote:"
+ remote> remote:path
+ How to encrypt the filenames.
+ Choose a number from below, or type in your own value
+@@ -97,8 +99,8 @@ Remote config
+ --------------------
+ [secret]
+ remote = remote:path
+ filename_encryption = standard
+-password = CfDxopZIXFG0Oo-ac7dPLWWOHkNJbw
+-password2 = HYUpfuzHJL8qnX9fOaIYijq0xnVLwyVzp3y4SF3TwYqAU6HLysk
++password = *** ENCRYPTED ***
++password2 = *** ENCRYPTED ***
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+@@ -119,6 +121,27 @@
+ Note that rclone does not encrypt
+ 
+   * file length - this can be calcuated within 16 bytes
+   * modification time - used for syncing
+ 
++## Specifying the remote ##
++
++In normal use, make sure the remote has a `:` in. If you specify the
++remote without a `:` then rclone will use a local directory of that
++name.  So if you use a remote of `/path/to/secret/files` then rclone
++will encrypt stuff to that directory.  If you use a remote of `name`
++then rclone will put files in a directory called `name` in the current
++directory.
++
++If you specify the remote as `remote:path/to/dir` then rclone will
++store encrypted files in `path/to/dir` on the remote. If you are using
++file name encryption, then when you save files to
++`secret:subdir/subfile` this will store them in the unencrypted path
++`path/to/dir` but the `subdir/subfile` bit will be encrypted.
++
++Note that unless you want encrypted bucket names (which are difficult
++to manage because you won't know what directory they represent in web
++interfaces etc), you should probably specify a bucket, eg
++`remote:secretbucket` when using bucket based remotes such as S3,
++Swift, Hubic, B2, GCS.
++
+ ## Example ##
+ 
+ To test I made a little directory of files using "standard" file name
+@@ -205,6 +228,14 @@ characters in length then you should be OK on all providers.
+ There may be an even more secure file name encryption mode in the
+ future which will address the long file name problem.
+ 
++### Modified time and hashes ###
++
++Crypt stores modification times using the underlying remote so support
++depends on that.
++
++Hashes are not stored for crypt.  However the data integrity is
++protected by an extremely strong crypto authenticator.
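The "extremely strong crypto authenticator" above is worth a concrete illustration. rclone's crypt format is built on NaCl secretbox (the same `golang.org/x/crypto/nacl/secretbox` package this patch leaves imported in `fs/config.go` for config encryption): every sealed block carries a Poly1305 authenticator, so corruption is caught at decryption time rather than by a separately stored hash. A minimal sketch of that property - the key and nonce handling here is illustrative only, not crypt's on-disk format:

```go
// Sketch of authenticated encryption with NaCl secretbox: Seal appends a
// Poly1305 authenticator to the XSalsa20 ciphertext, so Open detects any
// tampering or corruption without a separately stored hash.
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	box := secretbox.Seal(nil, []byte("hello rclone"), &nonce, &key)

	// Flip a single bit: Open now reports failure, which is why crypt
	// can guarantee integrity without storing file hashes.
	box[0] ^= 0x01
	if _, ok := secretbox.Open(nil, box, &nonce, &key); !ok {
		fmt.Println("tampering detected")
	}
}
```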
++
+ ## File formats ##
+ 
+ ### File encryption ###
+diff --git a/rclone-1.33/docs/content/docs.md b/rclone/docs/content/docs.md
+index 82880d2..3f5392f 100644
+--- a/rclone-1.33/docs/content/docs.md
++++ b/rclone/docs/content/docs.md
+@@ -30,6 +30,7 @@ See the following for detailed instructions for
+ * [Hubic](/hubic/)
+ * [Microsoft One Drive](/onedrive/)
+ * [Yandex Disk](/yandex/)
++* [Crypt](/crypt/) - to encrypt other remotes
+ 
+ Usage
+ -----
+@@ -628,7 +629,18 @@ If you use the `--log-file=FILE` option, rclone will redirect `Error`,
+ Exit Code
+ ---------
+ 
+-If any errors occurred during the command, rclone will set a non zero
+-exit code.  This allows scripts to detect when rclone operations have
+-failed.
+-
++If any errors occurred during the command, rclone will exit with an
++exit code of `1`.  This allows scripts to detect when rclone operations
++have failed.
++
++During the startup phase rclone will exit immediately if an error is
++detected in the configuration.  There will always be a log message
++immediately before exiting.
++
++When rclone is running it will accumulate errors as it goes along, and
++only exit with a non-zero exit code if (after retries) there are still
++transfers with errors remaining.  For every error counted there will
++be a high priority log message (visible with `-q`) showing the
++message and which file caused the problem.  A high priority message is
++also shown when starting a retry so the user can see that any previous
++error messages may not be valid after the retry.  If rclone has done a
++retry it will log a high priority message if the retry was successful.
+diff --git a/rclone-1.33/docs/content/downloads.md b/rclone/docs/content/downloads.md
+index c94af82..0164a5c 100644
+--- a/rclone-1.33/docs/content/downloads.md
++++ b/rclone/docs/content/downloads.md
+@@ -37,6 +37,23 @@ Rclone Download v1.33
+ 
+ You can also find a [mirror of the downloads on github](https://github.com/ncw/rclone/releases/tag/v1.33).
+ 
++Beta releases
++=============
++
++[Beta releases](http://beta.rclone.org) are generated from each commit
++to master.  Note these are named like
++
++    {Version Tag}-{Commit Number}-g{Git Commit Hash}
++
++You can match the `Git Commit Hash` up with the [git
++log](https://github.com/ncw/rclone/commits/master).  The most recent
++release will have the largest `Version Tag` and `Commit Number` and
++will normally be at the end of the list.
++
++The beta releases haven't been through the full integration test suite
++like the releases.  However it is useful to try the latest beta before
++reporting an issue.
++
+ Downloads for scripting
+ =======================
+ 
+diff --git a/rclone-1.33/docs/content/downloads.md.in b/rclone/docs/content/downloads.md.in
+index 9cf816c..fbcd7a4 100644
+--- a/rclone-1.33/docs/content/downloads.md.in
++++ b/rclone/docs/content/downloads.md.in
+@@ -18,6 +18,7 @@ Rclone Download VERSION
+     * [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-linux-386.zip)
+     * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-linux-amd64.zip)
+     * [ARM - 32 Bit](http://downloads.rclone.org/rclone-VERSION-linux-arm.zip)
++    * [ARM - 64 Bit](http://downloads.rclone.org/rclone-VERSION-linux-arm64.zip)
+   * FreeBSD
+     * [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-freebsd-386.zip)
+     * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-freebsd-amd64.zip)
+@@ -37,6 +38,25 @@ Rclone Download VERSION
+ 
+ You can also find a [mirror of the downloads on github](https://github.com/ncw/rclone/releases/tag/VERSION).
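The exit code contract in the docs.md hunk above is easy to script against. A minimal sketch of a caller that distinguishes "errors remained after retries" from "rclone never started", assuming an `rclone` binary on `PATH`; the subcommand, paths and remote name are placeholders:

```go
// Sketch of scripting against rclone's documented exit codes:
// 0 on success, 1 if any errors remained after retries.
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("rclone", "-q", "sync", "/home/user/files", "remote:backup")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr // high priority error messages still show with -q

	err := cmd.Run()
	if err == nil {
		log.Println("sync finished with no errors remaining")
		return
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		// Non-zero exit code: some transfers still had errors after retries.
		log.Fatalf("rclone failed with exit code %d", exitErr.ExitCode())
	}
	log.Fatalf("could not run rclone: %v", err)
}
```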
+ ++You can also download [the releases using SSL](https://downloads-rclone-org-7d7d567e.cdn.memsites.com/). ++ ++Beta releases ++============= ++ ++[Beta releases](http://beta.rclone.org) are generated from each commit ++to master. Note these are named like ++ ++ {Version Tag}-{Commit Number}-g{Git Commit Hash} ++ ++You can match the `Git Commit Hash` up with the [git ++log](https://github.com/ncw/rclone/commits/master). The most recent ++release will have the largest `Version Tag` and `Commit Number` and ++will normally be at the end of the list. ++ ++The beta releases haven't been through the full integration test suite ++like the releases. However it is useful to try the latest beta before ++reporting an issue. ++ + Downloads for scripting + ======================= + +diff --git a/rclone-1.33/docs/content/drive.md b/rclone/docs/content/drive.md +index 03a8fd1..2f038ba 100644 +--- a/rclone-1.33/docs/content/drive.md ++++ b/rclone/docs/content/drive.md +@@ -183,8 +183,10 @@ Here are the possible extensions with their corresponding mime types. + | csv | text/csv | Standard CSV format for Spreadsheets | + | doc | application/msword | Micosoft Office Document | + | docx | application/vnd.openxmlformats-officedocument.wordprocessingml.document | Microsoft Office Document | ++| epub | application/epub+zip | E-book format | + | html | text/html | An HTML Document | + | jpg | image/jpeg | A JPEG Image File | ++| odp | application/vnd.oasis.opendocument.presentation | Openoffice Presentation | + | ods | application/vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet | + | ods | application/x-vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet | + | odt | application/vnd.oasis.opendocument.text | Openoffice Document | +@@ -193,6 +195,7 @@ Here are the possible extensions with their corresponding mime types. + | pptx | application/vnd.openxmlformats-officedocument.presentationml.presentation | Microsoft Office Powerpoint | + | rtf | application/rtf | Rich Text Format | + | svg | image/svg+xml | Scalable Vector Graphics Format | ++| tsv | text/tab-separated-values | Standard TSV format for spreadsheets | + | txt | text/plain | Plain Text | + | xls | application/vnd.ms-excel | Microsoft Office Spreadsheet | + | xlsx | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet | Microsoft Office Spreadsheet | +diff --git a/rclone-1.33/docs/content/filtering.md b/rclone/docs/content/filtering.md +index ba63c97..d1d287c 100644 +--- a/rclone-1.33/docs/content/filtering.md ++++ b/rclone/docs/content/filtering.md +@@ -97,13 +97,13 @@ Rclone keeps track of directories that could match any file patterns. + + Eg if you add the include rule + +- \a\*.jpg ++ /a/*.jpg + + Rclone will synthesize the directory include rule + +- \a\ ++ /a/ + +-If you put any rules which end in `\` then it will only match ++If you put any rules which end in `/` then it will only match + directories. + + Directory matches are **only** used to optimise directory access +diff --git a/rclone-1.33/docs/content/install.md b/rclone/docs/content/install.md +index 7ee3387..6b04ba8 100644 +--- a/rclone-1.33/docs/content/install.md ++++ b/rclone/docs/content/install.md +@@ -4,42 +4,81 @@ description: "Rclone Installation" + date: "2016-03-28" + --- + +-Install +-------- ++# Install # + + Rclone is a Go program and comes as a single binary file. + +-[Download](/downloads/) the relevant binary. ++## Quickstart ## + +-Or alternatively if you have Go 1.5+ installed use ++ * [Download](/downloads/) the relevant binary. 
++ * Unpack the `rclone` binary.
++ * Run `rclone config` to setup. See [rclone config docs](http://rclone.org/docs/) for more details.
+ 
+-    go get github.com/ncw/rclone
+-
+-and this will build the binary in `$GOPATH/bin`. If you have built
+-rclone before then you will want to update its dependencies first with
+-this
+-
+-    go get -u -v github.com/ncw/rclone/...
++See below for some expanded Linux / macOS instructions.
+ 
+ See the [Usage section](/docs/) of the docs for how to use rclone, or
+ run `rclone -h`.
+ 
+-linux binary downloaded files install example
+--------
++## Linux installation from precompiled binary ##
++
++Fetch and unpack
++
++    curl -O http://downloads.rclone.org/rclone-current-linux-amd64.zip
++    unzip rclone-current-linux-amd64.zip
++    cd rclone-*-linux-amd64
++
++Copy binary file
+ 
+-    unzip rclone-v1.17-linux-amd64.zip
+-    cd rclone-v1.17-linux-amd64
+-    #copy binary file
+     sudo cp rclone /usr/sbin/
+     sudo chown root:root /usr/sbin/rclone
+     sudo chmod 755 /usr/sbin/rclone
+-    #install manpage
++
++Install manpage
++
+     sudo mkdir -p /usr/local/share/man/man1
+     sudo cp rclone.1 /usr/local/share/man/man1/
+     sudo mandb
+ 
+-Installation with Ansible
+--------
++Run `rclone config` to setup. See [rclone config docs](http://rclone.org/docs/) for more details.
++
++    rclone config
++
++## macOS installation from precompiled binary ##
++
++Download the latest version of rclone.
++
++    cd && curl -O http://downloads.rclone.org/rclone-current-osx-amd64.zip
++
++Unzip the download and cd to the extracted folder.
++
++    unzip -a rclone-current-osx-amd64.zip && cd rclone-*-osx-amd64
++
++Move rclone to your $PATH. You will be prompted for your password.
++
++    sudo mv rclone /usr/local/bin/
++
++Remove the leftover files.
++
++    cd .. && rm -rf rclone-*-osx-amd64 rclone-current-osx-amd64.zip
++
++Run `rclone config` to setup. See [rclone config docs](http://rclone.org/docs/) for more details.
++
++    rclone config
++
++## Install from source ##
++
++Make sure you have at least [Go](https://golang.org/) 1.5 installed.
++Make sure your `GOPATH` is set, then:
++
++    go get -u -v github.com/ncw/rclone
++
++and this will build the binary in `$GOPATH/bin`. If you have built
++rclone before then you will want to update its dependencies first with
++this
++
++    go get -u -v github.com/ncw/rclone/...
++
++## Installation with Ansible ##
+ 
+ This can be done with [Stefan Weichinger's ansible
+ role](https://github.com/stefangweichinger/ansible-rclone).
+diff --git a/rclone-1.33/docs/content/overview.md b/rclone/docs/content/overview.md
+index eba8898..5fbbee9 100644
+--- a/rclone-1.33/docs/content/overview.md
++++ b/rclone/docs/content/overview.md
+@@ -15,19 +15,19 @@ show through.
+ 
+ Here is an overview of the major features of each cloud storage system.
+ +-| Name | Hash | ModTime | Case Insensitive | Duplicate Files | +-| ---------------------- |:-------:|:-------:|:----------------:|:---------------:| +-| Google Drive | MD5 | Yes | No | Yes | +-| Amazon S3 | MD5 | Yes | No | No | +-| Openstack Swift | MD5 | Yes | No | No | +-| Dropbox | - | No | Yes | No | +-| Google Cloud Storage | MD5 | Yes | No | No | +-| Amazon Drive | MD5 | No | Yes | No | +-| Microsoft One Drive | SHA1 | Yes | Yes | No | +-| Hubic | MD5 | Yes | No | No | +-| Backblaze B2 | SHA1 | Yes | No | No | +-| Yandex Disk | MD5 | Yes | No | No | +-| The local filesystem | All | Yes | Depends | No | ++| Name | Hash | ModTime | Case Insensitive | Duplicate Files | MIME Type | ++| ---------------------- |:-------:|:-------:|:----------------:|:---------------:|:---------:| ++| Google Drive | MD5 | Yes | No | Yes | R/W | ++| Amazon S3 | MD5 | Yes | No | No | R/W | ++| Openstack Swift | MD5 | Yes | No | No | R/W | ++| Dropbox | - | No | Yes | No | R | ++| Google Cloud Storage | MD5 | Yes | No | No | R/W | ++| Amazon Drive | MD5 | No | Yes | No | R | ++| Microsoft One Drive | SHA1 | Yes | Yes | No | R | ++| Hubic | MD5 | Yes | No | No | R/W | ++| Backblaze B2 | SHA1 | Yes | No | No | R/W | ++| Yandex Disk | MD5 | Yes | No | No | R/W | ++| The local filesystem | All | Yes | Depends | No | - | + + ### Hash ### + +@@ -77,3 +77,83 @@ objects with the same name. + + This confuses rclone greatly when syncing - use the `rclone dedupe` + command to rename or remove duplicates. ++ ++### MIME Type ### ++ ++MIME types (also known as media types) classify types of documents ++using a simple text classification, eg `text/html` or ++`application/pdf`. ++ ++Some cloud storage systems support reading (`R`) the MIME type of ++objects and some support writing (`W`) the MIME type of objects. ++ ++The MIME type can be important if you are serving files directly to ++HTTP from the storage system. ++ ++If you are copying from a remote which supports reading (`R`) to a ++remote which supports writing (`W`) then rclone will preserve the MIME ++types. Otherwise they will be guessed from the extension, or the ++remote itself may assign the MIME type. ++ ++## Optional Features ## ++ ++All the remotes support a basic set of features, but there are some ++optional features supported by some remotes used to make some ++operations more efficient. 
++
++| Name                   | Purge | Copy | Move | DirMove | CleanUp |
++| ---------------------- |:-----:|:----:|:----:|:-------:|:-------:|
++| Google Drive           | Yes   | Yes  | Yes  | Yes     | No [#575](https://github.com/ncw/rclone/issues/575) |
++| Amazon S3              | No    | Yes  | No   | No      | No |
++| Openstack Swift        | Yes † | Yes  | No   | No      | No |
++| Dropbox                | Yes   | Yes  | Yes  | Yes     | No [#575](https://github.com/ncw/rclone/issues/575) |
++| Google Cloud Storage   | Yes   | Yes  | No   | No      | No |
++| Amazon Drive           | Yes   | No   | No [#721](https://github.com/ncw/rclone/issues/721) | No [#721](https://github.com/ncw/rclone/issues/721) | No [#575](https://github.com/ncw/rclone/issues/575) |
++| Microsoft One Drive    | Yes   | Yes  | No [#197](https://github.com/ncw/rclone/issues/197) | No [#197](https://github.com/ncw/rclone/issues/197) | No [#575](https://github.com/ncw/rclone/issues/575) |
++| Hubic                  | Yes † | Yes  | No   | No      | No |
++| Backblaze B2           | No    | No   | No   | No      | Yes |
++| Yandex Disk            | Yes   | No   | No   | No      | No [#575](https://github.com/ncw/rclone/issues/575) |
++| The local filesystem   | Yes   | No   | Yes  | Yes     | No |
++
++
++### Purge ###
++
++This deletes a directory quicker than just deleting all the files in
++the directory.
++
++† Note Swift and Hubic implement this in order to delete directory
++markers but they don't actually have a quicker way of deleting files
++other than deleting them individually.
++
++### Copy ###
++
++Used when copying an object to and from the same remote.  This is known
++as a server side copy, so you can copy a file without downloading it
++and uploading it again.  It is used by `rclone copy`, and by
++`rclone move` if the remote doesn't support `Move` directly.
++
++If the server doesn't support `Copy` directly then for copy operations
++the file is downloaded then re-uploaded.
++
++### Move ###
++
++Used when moving/renaming an object on the same remote.  This is known
++as a server side move of a file.  This is used in `rclone move` if the
++server doesn't support `DirMove`.
++
++If the server isn't capable of `Move` then rclone simulates it with
++`Copy` then delete.  If the server doesn't support `Copy` then rclone
++will download the file and re-upload it.
++
++### DirMove ###
++
++This is used to implement `rclone move` to move a directory if
++possible.  If it isn't then it will use `Move` on each file (which
++falls back to `Copy` then download and upload - see `Move` section).
++
++### CleanUp ###
++
++This is used for emptying the trash for a remote by `rclone cleanup`.
++
++If the server can't do `CleanUp` then `rclone cleanup` will return an
++error.
+diff --git a/rclone-1.33/docs/content/s3.md b/rclone/docs/content/s3.md
+index 5b63e53..5c25687 100644
+--- a/rclone-1.33/docs/content/s3.md
++++ b/rclone/docs/content/s3.md
+@@ -147,6 +147,17 @@ Choose a number from below, or type in your own value
+  2 / AES256
+    \ "AES256"
+ server_side_encryption>
++The storage class to use when storing objects in S3.
++Choose a number from below, or type in your own value
++ 1 / Default
++   \ ""
++ 2 / Standard storage class
++   \ "STANDARD"
++ 3 / Reduced redundancy storage class
++   \ "REDUCED_REDUNDANCY"
++ 4 / Standard Infrequent Access storage class
++   \ "STANDARD_IA"
++storage_class>
+ Remote config
+ --------------------
+ [remote]
+@@ -217,6 +228,21 @@ credentials. In order of precedence:
+ If none of these option actually end up providing `rclone` with AWS
+ credentials then S3 interaction will be non-authenticated (see below).
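Returning to the `Move`/`Copy` fallback chain described in the overview hunk above, a minimal sketch of that decision logic. The `Remote`, `Mover` and `Copier` interfaces here are simplified stand-ins invented for illustration; rclone's real `fs.Mover` and `fs.Copier` (asserted in the Go changes later in this patch) have different signatures:

```go
// Sketch of the documented fallback order for moving a single object:
// server side Move, else server side Copy plus delete, else download
// and re-upload plus delete.
package fallback

// Object identifies a stored file on a remote (illustrative only).
type Object struct{ Name string }

// Remote is the lowest common denominator every backend supports.
type Remote interface {
	Download(o Object) ([]byte, error)
	Upload(name string, data []byte) (Object, error)
	Delete(o Object) error
}

// Mover is implemented by remotes with a server side move,
// Copier by remotes with a server side copy.
type Mover interface {
	Move(o Object, newName string) (Object, error)
}
type Copier interface {
	Copy(o Object, newName string) (Object, error)
}

// MoveObject tries the cheapest capability first.
func MoveObject(r Remote, o Object, newName string) (Object, error) {
	if m, ok := r.(Mover); ok { // server side rename/move
		return m.Move(o, newName)
	}
	if c, ok := r.(Copier); ok { // server side copy, then delete source
		dst, err := c.Copy(o, newName)
		if err != nil {
			return Object{}, err
		}
		return dst, r.Delete(o)
	}
	// Last resort: download, re-upload, then delete the source.
	data, err := r.Download(o)
	if err != nil {
		return Object{}, err
	}
	dst, err := r.Upload(newName, data)
	if err != nil {
		return Object{}, err
	}
	return dst, r.Delete(o)
}
```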
+ 
++### Specific options ###
++
++Here are the command line options specific to this cloud storage
++system.
++
++#### --s3-storage-class ####
++
++Storage class to upload new objects with.
++
++Available options include:
++
++ - STANDARD - default storage class
++ - STANDARD_IA - for less frequently accessed data (e.g. backups)
++ - REDUCED_REDUNDANCY (only for noncritical, reproducible data, has lower redundancy)
++
+ ### Anonymous access to public buckets ###
+ 
+ If you want to use rclone to access a public bucket, configure with a
+diff --git a/rclone-1.33/docs/content/swift.md b/rclone/docs/content/swift.md
+index 6729edc..9bebdfe 100644
+--- a/rclone-1.33/docs/content/swift.md
++++ b/rclone/docs/content/swift.md
+@@ -119,6 +119,38 @@ excess files in the container.
+ 
+     rclone sync /home/local/directory remote:container
+ 
++### Configuration from an Openstack credentials file ###
++
++An Openstack credentials file typically looks something
++like this (without the comments)
++
++```
++export OS_AUTH_URL=https://a.provider.net/v2.0
++export OS_TENANT_ID=ffffffffffffffffffffffffffffffff
++export OS_TENANT_NAME="1234567890123456"
++export OS_USERNAME="123abc567xy"
++echo "Please enter your OpenStack Password: "
++read -sr OS_PASSWORD_INPUT
++export OS_PASSWORD=$OS_PASSWORD_INPUT
++export OS_REGION_NAME="SBG1"
++if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi
++```
++
++The config file needs to look something like this where `$OS_USERNAME`
++represents the value of the `OS_USERNAME` variable - `123abc567xy` in
++the example above.
++
++```
++[remote]
++type = swift
++user = $OS_USERNAME
++key = $OS_PASSWORD
++auth = $OS_AUTH_URL
++tenant = $OS_TENANT_NAME
++```
++
++Note that you may (or may not) need to set `region` too - try without first.
++
+ ### Specific options ###
+ 
+ Here are the command line options specific to this cloud storage
+@@ -155,6 +187,9 @@ authentication fails for Swift.
+ So this most likely means your username / password is wrong.  You can
+ investigate further with the `--dump-bodies` flag.
+ 
++This may also be caused by specifying the region when you shouldn't
++have (eg OVH).
++
+ #### Rclone gives Failed to create file system: Response didn't have storage storage url and auth token ####
+ 
+ This is most likely caused by forgetting to specify your tenant when
+diff --git a/rclone-1.33/docs/layouts/chrome/menu.html b/rclone/docs/layouts/chrome/menu.html
+index 2f3a2c5..aedf8ad 100644
+--- a/rclone-1.33/docs/layouts/chrome/menu.html
++++ b/rclone/docs/layouts/chrome/menu.html
+@@ -11,3 +11,17 @@
+ 
++    [HTML markup adding a "Links" dropdown to the site menu]
+diff --git a/rclone-1.33/docs/layouts/chrome/navbar.html b/rclone/docs/layouts/chrome/navbar.html
+index bd38c3b..bbf4c5a 100644
+--- a/rclone-1.33/docs/layouts/chrome/navbar.html
++++ b/rclone/docs/layouts/chrome/navbar.html
+@@ -60,6 +60,7 @@
+             Backblaze B2
+             Local
+             Yandex Disk
++            Crypt (encrypts the above)
+ 
+             Contact
+diff --git a/rclone-1.33/docs/static/css/font-awesome.css b/rclone/docs/static/css/font-awesome.css
+index 880eb82..a0b879f 100644
+--- a/rclone-1.33/docs/static/css/font-awesome.css
++++ b/rclone/docs/static/css/font-awesome.css
+@@ -1,13 +1,13 @@
+ /*!
+- *  Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome
++ *  Font Awesome 4.6.3 by @davegandy - http://fontawesome.io - @fontawesome
+  *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+  */
+ /* FONT PATH
+  * -------------------------- */
+ @font-face {
+   font-family: 'FontAwesome';
+-  src: url('../fonts/fontawesome-webfont.eot?v=4.4.0');
+-  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.4.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.4.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.4.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular') format('svg');
++  src: url('../fonts/fontawesome-webfont.eot?v=4.6.3');
++  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.6.3') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.6.3') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.6.3') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.6.3') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.6.3#fontawesomeregular') format('svg');
+   font-weight: normal;
+   font-style: normal;
+ }
+@@ -118,31 +118,31 @@
+   }
+ }
+ .fa-rotate-90 {
+-  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
++  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";
+   -webkit-transform: rotate(90deg);
+   -ms-transform: rotate(90deg);
+   transform: rotate(90deg);
+ }
+ .fa-rotate-180 {
+-  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
++  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";
+   -webkit-transform: rotate(180deg);
+   -ms-transform: rotate(180deg);
+   transform: rotate(180deg);
+ }
+ .fa-rotate-270 {
+-  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
++  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";
+   -webkit-transform: rotate(270deg);
+   -ms-transform: rotate(270deg);
+   transform: rotate(270deg);
+ }
+ .fa-flip-horizontal {
+-  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
++  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";
+   -webkit-transform: scale(-1, 1);
+   -ms-transform: scale(-1, 1);
+   transform: scale(-1, 1);
+ }
+ .fa-flip-vertical {
+-  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
++  -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";
+   -webkit-transform: scale(1, -1);
+   -ms-transform: scale(1, -1);
+   transform: scale(1, -1);
+@@ -1383,7 +1383,7 @@
+ .fa-digg:before {
+   content: "\f1a6";
+ }
+-.fa-pied-piper:before {
++.fa-pied-piper-pp:before {
+   content: "\f1a7";
+ }
+ .fa-pied-piper-alt:before {
+@@ -1509,6 +1509,7 @@
+   content: "\f1ce";
+ }
+ .fa-ra:before,
++.fa-resistance:before,
+ .fa-rebel:before {
+   content: "\f1d0";
+ }
+@@ -2024,3 +2025,175 @@
+ .fa-fonticons:before {
+   content: "\f280";
+ }
++.fa-reddit-alien:before {
++  content: "\f281";
++}
++.fa-edge:before {
++  content: "\f282";
++}
++.fa-credit-card-alt:before {
++  content: "\f283";
++}
++.fa-codiepie:before {
++  content: "\f284";
++}
++.fa-modx:before {
++  content: "\f285";
++}
++.fa-fort-awesome:before {
++ content: "\f286"; ++} ++.fa-usb:before { ++ content: "\f287"; ++} ++.fa-product-hunt:before { ++ content: "\f288"; ++} ++.fa-mixcloud:before { ++ content: "\f289"; ++} ++.fa-scribd:before { ++ content: "\f28a"; ++} ++.fa-pause-circle:before { ++ content: "\f28b"; ++} ++.fa-pause-circle-o:before { ++ content: "\f28c"; ++} ++.fa-stop-circle:before { ++ content: "\f28d"; ++} ++.fa-stop-circle-o:before { ++ content: "\f28e"; ++} ++.fa-shopping-bag:before { ++ content: "\f290"; ++} ++.fa-shopping-basket:before { ++ content: "\f291"; ++} ++.fa-hashtag:before { ++ content: "\f292"; ++} ++.fa-bluetooth:before { ++ content: "\f293"; ++} ++.fa-bluetooth-b:before { ++ content: "\f294"; ++} ++.fa-percent:before { ++ content: "\f295"; ++} ++.fa-gitlab:before { ++ content: "\f296"; ++} ++.fa-wpbeginner:before { ++ content: "\f297"; ++} ++.fa-wpforms:before { ++ content: "\f298"; ++} ++.fa-envira:before { ++ content: "\f299"; ++} ++.fa-universal-access:before { ++ content: "\f29a"; ++} ++.fa-wheelchair-alt:before { ++ content: "\f29b"; ++} ++.fa-question-circle-o:before { ++ content: "\f29c"; ++} ++.fa-blind:before { ++ content: "\f29d"; ++} ++.fa-audio-description:before { ++ content: "\f29e"; ++} ++.fa-volume-control-phone:before { ++ content: "\f2a0"; ++} ++.fa-braille:before { ++ content: "\f2a1"; ++} ++.fa-assistive-listening-systems:before { ++ content: "\f2a2"; ++} ++.fa-asl-interpreting:before, ++.fa-american-sign-language-interpreting:before { ++ content: "\f2a3"; ++} ++.fa-deafness:before, ++.fa-hard-of-hearing:before, ++.fa-deaf:before { ++ content: "\f2a4"; ++} ++.fa-glide:before { ++ content: "\f2a5"; ++} ++.fa-glide-g:before { ++ content: "\f2a6"; ++} ++.fa-signing:before, ++.fa-sign-language:before { ++ content: "\f2a7"; ++} ++.fa-low-vision:before { ++ content: "\f2a8"; ++} ++.fa-viadeo:before { ++ content: "\f2a9"; ++} ++.fa-viadeo-square:before { ++ content: "\f2aa"; ++} ++.fa-snapchat:before { ++ content: "\f2ab"; ++} ++.fa-snapchat-ghost:before { ++ content: "\f2ac"; ++} ++.fa-snapchat-square:before { ++ content: "\f2ad"; ++} ++.fa-pied-piper:before { ++ content: "\f2ae"; ++} ++.fa-first-order:before { ++ content: "\f2b0"; ++} ++.fa-yoast:before { ++ content: "\f2b1"; ++} ++.fa-themeisle:before { ++ content: "\f2b2"; ++} ++.fa-google-plus-circle:before, ++.fa-google-plus-official:before { ++ content: "\f2b3"; ++} ++.fa-fa:before, ++.fa-font-awesome:before { ++ content: "\f2b4"; ++} ++.sr-only { ++ position: absolute; ++ width: 1px; ++ height: 1px; ++ padding: 0; ++ margin: -1px; ++ overflow: hidden; ++ clip: rect(0, 0, 0, 0); ++ border: 0; ++} ++.sr-only-focusable:active, ++.sr-only-focusable:focus { ++ position: static; ++ width: auto; ++ height: auto; ++ margin: 0; ++ overflow: visible; ++ clip: auto; ++} +diff --git a/rclone-1.33/docs/static/fonts/FontAwesome.otf b/rclone/docs/static/fonts/FontAwesome.otf +index 681bdd4..d4de13e 100644 +Binary files a/rclone-1.33/docs/static/fonts/FontAwesome.otf and b/rclone/docs/static/fonts/FontAwesome.otf differ +diff --git a/rclone-1.33/docs/static/fonts/fontawesome-webfont.eot b/rclone/docs/static/fonts/fontawesome-webfont.eot +index a30335d..c7b00d2 100644 +Binary files a/rclone-1.33/docs/static/fonts/fontawesome-webfont.eot and b/rclone/docs/static/fonts/fontawesome-webfont.eot differ +diff --git a/rclone-1.33/docs/static/fonts/fontawesome-webfont.svg b/rclone/docs/static/fonts/fontawesome-webfont.svg +index 6fd19ab..8b66187 100644 +--- a/rclone-1.33/docs/static/fonts/fontawesome-webfont.svg ++++ 
b/rclone/docs/static/fonts/fontawesome-webfont.svg
+    [SVG glyph markup omitted - webfont glyphs updated for Font Awesome 4.6.3]
+diff --git a/rclone-1.33/docs/static/fonts/fontawesome-webfont.ttf b/rclone/docs/static/fonts/fontawesome-webfont.ttf
+index d7994e1..f221e50 100644
+Binary files a/rclone-1.33/docs/static/fonts/fontawesome-webfont.ttf and b/rclone/docs/static/fonts/fontawesome-webfont.ttf differ
+diff --git a/rclone-1.33/docs/static/fonts/fontawesome-webfont.woff b/rclone/docs/static/fonts/fontawesome-webfont.woff
+index 6fd4ede..6e7483c 100644
+Binary files a/rclone-1.33/docs/static/fonts/fontawesome-webfont.woff and b/rclone/docs/static/fonts/fontawesome-webfont.woff differ
+diff --git a/rclone-1.33/docs/static/fonts/fontawesome-webfont.woff2 b/rclone/docs/static/fonts/fontawesome-webfont.woff2
+index 5560193..7eb74fd 100644
+Binary files a/rclone-1.33/docs/static/fonts/fontawesome-webfont.woff2 and b/rclone/docs/static/fonts/fontawesome-webfont.woff2 differ
+diff --git a/rclone-1.33/drive/drive.go b/rclone/drive/drive.go
+index 542950b..75c1404 100644
+--- a/rclone-1.33/drive/drive.go
++++ b/rclone/drive/drive.go
+@@ -61,10 +61,12 @@ var (
+ 		RedirectURL:  oauthutil.TitleBarRedirectURL,
+ 	}
+ 	mimeTypeToExtension = map[string]string{
++		"application/epub+zip": "epub",
+ 		"application/msword":   "doc",
+ 		"application/pdf":      "pdf",
+ 		"application/rtf":      "rtf",
+ 		"application/vnd.ms-excel": "xls",
++		"application/vnd.oasis.opendocument.presentation": "odp",
+ 		"application/vnd.oasis.opendocument.spreadsheet":  "ods",
+ 		"application/vnd.oasis.opendocument.text":         "odt",
+ 		"application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
+@@ -78,6 +80,7 @@ var (
+ 		"text/csv":   "csv",
+ 		"text/html":  "html",
+ 		"text/plain": "txt",
++		"text/tab-separated-values": "tsv",
+ 	}
+ 	extensionToMimeType map[string]string
+ )
+@@ -134,6 +137,7 @@ type Object struct {
+ 	bytes        int64  // size of the object
+ 	modifiedDate string // RFC3339 time it was last modified
+ 	isDocument   bool   // if set this is a Google doc
++	mimeType     string
+ }
+ 
+ // ------------------------------------------------------------
+@@ -530,7 +534,7 @@ func (f *Fs) createFileInfo(remote string, modTime time.Time, size int64) (*Obje
+ 		Title:        leaf,
+ 		Description:  leaf,
+ 		Parents:      []*drive.ParentReference{{Id: directoryID}},
+-		MimeType:     fs.MimeType(o),
++		MimeType:     fs.MimeTypeFromName(remote),
+ 		ModifiedDate: modTime.Format(timeFormatOut),
+ 	}
+ 	return o, createInfo, nil
+@@ -823,7 +827,7 @@ func (o *Object) Size() int64 {
+ 	if o.isDocument && o.bytes < 0 {
+ 		// If it is a google doc then we must HEAD it to see
+ 		// how big it is
+-		res, err := o.httpResponse("HEAD")
++		_, res, err := o.httpResponse("HEAD", nil)
+ 		if err != nil {
+ 			fs.ErrorLog(o, "Error reading size: %v", err)
+ 			return 0
+@@ -842,6 +846,7 @@ func (o *Object) setMetaData(info *drive.File) {
+ 	o.md5sum = strings.ToLower(info.Md5Checksum)
+ 	o.bytes = 
info.FileSize + o.modifiedDate = info.ModifiedDate ++ o.mimeType = info.MimeType + } + + // readMetaData gets the info if it hasn't already been fetched +@@ -924,23 +929,23 @@ func (o *Object) Storable() bool { + + // httpResponse gets an http.Response object for the object o.url + // using the method passed in +-func (o *Object) httpResponse(method string) (res *http.Response, err error) { ++func (o *Object) httpResponse(method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) { + if o.url == "" { +- return nil, errors.New("forbidden to download - check sharing permission") ++ return nil, nil, errors.New("forbidden to download - check sharing permission") + } +- req, err := http.NewRequest(method, o.url, nil) ++ req, err = http.NewRequest(method, o.url, nil) + if err != nil { +- return nil, err ++ return req, nil, err + } +- req.Header.Set("User-Agent", fs.UserAgent) ++ fs.OpenOptionAddHTTPHeaders(req.Header, options) + err = o.fs.pacer.Call(func() (bool, error) { + res, err = o.fs.client.Do(req) + return shouldRetry(err) + }) + if err != nil { +- return nil, err ++ return req, nil, err + } +- return res, nil ++ return req, res, nil + } + + // openFile represents an Object open for reading +@@ -975,12 +980,13 @@ func (file *openFile) Close() (err error) { + var _ io.ReadCloser = &openFile{} + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { +- res, err := o.httpResponse("GET") ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { ++ req, res, err := o.httpResponse("GET", options) + if err != nil { + return nil, err + } +- if res.StatusCode != 200 { ++ _, isRanging := req.Header["Range"] ++ if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) { + _ = res.Body.Close() // ignore error + return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status) + } +@@ -1007,7 +1013,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + } + updateInfo := &drive.File{ + Id: o.id, +- MimeType: fs.MimeType(o), ++ MimeType: fs.MimeType(src), + ModifiedDate: modTime.Format(timeFormatOut), + } + +@@ -1025,7 +1031,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + } + } else { + // Upload the file in chunks +- info, err = o.fs.Upload(in, size, fs.MimeType(o), updateInfo, o.remote) ++ info, err = o.fs.Upload(in, size, updateInfo.MimeType, updateInfo, o.remote) + if err != nil { + return err + } +@@ -1051,6 +1057,16 @@ func (o *Object) Remove() error { + return err + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ err := o.readMetaData() ++ if err != nil { ++ fs.Log(o, "Failed to read metadata: %v", err) ++ return "" ++ } ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( + _ fs.Fs = (*Fs)(nil) +@@ -1060,4 +1076,5 @@ var ( + _ fs.DirMover = (*Fs)(nil) + _ fs.PutUncheckeder = (*Fs)(nil) + _ fs.Object = (*Object)(nil) ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/drive/drive_test.go b/rclone/drive/drive_test.go +index 657a78a..1a92c3d 100644 +--- a/rclone-1.33/drive/drive_test.go ++++ b/rclone/drive/drive_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t 
*testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/drive/upload.go b/rclone/drive/upload.go +index 8ce653a..07007de 100644 +--- a/rclone-1.33/drive/upload.go ++++ b/rclone/drive/upload.go +@@ -84,7 +84,6 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, info *drive.Fi + req.Header.Set("Content-Type", "application/json; charset=UTF-8") + req.Header.Set("X-Upload-Content-Type", contentType) + req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size)) +- req.Header.Set("User-Agent", fs.UserAgent) + res, err = f.client.Do(req) + if err == nil { + defer googleapi.CloseBody(res) +@@ -118,7 +117,6 @@ func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request { + req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength)) + } + req.Header.Set("Content-Type", rx.MediaType) +- req.Header.Set("User-Agent", fs.UserAgent) + return req + } + +diff --git a/rclone-1.33/dropbox/dropbox.go b/rclone/dropbox/dropbox.go +index 7c62318..9c0e86f 100644 +--- a/rclone-1.33/dropbox/dropbox.go ++++ b/rclone/dropbox/dropbox.go +@@ -110,6 +110,7 @@ type Object struct { + bytes int64 // size of the object + modTime time.Time // time it was last modified + hasMetadata bool // metadata is valid ++ mimeType string // content type according to the server + } + + // ------------------------------------------------------------ +@@ -622,6 +623,7 @@ func (o *Object) Size() int64 { + func (o *Object) setMetadataFromEntry(info *dropbox.Entry) { + o.bytes = info.Bytes + o.modTime = time.Time(info.ClientMtime) ++ o.mimeType = info.MimeType + o.hasMetadata = true + } + +@@ -708,8 +710,21 @@ func (o *Object) Storable() bool { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { +- in, _, err = o.fs.db.Download(o.remotePath(), "", 0) ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { ++ // FIXME should send a patch for dropbox module which allow setting headers ++ var offset int64 ++ for _, option := range options { ++ switch x := option.(type) { ++ case *fs.SeekOption: ++ offset = x.Offset ++ default: ++ if option.Mandatory() { ++ fs.Log(o, "Unsupported mandatory option: %v", option) ++ } ++ } ++ } ++ ++ in, _, err = o.fs.db.Download(o.remotePath(), "", offset) + if dropboxErr, ok := err.(*dropbox.Error); ok { + // Dropbox return 461 for copyright violation so don't + // attempt to retry this error +@@ -745,12 +760,23 @@ func (o *Object) Remove() error { + return err + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ err := o.readMetaData() ++ if err != nil { ++ fs.Log(o, "Failed to read metadata: %v", err) ++ return "" ++ } ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( +- _ fs.Fs = (*Fs)(nil) +- _ fs.Copier = (*Fs)(nil) +- _ fs.Purger = (*Fs)(nil) +- _ fs.Mover = (*Fs)(nil) +- _ fs.DirMover = (*Fs)(nil) +- _ fs.Object = (*Object)(nil) ++ _ fs.Fs = (*Fs)(nil) ++ _ fs.Copier = (*Fs)(nil) ++ _ fs.Purger = (*Fs)(nil) ++ _ 
fs.Mover = (*Fs)(nil) ++ _ fs.DirMover = (*Fs)(nil) ++ _ fs.Object = (*Object)(nil) ++ _ fs.MimeTyper = (*Object)(nil) + ) +diff --git a/rclone-1.33/dropbox/dropbox_test.go b/rclone/dropbox/dropbox_test.go +index 59c68e6..e41ffa6 100644 +--- a/rclone-1.33/dropbox/dropbox_test.go ++++ b/rclone/dropbox/dropbox_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/fs/accounting.go b/rclone/fs/accounting.go +index a7cea2d..3f1e9b2 100644 +--- a/rclone-1.33/fs/accounting.go ++++ b/rclone/fs/accounting.go +@@ -231,11 +231,15 @@ func (s *StatsInfo) Transferring(remote string) { + } + + // DoneTransferring removes a transfer from the stats +-func (s *StatsInfo) DoneTransferring(remote string) { ++// ++// if ok is true then it increments the transfers count ++func (s *StatsInfo) DoneTransferring(remote string, ok bool) { + s.lock.Lock() + defer s.lock.Unlock() + delete(s.transferring, remote) +- s.transfers++ ++ if ok { ++ s.transfers++ ++ } + } + + // Account limits and accounts for one transfer +@@ -468,6 +472,7 @@ func AccountByPart(obj Object) *Account { + acc := Stats.inProgress.get(obj.Remote()) + if acc == nil { + Debug(obj, "Didn't find object to account part transfer") ++ return nil + } + acc.disableWholeFileAccounting() + return acc +diff --git a/rclone-1.33/fs/config.go b/rclone/fs/config.go +index b2d41c8..5d96108 100644 +--- a/rclone-1.33/fs/config.go ++++ b/rclone/fs/config.go +@@ -9,14 +9,12 @@ import ( + "crypto/cipher" + "crypto/rand" + "crypto/sha256" +- "crypto/tls" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "log" + "math" +- "net/http" + "os" + "os/user" + "path" +@@ -27,7 +25,6 @@ import ( + "unicode/utf8" + + "github.com/Unknwon/goconfig" +- "github.com/mreiferson/go-httpclient" + "github.com/pkg/errors" + "github.com/spf13/pflag" + "golang.org/x/crypto/nacl/secretbox" +@@ -304,62 +301,6 @@ type ConfigInfo struct { + NoUpdateModTime bool + } + +-// Transport returns an http.RoundTripper with the correct timeouts +-func (ci *ConfigInfo) Transport() http.RoundTripper { +- t := &httpclient.Transport{ +- Proxy: http.ProxyFromEnvironment, +- MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1, +- +- // ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for +- // a connect to complete. +- ConnectTimeout: ci.ConnectTimeout, +- +- // ResponseHeaderTimeout, if non-zero, specifies the amount of +- // time to wait for a server's response headers after fully +- // writing the request (including its body, if any). This +- // time does not include the time to read the response body. 
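
The block being deleted in this hunk configured the third-party
github.com/mreiferson/go-httpclient transport; its replacement is the new
fs/http.go added later in this patch, which rebuilds the same behaviour on
net/http alone. A rough standard-library sketch of the two timeouts just
described, with illustrative values rather than rclone's defaults:

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "time"
    )

    func newClient(connectTimeout, timeout time.Duration) *http.Client {
        t := &http.Transport{
            Proxy: http.ProxyFromEnvironment,
            // maximum time a dial may take (go-httpclient's ConnectTimeout)
            Dial: (&net.Dialer{Timeout: connectTimeout}).Dial,
            // maximum wait for the server's response headers after the
            // request is written (the ResponseHeaderTimeout above)
            ResponseHeaderTimeout: timeout,
        }
        return &http.Client{Transport: t}
    }

    func main() {
        client := newClient(30*time.Second, 5*time.Minute)
        fmt.Printf("%T\n", client.Transport)
    }
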
+- ResponseHeaderTimeout: ci.Timeout, +- +- // RequestTimeout, if non-zero, specifies the amount of time for the entire +- // request to complete (including all of the above timeouts + entire response body). +- // This should never be less than the sum total of the above two timeouts. +- //RequestTimeout: NOT SET, +- +- // ReadWriteTimeout, if non-zero, will set a deadline for every Read and +- // Write operation on the request connection. +- ReadWriteTimeout: ci.Timeout, +- +- // InsecureSkipVerify controls whether a client verifies the +- // server's certificate chain and host name. +- // If InsecureSkipVerify is true, TLS accepts any certificate +- // presented by the server and any host name in that certificate. +- // In this mode, TLS is susceptible to man-in-the-middle attacks. +- // This should be used only for testing. +- TLSClientConfig: &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}, +- +- // DisableCompression, if true, prevents the Transport from +- // requesting compression with an "Accept-Encoding: gzip" +- // request header when the Request contains no existing +- // Accept-Encoding value. If the Transport requests gzip on +- // its own and gets a gzipped response, it's transparently +- // decoded in the Response.Body. However, if the user +- // explicitly requested gzip it is not automatically +- // uncompressed. +- DisableCompression: *noGzip, +- } +- if ci.DumpHeaders || ci.DumpBodies { +- return NewLoggedTransport(t, ci.DumpBodies) +- } +- return t +-} +- +-// Client returns an http.Client with the correct timeouts +-func (ci *ConfigInfo) Client() *http.Client { +- return &http.Client{ +- Transport: ci.Transport(), +- } +-} +- + // Find the config directory + func configHome() string { + // Find users home directory +@@ -811,8 +752,21 @@ func ChooseNumber(what string, min, max int) int { + func ShowRemote(name string) { + fmt.Printf("--------------------\n") + fmt.Printf("[%s]\n", name) ++ fs := MustFindByName(name) + for _, key := range ConfigFile.GetKeyList(name) { +- fmt.Printf("%s = %s\n", key, ConfigFile.MustValue(name, key)) ++ isPassword := false ++ for _, option := range fs.Options { ++ if option.Name == key && option.IsPassword { ++ isPassword = true ++ break ++ } ++ } ++ value := ConfigFile.MustValue(name, key) ++ if isPassword && value != "" { ++ fmt.Printf("%s = *** ENCRYPTED ***\n", key) ++ } else { ++ fmt.Printf("%s = %s\n", key, value) ++ } + } + fmt.Printf("--------------------\n") + } +@@ -834,17 +788,20 @@ func OkRemote(name string) bool { + return false + } + ++// MustFindByName finds the RegInfo for the remote name passed in or ++// exits with a fatal error. 
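
MustFindByName, declared next, and MustFind (added to fs/fs.go further on)
introduce the Go "Must" convention: turn a (value, error) lookup into one
that aborts via log.Fatalf, so the interactive config call sites below stop
repeating the error handling. The shape of the convention in isolation
(all names here are invented for the sketch):

    package main

    import (
        "errors"
        "log"
    )

    // findThing stands in for a lookup such as fs.Find which can fail.
    func findThing(name string) (string, error) {
        if name == "" {
            return "", errors.New("not found")
        }
        return name, nil
    }

    // mustFindThing aborts instead of returning an error - appropriate in
    // interactive config paths where there is no sensible recovery.
    func mustFindThing(name string) string {
        v, err := findThing(name)
        if err != nil {
            log.Fatalf("Failed to find %q: %v", name, err)
        }
        return v
    }

    func main() {
        log.Println(mustFindThing("drive"))
    }
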
++func MustFindByName(name string) *RegInfo { ++ fsType := ConfigFile.MustValue(name, "type") ++ if fsType == "" { ++ log.Fatalf("Couldn't find type of fs for %q", name) ++ } ++ return MustFind(fsType) ++} ++ + // RemoteConfig runs the config helper for the remote if needed + func RemoteConfig(name string) { + fmt.Printf("Remote config\n") +- fsName := ConfigFile.MustValue(name, "type") +- if fsName == "" { +- log.Fatalf("Couldn't find type of fs for %q", name) +- } +- f, err := Find(fsName) +- if err != nil { +- log.Fatalf("Didn't find filing system: %v", err) +- } ++ f := MustFindByName(name) + if f.Config != nil { + f.Config(name) + } +@@ -923,10 +880,7 @@ func fsOption() *Option { + func NewRemote(name string) { + newType := ChooseOption(fsOption()) + ConfigFile.SetValue(name, "type", newType) +- fs, err := Find(newType) +- if err != nil { +- log.Fatalf("Failed to find fs: %v", err) +- } ++ fs := MustFind(newType) + for _, option := range fs.Options { + ConfigFile.SetValue(name, option.Name, ChooseOption(&option)) + } +@@ -983,10 +937,7 @@ func EditConfig() { + switch i := Command(what); i { + case 'e': + name := ChooseRemote() +- fs, err := Find(ConfigFile.MustValue(name, "type")) +- if err != nil { +- log.Fatalf("Failed to find fs: %v", err) +- } ++ fs := MustFindByName(name) + EditRemote(fs, name) + case 'n': + nameLoop: +@@ -1069,11 +1020,7 @@ func Authorize(args []string) { + log.Fatalf("Invalid number of arguments: %d", len(args)) + } + newType := args[0] +- fs, err := Find(newType) +- if err != nil { +- log.Fatalf("Failed to find fs: %v", err) +- } +- ++ fs := MustFind(newType) + if fs.Config == nil { + log.Fatalf("Can't authorize fs %q", newType) + } +diff --git a/rclone-1.33/fs/filter.go b/rclone/fs/filter.go +index 5866f1a..e093cb8 100644 +--- a/rclone-1.33/fs/filter.go ++++ b/rclone/fs/filter.go +@@ -94,8 +94,8 @@ func (rs *rules) len() int { + return len(rs.rules) + } + +-// filesMap describes the map of files to transfer +-type filesMap map[string]struct{} ++// FilesMap describes the map of files to transfer ++type FilesMap map[string]struct{} + + // Filter describes any filtering in operation + type Filter struct { +@@ -106,8 +106,8 @@ type Filter struct { + ModTimeTo time.Time + fileRules rules + dirRules rules +- files filesMap // files if filesFrom +- dirs filesMap // dirs from filesFrom ++ files FilesMap // files if filesFrom ++ dirs FilesMap // dirs from filesFrom + } + + // We use time conventions +@@ -313,8 +313,8 @@ func (f *Filter) AddRule(rule string) error { + // AddFile adds a single file to the files from list + func (f *Filter) AddFile(file string) error { + if f.files == nil { +- f.files = make(filesMap) +- f.dirs = make(filesMap) ++ f.files = make(FilesMap) ++ f.dirs = make(FilesMap) + } + file = strings.Trim(file, "/") + f.files[file] = struct{}{} +@@ -332,6 +332,13 @@ func (f *Filter) AddFile(file string) error { + return nil + } + ++// Files returns all the files from the `--files-from` list ++// ++// It may be nil if the list is empty ++func (f *Filter) Files() FilesMap { ++ return f.files ++} ++ + // Clear clears all the filter rules + func (f *Filter) Clear() { + f.fileRules.clear() +diff --git a/rclone-1.33/fs/filter_test.go b/rclone/fs/filter_test.go +index d49717d..e582a3e 100644 +--- a/rclone-1.33/fs/filter_test.go ++++ b/rclone/fs/filter_test.go +@@ -180,11 +180,11 @@ func TestNewFilterIncludeFiles(t *testing.T) { + require.NoError(t, err) + err = f.AddFile("/file2.jpg") + require.NoError(t, err) +- assert.Equal(t, filesMap{ ++ assert.Equal(t, 
FilesMap{ + "file1.jpg": {}, + "file2.jpg": {}, + }, f.files) +- assert.Equal(t, filesMap{}, f.dirs) ++ assert.Equal(t, FilesMap{}, f.dirs) + testInclude(t, f, []includeTest{ + {"file1.jpg", 0, 0, true}, + {"file2.jpg", 1, 0, true}, +@@ -206,7 +206,7 @@ func TestNewFilterIncludeFilesDirs(t *testing.T) { + err = f.AddFile(path) + require.NoError(t, err) + } +- assert.Equal(t, filesMap{ ++ assert.Equal(t, FilesMap{ + "path": {}, + "path/to": {}, + "path/to/dir": {}, +diff --git a/rclone-1.33/fs/fs.go b/rclone/fs/fs.go +index c377d2c..48840b9 100644 +--- a/rclone-1.33/fs/fs.go ++++ b/rclone/fs/fs.go +@@ -26,7 +26,7 @@ const ( + + // Globals + var ( +- // UserAgent for Fs which can set it ++ // UserAgent set in the default Transport + UserAgent = "rclone/" + Version + // Filesystem registry + fsRegistry []*RegInfo +@@ -114,6 +114,10 @@ type ListFser interface { + // Fses must support recursion levels of fs.MaxLevel and 1. + // They may return ErrorLevelNotSupported otherwise. + List(out ListOpts, dir string) ++ ++ // NewObject finds the Object at remote. If it can't be found ++ // it returns the error ErrorObjectNotFound. ++ NewObject(remote string) (Object, error) + } + + // Fs is the interface a cloud storage system must provide +@@ -121,10 +125,6 @@ type Fs interface { + Info + ListFser + +- // NewObject finds the Object at remote. If it can't be found +- // it returns the error ErrorObjectNotFound. +- NewObject(remote string) (Object, error) +- + // Put in to the remote path with the modTime given of the given size + // + // May create the object even if it returns an error - if so +@@ -172,7 +172,7 @@ type Object interface { + SetModTime(time.Time) error + + // Open opens the file for read. Call Close() on the returned io.ReadCloser +- Open() (io.ReadCloser, error) ++ Open(options ...OpenOption) (io.ReadCloser, error) + + // Update in to the object with the modTime given of the given size + Update(in io.Reader, src ObjectInfo) error +@@ -210,6 +210,13 @@ type BasicInfo interface { + Size() int64 + } + ++// MimeTyper is an optional interface for Object ++type MimeTyper interface { ++ // MimeType returns the content type of the Object if ++ // known, or "" if not ++ MimeType() string ++} ++ + // Purger is an optional interfaces for Fs + type Purger interface { + // Purge all files in the root and the root directory +@@ -387,6 +394,19 @@ func Find(name string) (*RegInfo, error) { + return nil, errors.Errorf("didn't find filing system for %q", name) + } + ++// MustFind looks for an Info object for the type name passed in ++// ++// Services are looked up in the config file ++// ++// Exits with a fatal error if not found ++func MustFind(name string) *RegInfo { ++ fs, err := Find(name) ++ if err != nil { ++ log.Fatalf("Failed to find remote: %v", err) ++ } ++ return fs ++} ++ + // Pattern to match an rclone url + var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`) + +diff --git a/rclone/fs/http.go b/rclone/fs/http.go +new file mode 100644 +index 0000000..e2b225e +--- /dev/null ++++ b/rclone/fs/http.go +@@ -0,0 +1,205 @@ ++// The HTTP based parts of the config, Transport and Client ++ ++package fs ++ ++import ( ++ "crypto/tls" ++ "net" ++ "net/http" ++ "net/http/httputil" ++ "reflect" ++ "sync" ++ "time" ++) ++ ++const ( ++ separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" ++ separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" ++) ++ ++var ( ++ transport http.RoundTripper ++ noTransport sync.Once ++) ++ ++// A net.Conn that sets a deadline 
for every Read or Write operation
++type timeoutConn struct {
++	net.Conn
++	readTimer  *time.Timer
++	writeTimer *time.Timer
++	timeout    time.Duration
++	off        time.Time
++}
++
++// create a timeoutConn using the timeout
++func newTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn {
++	return &timeoutConn{
++		Conn:    conn,
++		timeout: timeout,
++	}
++}
++
++// Nudge the deadline for an idle timeout on by c.timeout if non-zero
++func (c *timeoutConn) nudgeDeadline() (err error) {
++	if c.timeout == 0 {
++		return nil
++	}
++	when := time.Now().Add(c.timeout)
++	return c.Conn.SetDeadline(when)
++}
++
++// readOrWrite bytes doing idle timeouts
++func (c *timeoutConn) readOrWrite(f func([]byte) (int, error), b []byte) (n int, err error) {
++	err = c.nudgeDeadline()
++	if err != nil {
++		return n, err
++	}
++	n, err = f(b)
++	cerr := c.nudgeDeadline()
++	if err == nil && cerr != nil {
++		err = cerr
++	}
++	return n, err
++}
++
++// Read bytes doing idle timeouts
++func (c *timeoutConn) Read(b []byte) (n int, err error) {
++	return c.readOrWrite(c.Conn.Read, b)
++}
++
++// Write bytes doing idle timeouts
++func (c *timeoutConn) Write(b []byte) (n int, err error) {
++	return c.readOrWrite(c.Conn.Write, b)
++}
++
++// setDefaults for a from b
++//
++// Copy the public members from b to a. We can't just use a struct
++// copy as Transport contains a private mutex.
++func setDefaults(a, b interface{}) {
++	pt := reflect.TypeOf(a)
++	t := pt.Elem()
++	va := reflect.ValueOf(a).Elem()
++	vb := reflect.ValueOf(b).Elem()
++	for i := 0; i < t.NumField(); i++ {
++		aField := va.Field(i)
++		// Set a from b if it is public
++		if aField.CanSet() {
++			bField := vb.Field(i)
++			aField.Set(bField)
++		}
++	}
++}
++
++// Transport returns an http.RoundTripper with the correct timeouts
++func (ci *ConfigInfo) Transport() http.RoundTripper {
++	noTransport.Do(func() {
++		// Start with a sensible set of defaults then override.
++		// This also means we get new stuff when it gets added to go
++		t := new(http.Transport)
++		setDefaults(t, http.DefaultTransport.(*http.Transport))
++		t.Proxy = http.ProxyFromEnvironment
++		t.MaxIdleConnsPerHost = 4 * (ci.Checkers + ci.Transfers + 1)
++		t.TLSHandshakeTimeout = ci.ConnectTimeout
++		t.ResponseHeaderTimeout = ci.Timeout
++		t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
++		t.DisableCompression = *noGzip
++		// Set in http_old.go initTransport
++		//   t.Dial
++		// Set in http_new.go initTransport
++		//   t.IdleConnTimeout
++		//   t.ExpectContinueTimeout
++		//   t.DialContext
++		ci.initTransport(t)
++		// Wrap that http.Transport in our own transport
++		transport = NewTransport(t, ci.DumpHeaders, ci.DumpBodies)
++	})
++	return transport
++}
++
++// Client returns an http.Client with the correct timeouts
++func (ci *ConfigInfo) Client() *http.Client {
++	return &http.Client{
++		Transport: ci.Transport(),
++	}
++}
++
++// Transport is our http Transport which wraps an http.Transport
++// * Sets the User Agent
++// * Does logging
++type Transport struct {
++	*http.Transport
++	logHeader bool
++	logBody   bool
++}
++
++// NewTransport wraps the http.Transport passed in and logs all
++// roundtrips including the body if logBody is set.
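
The Transport type above is completed by the RoundTrip method that follows;
together they form a standard RoundTripper decoration: wrap another
http.RoundTripper, adjust the request, then delegate. The pattern in
isolation, with invented names (a sketch, not code from this patch):

    package main

    import (
        "fmt"
        "net/http"
    )

    // headerTransport decorates any http.RoundTripper so every request
    // carries a fixed header - the same trick RoundTrip below uses to
    // force the rclone User-Agent onto all requests.
    type headerTransport struct {
        rt         http.RoundTripper
        key, value string
    }

    func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
        req.Header.Set(t.key, t.value)
        return t.rt.RoundTrip(req)
    }

    func main() {
        client := &http.Client{Transport: &headerTransport{
            rt: http.DefaultTransport, key: "User-Agent", value: "example/1.0",
        }}
        fmt.Printf("%T\n", client.Transport)
    }
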
++func NewTransport(transport *http.Transport, logHeader, logBody bool) *Transport { ++ return &Transport{ ++ Transport: transport, ++ logHeader: logHeader, ++ logBody: logBody, ++ } ++} ++ ++// A map of servers we have checked for time ++var checkedHost = make(map[string]struct{}, 1) ++ ++// Check the server time is the same as ours, once for each server ++func checkServerTime(req *http.Request, resp *http.Response) { ++ host := req.URL.Host ++ if req.Host != "" { ++ host = req.Host ++ } ++ if _, ok := checkedHost[host]; ok { ++ return ++ } ++ dateString := resp.Header.Get("Date") ++ if dateString == "" { ++ return ++ } ++ date, err := http.ParseTime(dateString) ++ if err != nil { ++ Debug(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err) ++ return ++ } ++ dt := time.Since(date) ++ const window = 5 * 60 * time.Second ++ if dt > window || dt < -window { ++ Log(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt) ++ } ++ checkedHost[host] = struct{}{} ++} ++ ++// RoundTrip implements the RoundTripper interface. ++func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { ++ // Force user agent ++ req.Header.Set("User-Agent", UserAgent) ++ // Log request ++ if t.logHeader || t.logBody { ++ buf, _ := httputil.DumpRequestOut(req, t.logBody) ++ Debug(nil, "%s", separatorReq) ++ Debug(nil, "%s", "HTTP REQUEST") ++ Debug(nil, "%s", string(buf)) ++ Debug(nil, "%s", separatorReq) ++ } ++ // Do round trip ++ resp, err = t.Transport.RoundTrip(req) ++ // Log response ++ if t.logHeader || t.logBody { ++ Debug(nil, "%s", separatorResp) ++ Debug(nil, "%s", "HTTP RESPONSE") ++ if err != nil { ++ Debug(nil, "Error: %v", err) ++ } else { ++ buf, _ := httputil.DumpResponse(resp, t.logBody) ++ Debug(nil, "%s", string(buf)) ++ } ++ Debug(nil, "%s", separatorResp) ++ } ++ if err == nil { ++ checkServerTime(req, resp) ++ } ++ return resp, err ++} +diff --git a/rclone/fs/http_new.go b/rclone/fs/http_new.go +new file mode 100644 +index 0000000..4099ffa +--- /dev/null ++++ b/rclone/fs/http_new.go +@@ -0,0 +1,34 @@ ++// HTTP parts go1.7+ ++ ++//+build go1.7 ++ ++package fs ++ ++import ( ++ "context" ++ "net" ++ "net/http" ++ "time" ++) ++ ++// dial with context and timeouts ++func dialContextTimeout(ctx context.Context, network, address string, connectTimeout, timeout time.Duration) (net.Conn, error) { ++ dialer := net.Dialer{ ++ Timeout: connectTimeout, ++ KeepAlive: 30 * time.Second, ++ } ++ c, err := dialer.DialContext(ctx, network, address) ++ if err != nil { ++ return c, err ++ } ++ return newTimeoutConn(c, timeout), nil ++} ++ ++// Initialise the http.Transport for go1.7+ ++func (ci *ConfigInfo) initTransport(t *http.Transport) { ++ t.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { ++ return dialContextTimeout(ctx, network, address, ci.ConnectTimeout, ci.Timeout) ++ } ++ t.IdleConnTimeout = 60 * time.Second ++ t.ExpectContinueTimeout = ci.ConnectTimeout ++} +diff --git a/rclone/fs/http_old.go b/rclone/fs/http_old.go +new file mode 100644 +index 0000000..2fc9df0 +--- /dev/null ++++ b/rclone/fs/http_old.go +@@ -0,0 +1,31 @@ ++// HTTP parts pre go1.7 ++ ++//+build !go1.7 ++ ++package fs ++ ++import ( ++ "net" ++ "net/http" ++ "time" ++) ++ ++// dial with timeouts ++func dialTimeout(network, address string, connectTimeout, timeout time.Duration) (net.Conn, error) { ++ dialer := net.Dialer{ ++ Timeout: connectTimeout, ++ KeepAlive: 30 * time.Second, ++ } ++ c, err := dialer.Dial(network, 
address)
++	if err != nil {
++		return c, err
++	}
++	return newTimeoutConn(c, timeout), nil
++}
++
++// Initialise the http.Transport for pre go1.7
++func (ci *ConfigInfo) initTransport(t *http.Transport) {
++	t.Dial = func(network, address string) (net.Conn, error) {
++		return dialTimeout(network, address, ci.ConnectTimeout, ci.Timeout)
++	}
++}
+diff --git a/rclone/fs/http_test.go b/rclone/fs/http_test.go
+new file mode 100644
+index 0000000..b5b8e78
+--- /dev/null
++++ b/rclone/fs/http_test.go
+@@ -0,0 +1,40 @@
++//+build go1.7
++
++package fs
++
++import (
++	"fmt"
++	"net/http"
++	"testing"
++
++	"github.com/stretchr/testify/assert"
++)
++
++// returns the "%p" representation of the thing passed in
++func ptr(p interface{}) string {
++	return fmt.Sprintf("%p", p)
++}
++
++func TestSetDefaults(t *testing.T) {
++	old := http.DefaultTransport.(*http.Transport)
++	new := new(http.Transport)
++	setDefaults(new, old)
++	// Can't use assert.Equal or reflect.DeepEqual for this as it has functions in
++	// Check functions by comparing the "%p" representations of them
++	assert.Equal(t, ptr(old.Proxy), ptr(new.Proxy), "when checking .Proxy")
++	assert.Equal(t, ptr(old.DialContext), ptr(new.DialContext), "when checking .DialContext")
++	// Check the other public fields
++	assert.Equal(t, old.Dial, new.Dial, "when checking .Dial")
++	assert.Equal(t, old.DialTLS, new.DialTLS, "when checking .DialTLS")
++	assert.Equal(t, old.TLSClientConfig, new.TLSClientConfig, "when checking .TLSClientConfig")
++	assert.Equal(t, old.TLSHandshakeTimeout, new.TLSHandshakeTimeout, "when checking .TLSHandshakeTimeout")
++	assert.Equal(t, old.DisableKeepAlives, new.DisableKeepAlives, "when checking .DisableKeepAlives")
++	assert.Equal(t, old.DisableCompression, new.DisableCompression, "when checking .DisableCompression")
++	assert.Equal(t, old.MaxIdleConns, new.MaxIdleConns, "when checking .MaxIdleConns")
++	assert.Equal(t, old.MaxIdleConnsPerHost, new.MaxIdleConnsPerHost, "when checking .MaxIdleConnsPerHost")
++	assert.Equal(t, old.IdleConnTimeout, new.IdleConnTimeout, "when checking .IdleConnTimeout")
++	assert.Equal(t, old.ResponseHeaderTimeout, new.ResponseHeaderTimeout, "when checking .ResponseHeaderTimeout")
++	assert.Equal(t, old.ExpectContinueTimeout, new.ExpectContinueTimeout, "when checking .ExpectContinueTimeout")
++	assert.Equal(t, old.TLSNextProto, new.TLSNextProto, "when checking .TLSNextProto")
++	assert.Equal(t, old.MaxResponseHeaderBytes, new.MaxResponseHeaderBytes, "when checking .MaxResponseHeaderBytes")
++}
+diff --git a/rclone-1.33/fs/lister.go b/rclone/fs/lister.go
+index 3361a75..c9daf58 100644
+--- a/rclone-1.33/fs/lister.go
++++ b/rclone/fs/lister.go
+@@ -31,13 +31,55 @@ func NewLister() *Lister {
+ 	return o.SetLevel(-1).SetBuffer(Config.Checkers)
+ }
+ 
++// Finds and lists the files passed in
++//
++// Note we ignore the dir and just return all the files in the list
++func (o *Lister) listFiles(f ListFser, dir string, files FilesMap) {
++	buffer := o.Buffer()
++	jobs := make(chan string, buffer)
++	var wg sync.WaitGroup
++
++	// Start some listing go routines so we find those names in parallel
++	wg.Add(buffer)
++	for i := 0; i < buffer; i++ {
++		go func() {
++			defer wg.Done()
++			for remote := range jobs {
++				obj, err := f.NewObject(remote)
++				if err == ErrorObjectNotFound {
++					// silently skip files from the list which aren't found on the remote
++				} else if err != nil {
++					o.SetError(err)
++				} else {
++					o.Add(obj)
++				}
++			}
++		}()
++	}
++
++	// Pump the names in
++	for name := range files {
++		jobs <- 
name ++ if o.IsFinished() { ++ break ++ } ++ } ++ close(jobs) ++ wg.Wait() ++ ++ // Signal that this listing is over ++ o.Finished() ++} ++ + // Start starts a go routine listing the Fs passed in. It returns the + // same Lister that was passed in for convenience. + func (o *Lister) Start(f ListFser, dir string) *Lister { + o.results = make(chan listerResult, o.buffer) +- go func() { +- f.List(o, dir) +- }() ++ if o.filter != nil && o.filter.Files() != nil { ++ go o.listFiles(f, dir, o.filter.Files()) ++ } else { ++ go f.List(o, dir) ++ } + return o + } + +diff --git a/rclone-1.33/fs/lister_test.go b/rclone/fs/lister_test.go +index 3414f89..7e5f2a8 100644 +--- a/rclone-1.33/fs/lister_test.go ++++ b/rclone/fs/lister_test.go +@@ -2,6 +2,7 @@ package fs + + import ( + "io" ++ "sort" + "testing" + "time" + +@@ -21,17 +22,17 @@ var errNotImpl = errors.New("not implemented") + + type mockObject string + +-func (o mockObject) String() string { return string(o) } +-func (o mockObject) Fs() Info { return nil } +-func (o mockObject) Remote() string { return string(o) } +-func (o mockObject) Hash(HashType) (string, error) { return "", errNotImpl } +-func (o mockObject) ModTime() (t time.Time) { return t } +-func (o mockObject) Size() int64 { return 0 } +-func (o mockObject) Storable() bool { return true } +-func (o mockObject) SetModTime(time.Time) error { return errNotImpl } +-func (o mockObject) Open() (io.ReadCloser, error) { return nil, errNotImpl } +-func (o mockObject) Update(in io.Reader, src ObjectInfo) error { return errNotImpl } +-func (o mockObject) Remove() error { return errNotImpl } ++func (o mockObject) String() string { return string(o) } ++func (o mockObject) Fs() Info { return nil } ++func (o mockObject) Remote() string { return string(o) } ++func (o mockObject) Hash(HashType) (string, error) { return "", errNotImpl } ++func (o mockObject) ModTime() (t time.Time) { return t } ++func (o mockObject) Size() int64 { return 0 } ++func (o mockObject) Storable() bool { return true } ++func (o mockObject) SetModTime(time.Time) error { return errNotImpl } ++func (o mockObject) Open(options ...OpenOption) (io.ReadCloser, error) { return nil, errNotImpl } ++func (o mockObject) Update(in io.Reader, src ObjectInfo) error { return errNotImpl } ++func (o mockObject) Remove() error { return errNotImpl } + + type mockFs struct { + listFn func(o ListOpts, dir string) +@@ -42,6 +43,10 @@ func (f *mockFs) List(o ListOpts, dir string) { + f.listFn(o, dir) + } + ++func (f *mockFs) NewObject(remote string) (Object, error) { ++ return mockObject(remote), nil ++} ++ + func TestListerStart(t *testing.T) { + f := &mockFs{} + ranList := false +@@ -56,6 +61,33 @@ func TestListerStart(t *testing.T) { + assert.Equal(t, true, ranList) + } + ++func TestListerStartWithFiles(t *testing.T) { ++ f := &mockFs{} ++ ranList := false ++ f.listFn = func(o ListOpts, dir string) { ++ ranList = true ++ } ++ filter, err := NewFilter() ++ require.NoError(t, err) ++ wantNames := []string{"potato", "sausage", "rutabaga", "carrot", "lettuce"} ++ sort.Strings(wantNames) ++ for _, name := range wantNames { ++ err = filter.AddFile(name) ++ require.NoError(t, err) ++ } ++ o := NewLister().SetFilter(filter).Start(f, "") ++ objs, dirs, err := o.GetAll() ++ require.Nil(t, err) ++ assert.Len(t, dirs, 0) ++ assert.Equal(t, false, ranList) ++ var gotNames []string ++ for _, obj := range objs { ++ gotNames = append(gotNames, obj.Remote()) ++ } ++ sort.Strings(gotNames) ++ assert.Equal(t, wantNames, gotNames) ++} ++ + func TestListerSetLevel(t 
*testing.T) { + o := NewLister() + o.SetLevel(1) +diff --git a/rclone-1.33/fs/loghttp.go b/rclone-1.33/fs/loghttp.go +deleted file mode 100644 +index c332c25..0000000 +--- a/rclone-1.33/fs/loghttp.go ++++ /dev/null +@@ -1,60 +0,0 @@ +-// A logging http transport +- +-package fs +- +-import ( +- "net/http" +- "net/http/httputil" +-) +- +-const ( +- separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" +- separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" +-) +- +-// LoggedTransport is an http transport which logs the traffic +-type LoggedTransport struct { +- wrapped http.RoundTripper +- logBody bool +-} +- +-// NewLoggedTransport wraps the transport passed in and logs all roundtrips +-// including the body if logBody is set. +-func NewLoggedTransport(transport http.RoundTripper, logBody bool) *LoggedTransport { +- return &LoggedTransport{ +- wrapped: transport, +- logBody: logBody, +- } +-} +- +-// CancelRequest cancels an in-flight request by closing its +-// connection. CancelRequest should only be called after RoundTrip has +-// returned. +-func (t *LoggedTransport) CancelRequest(req *http.Request) { +- if wrapped, ok := t.wrapped.(interface { +- CancelRequest(*http.Request) +- }); ok { +- Debug(nil, "CANCEL REQUEST %v", req) +- wrapped.CancelRequest(req) +- } +-} +- +-// RoundTrip implements the RoundTripper interface. +-func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { +- buf, _ := httputil.DumpRequestOut(req, t.logBody) +- Debug(nil, "%s", separatorReq) +- Debug(nil, "%s", "HTTP REQUEST") +- Debug(nil, "%s", string(buf)) +- Debug(nil, "%s", separatorReq) +- resp, err = t.wrapped.RoundTrip(req) +- Debug(nil, "%s", separatorResp) +- Debug(nil, "%s", "HTTP RESPONSE") +- if err != nil { +- Debug(nil, "Error: %v", err) +- } else { +- buf, _ = httputil.DumpResponse(resp, t.logBody) +- Debug(nil, "%s", string(buf)) +- } +- Debug(nil, "%s", separatorResp) +- return resp, err +-} +diff --git a/rclone-1.33/fs/operations.go b/rclone/fs/operations.go +index ed49d58..d81a8a3 100644 +--- a/rclone-1.33/fs/operations.go ++++ b/rclone/fs/operations.go +@@ -169,15 +169,29 @@ func Equal(src, dst Object) bool { + return true + } + +-// MimeType returns a guess at the mime type from the extension +-func MimeType(o ObjectInfo) string { +- mimeType := mime.TypeByExtension(path.Ext(o.Remote())) ++// MimeTypeFromName returns a guess at the mime type from the name ++func MimeTypeFromName(remote string) (mimeType string) { ++ mimeType = mime.TypeByExtension(path.Ext(remote)) + if !strings.ContainsRune(mimeType, '/') { + mimeType = "application/octet-stream" + } + return mimeType + } + ++// MimeType returns the MimeType from the object, either by calling ++// the MimeTyper interface or using MimeTypeFromName ++func MimeType(o ObjectInfo) (mimeType string) { ++ // Read the MimeType from the optional interface if available ++ if do, ok := o.(MimeTyper); ok { ++ mimeType = do.MimeType() ++ Debug(o, "Read MimeType as %q", mimeType) ++ if mimeType != "" { ++ return mimeType ++ } ++ } ++ return MimeTypeFromName(o.Remote()) ++} ++ + // Used to remove a failed copy + // + // Returns whether the file was succesfully removed or not +@@ -1017,8 +1031,11 @@ func CleanUp(f Fs) error { + func Cat(f Fs, w io.Writer) error { + var mu sync.Mutex + return ListFn(f, func(o Object) { ++ var err error + Stats.Transferring(o.Remote()) +- defer Stats.DoneTransferring(o.Remote()) ++ defer func() { ++ Stats.DoneTransferring(o.Remote(), err == 
nil)
++		}()
+ 		mu.Lock()
+ 		defer mu.Unlock()
+ 		in, err := o.Open()
+@@ -1041,5 +1058,4 @@ func Cat(f Fs, w io.Writer) error {
+ 			ErrorLog(o, "Failed to send to output: %v", err)
+ 		}
+ 	})
+-
+ }
+diff --git a/rclone/fs/options.go b/rclone/fs/options.go
+new file mode 100644
+index 0000000..e1f24c0
+--- /dev/null
++++ b/rclone/fs/options.go
+@@ -0,0 +1,137 @@
++// Define the options for Open
++
++package fs
++
++import (
++	"fmt"
++	"net/http"
++	"strconv"
++)
++
++// OpenOption is an interface describing options for Open
++type OpenOption interface {
++	fmt.Stringer
++
++	// Header returns the option as an HTTP header
++	Header() (key string, value string)
++
++	// Mandatory returns whether this option can be ignored or not
++	Mandatory() bool
++}
++
++// RangeOption defines an HTTP Range option with start and end. If
++// either start or end are < 0 then they will be omitted.
++type RangeOption struct {
++	Start int64
++	End   int64
++}
++
++// Header formats the option as an http header
++func (o *RangeOption) Header() (key string, value string) {
++	key = "Range"
++	value = "bytes="
++	if o.Start >= 0 {
++		value += strconv.FormatInt(o.Start, 10)
++	}
++	value += "-"
++	if o.End >= 0 {
++		value += strconv.FormatInt(o.End, 10)
++	}
++	return key, value
++}
++
++// String formats the option into human readable form
++func (o *RangeOption) String() string {
++	return fmt.Sprintf("RangeOption(%d,%d)", o.Start, o.End)
++}
++
++// Mandatory returns whether the option must be parsed or can be ignored
++func (o *RangeOption) Mandatory() bool {
++	return false
++}
++
++// SeekOption defines an HTTP Range option with start only.
++type SeekOption struct {
++	Offset int64
++}
++
++// Header formats the option as an http header
++func (o *SeekOption) Header() (key string, value string) {
++	key = "Range"
++	value = fmt.Sprintf("bytes=%d-", o.Offset)
++	return key, value
++}
++
++// String formats the option into human readable form
++func (o *SeekOption) String() string {
++	return fmt.Sprintf("SeekOption(%d)", o.Offset)
++}
++
++// Mandatory returns whether the option must be parsed or can be ignored
++func (o *SeekOption) Mandatory() bool {
++	return true
++}
++
++// HTTPOption defines a general purpose HTTP option
++type HTTPOption struct {
++	Key   string
++	Value string
++}
++
++// Header formats the option as an http header
++func (o *HTTPOption) Header() (key string, value string) {
++	return o.Key, o.Value
++}
++
++// String formats the option into human readable form
++func (o *HTTPOption) String() string {
++	return fmt.Sprintf("HTTPOption(%q,%q)", o.Key, o.Value)
++}
++
++// Mandatory returns whether the option must be parsed or can be ignored
++func (o *HTTPOption) Mandatory() bool {
++	return false
++}
++
++// OpenOptionAddHeaders adds each header found in options to the
++// headers map, skipping any whose key or value is empty.
++func OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {
++	for _, option := range options {
++		key, value := option.Header()
++		if key != "" && value != "" {
++			headers[key] = value
++		}
++	}
++}
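
The headers these option types generate are standard RFC 7233 byte ranges.
A quick demonstration against the fs package as added by this patch (import
path as used by rclone 1.33):

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
    )

    func main() {
        for _, opt := range []fs.OpenOption{
            &fs.SeekOption{Offset: 50},
            &fs.RangeOption{Start: 0, End: 99},
            &fs.RangeOption{Start: -1, End: 99}, // suffix range: the last 99 bytes
        } {
            key, value := opt.Header()
            fmt.Printf("%v -> %s: %s\n", opt, key, value)
        }
        // SeekOption(50) -> Range: bytes=50-
        // RangeOption(0,99) -> Range: bytes=0-99
        // RangeOption(-1,99) -> Range: bytes=-99
    }
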
++
++// OpenOptionHeaders builds a map of the headers found in options,
++// skipping any whose key or value is empty.
++//
++// It returns a nil map if options was empty
++func OpenOptionHeaders(options []OpenOption) (headers map[string]string) {
++	if len(options) == 0 {
++		return nil
++	}
++	headers = make(map[string]string, len(options))
++	OpenOptionAddHeaders(options, headers)
++	return headers
++}
++
++// OpenOptionAddHTTPHeaders sets each header found in options on the
++// http.Header map, skipping any whose key or value is empty.
++func OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {
++	for _, option := range options {
++		key, value := option.Header()
++		if key != "" && value != "" {
++			headers.Set(key, value)
++		}
++	}
++}
++
++// check interface
++var (
++	_ OpenOption = (*RangeOption)(nil)
++	_ OpenOption = (*SeekOption)(nil)
++	_ OpenOption = (*HTTPOption)(nil)
++)
+diff --git a/rclone-1.33/fs/sync.go b/rclone/fs/sync.go
+index cdb152d..12d2cc1 100644
+--- a/rclone-1.33/fs/sync.go
++++ b/rclone/fs/sync.go
+@@ -116,27 +116,23 @@ func (s *syncCopyMove) readDstFiles() {
+ }
+ 
+ // Check to see if src needs to be copied to dst and if so puts it in out
+-func (s *syncCopyMove) checkOne(pair ObjectPair, out ObjectPairChan) {
++//
++// Returns a flag which indicates whether the file needs to be transferred or not.
++func (s *syncCopyMove) checkOne(pair ObjectPair) bool {
+ 	src, dst := pair.src, pair.dst
+ 	if dst == nil {
+ 		Debug(src, "Couldn't find file - need to transfer")
+-		out <- pair
+-		return
+-	}
+-	// Check to see if can store this
+-	if !src.Storable() {
+-		return
++		return true
+ 	}
+ 	// If we should ignore existing files, don't transfer
+ 	if Config.IgnoreExisting {
+ 		Debug(src, "Destination exists, skipping")
+-		return
++		return false
+ 	}
+ 	// If we should upload unconditionally
+ 	if Config.IgnoreTimes {
+-		Debug(src, "Uploading unconditionally as --ignore-times is in use")
+-		out <- pair
+-		return
++		Debug(src, "Transferring unconditionally as --ignore-times is in use")
++		return true
+ 	}
+ 	// If UpdateOlder is in effect, skip if dst is newer than src
+ 	if Config.UpdateOlder {
+@@ -154,13 +150,13 @@
+ 		switch {
+ 		case dt >= modifyWindow:
+ 			Debug(src, "Destination is newer than source, skipping")
+-			return
++			return false
+ 		case dt <= -modifyWindow:
+ 			Debug(src, "Destination is older than source, transferring")
+ 		default:
+ 			if src.Size() == dst.Size() {
+ 				Debug(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
+-				return
++				return false
+ 			}
+ 			Debug(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
+ 		}
+@@ -168,10 +164,10 @@
+ 		// Check to see if changed or not
+ 		if Equal(src, dst) {
+ 			Debug(src, "Unchanged skipping")
+-			return
++			return false
+ 		}
+ 	}
+-	out <- pair
++	return true
+ }
+ 
+ // This checks the types of errors returned while copying files
+@@ -210,7 +206,18 @@ func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sy
+ 		}
+ 		src := pair.src
+ 		Stats.Checking(src.Remote())
+-		s.checkOne(pair, out)
++		// Check to see if we can store this
++		if src.Storable() {
++			if s.checkOne(pair) {
++				out <- pair
++			} else {
++				// If moving, delete the files we don't need to copy
++				if s.DoMove {
++					// Delete src as it doesn't need transferring
++					s.processError(DeleteFile(src))
++				}
++			}
++		}
+ 		Stats.DoneChecking(src.Remote())
+ 	case <-s.abort:
+ 		return
+@@ -232,12 +239,14 @@ func (s *syncCopyMove) pairCopier(in ObjectPairChan, fdst Fs, wg 
*sync.WaitGroup + } + src := pair.src + Stats.Transferring(src.Remote()) ++ var err error + if Config.DryRun { + Log(src, "Not copying as --dry-run") + } else { +- s.processError(Copy(fdst, pair.dst, src)) ++ err = Copy(fdst, pair.dst, src) ++ s.processError(err) + } +- Stats.DoneTransferring(src.Remote()) ++ Stats.DoneTransferring(src.Remote(), err == nil) + case <-s.abort: + return + } +@@ -259,6 +268,7 @@ func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) + if !ok { + return + } ++ transferredOK := true + src := pair.src + dst := pair.dst + Stats.Transferring(src.Remote()) +@@ -267,6 +277,7 @@ func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) + err := Copy(fdst, dst, src) + s.processError(err) + if err != nil { ++ transferredOK = false + ErrorLog(src, "Not deleting as copy failed: %v", err) + } else { + // Delete src if no error on copy +@@ -278,7 +289,7 @@ func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) + } else if haveMover && src.Fs().Name() == fdst.Name() { + // Delete destination if it exists + if dst != nil { +- s.processError(DeleteFile(src)) ++ s.processError(DeleteFile(dst)) + } + // Move dst <- src + _, err := fdstMover.Move(src, src.Remote()) +@@ -293,6 +304,7 @@ func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) + Stats.Error() + ErrorLog(dst, "Couldn't move: %v", err) + s.processError(err) ++ transferredOK = false + } + } else { + Debug(src, "Moved") +@@ -300,7 +312,7 @@ func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) + } else { + doCopy() + } +- Stats.DoneTransferring(src.Remote()) ++ Stats.DoneTransferring(src.Remote(), transferredOK) + case <-s.abort: + return + } +diff --git a/rclone-1.33/fs/sync_test.go b/rclone/fs/sync_test.go +index 49f1eef..d06c170 100644 +--- a/rclone-1.33/fs/sync_test.go ++++ b/rclone/fs/sync_test.go +@@ -577,22 +577,28 @@ func TestSyncWithUpdateOlder(t *testing.T) { + func testServerSideMove(t *testing.T, r *Run, fremoteMove fs.Fs, withFilter bool) { + file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) + file2 := r.WriteBoth("empty space", "", t2) ++ file3u := r.WriteBoth("potato3", "------------------------------------------------------------ UPDATED", t2) + +- fstest.CheckItems(t, r.fremote, file2, file1) ++ fstest.CheckItems(t, r.fremote, file2, file1, file3u) + + t.Logf("Server side move (if possible) %v -> %v", r.fremote, fremoteMove) + + // Write just one file in the new remote + r.WriteObjectTo(fremoteMove, "empty space", "", t2, false) +- fstest.CheckItems(t, fremoteMove, file2) ++ file3 := r.WriteObjectTo(fremoteMove, "potato3", "------------------------------------------------------------", t1, false) ++ fstest.CheckItems(t, fremoteMove, file2, file3) + + // Do server side move + fs.Stats.ResetCounters() + err := fs.MoveDir(fremoteMove, r.fremote) + require.NoError(t, err) + +- fstest.CheckItems(t, r.fremote, file2) +- fstest.CheckItems(t, fremoteMove, file2, file1) ++ if withFilter { ++ fstest.CheckItems(t, r.fremote, file2) ++ } else { ++ fstest.CheckItems(t, r.fremote) ++ } ++ fstest.CheckItems(t, fremoteMove, file2, file1, file3u) + + // Purge the original before moving + require.NoError(t, fs.Purge(r.fremote)) +@@ -603,10 +609,10 @@ func testServerSideMove(t *testing.T, r *Run, fremoteMove fs.Fs, withFilter bool + require.NoError(t, err) + + if withFilter { +- fstest.CheckItems(t, r.fremote, file1) ++ fstest.CheckItems(t, 
r.fremote, file1, file3u) + fstest.CheckItems(t, fremoteMove, file2) + } else { +- fstest.CheckItems(t, r.fremote, file2, file1) ++ fstest.CheckItems(t, r.fremote, file2, file1, file3u) + fstest.CheckItems(t, fremoteMove) + } + } +diff --git a/rclone/fs/versioncheck.go b/rclone/fs/versioncheck.go +new file mode 100644 +index 0000000..5aa76fb +--- /dev/null ++++ b/rclone/fs/versioncheck.go +@@ -0,0 +1,7 @@ ++//+build !go1.5 ++ ++package fs ++ ++// Upgrade to Go version 1.5 to compile rclone - latest stable go ++// compiler recommended. ++func init() { Go_version_1_5_required_for_compilation() } +diff --git a/rclone-1.33/fstest/fstests/fstests.go b/rclone/fstest/fstests/fstests.go +index 24aaf1f..2d7626e 100644 +--- a/rclone-1.33/fstest/fstests/fstests.go ++++ b/rclone/fstest/fstests/fstests.go +@@ -10,6 +10,7 @@ import ( + "flag" + "fmt" + "io" ++ "io/ioutil" + "os" + "path" + "sort" +@@ -37,14 +38,16 @@ var ( + ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), + Path: "file name.txt", + } +- file2 = fstest.Item{ ++ file1Contents = "" ++ file2 = fstest.Item{ + ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"), + Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`, + WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠/z.txt`, + } +- verbose = flag.Bool("verbose", false, "Set to enable logging") +- dumpHeaders = flag.Bool("dump-headers", false, "Dump HTTP headers - may contain sensitive info") +- dumpBodies = flag.Bool("dump-bodies", false, "Dump HTTP headers and bodies - may contain sensitive info") ++ file2Contents = "" ++ verbose = flag.Bool("verbose", false, "Set to enable logging") ++ dumpHeaders = flag.Bool("dump-headers", false, "Dump HTTP headers - may contain sensitive info") ++ dumpBodies = flag.Bool("dump-bodies", false, "Dump HTTP headers and bodies - may contain sensitive info") + ) + + // ExtraConfigItem describes a config item added on the fly while testing +@@ -195,9 +198,10 @@ func findObject(t *testing.T, Name string) fs.Object { + return obj + } + +-func testPut(t *testing.T, file *fstest.Item) { ++func testPut(t *testing.T, file *fstest.Item) string { + again: +- buf := bytes.NewBufferString(fstest.RandomString(100)) ++ contents := fstest.RandomString(100) ++ buf := bytes.NewBufferString(contents) + hash := fs.NewMultiHasher() + in := io.TeeReader(buf, hash) + +@@ -222,24 +226,25 @@ again: + // Re-read the object and check again + obj = findObject(t, file.Path) + file.Check(t, obj, remote.Precision()) ++ return contents + } + + // TestFsPutFile1 tests putting a file + func TestFsPutFile1(t *testing.T) { + skipIfNotOk(t) +- testPut(t, &file1) ++ file1Contents = testPut(t, &file1) + } + + // TestFsPutFile2 tests putting a file into a subdirectory + func TestFsPutFile2(t *testing.T) { + skipIfNotOk(t) +- testPut(t, &file2) ++ file2Contents = testPut(t, &file2) + } + + // TestFsUpdateFile1 tests updating file1 with new contents + func TestFsUpdateFile1(t *testing.T) { + skipIfNotOk(t) +- testPut(t, &file1) ++ file1Contents = testPut(t, &file1) + // Note that the next test will check there are no duplicated file names + } + +@@ -501,6 +506,22 @@ func TestObjectModTime(t *testing.T) { + file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision()) + } + ++// TestObjectMimeType tests the MimeType of the object is correct ++func TestObjectMimeType(t *testing.T) { ++ skipIfNotOk(t) ++ obj := findObject(t, file1.Path) ++ do, ok := obj.(fs.MimeTyper) ++ if !ok { ++ t.Skip("MimeType method not supported") ++ } ++ mimeType := do.MimeType() ++ if 
strings.ContainsRune(mimeType, ';') { ++ assert.Equal(t, "text/plain; charset=utf-8", mimeType) ++ } else { ++ assert.Equal(t, "text/plain", mimeType) ++ } ++} ++ + // TestObjectSetModTime tests that SetModTime works + func TestObjectSetModTime(t *testing.T) { + skipIfNotOk(t) +@@ -525,42 +546,56 @@ func TestObjectSize(t *testing.T) { + assert.Equal(t, file1.Size, obj.Size()) + } + ++// read the contents of an object as a string ++func readObject(t *testing.T, obj fs.Object, options ...fs.OpenOption) string { ++ in, err := obj.Open(options...) ++ require.NoError(t, err) ++ contents, err := ioutil.ReadAll(in) ++ require.NoError(t, err) ++ err = in.Close() ++ require.NoError(t, err) ++ return string(contents) ++} ++ + // TestObjectOpen tests that Open works + func TestObjectOpen(t *testing.T) { + skipIfNotOk(t) + obj := findObject(t, file1.Path) +- in, err := obj.Open() +- require.NoError(t, err) +- hasher := fs.NewMultiHasher() +- n, err := io.Copy(hasher, in) +- require.NoError(t, err, fmt.Sprintf("hasher copy error: %v", err)) +- require.Equal(t, file1.Size, n, "Read wrong number of bytes") +- err = in.Close() +- require.NoError(t, err) +- // Check content of file by comparing the calculated hashes +- for hashType, got := range hasher.Sums() { +- assert.Equal(t, file1.Hashes[hashType], got) +- } ++ assert.Equal(t, file1Contents, readObject(t, obj), "contents of file1 differ") ++} + ++// TestObjectOpenSeek tests that Open works with Seek ++func TestObjectOpenSeek(t *testing.T) { ++ skipIfNotOk(t) ++ obj := findObject(t, file1.Path) ++ assert.Equal(t, file1Contents[50:], readObject(t, obj, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek") + } + + // TestObjectUpdate tests that Update works + func TestObjectUpdate(t *testing.T) { + skipIfNotOk(t) +- buf := bytes.NewBufferString(fstest.RandomString(200)) ++ contents := fstest.RandomString(200) ++ buf := bytes.NewBufferString(contents) + hash := fs.NewMultiHasher() + in := io.TeeReader(buf, hash) + + file1.Size = int64(buf.Len()) + obj := findObject(t, file1.Path) +- obji := fs.NewStaticObjectInfo(file1.Path, file1.ModTime, file1.Size, true, nil, obj.Fs()) ++ obji := fs.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs()) + err := obj.Update(in, obji) + require.NoError(t, err) + file1.Hashes = hash.Sums() ++ ++ // check the object has been updated + file1.Check(t, obj, remote.Precision()) ++ + // Re-read the object and check again + obj = findObject(t, file1.Path) + file1.Check(t, obj, remote.Precision()) ++ ++ // check contents correct ++ assert.Equal(t, contents, readObject(t, obj), "contents of updated file1 differ") ++ file1Contents = contents + } + + // TestObjectStorable tests that Storable works +diff --git a/rclone-1.33/googlecloudstorage/googlecloudstorage.go b/rclone/googlecloudstorage/googlecloudstorage.go +index d3cf449..fb87fac 100644 +--- a/rclone-1.33/googlecloudstorage/googlecloudstorage.go ++++ b/rclone/googlecloudstorage/googlecloudstorage.go +@@ -135,20 +135,21 @@ type Fs struct { + bucket string // the bucket we are working on + root string // the path we are working on if any + projectNumber string // used for finding buckets +- objectAcl string // used when creating new objects +- bucketAcl string // used when creating new buckets ++ objectACL string // used when creating new objects ++ bucketACL string // used when creating new buckets + } + + // Object describes a storage object + // + // Will definitely have info but maybe not meta + type Object struct { +- fs *Fs 
// what this object is part of +- remote string // The remote path +- url string // download path +- md5sum string // The MD5Sum of the object +- bytes int64 // Bytes in the object +- modTime time.Time // Modified time of the object ++ fs *Fs // what this object is part of ++ remote string // The remote path ++ url string // download path ++ md5sum string // The MD5Sum of the object ++ bytes int64 // Bytes in the object ++ modTime time.Time // Modified time of the object ++ mimeType string + } + + // ------------------------------------------------------------ +@@ -230,14 +231,14 @@ func NewFs(name, root string) (fs.Fs, error) { + bucket: bucket, + root: directory, + projectNumber: fs.ConfigFile.MustValue(name, "project_number"), +- objectAcl: fs.ConfigFile.MustValue(name, "object_acl"), +- bucketAcl: fs.ConfigFile.MustValue(name, "bucket_acl"), ++ objectACL: fs.ConfigFile.MustValue(name, "object_acl"), ++ bucketACL: fs.ConfigFile.MustValue(name, "bucket_acl"), + } +- if f.objectAcl == "" { +- f.objectAcl = "private" ++ if f.objectACL == "" { ++ f.objectACL = "private" + } +- if f.bucketAcl == "" { +- f.bucketAcl = "private" ++ if f.bucketACL == "" { ++ f.bucketACL = "private" + } + + // Create a new authorized Drive client. +@@ -461,7 +462,7 @@ func (f *Fs) Mkdir() error { + bucket := storage.Bucket{ + Name: f.bucket, + } +- _, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketAcl).Do() ++ _, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do() + return err + } + +@@ -558,6 +559,7 @@ func (o *Object) Size() int64 { + func (o *Object) setMetaData(info *storage.Object) { + o.url = info.MediaLink + o.bytes = int64(info.Size) ++ o.mimeType = info.ContentType + + // Read md5sum + md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash) +@@ -649,27 +651,18 @@ func (o *Object) Storable() bool { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { +- // This is slightly complicated by Go here insisting on +- // decoding the %2F in URLs into / which is legal in http, but +- // unfortunately not what the storage server wants. 
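
Besides dropping the URL re-escaping workaround described in the removed
comment, the rewritten Open below adopts the same status rule as the drive
rewrite earlier in this patch: 200 is always acceptable, 206 only when the
request actually carried a Range header. The rule factored into a
standalone helper (the patch inlines the expression instead):

    package main

    import (
        "fmt"
        "net/http"
    )

    // statusOK mirrors the inline check in the updated Open methods.
    func statusOK(req *http.Request, res *http.Response) bool {
        _, isRanging := req.Header["Range"]
        return res.StatusCode == http.StatusOK ||
            (isRanging && res.StatusCode == http.StatusPartialContent)
    }

    func main() {
        req, err := http.NewRequest("GET", "http://example.com/object", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Range", "bytes=0-99")
        res := &http.Response{StatusCode: http.StatusPartialContent}
        fmt.Println(statusOK(req, res)) // true
    }
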
+- // +- // So first encode all the % into their encoded form +- // URL will decode them giving our original escaped string +- url := strings.Replace(o.url, "%", "%25", -1) +- req, err := http.NewRequest("GET", url, nil) ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { ++ req, err := http.NewRequest("GET", o.url, nil) + if err != nil { + return nil, err + } +- // SetOpaque sets Opaque such that HTTP requests to it don't +- // alter any hex-escaped characters +- googleapi.SetOpaque(req.URL) +- req.Header.Set("User-Agent", fs.UserAgent) ++ fs.OpenOptionAddHTTPHeaders(req.Header, options) + res, err := o.fs.client.Do(req) + if err != nil { + return nil, err + } +- if res.StatusCode != 200 { ++ _, isRanging := req.Header["Range"] ++ if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) { + _ = res.Body.Close() // ignore error + return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status) + } +@@ -686,12 +679,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + object := storage.Object{ + Bucket: o.fs.bucket, + Name: o.fs.root + o.remote, +- ContentType: fs.MimeType(o), ++ ContentType: fs.MimeType(src), + Size: uint64(size), + Updated: modTime.Format(timeFormatOut), // Doesn't get set + Metadata: metadataFromModTime(modTime), + } +- newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectAcl).Do() ++ newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do() + if err != nil { + return err + } +@@ -705,9 +698,15 @@ func (o *Object) Remove() error { + return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do() + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( +- _ fs.Fs = &Fs{} +- _ fs.Copier = &Fs{} +- _ fs.Object = &Object{} ++ _ fs.Fs = &Fs{} ++ _ fs.Copier = &Fs{} ++ _ fs.Object = &Object{} ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/googlecloudstorage/googlecloudstorage_test.go b/rclone/googlecloudstorage/googlecloudstorage_test.go +index 2084353..24147e3 100644 +--- a/rclone-1.33/googlecloudstorage/googlecloudstorage_test.go ++++ b/rclone/googlecloudstorage/googlecloudstorage_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone/graphics/rclone-1024x1024.png b/rclone/graphics/rclone-1024x1024.png +new file mode 100644 +index 0000000..c733a79 +Binary files /dev/null and b/rclone/graphics/rclone-1024x1024.png differ +diff --git 
a/rclone/graphics/rclone-64x64-forum.png b/rclone/graphics/rclone-64x64-forum.png +new file mode 100644 +index 0000000..fcaad16 +Binary files /dev/null and b/rclone/graphics/rclone-64x64-forum.png differ +diff --git a/rclone/graphics/rclone-64x64-forum.xcf b/rclone/graphics/rclone-64x64-forum.xcf +new file mode 100644 +index 0000000..d3f8ce5 +Binary files /dev/null and b/rclone/graphics/rclone-64x64-forum.xcf differ +diff --git a/rclone-1.33/hubic/hubic.go b/rclone/hubic/hubic.go +index b62aada..47d127f 100644 +--- a/rclone-1.33/hubic/hubic.go ++++ b/rclone/hubic/hubic.go +@@ -112,7 +112,6 @@ func (f *Fs) getCredentials() (err error) { + if err != nil { + return err + } +- req.Header.Add("User-Agent", fs.UserAgent) + resp, err := f.client.Do(req) + if err != nil { + return err +@@ -155,7 +154,6 @@ func NewFs(name, root string) (fs.Fs, error) { + // Make the swift Connection + c := &swiftLib.Connection{ + Auth: newAuth(f), +- UserAgent: fs.UserAgent, + ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport + Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport + Transport: fs.Config.Transport(), +diff --git a/rclone-1.33/hubic/hubic_test.go b/rclone/hubic/hubic_test.go +index 46eda57..124bccb 100644 +--- a/rclone-1.33/hubic/hubic_test.go ++++ b/rclone/hubic/hubic_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/local/local.go b/rclone/local/local.go +index 53da8f5..bf058ab 100644 +--- a/rclone-1.33/local/local.go ++++ b/rclone/local/local.go +@@ -174,7 +174,9 @@ func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (su + newRemote := path.Join(remote, name) + newPath := filepath.Join(dirpath, name) + if fi.IsDir() { +- if out.IncludeDirectory(newRemote) { ++ // Ignore directories which are symlinks. These are junction points under windows which ++ // are kind of a souped up symlink. Unix doesn't have directories which are symlinks. 
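
The check added after this comment reduces to a single predicate over the
file mode - Windows junction points report both the directory and symlink
bits. In isolation (hypothetical helper, not part of the patch):

    package main

    import (
        "fmt"
        "os"
    )

    // listableDir reports whether a directory entry should be descended
    // into: real directories yes, symlinked directories no.
    func listableDir(fi os.FileInfo) bool {
        return fi.IsDir() && fi.Mode()&os.ModeSymlink == 0
    }

    func main() {
        fi, err := os.Lstat(".")
        if err != nil {
            panic(err)
        }
        fmt.Println(listableDir(fi))
    }
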
++ if (fi.Mode()&os.ModeSymlink) == 0 && out.IncludeDirectory(newRemote) { + dir := &fs.Dir{ + Name: f.cleanRemote(newRemote), + When: fi.ModTime(), +@@ -538,6 +540,11 @@ func (o *Object) SetModTime(modTime time.Time) error { + // Storable returns a boolean showing if this object is storable + func (o *Object) Storable() bool { + mode := o.info.Mode() ++ // On windows a file with os.ModeSymlink represents a file with reparse points ++ if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 { ++ fs.Debug(o, "Clearing symlink bit to allow a file with reparse points to be copied") ++ mode &^= os.ModeSymlink ++ } + if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + fs.Debug(o, "Can't transfer non file/directory") + return false +@@ -578,18 +585,36 @@ func (file *localOpenFile) Close() (err error) { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { +- in, err = os.Open(o.path) ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { ++ var offset int64 ++ for _, option := range options { ++ switch x := option.(type) { ++ case *fs.SeekOption: ++ offset = x.Offset ++ default: ++ if option.Mandatory() { ++ fs.Log(o, "Unsupported mandatory option: %v", option) ++ } ++ } ++ } ++ ++ fd, err := os.Open(o.path) + if err != nil { + return + } ++ if offset != 0 { ++ // seek the object ++ _, err = fd.Seek(offset, 0) ++ // don't attempt to make checksums ++ return fd, err ++ } + // Update the md5sum as we go along + in = &localOpenFile{ + o: o, +- in: in, ++ in: fd, + hash: fs.NewMultiHasher(), + } +- return ++ return in, nil + } + + // mkdirAll makes all the directories needed to store the object +diff --git a/rclone-1.33/local/local_test.go b/rclone/local/local_test.go +index 4619032..eb9f91a 100644 +--- a/rclone-1.33/local/local_test.go ++++ b/rclone/local/local_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/onedrive/onedrive.go b/rclone/onedrive/onedrive.go +index ec8bf30..35d2d2f 100644 +--- a/rclone-1.33/onedrive/onedrive.go ++++ b/rclone/onedrive/onedrive.go +@@ -98,6 +98,7 @@ type Object struct { + modTime time.Time // modification time of the object + id string // ID of the object + sha1 string // SHA-1 of the object content ++ mimeType string // Content-Type of object from server (may not be as uploaded) + } + + // ------------------------------------------------------------ +@@ -686,8 +687,11 @@ func (o *Object) setMetaData(info *api.Item) { + // fact uppercase hex strings. + // + // In OneDrive for Business, SHA1 and CRC32 hash values are not returned for files. 
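
The hunk continuing below also captures info.File.MimeType, making onedrive
another backend in this patch that implements the optional fs.MimeTyper
interface. The consuming side is fs.MimeType in the operations.go hunk
earlier; condensed into a sketch (assuming the patched fs package):

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
    )

    // mimeTypeOf condenses fs.MimeType: prefer the backend's MimeTyper
    // when implemented and non-empty, otherwise guess from the extension.
    func mimeTypeOf(o fs.ObjectInfo) string {
        if do, ok := o.(fs.MimeTyper); ok {
            if mimeType := do.MimeType(); mimeType != "" {
                return mimeType
            }
        }
        return fs.MimeTypeFromName(o.Remote())
    }

    func main() {
        fmt.Println(fs.MimeTypeFromName("photo.jpg")) // image/jpeg
    }
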
+- if info.File != nil && info.File.Hashes.Sha1Hash != "" { +- o.sha1 = strings.ToLower(info.File.Hashes.Sha1Hash) ++ if info.File != nil { ++ o.mimeType = info.File.MimeType ++ if info.File.Hashes.Sha1Hash != "" { ++ o.sha1 = strings.ToLower(info.File.Hashes.Sha1Hash) ++ } + } + if info.FileSystemInfo != nil { + o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime) +@@ -771,14 +775,15 @@ func (o *Object) Storable() bool { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { + if o.id == "" { + return nil, errors.New("can't download - no id") + } + var resp *http.Response + opts := rest.Opts{ +- Method: "GET", +- Path: "/drive/items/" + o.id + "/content", ++ Method: "GET", ++ Path: "/drive/items/" + o.id + "/content", ++ Options: options, + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.Call(&opts) +@@ -935,6 +940,11 @@ func (o *Object) Remove() error { + return o.fs.deleteObject(o.id) + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( + _ fs.Fs = (*Fs)(nil) +@@ -942,5 +952,6 @@ var ( + _ fs.Copier = (*Fs)(nil) + // _ fs.Mover = (*Fs)(nil) + // _ fs.DirMover = (*Fs)(nil) +- _ fs.Object = (*Object)(nil) ++ _ fs.Object = (*Object)(nil) ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/onedrive/onedrive_test.go b/rclone/onedrive/onedrive_test.go +index e8e2300..2fdcab3 100644 +--- a/rclone-1.33/onedrive/onedrive_test.go ++++ b/rclone/onedrive/onedrive_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/rclone.1 b/rclone/rclone.1 +index 9ac04b9..912a217 100644 +--- a/rclone-1.33/rclone.1 ++++ b/rclone/rclone.1 +@@ -40,13 +40,21 @@ Timestamps preserved on files + .IP \[bu] 2 + Partial syncs supported on a whole file basis + .IP \[bu] 2 +-Copy mode to just copy new/changed files ++Copy (http://rclone.org/commands/rclone_copy/) mode to just copy ++new/changed files + .IP \[bu] 2 +-Sync (one way) mode to make a directory identical ++Sync (http://rclone.org/commands/rclone_sync/) (one way) mode to make a ++directory identical + .IP \[bu] 2 +-Check mode to check for file hash equality ++Check (http://rclone.org/commands/rclone_check/) mode to check for file ++hash equality + .IP \[bu] 2 + Can sync to and from network, eg two different cloud accounts ++.IP \[bu] 2 ++Optional encryption (Crypt (http://rclone.org/crypt/)) ++.IP \[bu] 2 ++Optional FUSE mount (rclone ++mount (http://rclone.org/commands/rclone_mount/)) + .PP + Links + .IP \[bu] 2 +@@ -163,6 +171,8 @@ Hubic (http://rclone.org/hubic/) + 
Microsoft One Drive (http://rclone.org/onedrive/) + .IP \[bu] 2 + Yandex Disk (http://rclone.org/yandex/) ++.IP \[bu] 2 ++Crypt (http://rclone.org/crypt/) \- to encrypt other remotes + .SS Usage + .PP + Rclone syncs a directory tree from one storage system to another. +diff --git a/rclone-1.33/rclone.go b/rclone/rclone.go +index c88d94d..873b9f9 100644 +--- a/rclone-1.33/rclone.go ++++ b/rclone/rclone.go +@@ -4,8 +4,7 @@ + package main + + import ( +- "fmt" +- "os" ++ "log" + + "github.com/ncw/rclone/cmd" + _ "github.com/ncw/rclone/cmd/all" // import all commands +@@ -14,8 +13,6 @@ import ( + + func main() { + if err := cmd.Root.Execute(); err != nil { +- fmt.Println(err) +- os.Exit(-1) ++ log.Fatalf("Fatal error: %v", err) + } +- os.Exit(0) + } +diff --git a/rclone-1.33/rest/rest.go b/rclone/rest/rest.go +index 5f90c3d..5907074 100644 +--- a/rclone-1.33/rest/rest.go ++++ b/rclone/rest/rest.go +@@ -31,7 +31,6 @@ func NewClient(c *http.Client) *Client { + errorHandler: defaultErrorHandler, + headers: make(map[string]string), + } +- api.SetHeader("User-Agent", fs.UserAgent) + return api + } + +@@ -84,6 +83,7 @@ type Opts struct { + ExtraHeaders map[string]string + UserName string // username for Basic Auth + Password string // password for Basic Auth ++ Options []fs.OpenOption + } + + // DecodeJSON decodes resp.Body into result +@@ -93,6 +93,27 @@ func DecodeJSON(resp *http.Response, result interface{}) (err error) { + return decoder.Decode(result) + } + ++// Make a new http client which resets the headers passed in on redirect ++func clientWithHeaderReset(c *http.Client, headers map[string]string) *http.Client { ++ if len(headers) == 0 { ++ return c ++ } ++ clientCopy := *c ++ clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error { ++ if len(via) >= 10 { ++ return errors.New("stopped after 10 redirects") ++ } ++ // Reset the headers in the new request ++ for k, v := range headers { ++ if v != "" { ++ req.Header.Add(k, v) ++ } ++ } ++ return nil ++ } ++ return &clientCopy ++} ++ + // Call makes the call and returns the http.Response + // + // if err != nil then resp.Body will need to be closed +@@ -137,6 +158,8 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) { + headers[k] = v + } + } ++ // add any options to the headers ++ fs.OpenOptionAddHeaders(opts.Options, headers) + // Now set the headers + for k, v := range headers { + if v != "" { +@@ -146,8 +169,9 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) { + if opts.UserName != "" || opts.Password != "" { + req.SetBasicAuth(opts.UserName, opts.Password) + } ++ c := clientWithHeaderReset(api.c, headers) + api.mu.RUnlock() +- resp, err = api.c.Do(req) ++ resp, err = c.Do(req) + api.mu.RLock() + if err != nil { + return nil, err +diff --git a/rclone-1.33/s3/s3.go b/rclone/s3/s3.go +index 57c653d..9c56596 100644 +--- a/rclone-1.33/s3/s3.go ++++ b/rclone/s3/s3.go +@@ -36,6 +36,7 @@ import ( + "github.com/ncw/rclone/fs" + "github.com/ncw/swift" + "github.com/pkg/errors" ++ "github.com/spf13/pflag" + ) + + // Register with Fs +@@ -178,6 +179,22 @@ func init() { + Value: "AES256", + Help: "AES256", + }}, ++ }, { ++ Name: "storage_class", ++ Help: "The storage class to use when storing objects in S3.", ++ Examples: []fs.OptionExample{{ ++ Value: "", ++ Help: "Default", ++ }, { ++ Value: "STANDARD", ++ Help: "Standard storage class", ++ }, { ++ Value: "REDUCED_REDUNDANCY", ++ Help: "Reduced redundancy storage class", ++ }, { ++ Value: "STANDARD_IA", ++ Help: "Standard Infrequent 
Access storage class", ++ }}, + }}, + }) + } +@@ -190,6 +207,12 @@ const ( + maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY + ) + ++// Globals ++var ( ++ // Flags ++ s3StorageClass = pflag.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)") ++) ++ + // Fs represents a remote s3 server + type Fs struct { + name string // the name of the remote +@@ -200,20 +223,22 @@ type Fs struct { + root string // root of the bucket - ignore all objects above this + locationConstraint string // location constraint of new buckets + sse string // the type of server-side encryption ++ storageClass string // storage class + } + + // Object describes a s3 object + type Object struct { + // Will definitely have everything but meta which may be nil + // +- // List will read everything but meta - to fill that in need to call +- // readMetaData ++ // List will read everything but meta & mimeType - to fill ++ // that in you need to call readMetaData + fs *Fs // what this object is part of + remote string // The remote path + etag string // md5sum of the object + bytes int64 // size of the object + lastModified time.Time // Last modified + meta map[string]*string // The object metadata if known - may be nil ++ mimeType string // MimeType of object - may be "" + } + + // ------------------------------------------------------------ +@@ -324,14 +349,10 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) { + c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + c.Handlers.Sign.PushBack(signer) + } +- // Add user agent +- c.Handlers.Build.PushBack(func(r *request.Request) { +- r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent) +- }) + return c, ses, nil + } + +-// NewFs contstructs an Fs from the path, bucket:path ++// NewFs constructs an Fs from the path, bucket:path + func NewFs(name, root string) (fs.Fs, error) { + bucket, directory, err := s3ParsePath(root) + if err != nil { +@@ -350,6 +371,10 @@ func NewFs(name, root string) (fs.Fs, error) { + root: directory, + locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"), + sse: fs.ConfigFile.MustValue(name, "server_side_encryption"), ++ storageClass: fs.ConfigFile.MustValue(name, "storage_class"), ++ } ++ if *s3StorageClass != "" { ++ f.storageClass = *s3StorageClass + } + if f.root != "" { + f.root += "/" +@@ -753,6 +778,7 @@ func (o *Object) readMetaData() (err error) { + } else { + o.lastModified = *resp.LastModified + } ++ o.mimeType = aws.StringValue(resp.ContentType) + return nil + } + +@@ -794,7 +820,7 @@ func (o *Object) SetModTime(modTime time.Time) error { + } + + // Guess the content type +- contentType := fs.MimeType(o) ++ mimeType := fs.MimeType(o) + + // Copy the object to itself to update the metadata + key := o.fs.root + o.remote +@@ -804,7 +830,7 @@ func (o *Object) SetModTime(modTime time.Time) error { + Bucket: &o.fs.bucket, + ACL: &o.fs.acl, + Key: &key, +- ContentType: &contentType, ++ ContentType: &mimeType, + CopySource: aws.String(url.QueryEscape(sourceKey)), + Metadata: o.meta, + MetadataDirective: &directive, +@@ -819,12 +845,23 @@ func (o *Object) Storable() bool { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { + key := o.fs.root + o.remote + req := s3.GetObjectInput{ + Bucket: &o.fs.bucket, + Key: &key, + } ++ for _, option := range options { ++ switch 
option.(type) { ++ case *fs.RangeOption, *fs.SeekOption: ++ _, value := option.Header() ++ req.Range = &value ++ default: ++ if option.Mandatory() { ++ fs.Log(o, "Unsupported mandatory option: %v", option) ++ } ++ } ++ } + resp, err := o.fs.c.GetObject(&req) + if err != nil { + return nil, err +@@ -856,7 +893,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + } + + // Guess the content type +- contentType := fs.MimeType(o) ++ mimeType := fs.MimeType(src) + + key := o.fs.root + o.remote + req := s3manager.UploadInput{ +@@ -864,13 +901,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + ACL: &o.fs.acl, + Key: &key, + Body: in, +- ContentType: &contentType, ++ ContentType: &mimeType, + Metadata: metadata, + //ContentLength: &size, + } + if o.fs.sse != "" { + req.ServerSideEncryption = &o.fs.sse + } ++ if o.fs.storageClass != "" { ++ req.StorageClass = &o.fs.storageClass ++ } + _, err := uploader.Upload(&req) + if err != nil { + return err +@@ -893,9 +933,20 @@ func (o *Object) Remove() error { + return err + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ err := o.readMetaData() ++ if err != nil { ++ fs.Log(o, "Failed to read metadata: %v", err) ++ return "" ++ } ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( +- _ fs.Fs = &Fs{} +- _ fs.Copier = &Fs{} +- _ fs.Object = &Object{} ++ _ fs.Fs = &Fs{} ++ _ fs.Copier = &Fs{} ++ _ fs.Object = &Object{} ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/s3/s3_test.go b/rclone/s3/s3_test.go +index 5bde96a..fdb646f 100644 +--- a/rclone-1.33/s3/s3_test.go ++++ b/rclone/s3/s3_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/swift/swift.go b/rclone/swift/swift.go +index 17382a8..429a8c2 100644 +--- a/rclone-1.33/swift/swift.go ++++ b/rclone/swift/swift.go +@@ -162,7 +162,6 @@ func swiftConnection(name string) (*swift.Connection, error) { + ApiKey: apiKey, + AuthUrl: authURL, + AuthVersion: fs.ConfigFile.MustInt(name, "auth_version", 0), +- UserAgent: fs.UserAgent, + Tenant: fs.ConfigFile.MustValue(name, "tenant"), + Region: fs.ConfigFile.MustValue(name, "region"), + Domain: fs.ConfigFile.MustValue(name, "domain"), +@@ -236,7 +235,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er + // Note that due to a quirk of swift, dynamic large objects are + // returned as 0 bytes in the listing. Correct this here by + // making sure we read the full metadata for all 0 byte files. +- if info != nil && info.Bytes == 0 { ++ // We don't read the metadata for directory marker objects. 
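Both the S3 and Swift hunks above turn seek and range options into an ordinary HTTP Range header; for a seek, the value has the open-ended form bytes=offset-, meaning "from this offset to the end". A minimal sketch of that rendering, with rangeValue as a hypothetical helper and a placeholder URL:

    package main

    import (
        "fmt"
        "net/http"
    )

    // rangeValue renders a starting byte offset as an HTTP Range
    // header value, e.g. 1024 -> "bytes=1024-".
    func rangeValue(offset int64) string {
        return fmt.Sprintf("bytes=%d-", offset)
    }

    func main() {
        req, err := http.NewRequest("GET", "https://example.com/big.object", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Range", rangeValue(1024))
        // A server that honours ranges answers 206 Partial Content with
        // only the requested bytes; one that ignores them answers 200 OK.
        fmt.Println(req.Header.Get("Range")) // bytes=1024-
    }

The same reasoning is behind the Swift ObjectOpen call below passing !isRanging as its check-hash flag: a ranged body cannot be expected to match the stored whole-object checksum.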
++ if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" { + info = nil + } + if info != nil { +@@ -629,8 +629,10 @@ func (o *Object) Storable() bool { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { +- in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, true, nil) ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { ++ headers := fs.OpenOptionHeaders(options) ++ _, isRanging := headers["Range"] ++ in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers) + return + } + +@@ -689,7 +691,7 @@ func urlEncode(str string) string { + + // updateChunks updates the existing object using chunks to a separate + // container. It returns a string which prefixes current segments. +-func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) { ++func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64, contentType string) (string, error) { + // Create the segmentsContainer if it doesn't exist + err := o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil) + if err != nil { +@@ -718,7 +720,7 @@ func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) ( + headers["Content-Length"] = "0" // set Content-Length as we know it + emptyReader := bytes.NewReader(nil) + manifestName := o.fs.root + o.remote +- _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", "", headers) ++ _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers) + return uniquePrefix + "/", err + } + +@@ -738,16 +740,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + // Set the mtime + m := swift.Metadata{} + m.SetModTime(modTime) ++ contentType := fs.MimeType(src) + headers := m.ObjectHeaders() + uniquePrefix := "" + if size > int64(chunkSize) { +- uniquePrefix, err = o.updateChunks(in, headers, size) ++ uniquePrefix, err = o.updateChunks(in, headers, size, contentType) + if err != nil { + return err + } + } else { + headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it +- _, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", "", headers) ++ _, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers) + if err != nil { + return err + } +@@ -787,10 +790,16 @@ func (o *Object) Remove() error { + return nil + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ return o.info.ContentType ++} ++ + // Check the interfaces are satisfied + var ( +- _ fs.Fs = &Fs{} +- _ fs.Purger = &Fs{} +- _ fs.Copier = &Fs{} +- _ fs.Object = &Object{} ++ _ fs.Fs = &Fs{} ++ _ fs.Purger = &Fs{} ++ _ fs.Copier = &Fs{} ++ _ fs.Object = &Object{} ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/swift/swift_test.go b/rclone/swift/swift_test.go +index 941c6e7..27d78b4 100644 +--- a/rclone-1.33/swift/swift_test.go ++++ b/rclone/swift/swift_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t 
*testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } +diff --git a/rclone-1.33/yandex/api/download.go b/rclone/yandex/api/download.go +index 10c01d9..310d346 100644 +--- a/rclone-1.33/yandex/api/download.go ++++ b/rclone/yandex/api/download.go +@@ -13,13 +13,13 @@ type DownloadResponse struct { + Templated bool `json:"templated"` + } + +-// Download will get specified data from Yandex.Disk. +-func (c *Client) Download(remotePath string) (io.ReadCloser, error) { //io.Writer ++// Download will get specified data from Yandex.Disk supplying the extra headers ++func (c *Client) Download(remotePath string, headers map[string]string) (io.ReadCloser, error) { //io.Writer + ur, err := c.DownloadRequest(remotePath) + if err != nil { + return nil, err + } +- return c.PerformDownload(ur.HRef) ++ return c.PerformDownload(ur.HRef, headers) + } + + // DownloadRequest will make an download request and return a URL to download data to. +diff --git a/rclone-1.33/yandex/api/performdownload.go b/rclone/yandex/api/performdownload.go +index d9c74f8..20e8026 100644 +--- a/rclone-1.33/yandex/api/performdownload.go ++++ b/rclone/yandex/api/performdownload.go +@@ -8,13 +8,18 @@ import ( + "github.com/pkg/errors" + ) + +-// PerformDownload does the actual download via unscoped PUT request. +-func (c *Client) PerformDownload(url string) (out io.ReadCloser, err error) { ++// PerformDownload does the actual download via unscoped GET request. ++func (c *Client) PerformDownload(url string, headers map[string]string) (out io.ReadCloser, err error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + ++ // Set any extra headers ++ for k, v := range headers { ++ req.Header.Set(k, v) ++ } ++ + //c.setRequestScope(req) + + resp, err := c.HTTPClient.Do(req) +@@ -22,7 +27,8 @@ func (c *Client) PerformDownload(url string) (out io.ReadCloser, err error) { + return nil, err + } + +- if resp.StatusCode != 200 { ++ _, isRanging := req.Header["Range"] ++ if !(resp.StatusCode == http.StatusOK || (isRanging && resp.StatusCode == http.StatusPartialContent)) { + defer CheckClose(resp.Body, &err) + body, err := ioutil.ReadAll(resp.Body) + if err != nil { +diff --git a/rclone-1.33/yandex/api/performupload.go b/rclone/yandex/api/performupload.go +index cfedb93..39f55cd 100644 +--- a/rclone-1.33/yandex/api/performupload.go ++++ b/rclone/yandex/api/performupload.go +@@ -11,11 +11,12 @@ import ( + ) + + // PerformUpload does the actual upload via unscoped PUT request. +-func (c *Client) PerformUpload(url string, data io.Reader) (err error) { ++func (c *Client) PerformUpload(url string, data io.Reader, contentType string) (err error) { + req, err := http.NewRequest("PUT", url, data) + if err != nil { + return err + } ++ req.Header.Set("Content-Type", contentType) + + //c.setRequestScope(req) + +diff --git a/rclone-1.33/yandex/api/upload.go b/rclone/yandex/api/upload.go +index b2eff2f..946e659 100644 +--- a/rclone-1.33/yandex/api/upload.go ++++ b/rclone/yandex/api/upload.go +@@ -17,13 +17,13 @@ type UploadResponse struct { + } + + // Upload will put specified data to Yandex.Disk. 
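The status check added to PerformDownload above captures a general rule for ranged HTTP downloads: a plain GET should answer 200 OK, while a GET carrying a Range header normally answers 206 Partial Content, and either is acceptable. A compact restatement of that rule (acceptableStatus is a hypothetical name, not part of the patch):

    package main

    import (
        "fmt"
        "net/http"
    )

    // acceptableStatus reports whether a download response is usable:
    // 200 OK always is, and 206 Partial Content is fine as long as the
    // request actually asked for a byte range.
    func acceptableStatus(req *http.Request, resp *http.Response) bool {
        _, isRanging := req.Header["Range"]
        if resp.StatusCode == http.StatusOK {
            return true
        }
        return isRanging && resp.StatusCode == http.StatusPartialContent
    }

    func main() {
        req, err := http.NewRequest("GET", "https://example.com/file", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Range", "bytes=100-")
        resp := &http.Response{StatusCode: http.StatusPartialContent}
        fmt.Println(acceptableStatus(req, resp)) // true
    }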
+-func (c *Client) Upload(data io.Reader, remotePath string, overwrite bool) error { ++func (c *Client) Upload(data io.Reader, remotePath string, overwrite bool, contentType string) error { + ur, err := c.UploadRequest(remotePath, overwrite) + if err != nil { + return err + } + +- if err := c.PerformUpload(ur.HRef, data); err != nil { ++ if err := c.PerformUpload(ur.HRef, data, contentType); err != nil { + return err + } + +diff --git a/rclone-1.33/yandex/yandex.go b/rclone/yandex/yandex.go +index 93c46aa..67247fe 100644 +--- a/rclone-1.33/yandex/yandex.go ++++ b/rclone/yandex/yandex.go +@@ -73,11 +73,12 @@ type Fs struct { + + // Object describes a swift object + type Object struct { +- fs *Fs // what this object is part of +- remote string // The remote path +- md5sum string // The MD5Sum of the object +- bytes uint64 // Bytes in the object +- modTime time.Time // Modified time of the object ++ fs *Fs // what this object is part of ++ remote string // The remote path ++ md5sum string // The MD5Sum of the object ++ bytes uint64 // Bytes in the object ++ modTime time.Time // Modified time of the object ++ mimeType string // Content type according to the server + } + + // ------------------------------------------------------------ +@@ -326,31 +327,33 @@ func (f *Fs) newObjectWithInfo(remote string, info *yandex.ResourceInfoResponse) + func (o *Object) setMetaData(info *yandex.ResourceInfoResponse) { + o.bytes = info.Size + o.md5sum = info.Md5 ++ o.mimeType = info.MimeType + +- if info.CustomProperties["rclone_modified"] == nil { +- //read modTime from Modified property of object +- t, err := time.Parse(time.RFC3339Nano, info.Modified) +- if err != nil { +- return +- } +- o.modTime = t ++ var modTimeString string ++ modTimeObj, ok := info.CustomProperties["rclone_modified"] ++ if ok { ++ // read modTime from rclone_modified custom_property of object ++ modTimeString, ok = modTimeObj.(string) ++ } ++ if !ok { ++ // read modTime from Modified property of object as a fallback ++ modTimeString = info.Modified ++ } ++ t, err := time.Parse(time.RFC3339Nano, modTimeString) ++ if err != nil { ++ fs.Log("Failed to parse modtime from %q: %v", modTimeString, err) + } else { +- // interface{} to string type assertion +- if modtimestr, ok := info.CustomProperties["rclone_modified"].(string); ok { +- //read modTime from rclone_modified custom_property of object +- t, err := time.Parse(time.RFC3339Nano, modtimestr) +- if err != nil { +- return +- } +- o.modTime = t +- } else { +- return //if it is not a string +- } ++ o.modTime = t + } + } + + // readMetaData gets the info if it hasn't already been fetched + func (o *Object) readMetaData() (err error) { ++ // exit if already fetched ++ if !o.modTime.IsZero() { ++ return nil ++ } ++ + //request meta info + var opt2 yandex.ResourceInfoRequestOptions + ResourceInfoResponse, err := o.fs.yd.NewResourceInfoRequest(o.remotePath(), opt2).Exec() +@@ -484,8 +487,8 @@ func (o *Object) ModTime() time.Time { + } + + // Open an object for read +-func (o *Object) Open() (in io.ReadCloser, err error) { +- return o.fs.yd.Download(o.remotePath()) ++func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) { ++ return o.fs.yd.Download(o.remotePath(), fs.OpenOptionHeaders(options)) + } + + // Remove an object +@@ -498,8 +501,13 @@ func (o *Object) Remove() error { + // Commits the datastore + func (o *Object) SetModTime(modTime time.Time) error { + remote := o.remotePath() +- //set custom_property 'rclone_modified' of object to modTime +- return 
o.fs.yd.SetCustomProperty(remote, "rclone_modified", modTime.Format(time.RFC3339Nano)) ++ // set custom_property 'rclone_modified' of object to modTime ++ err := o.fs.yd.SetCustomProperty(remote, "rclone_modified", modTime.Format(time.RFC3339Nano)) ++ if err != nil { ++ return err ++ } ++ o.modTime = modTime ++ return nil + } + + // Storable returns whether this object is storable +@@ -529,7 +537,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error { + } + //upload file + overwrite := true //overwrite existing file +- err := o.fs.yd.Upload(in, remote, overwrite) ++ mimeType := fs.MimeType(src) ++ err := o.fs.yd.Upload(in, remote, overwrite, mimeType) + if err == nil { + //if file uploaded sucessfully then return metadata + o.bytes = uint64(size) +@@ -587,10 +596,21 @@ func mkDirFullPath(client *yandex.Client, path string) error { + return nil + } + ++// MimeType of an Object if known, "" otherwise ++func (o *Object) MimeType() string { ++ err := o.readMetaData() ++ if err != nil { ++ fs.Log(o, "Failed to read metadata: %v", err) ++ return "" ++ } ++ return o.mimeType ++} ++ + // Check the interfaces are satisfied + var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + //_ fs.Copier = (*Fs)(nil) +- _ fs.Object = (*Object)(nil) ++ _ fs.Object = (*Object)(nil) ++ _ fs.MimeTyper = &Object{} + ) +diff --git a/rclone-1.33/yandex/yandex_test.go b/rclone/yandex/yandex_test.go +index 23ef7a2..cd35dc0 100644 +--- a/rclone-1.33/yandex/yandex_test.go ++++ b/rclone/yandex/yandex_test.go +@@ -46,9 +46,11 @@ func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) } + func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) } + func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) } + func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) } ++func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) } + func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) } + func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) } + func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) } ++func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) } + func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) } + func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) } + func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) } diff --git a/package.yml b/package.yml new file mode 100644 index 0000000..c9080ad --- /dev/null +++ b/package.yml @@ -0,0 +1,31 @@ +name : rclone +version : 1.33 +release : 1 +source : + - https://github.com/ncw/rclone/archive/v1.33.tar.gz : c1f947b9fa624bb70da151327d3b7e4652746ae56a2e66772a0808f2061efde3 +license : MIT +component : network.util +summary : Sync files to and from Google Drive, S3, Swift, Cloudfiles, etc. 
description: |
+    Sync files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage
+builddeps  :
+    - golang
+    - git
+setup      : |
+    # Apply the patch
+    %patch -p2 < $pkgfiles/rclone.patch
+
+    # Set GOPATH
+    export GOPATH="$workdir"
+
+    # Download dependencies
+    go get github.com/go-ini/ini github.com/jmespath/go-jmespath
+    go get github.com/ncw/rclone
+build      : |
+    # Build rclone
+    export GOPATH="$workdir"
+    go build -o "rclone"
+install    : |
+    # Install rclone and its man page
+    install -D -m 00755 rclone $installdir/usr/bin/rclone
+    install -D -m 00644 rclone.1 $installdir/usr/share/man/man1/rclone.1
diff --git a/pspec_x86_64.xml b/pspec_x86_64.xml
new file mode 100644
index 0000000..ef6b083
--- /dev/null
+++ b/pspec_x86_64.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" ?>
+<PISI>
+    <Source>
+        <Name>rclone</Name>
+        <Packager>
+            <Name>Manuel Wassermann</Name>
+            <Email>manuel.wassermann97@gmail.com</Email>
+        </Packager>
+        <License>MIT</License>
+        <PartOf>network.util</PartOf>
+        <Summary>Sync files to and from Google Drive, S3, Swift, Cloudfiles, etc.</Summary>
+        <Description>Sync files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage</Description>
+        <Archive>https://solus-project.com/sources/README.Solus</Archive>
+    </Source>
+    <Package>
+        <Name>rclone</Name>
+        <Summary>Sync files to and from Google Drive, S3, Swift, Cloudfiles, etc.</Summary>
+        <Description>Sync files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage</Description>
+        <PartOf>network.util</PartOf>
+        <Files>
+            <Path fileType="executable">/usr/bin</Path>
+            <Path fileType="data">/usr/share/man</Path>
+        </Files>
+    </Package>
+    <History>
+        <Update release="1">
+            <Date>2016-10-16</Date>
+            <Version>1.33</Version>
+            <Comment>Packaging update</Comment>
+            <Name>Manuel Wassermann</Name>
+            <Email>manuel.wassermann97@gmail.com</Email>
+        </Update>
+    </History>
+</PISI>
\ No newline at end of file
-- 
2.10.1
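One idiom this patch repeats at the bottom of every backend it touches is the compile-time interface check, such as _ fs.MimeTyper = &Object{}. The blank-identifier assignment costs nothing at runtime, but it turns a missing or mistyped method into a build failure instead of a runtime surprise. A self-contained illustration, using simplified stand-ins for the fs types:

    package main

    import "fmt"

    // MimeTyper is a simplified stand-in for rclone's fs.MimeTyper.
    type MimeTyper interface {
        MimeType() string
    }

    // Object is a toy backend object.
    type Object struct {
        mimeType string
    }

    // MimeType returns the stored content type, or "" if unknown.
    func (o *Object) MimeType() string { return o.mimeType }

    // If *Object ever stops implementing MimeTyper, this line makes the
    // package fail to compile, which is exactly the point.
    var _ MimeTyper = &Object{}

    func main() {
        fmt.Println((&Object{mimeType: "text/plain"}).MimeType())
    }

Because MimeType is declared on the pointer receiver *Object, the assertion uses &Object{}; asserting with a bare Object{} would not compile.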