Mirror of https://github.com/taigrr/go-selfupdate
Synced 2025-01-18 04:33:12 -08:00

Update to go mod and parameterize check time and randomize time
parent f041b81ae5
commit f247155ad6
Gopkg.lock (generated): 28 deletions
@@ -1,28 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/kardianos/osext"
-  packages = ["."]
-  revision = "6e7f843663477789fac7c02def0d0909e969b4e5"
-
-[[projects]]
-  name = "github.com/kr/binarydist"
-  packages = ["."]
-  revision = "9955b0ab8708602d411341e55fffd7e0700f86bd"
-
-[[projects]]
-  branch = "v0"
-  name = "gopkg.in/inconshreveable/go-update.v0"
-  packages = [
-    ".",
-    "download"
-  ]
-  revision = "d8b0b1d421aa1cbf392c05869f8abbc669bb7066"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "2dc15c6a0e0dda650516ca1c04eb3ce579602cb928acda004089b48b0dce8a9d"
-  solver-name = "gps-cdcl"
-  solver-version = 1
Gopkg.toml: 34 deletions
@@ -1,34 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#   name = "github.com/x/y"
-#   version = "2.4.0"
-#
-# [prune]
-#   non-go = false
-#   go-tests = true
-#   unused-packages = true
-
-
-[[constraint]]
-  branch = "v0"
-  name = "gopkg.in/inconshreveable/go-update.v0"
-
-[prune]
-  go-tests = true
-  unused-packages = true
go.mod (new file): 9 additions
@@ -0,0 +1,9 @@
+module github.com/sanbornm/go-selfupdate
+
+go 1.15
+
+require (
+	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
+	github.com/kr/binarydist v0.1.0
+	gopkg.in/inconshreveable/go-update.v0 v0.0.0-20150814200126-d8b0b1d421aa
+)
go.sum (new file): 6 additions
@@ -0,0 +1,6 @@
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kr/binarydist v0.1.0 h1:6kAoLA9FMMnNGSehX0s1PdjbEaACznAv/W219j2uvyo=
+github.com/kr/binarydist v0.1.0/go.mod h1:DY7S//GCoz1BCd0B0EVrinCKAZN3pXe+MDaIZbXQVgM=
+gopkg.in/inconshreveable/go-update.v0 v0.0.0-20150814200126-d8b0b1d421aa h1:drvf2JoUL1fz3ttkGNkw+rf3kZa2//7XkYGpSO4NHNA=
+gopkg.in/inconshreveable/go-update.v0 v0.0.0-20150814200126-d8b0b1d421aa/go.mod h1:tuNm0ntQ7IH9VSA39XxzLMpee5c2DwgIbjD4x3ydo8Y=
selfupdate/selfupdate.go
@@ -83,6 +83,8 @@ type Updater struct {
 	DiffURL string // Base URL for diff downloads.
 	Dir string // Directory to store selfupdate state.
 	ForceCheck bool // Check for update regardless of cktime timestamp
+	CheckTime int // Time in hours before next check
+	RandomizeTime int // Time in hours to randomize with CheckTime
 	Requester Requester //Optional parameter to override existing http request handler
 	Info struct {
 		Version string
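The two integer fields above are the knobs this commit adds to the Updater API. Below is a minimal sketch of how a consumer might set them, assuming the package is imported as github.com/sanbornm/go-selfupdate/selfupdate (the module path declared in the new go.mod plus the selfupdate package directory); the URL and command-name fields needed for a real update are omitted, so treat this as illustrative rather than a working updater.

package main

import (
	"log"

	"github.com/sanbornm/go-selfupdate/selfupdate"
)

func main() {
	u := &selfupdate.Updater{
		CurrentVersion: "1.2.0",   // version baked into this binary
		Dir:            "update/", // directory for selfupdate state (cktime lives here)
		CheckTime:      24,        // hours to wait before the next check
		RandomizeTime:  12,        // plus a random 0-12 hours to spread checks out
		ForceCheck:     false,     // true would bypass the cktime timestamp entirely
	}

	// BackgroundRun consults WantUpdate, records the next check time via
	// SetUpdateTime, and performs the update when one is wanted.
	if err := u.BackgroundRun(); err != nil {
		log.Printf("self-update check failed: %v", err)
	}
}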
@@ -102,34 +104,62 @@ func (u *Updater) BackgroundRun() error {
 		// fail
 		return err
 	}
-	if u.wantUpdate() {
+	if u.WantUpdate() {
 		if err := up.CanUpdate(); err != nil {
 			// fail
 			return err
 		}
+
+		u.SetUpdateTime()
+
 		//self, err := osext.Executable()
 		//if err != nil {
 		// fail update, couldn't figure out path to self
 		//return
 		//}
 		// TODO(bgentry): logger isn't on Windows. Replace w/ proper error reports.
-		if err := u.update(); err != nil {
+		if err := u.Update(); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (u *Updater) wantUpdate() bool {
-	path := u.getExecRelativeDir(u.Dir + upcktimePath)
-	if u.CurrentVersion == "dev" || (!u.ForceCheck && readTime(path).After(time.Now())) {
+// WantUpdate returns boolean designating if an update is desired
+func (u *Updater) WantUpdate() bool {
+	if u.CurrentVersion == "dev" || (!u.ForceCheck && u.NextUpdate().After(time.Now())) {
 		return false
 	}
-	wait := 24*time.Hour + randDuration(24*time.Hour)
-	return writeTime(path, time.Now().Add(wait))
+
+	return true
 }
 
-func (u *Updater) update() error {
+// NextUpdate returns the next time update should be checked
+func (u *Updater) NextUpdate() time.Time {
+	path := u.getExecRelativeDir(u.Dir + upcktimePath)
+	nextTime := readTime(path)
+
+	return nextTime
+}
+
+// SetUpdateTime writes the next update time to the state file
+func (u *Updater) SetUpdateTime() bool {
+	path := u.getExecRelativeDir(u.Dir + upcktimePath)
+	wait := time.Duration(u.CheckTime) * time.Hour
+	// Add 1 to random time since max is not included
+	waitrand := time.Duration(rand.Intn(u.RandomizeTime+1)) * time.Hour
+
+	return writeTime(path, time.Now().Add(wait+waitrand))
+}
+
+// ClearUpdateState removes the stored next-check timestamp
+func (u *Updater) ClearUpdateState() {
+	path := u.getExecRelativeDir(u.Dir + upcktimePath)
+	os.Remove(path)
+}
+
+// Update initiates the self update process
+func (u *Updater) Update() error {
 	path, err := osext.Executable()
 	if err != nil {
 		return err
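Taken together, this hunk replaces the old fixed wait of 24 hours plus a random duration with CheckTime hours plus a uniformly drawn 0..RandomizeTime hours. The following self-contained sketch isolates just that arithmetic; the nextCheck name is made up for illustration, and the real SetUpdateTime persists the result with writeTime instead of returning it.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextCheck mirrors the wait computed by SetUpdateTime in the diff above:
// a fixed checkTime hours plus rand.Intn(randomizeTime+1) hours, where the
// "+1" makes randomizeTime itself a possible draw (rand.Intn excludes its max).
func nextCheck(checkTime, randomizeTime int) time.Time {
	wait := time.Duration(checkTime) * time.Hour
	waitrand := time.Duration(rand.Intn(randomizeTime+1)) * time.Hour
	return time.Now().Add(wait + waitrand)
}

func main() {
	// With CheckTime=24 and RandomizeTime=12 the next check lands
	// somewhere between 24 and 36 hours from now.
	fmt.Println(nextCheck(24, 12))
}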
@@ -250,11 +280,6 @@ func (u *Updater) fetchBin() ([]byte, error) {
 	return buf.Bytes(), nil
 }
 
-// returns a random duration in [0,n).
-func randDuration(n time.Duration) time.Duration {
-	return time.Duration(rand.Int63n(int64(n)))
-}
-
 func (u *Updater) fetch(url string) (io.ReadCloser, error) {
 	if u.Requester == nil {
 		return defaultHTTPRequester.Fetch(url)
selfupdate/selfupdate_test.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"testing"
+	"time"
 )
 
 var testHash = sha256.New()
@@ -17,7 +18,11 @@ func TestUpdaterFetchMustReturnNonNilReaderCloser(t *testing.T) {
 			return nil, nil
 		})
 	updater := createUpdater(mr)
+	updater.CheckTime = 24
+	updater.RandomizeTime = 24
+
 	err := updater.BackgroundRun()
+
 	if err != nil {
 		equals(t, "Fetch was expected to return non-nil ReadCloser", err.Error())
 	} else {
@@ -34,6 +39,8 @@ func TestUpdaterWithEmptyPayloadNoErrorNoUpdate(t *testing.T) {
 			return newTestReaderCloser("{}"), nil
 		})
 	updater := createUpdater(mr)
+	updater.CheckTime = 24
+	updater.RandomizeTime = 24
 
 	err := updater.BackgroundRun()
 	if err != nil {
@@ -41,6 +48,46 @@ func TestUpdaterWithEmptyPayloadNoErrorNoUpdate(t *testing.T) {
 	}
 }
 
+func TestUpdaterCheckTime(t *testing.T) {
+	mr := &mockRequester{}
+	mr.handleRequest(
+		func(url string) (io.ReadCloser, error) {
+			equals(t, "http://updates.yourdomain.com/myapp/darwin-amd64.json", url)
+			return newTestReaderCloser("{}"), nil
+		})
+
+	// Run the test with various times
+	runTestTimeChecks(t, mr, 0, 0, false)
+	runTestTimeChecks(t, mr, 0, 5, true)
+	runTestTimeChecks(t, mr, 1, 0, true)
+	runTestTimeChecks(t, mr, 100, 100, true)
+}
+
+// Helper function to run check time tests
+func runTestTimeChecks(t *testing.T, mr *mockRequester, checkTime int, randomizeTime int, expectUpdate bool) {
+	updater := createUpdater(mr)
+	updater.ClearUpdateState()
+	updater.CheckTime = checkTime
+	updater.RandomizeTime = randomizeTime
+
+	updater.BackgroundRun()
+
+	if updater.WantUpdate() == expectUpdate {
+		t.Errorf("WantUpdate returned %v; want %v", updater.WantUpdate(), expectUpdate)
+	}
+
+	maxHrs := time.Duration(updater.CheckTime+updater.RandomizeTime) * time.Hour
+	maxTime := time.Now().Add(maxHrs)
+
+	if !updater.NextUpdate().Before(maxTime) {
+		t.Errorf("NextUpdate should be less than %s hrs (CheckTime + RandomizeTime) from now; now %s; next update %s", maxHrs, time.Now(), updater.NextUpdate())
+	}
+
+	if maxHrs > 0 && !updater.NextUpdate().After(time.Now()) {
+		t.Errorf("NextUpdate should be after now")
+	}
+}
+
 func TestUpdaterWithEmptyPayloadNoErrorNoUpdateEscapedPath(t *testing.T) {
 	mr := &mockRequester{}
 	mr.handleRequest(
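The new test helper exercises the exported methods in the same order an application would: clear the stored state, run the background check, then inspect the schedule. A short sketch of that sequence outside the test harness, under the same import-path assumption as the earlier example (the printed expectations assume readTime treats a missing cktime file as the zero time, which is what the 0/0 test case relies on):

package main

import (
	"fmt"

	"github.com/sanbornm/go-selfupdate/selfupdate"
)

func main() {
	u := &selfupdate.Updater{
		CurrentVersion: "1.2.0",
		Dir:            "update/",
		CheckTime:      1,
		RandomizeTime:  0,
	}

	u.ClearUpdateState()                        // drop any stored cktime
	fmt.Println("want update:", u.WantUpdate()) // expected true: nothing scheduled yet

	u.SetUpdateTime()                           // schedule the next check one hour out
	fmt.Println("next check:", u.NextUpdate())
	fmt.Println("want update:", u.WantUpdate()) // expected false until that time passes
}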
selfupdate/update/cktime (new file): 1 addition
@@ -0,0 +1 @@
+2020-12-08T21:11:49-07:00
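The cktime state file holds a single RFC 3339 timestamp, which is what NextUpdate reads back through readTime. The repository's actual readTime and writeTime helpers are not shown in this diff; the stand-ins below are only a plausible sketch of how such a timestamp could be read and written with the standard library.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
	"time"
)

// readCheckTime is a hypothetical equivalent of the package's readTime helper:
// a missing or malformed file yields the zero time, so the next WantUpdate
// call treats a check as due.
func readCheckTime(path string) time.Time {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return time.Time{}
	}
	t, err := time.Parse(time.RFC3339, strings.TrimSpace(string(b)))
	if err != nil {
		return time.Time{}
	}
	return t
}

// writeCheckTime is the matching hypothetical stand-in for writeTime.
func writeCheckTime(path string, t time.Time) error {
	return ioutil.WriteFile(path, []byte(t.Format(time.RFC3339)), 0644)
}

func main() {
	_ = writeCheckTime("cktime", time.Now().Add(24*time.Hour))
	fmt.Println("next check:", readCheckTime("cktime"))
}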
vendor/github.com/kardianos/osext/LICENSE (generated, vendored): 27 deletions
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
vendor/github.com/kardianos/osext/README.md (generated, vendored): 16 deletions
@@ -1,16 +0,0 @@
|
||||
### Extensions to the "os" package.
|
||||
|
||||
## Find the current Executable and ExecutableFolder.
|
||||
|
||||
There is sometimes utility in finding the current executable file
|
||||
that is running. This can be used for upgrading the current executable
|
||||
or finding resources located relative to the executable file. Both
|
||||
working directory and the os.Args[0] value are arbitrary and cannot
|
||||
be relied on; os.Args[0] can be "faked".
|
||||
|
||||
Multi-platform and supports:
|
||||
* Linux
|
||||
* OS X
|
||||
* Windows
|
||||
* Plan 9
|
||||
* BSDs.
|
vendor/github.com/kardianos/osext/osext.go (generated, vendored): 27 deletions
@@ -1,27 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Extensions to the standard "os" package.
|
||||
package osext // import "github.com/kardianos/osext"
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// Executable returns an absolute path that can be used to
|
||||
// re-invoke the current program.
|
||||
// It may not be valid after the current program exits.
|
||||
func Executable() (string, error) {
|
||||
p, err := executable()
|
||||
return filepath.Clean(p), err
|
||||
}
|
||||
|
||||
// Returns same path as Executable, returns just the folder
|
||||
// path. Excludes the executable name and any trailing slash.
|
||||
func ExecutableFolder() (string, error) {
|
||||
p, err := Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Dir(p), nil
|
||||
}
|
vendor/github.com/kardianos/osext/osext_plan9.go (generated, vendored): 20 deletions
@@ -1,20 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return syscall.Fd2path(int(f.Fd()))
|
||||
}
|
vendor/github.com/kardianos/osext/osext_procfs.go (generated, vendored): 36 deletions
@@ -1,36 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux netbsd openbsd solaris dragonfly
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
const deletedTag = " (deleted)"
|
||||
execpath, err := os.Readlink("/proc/self/exe")
|
||||
if err != nil {
|
||||
return execpath, err
|
||||
}
|
||||
execpath = strings.TrimSuffix(execpath, deletedTag)
|
||||
execpath = strings.TrimPrefix(execpath, deletedTag)
|
||||
return execpath, nil
|
||||
case "netbsd":
|
||||
return os.Readlink("/proc/curproc/exe")
|
||||
case "openbsd", "dragonfly":
|
||||
return os.Readlink("/proc/curproc/file")
|
||||
case "solaris":
|
||||
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
|
||||
}
|
||||
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
|
||||
}
|
vendor/github.com/kardianos/osext/osext_sysctl.go (generated, vendored): 79 deletions
@@ -1,79 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var initCwd, initCwdErr = os.Getwd()
|
||||
|
||||
func executable() (string, error) {
|
||||
var mib [4]int32
|
||||
switch runtime.GOOS {
|
||||
case "freebsd":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
|
||||
case "darwin":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
|
||||
}
|
||||
|
||||
n := uintptr(0)
|
||||
// Get length.
|
||||
_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
buf := make([]byte, n)
|
||||
_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
for i, v := range buf {
|
||||
if v == 0 {
|
||||
buf = buf[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
execPath := string(buf)
|
||||
// execPath will not be empty due to above checks.
|
||||
// Try to get the absolute path if the execPath is not rooted.
|
||||
if execPath[0] != '/' {
|
||||
execPath, err = getAbs(execPath)
|
||||
if err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
// For darwin KERN_PROCARGS may return the path to a symlink rather than the
|
||||
// actual executable.
|
||||
if runtime.GOOS == "darwin" {
|
||||
if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
return execPath, nil
|
||||
}
|
||||
|
||||
func getAbs(execPath string) (string, error) {
|
||||
if initCwdErr != nil {
|
||||
return execPath, initCwdErr
|
||||
}
|
||||
// The execPath may begin with a "../" or a "./" so clean it first.
|
||||
// Join the two paths, trailing and starting slashes undetermined, so use
|
||||
// the generic Join function.
|
||||
return filepath.Join(initCwd, filepath.Clean(execPath)), nil
|
||||
}
|
vendor/github.com/kardianos/osext/osext_windows.go (generated, vendored): 34 deletions
@@ -1,34 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel = syscall.MustLoadDLL("kernel32.dll")
|
||||
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
|
||||
)
|
||||
|
||||
// GetModuleFileName() with hModule = NULL
|
||||
func executable() (exePath string, err error) {
|
||||
return getModuleFileName()
|
||||
}
|
||||
|
||||
func getModuleFileName() (string, error) {
|
||||
var n uint32
|
||||
b := make([]uint16, syscall.MAX_PATH)
|
||||
size := uint32(len(b))
|
||||
|
||||
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
|
||||
n = uint32(r0)
|
||||
if n == 0 {
|
||||
return "", e1
|
||||
}
|
||||
return string(utf16.Decode(b[0:n])), nil
|
||||
}
|
vendor/github.com/kr/binarydist/.gitignore (generated, vendored): 1 deletion
@@ -1 +0,0 @@
|
||||
test.*
|
vendor/github.com/kr/binarydist/License (generated, vendored): 22 deletions
@@ -1,22 +0,0 @@
|
||||
Copyright 2012 Keith Rarick
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
vendor/github.com/kr/binarydist/Readme.md (generated, vendored): 7 deletions
@@ -1,7 +0,0 @@
|
||||
# binarydist
|
||||
|
||||
Package binarydist implements binary diff and patch as described on
|
||||
<http://www.daemonology.net/bsdiff/>. It reads and writes files
|
||||
compatible with the tools there.
|
||||
|
||||
Documentation at <http://go.pkgdoc.org/github.com/kr/binarydist>.
|
vendor/github.com/kr/binarydist/bzip2.go (generated, vendored): 40 deletions
@@ -1,40 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
type bzip2Writer struct {
|
||||
c *exec.Cmd
|
||||
w io.WriteCloser
|
||||
}
|
||||
|
||||
func (w bzip2Writer) Write(b []byte) (int, error) {
|
||||
return w.w.Write(b)
|
||||
}
|
||||
|
||||
func (w bzip2Writer) Close() error {
|
||||
if err := w.w.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.c.Wait()
|
||||
}
|
||||
|
||||
// Package compress/bzip2 implements only decompression,
|
||||
// so we'll fake it by running bzip2 in another process.
|
||||
func newBzip2Writer(w io.Writer) (wc io.WriteCloser, err error) {
|
||||
var bw bzip2Writer
|
||||
bw.c = exec.Command("bzip2", "-c")
|
||||
bw.c.Stdout = w
|
||||
|
||||
if bw.w, err = bw.c.StdinPipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = bw.c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bw, nil
|
||||
}
|
vendor/github.com/kr/binarydist/diff.go (generated, vendored): 408 deletions
@@ -1,408 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
func swap(a []int, i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
func split(I, V []int, start, length, h int) {
|
||||
var i, j, k, x, jj, kk int
|
||||
|
||||
if length < 16 {
|
||||
for k = start; k < start+length; k += j {
|
||||
j = 1
|
||||
x = V[I[k]+h]
|
||||
for i = 1; k+i < start+length; i++ {
|
||||
if V[I[k+i]+h] < x {
|
||||
x = V[I[k+i]+h]
|
||||
j = 0
|
||||
}
|
||||
if V[I[k+i]+h] == x {
|
||||
swap(I, k+i, k+j)
|
||||
j++
|
||||
}
|
||||
}
|
||||
for i = 0; i < j; i++ {
|
||||
V[I[k+i]] = k + j - 1
|
||||
}
|
||||
if j == 1 {
|
||||
I[k] = -1
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
x = V[I[start+length/2]+h]
|
||||
jj = 0
|
||||
kk = 0
|
||||
for i = start; i < start+length; i++ {
|
||||
if V[I[i]+h] < x {
|
||||
jj++
|
||||
}
|
||||
if V[I[i]+h] == x {
|
||||
kk++
|
||||
}
|
||||
}
|
||||
jj += start
|
||||
kk += jj
|
||||
|
||||
i = start
|
||||
j = 0
|
||||
k = 0
|
||||
for i < jj {
|
||||
if V[I[i]+h] < x {
|
||||
i++
|
||||
} else if V[I[i]+h] == x {
|
||||
swap(I, i, jj+j)
|
||||
j++
|
||||
} else {
|
||||
swap(I, i, kk+k)
|
||||
k++
|
||||
}
|
||||
}
|
||||
|
||||
for jj+j < kk {
|
||||
if V[I[jj+j]+h] == x {
|
||||
j++
|
||||
} else {
|
||||
swap(I, jj+j, kk+k)
|
||||
k++
|
||||
}
|
||||
}
|
||||
|
||||
if jj > start {
|
||||
split(I, V, start, jj-start, h)
|
||||
}
|
||||
|
||||
for i = 0; i < kk-jj; i++ {
|
||||
V[I[jj+i]] = kk - 1
|
||||
}
|
||||
if jj == kk-1 {
|
||||
I[jj] = -1
|
||||
}
|
||||
|
||||
if start+length > kk {
|
||||
split(I, V, kk, start+length-kk, h)
|
||||
}
|
||||
}
|
||||
|
||||
func qsufsort(obuf []byte) []int {
|
||||
var buckets [256]int
|
||||
var i, h int
|
||||
I := make([]int, len(obuf)+1)
|
||||
V := make([]int, len(obuf)+1)
|
||||
|
||||
for _, c := range obuf {
|
||||
buckets[c]++
|
||||
}
|
||||
for i = 1; i < 256; i++ {
|
||||
buckets[i] += buckets[i-1]
|
||||
}
|
||||
copy(buckets[1:], buckets[:])
|
||||
buckets[0] = 0
|
||||
|
||||
for i, c := range obuf {
|
||||
buckets[c]++
|
||||
I[buckets[c]] = i
|
||||
}
|
||||
|
||||
I[0] = len(obuf)
|
||||
for i, c := range obuf {
|
||||
V[i] = buckets[c]
|
||||
}
|
||||
|
||||
V[len(obuf)] = 0
|
||||
for i = 1; i < 256; i++ {
|
||||
if buckets[i] == buckets[i-1]+1 {
|
||||
I[buckets[i]] = -1
|
||||
}
|
||||
}
|
||||
I[0] = -1
|
||||
|
||||
for h = 1; I[0] != -(len(obuf) + 1); h += h {
|
||||
var n int
|
||||
for i = 0; i < len(obuf)+1; {
|
||||
if I[i] < 0 {
|
||||
n -= I[i]
|
||||
i -= I[i]
|
||||
} else {
|
||||
if n != 0 {
|
||||
I[i-n] = -n
|
||||
}
|
||||
n = V[I[i]] + 1 - i
|
||||
split(I, V, i, n, h)
|
||||
i += n
|
||||
n = 0
|
||||
}
|
||||
}
|
||||
if n != 0 {
|
||||
I[i-n] = -n
|
||||
}
|
||||
}
|
||||
|
||||
for i = 0; i < len(obuf)+1; i++ {
|
||||
I[V[i]] = i
|
||||
}
|
||||
return I
|
||||
}
|
||||
|
||||
func matchlen(a, b []byte) (i int) {
|
||||
for i < len(a) && i < len(b) && a[i] == b[i] {
|
||||
i++
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func search(I []int, obuf, nbuf []byte, st, en int) (pos, n int) {
|
||||
if en-st < 2 {
|
||||
x := matchlen(obuf[I[st]:], nbuf)
|
||||
y := matchlen(obuf[I[en]:], nbuf)
|
||||
|
||||
if x > y {
|
||||
return I[st], x
|
||||
} else {
|
||||
return I[en], y
|
||||
}
|
||||
}
|
||||
|
||||
x := st + (en-st)/2
|
||||
if bytes.Compare(obuf[I[x]:], nbuf) < 0 {
|
||||
return search(I, obuf, nbuf, x, en)
|
||||
} else {
|
||||
return search(I, obuf, nbuf, st, x)
|
||||
}
|
||||
panic("unreached")
|
||||
}
|
||||
|
||||
// Diff computes the difference between old and new, according to the bsdiff
|
||||
// algorithm, and writes the result to patch.
|
||||
func Diff(old, new io.Reader, patch io.Writer) error {
|
||||
obuf, err := ioutil.ReadAll(old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nbuf, err := ioutil.ReadAll(new)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pbuf, err := diffBytes(obuf, nbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = patch.Write(pbuf)
|
||||
return err
|
||||
}
|
||||
|
||||
func diffBytes(obuf, nbuf []byte) ([]byte, error) {
|
||||
var patch seekBuffer
|
||||
err := diff(obuf, nbuf, &patch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return patch.buf, nil
|
||||
}
|
||||
|
||||
func diff(obuf, nbuf []byte, patch io.WriteSeeker) error {
|
||||
var lenf int
|
||||
I := qsufsort(obuf)
|
||||
db := make([]byte, len(nbuf))
|
||||
eb := make([]byte, len(nbuf))
|
||||
var dblen, eblen int
|
||||
|
||||
var hdr header
|
||||
hdr.Magic = magic
|
||||
hdr.NewSize = int64(len(nbuf))
|
||||
err := binary.Write(patch, signMagLittleEndian{}, &hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compute the differences, writing ctrl as we go
|
||||
pfbz2, err := newBzip2Writer(patch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var scan, pos, length int
|
||||
var lastscan, lastpos, lastoffset int
|
||||
for scan < len(nbuf) {
|
||||
var oldscore int
|
||||
scan += length
|
||||
for scsc := scan; scan < len(nbuf); scan++ {
|
||||
pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf))
|
||||
|
||||
for ; scsc < scan+length; scsc++ {
|
||||
if scsc+lastoffset < len(obuf) &&
|
||||
obuf[scsc+lastoffset] == nbuf[scsc] {
|
||||
oldscore++
|
||||
}
|
||||
}
|
||||
|
||||
if (length == oldscore && length != 0) || length > oldscore+8 {
|
||||
break
|
||||
}
|
||||
|
||||
if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] {
|
||||
oldscore--
|
||||
}
|
||||
}
|
||||
|
||||
if length != oldscore || scan == len(nbuf) {
|
||||
var s, Sf int
|
||||
lenf = 0
|
||||
for i := 0; lastscan+i < scan && lastpos+i < len(obuf); {
|
||||
if obuf[lastpos+i] == nbuf[lastscan+i] {
|
||||
s++
|
||||
}
|
||||
i++
|
||||
if s*2-i > Sf*2-lenf {
|
||||
Sf = s
|
||||
lenf = i
|
||||
}
|
||||
}
|
||||
|
||||
lenb := 0
|
||||
if scan < len(nbuf) {
|
||||
var s, Sb int
|
||||
for i := 1; (scan >= lastscan+i) && (pos >= i); i++ {
|
||||
if obuf[pos-i] == nbuf[scan-i] {
|
||||
s++
|
||||
}
|
||||
if s*2-i > Sb*2-lenb {
|
||||
Sb = s
|
||||
lenb = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if lastscan+lenf > scan-lenb {
|
||||
overlap := (lastscan + lenf) - (scan - lenb)
|
||||
s := 0
|
||||
Ss := 0
|
||||
lens := 0
|
||||
for i := 0; i < overlap; i++ {
|
||||
if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
|
||||
s++
|
||||
}
|
||||
if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
|
||||
s--
|
||||
}
|
||||
if s > Ss {
|
||||
Ss = s
|
||||
lens = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
lenf += lens - overlap
|
||||
lenb -= lens
|
||||
}
|
||||
|
||||
for i := 0; i < lenf; i++ {
|
||||
db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i]
|
||||
}
|
||||
for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ {
|
||||
eb[eblen+i] = nbuf[lastscan+lenf+i]
|
||||
}
|
||||
|
||||
dblen += lenf
|
||||
eblen += (scan - lenb) - (lastscan + lenf)
|
||||
|
||||
err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf))
|
||||
if err != nil {
|
||||
pfbz2.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
val := (scan - lenb) - (lastscan + lenf)
|
||||
err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
|
||||
if err != nil {
|
||||
pfbz2.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
val = (pos - lenb) - (lastpos + lenf)
|
||||
err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
|
||||
if err != nil {
|
||||
pfbz2.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
lastscan = scan - lenb
|
||||
lastpos = pos - lenb
|
||||
lastoffset = pos - scan
|
||||
}
|
||||
}
|
||||
err = pfbz2.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compute size of compressed ctrl data
|
||||
l64, err := patch.Seek(0, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.CtrlLen = int64(l64 - 32)
|
||||
|
||||
// Write compressed diff data
|
||||
pfbz2, err = newBzip2Writer(patch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n, err := pfbz2.Write(db[:dblen])
|
||||
if err != nil {
|
||||
pfbz2.Close()
|
||||
return err
|
||||
}
|
||||
if n != dblen {
|
||||
pfbz2.Close()
|
||||
return io.ErrShortWrite
|
||||
}
|
||||
err = pfbz2.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Compute size of compressed diff data
|
||||
n64, err := patch.Seek(0, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.DiffLen = n64 - l64
|
||||
|
||||
// Write compressed extra data
|
||||
pfbz2, err = newBzip2Writer(patch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n, err = pfbz2.Write(eb[:eblen])
|
||||
if err != nil {
|
||||
pfbz2.Close()
|
||||
return err
|
||||
}
|
||||
if n != eblen {
|
||||
pfbz2.Close()
|
||||
return io.ErrShortWrite
|
||||
}
|
||||
err = pfbz2.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Seek to the beginning, write the header, and close the file
|
||||
_, err = patch.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = binary.Write(patch, signMagLittleEndian{}, &hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
vendor/github.com/kr/binarydist/doc.go (generated, vendored): 24 deletions
@@ -1,24 +0,0 @@
|
||||
// Package binarydist implements binary diff and patch as described on
|
||||
// http://www.daemonology.net/bsdiff/. It reads and writes files
|
||||
// compatible with the tools there.
|
||||
package binarydist
|
||||
|
||||
var magic = [8]byte{'B', 'S', 'D', 'I', 'F', 'F', '4', '0'}
|
||||
|
||||
// File format:
|
||||
// 0 8 "BSDIFF40"
|
||||
// 8 8 X
|
||||
// 16 8 Y
|
||||
// 24 8 sizeof(newfile)
|
||||
// 32 X bzip2(control block)
|
||||
// 32+X Y bzip2(diff block)
|
||||
// 32+X+Y ??? bzip2(extra block)
|
||||
// with control block a set of triples (x,y,z) meaning "add x bytes
|
||||
// from oldfile to x bytes from the diff block; copy y bytes from the
|
||||
// extra block; seek forwards in oldfile by z bytes".
|
||||
type header struct {
|
||||
Magic [8]byte
|
||||
CtrlLen int64
|
||||
DiffLen int64
|
||||
NewSize int64
|
||||
}
|
vendor/github.com/kr/binarydist/encoding.go (generated, vendored): 53 deletions
@@ -1,53 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
// SignMagLittleEndian is the numeric encoding used by the bsdiff tools.
|
||||
// It implements binary.ByteOrder using a sign-magnitude format
|
||||
// and little-endian byte order. Only methods Uint64 and String
|
||||
// have been written; the rest panic.
|
||||
type signMagLittleEndian struct{}
|
||||
|
||||
func (signMagLittleEndian) Uint16(b []byte) uint16 { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) PutUint16(b []byte, v uint16) { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) Uint32(b []byte) uint32 { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) PutUint32(b []byte, v uint32) { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) Uint64(b []byte) uint64 {
|
||||
y := int64(b[0]) |
|
||||
int64(b[1])<<8 |
|
||||
int64(b[2])<<16 |
|
||||
int64(b[3])<<24 |
|
||||
int64(b[4])<<32 |
|
||||
int64(b[5])<<40 |
|
||||
int64(b[6])<<48 |
|
||||
int64(b[7]&0x7f)<<56
|
||||
|
||||
if b[7]&0x80 != 0 {
|
||||
y = -y
|
||||
}
|
||||
return uint64(y)
|
||||
}
|
||||
|
||||
func (signMagLittleEndian) PutUint64(b []byte, v uint64) {
|
||||
x := int64(v)
|
||||
neg := x < 0
|
||||
if neg {
|
||||
x = -x
|
||||
}
|
||||
|
||||
b[0] = byte(x)
|
||||
b[1] = byte(x >> 8)
|
||||
b[2] = byte(x >> 16)
|
||||
b[3] = byte(x >> 24)
|
||||
b[4] = byte(x >> 32)
|
||||
b[5] = byte(x >> 40)
|
||||
b[6] = byte(x >> 48)
|
||||
b[7] = byte(x >> 56)
|
||||
if neg {
|
||||
b[7] |= 0x80
|
||||
}
|
||||
}
|
||||
|
||||
func (signMagLittleEndian) String() string { return "signMagLittleEndian" }
|
vendor/github.com/kr/binarydist/patch.go (generated, vendored): 109 deletions
@@ -1,109 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
var ErrCorrupt = errors.New("corrupt patch")
|
||||
|
||||
// Patch applies patch to old, according to the bspatch algorithm,
|
||||
// and writes the result to new.
|
||||
func Patch(old io.Reader, new io.Writer, patch io.Reader) error {
|
||||
var hdr header
|
||||
err := binary.Read(patch, signMagLittleEndian{}, &hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hdr.Magic != magic {
|
||||
return ErrCorrupt
|
||||
}
|
||||
if hdr.CtrlLen < 0 || hdr.DiffLen < 0 || hdr.NewSize < 0 {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
ctrlbuf := make([]byte, hdr.CtrlLen)
|
||||
_, err = io.ReadFull(patch, ctrlbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cpfbz2 := bzip2.NewReader(bytes.NewReader(ctrlbuf))
|
||||
|
||||
diffbuf := make([]byte, hdr.DiffLen)
|
||||
_, err = io.ReadFull(patch, diffbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dpfbz2 := bzip2.NewReader(bytes.NewReader(diffbuf))
|
||||
|
||||
// The entire rest of the file is the extra block.
|
||||
epfbz2 := bzip2.NewReader(patch)
|
||||
|
||||
obuf, err := ioutil.ReadAll(old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nbuf := make([]byte, hdr.NewSize)
|
||||
|
||||
var oldpos, newpos int64
|
||||
for newpos < hdr.NewSize {
|
||||
var ctrl struct{ Add, Copy, Seek int64 }
|
||||
err = binary.Read(cpfbz2, signMagLittleEndian{}, &ctrl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sanity-check
|
||||
if newpos+ctrl.Add > hdr.NewSize {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Read diff string
|
||||
_, err = io.ReadFull(dpfbz2, nbuf[newpos:newpos+ctrl.Add])
|
||||
if err != nil {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Add old data to diff string
|
||||
for i := int64(0); i < ctrl.Add; i++ {
|
||||
if oldpos+i >= 0 && oldpos+i < int64(len(obuf)) {
|
||||
nbuf[newpos+i] += obuf[oldpos+i]
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust pointers
|
||||
newpos += ctrl.Add
|
||||
oldpos += ctrl.Add
|
||||
|
||||
// Sanity-check
|
||||
if newpos+ctrl.Copy > hdr.NewSize {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Read extra string
|
||||
_, err = io.ReadFull(epfbz2, nbuf[newpos:newpos+ctrl.Copy])
|
||||
if err != nil {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Adjust pointers
|
||||
newpos += ctrl.Copy
|
||||
oldpos += ctrl.Seek
|
||||
}
|
||||
|
||||
// Write the new file
|
||||
for len(nbuf) > 0 {
|
||||
n, err := new.Write(nbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nbuf = nbuf[n:]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
vendor/github.com/kr/binarydist/seek.go (generated, vendored): 43 deletions
@@ -1,43 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
type seekBuffer struct {
|
||||
buf []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func (b *seekBuffer) Write(p []byte) (n int, err error) {
|
||||
n = copy(b.buf[b.pos:], p)
|
||||
if n == len(p) {
|
||||
b.pos += n
|
||||
return n, nil
|
||||
}
|
||||
b.buf = append(b.buf, p[n:]...)
|
||||
b.pos += len(p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *seekBuffer) Seek(offset int64, whence int) (ret int64, err error) {
|
||||
var abs int64
|
||||
switch whence {
|
||||
case 0:
|
||||
abs = offset
|
||||
case 1:
|
||||
abs = int64(b.pos) + offset
|
||||
case 2:
|
||||
abs = int64(len(b.buf)) + offset
|
||||
default:
|
||||
return 0, errors.New("binarydist: invalid whence")
|
||||
}
|
||||
if abs < 0 {
|
||||
return 0, errors.New("binarydist: negative position")
|
||||
}
|
||||
if abs >= 1<<31 {
|
||||
return 0, errors.New("binarydist: position out of range")
|
||||
}
|
||||
b.pos = int(abs)
|
||||
return abs, nil
|
||||
}
|
vendor/gopkg.in/inconshreveable/go-update.v0/LICENSE (generated, vendored): 13 deletions
@@ -1,13 +0,0 @@
|
||||
Copyright 2014 Alan Shreve
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
vendor/gopkg.in/inconshreveable/go-update.v0/README.md (generated, vendored): 37 deletions
@@ -1,37 +0,0 @@
|
||||
# go-update: Automatically update Go programs from the internet
|
||||
|
||||
go-update allows a program to update itself by replacing its executable file
|
||||
with a new version. It provides the flexibility to implement different updating user experiences
|
||||
like auto-updating, or manual user-initiated updates. It also boasts
|
||||
advanced features like binary patching and code signing verification.
|
||||
|
||||
Updating your program to a new version is as easy as:
|
||||
|
||||
err, errRecover := update.New().FromUrl("http://release.example.com/2.0/myprogram")
|
||||
if err != nil {
|
||||
fmt.Printf("Update failed: %v\n", err)
|
||||
}
|
||||
|
||||
## Documentation and API Reference
|
||||
|
||||
Comprehensive API documentation and code examples are available in the code documentation available on godoc.org:
|
||||
|
||||
[](https://godoc.org/github.com/inconshreveable/go-update)
|
||||
|
||||
## Features
|
||||
|
||||
- Cross platform support (Windows too!)
|
||||
- Binary patch application
|
||||
- Checksum verification
|
||||
- Code signing verification
|
||||
- Support for updating arbitrary files
|
||||
|
||||
## [equinox.io](https://equinox.io)
|
||||
go-update provides the primitives for building self-updating applications, but there a number of other challenges
|
||||
involved in a complete updating solution such as hosting, code signing, update channels, gradual rollout,
|
||||
dynamically computing binary patches, tracking update metrics like versions and failures, plus more.
|
||||
|
||||
I provide this service, a complete solution, free for open source projects, at [equinox.io](https://equinox.io).
|
||||
|
||||
## License
|
||||
Apache
|
vendor/gopkg.in/inconshreveable/go-update.v0/download/download.go (generated, vendored): 235 deletions
@@ -1,235 +0,0 @@
|
||||
package download
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type roundTripper struct {
|
||||
RoundTripFn func(*http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
func (rt *roundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
return rt.RoundTripFn(r)
|
||||
}
|
||||
|
||||
// Download encapsulates the state and parameters to download content
|
||||
// from a URL which:
|
||||
//
|
||||
// - Publishes the percentage of the download completed to a channel.
|
||||
// - May resume a previous download that was partially completed.
|
||||
//
|
||||
// Create an instance with the New() factory function.
|
||||
type Download struct {
|
||||
// net/http.Client to use when downloading the update.
|
||||
// If nil, a default http.Client is used
|
||||
HttpClient *http.Client
|
||||
|
||||
// As bytes are downloaded, they are written to Target.
|
||||
// Download also uses the Target's Seek method to determine
|
||||
// the size of partial-downloads so that it may properly
|
||||
// request the remaining bytes to resume the download.
|
||||
Target Target
|
||||
|
||||
// Progress returns the percentage of the download
|
||||
// completed as an integer between 0 and 100
|
||||
Progress chan (int)
|
||||
|
||||
// HTTP Method to use in the download request. Default is "GET"
|
||||
Method string
|
||||
|
||||
// HTTP URL to issue the download request to
|
||||
Url string
|
||||
}
|
||||
|
||||
// New initializes a new Download object which will download
|
||||
// the content from url into target.
|
||||
func New(url string, target Target, httpClient *http.Client) *Download {
|
||||
return &Download{
|
||||
HttpClient: httpClient,
|
||||
Progress: make(chan int),
|
||||
Method: "GET",
|
||||
Url: url,
|
||||
Target: target,
|
||||
}
|
||||
}
|
||||
|
||||
// Get() downloads the content of a url to a target destination.
|
||||
//
|
||||
// Only HTTP/1.1 servers that implement the Range header support resuming a
|
||||
// partially completed download.
|
||||
//
|
||||
// On success, the server must return 200 and the content, or 206 when resuming a partial download.
|
||||
// If the HTTP server returns a 3XX redirect, it will be followed according to d.HttpClient's redirect policy.
|
||||
//
|
||||
func (d *Download) Get() (err error) {
|
||||
// Close the progress channel whenever this function completes
|
||||
defer close(d.Progress)
|
||||
|
||||
// determine the size of the download target to determine if we're resuming a partial download
|
||||
offset, err := d.Target.Size()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// create the download request
|
||||
req, err := http.NewRequest(d.Method, d.Url, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// create an http client if one does not exist
|
||||
if d.HttpClient == nil {
|
||||
d.HttpClient = http.DefaultClient
|
||||
}
|
||||
|
||||
// we have to add headers like this so they get used across redirects
|
||||
trans := d.HttpClient.Transport
|
||||
if trans == nil {
|
||||
trans = http.DefaultTransport
|
||||
}
|
||||
|
||||
d.HttpClient.Transport = &roundTripper{
|
||||
RoundTripFn: func(r *http.Request) (*http.Response, error) {
|
||||
// add header for download continuation
|
||||
if offset > 0 {
|
||||
r.Header.Add("Range", fmt.Sprintf("%d-", offset))
|
||||
}
|
||||
|
||||
// ask for gzipped content so that net/http won't unzip it for us
|
||||
// and destroy the content length header we need for progress calculations
|
||||
r.Header.Add("Accept-Encoding", "gzip")
|
||||
|
||||
return trans.RoundTrip(r)
|
||||
},
|
||||
}
|
||||
|
||||
// issue the download request
|
||||
resp, err := d.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
switch resp.StatusCode {
|
||||
// ok
|
||||
case 200, 206:
|
||||
|
||||
// server error
|
||||
default:
|
||||
err = fmt.Errorf("Non 2XX response when downloading update: %s", resp.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// Determine how much we have to download
|
||||
// net/http sets this to -1 when it is unknown
|
||||
clength := resp.ContentLength
|
||||
|
||||
// Read the content from the response body
|
||||
rd := resp.Body
|
||||
|
||||
// meter the rate at which we download content for
|
||||
// progress reporting if we know how much to expect
|
||||
if clength > 0 {
|
||||
rd = &meteredReader{rd: rd, totalSize: clength, progress: d.Progress}
|
||||
}
|
||||
|
||||
// Decompress the content if necessary
|
||||
if resp.Header.Get("Content-Encoding") == "gzip" {
|
||||
rd, err = gzip.NewReader(rd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Download the update
|
||||
_, err = io.Copy(d.Target, rd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// meteredReader wraps a ReadCloser. Calls to a meteredReader's Read() method
|
||||
// publish updates to a progress channel with the percentage read so far.
|
||||
type meteredReader struct {
|
||||
rd io.ReadCloser
|
||||
totalSize int64
|
||||
progress chan int
|
||||
totalRead int64
|
||||
ticks int64
|
||||
}
|
||||
|
||||
func (m *meteredReader) Close() error {
|
||||
return m.rd.Close()
|
||||
}
|
||||
|
||||
func (m *meteredReader) Read(b []byte) (n int, err error) {
|
||||
chunkSize := (m.totalSize / 100) + 1
|
||||
lenB := int64(len(b))
|
||||
|
||||
var nChunk int
|
||||
for start := int64(0); start < lenB; start += int64(nChunk) {
|
||||
end := start + chunkSize
|
||||
if end > lenB {
|
||||
end = lenB
|
||||
}
|
||||
|
||||
nChunk, err = m.rd.Read(b[start:end])
|
||||
|
||||
n += nChunk
|
||||
m.totalRead += int64(nChunk)
|
||||
|
||||
if m.totalRead > (m.ticks * chunkSize) {
|
||||
m.ticks += 1
|
||||
// try to send on channel, but don't block if it's full
|
||||
select {
|
||||
case m.progress <- int(m.ticks + 1):
|
||||
default:
|
||||
}
|
||||
|
||||
// give the progress channel consumer a chance to run
|
||||
runtime.Gosched()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// A Target is what you can supply to Download,
|
||||
// it's just an io.Writer with a Size() method so that
|
||||
// the a Download can "resume" an interrupted download
|
||||
type Target interface {
|
||||
io.Writer
|
||||
Size() (int, error)
|
||||
}
|
||||
|
||||
type FileTarget struct {
|
||||
*os.File
|
||||
}
|
||||
|
||||
func (t *FileTarget) Size() (int, error) {
|
||||
if fi, err := t.File.Stat(); err != nil {
|
||||
return 0, err
|
||||
} else {
|
||||
return int(fi.Size()), nil
|
||||
}
|
||||
}
|
||||
|
||||
type MemoryTarget struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
func (t *MemoryTarget) Size() (int, error) {
|
||||
return t.Buffer.Len(), nil
|
||||
}
|
vendor/gopkg.in/inconshreveable/go-update.v0/hide_noop.go (generated, vendored): 7 deletions
@@ -1,7 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package update
|
||||
|
||||
func hideFile(path string) error {
|
||||
return nil
|
||||
}
|
vendor/gopkg.in/inconshreveable/go-update.v0/hide_windows.go (generated, vendored): 19 deletions
@@ -1,19 +0,0 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func hideFile(path string) error {
|
||||
kernel32 := syscall.NewLazyDLL("kernel32.dll")
|
||||
setFileAttributes := kernel32.NewProc("SetFileAttributesW")
|
||||
|
||||
r1, _, err := setFileAttributes.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), 2)
|
||||
|
||||
if r1 == 0 {
|
||||
return err
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
vendor/gopkg.in/inconshreveable/go-update.v0/update.go (generated, vendored): 491 deletions
@@ -1,491 +0,0 @@
|
||||
/*
|
||||
go-update allows a program to update itself by replacing its executable file
|
||||
with a new version. It provides the flexibility to implement different updating user experiences
|
||||
like auto-updating, or manual user-initiated updates. It also boasts
|
||||
advanced features like binary patching and code signing verification.
|
||||
|
||||
Updating your program to a new version is as easy as:
|
||||
|
||||
err, errRecover := update.New().FromUrl("http://release.example.com/2.0/myprogram")
|
||||
if err != nil {
|
||||
fmt.Printf("Update failed: %v\n", err)
|
||||
}
|
||||
|
||||
You may also choose to update from other data sources such as a file or an io.Reader:
|
||||
|
||||
err, errRecover := update.New().FromFile("/path/to/update")
|
||||
|
||||
Binary Diff Patching
|
||||
|
||||
Binary diff updates are supported and easy to use:
|
||||
|
||||
up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF)
|
||||
err, errRecover := up.FromUrl("http://release.example.com/2.0/mypatch")
|
||||
|
||||
Checksum Verification
|
||||
|
||||
You should also verify the checksum of new updates as well as verify
|
||||
the digital signature of an update. Note that even when you choose to apply
|
||||
a patch, the checksum is verified against the complete update after that patch
|
||||
has been applied.
|
||||
|
||||
up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
|
||||
err, errRecover := up.FromUrl("http://release.example.com/2.0/mypatch")
|
||||
|
||||
Updating other files
|
||||
|
||||
Updating arbitrary files is also supported. You may update files which are
|
||||
not the currently running program:
|
||||
|
||||
up := update.New().Target("/usr/local/bin/some-program")
|
||||
err, errRecover := up.FromUrl("http://release.example.com/2.0/some-program")
|
||||
|
||||
Code Signing
|
||||
|
||||
Truly secure updates use code signing to verify that the update was issued by a trusted party.
|
||||
To do this, you'll need to generate a public/private key pair. You can do this with openssl,
|
||||
or the equinox.io client (https://equinox.io/client) can easily generate one for you:
|
||||
|
||||
# with equinox client
|
||||
equinox genkey --private-key=private.pem --public-key=public.pem
|
||||
|
||||
# with openssl
|
||||
openssl genrsa -out private.pem 2048
|
||||
openssl rsa -in private.pem -out public.pem -pubout
|
||||
|
||||
Once you have your key pair, you can instruct your program to validate its updates
|
||||
with the public key:
|
||||
|
||||
const publicKey = `-----BEGIN PUBLIC KEY-----
|
||||
...
|
||||
-----END PUBLIC KEY-----`
|
||||
|
||||
up, err := update.New().VerifySignatureWithPEM(publicKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Bad public key: '%v': %v", publicKey, err)
|
||||
}
|
||||
|
||||
Once you've configured your program this way, it will disallow all updates unless they
|
||||
are properly signed. You must now pass in the signature to verify with:
|
||||
|
||||
up.VerifySignature(signature).FromUrl("http://dl.example.com/update")
|
||||
|
||||
Error Handling and Recovery
|
||||
|
||||
To perform an update, the process must be able to read its executable file and to write
|
||||
to the directory that contains its executable file. It can be useful to check whether the process
|
||||
has the necessary permissions to perform an update before trying to apply one. Use the
|
||||
CanUpdate call to provide a useful message to the user if the update can't proceed without
|
||||
elevated permissions:
|
||||
|
||||
up := update.New().Target("/etc/hosts")
|
||||
err := up.CanUpdate()
|
||||
if err != nil {
|
||||
fmt.Printf("Can't update because: '%v'. Try as root or Administrator\n", err)
|
||||
return
|
||||
}
|
||||
err, errRecover := up.FromUrl("https://example.com/new/hosts")
|
||||
|
||||
Although exceedingly unlikely, the update operation itself is not atomic and can fail
|
||||
in such a way that a user's computer is left in an inconsistent state. If that happens,
|
||||
go-update attempts to recover to leave the system in a good state. If the recovery step
|
||||
fails (even more unlikely), a second error, referred to as "errRecover" will be non-nil
|
||||
so that you may inform your users of the bad news. You should handle this case as shown
|
||||
here:
|
||||
|
||||
err, errRecover := up.FromUrl("https://example.com/update")
|
||||
if err != nil {
|
||||
fmt.Printf("Update failed: %v\n", err)
|
||||
if errRecover != nil {
|
||||
fmt.Printf("Failed to recover bad update: %v!\n", errRecover)
|
||||
fmt.Printf("Program exectuable may be missing!\n")
|
||||
}
|
||||
}
|
||||
|
||||
Subpackages
|
||||
|
||||
Sub-package check contains the client functionality for a simple protocol for negotiating
|
||||
whether a new update is available, where it is, and the metadata needed for verifying it.
|
||||
|
||||
Sub-package download contains functionality for downloading from an HTTP endpoint
|
||||
while outputting a progress meter and supports resuming partial downloads.
|
||||
*/
|
||||
package update
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
_ "crypto/sha512" // for tls cipher support
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kardianos/osext"
|
||||
"github.com/kr/binarydist"
|
||||
"gopkg.in/inconshreveable/go-update.v0/download"
|
||||
)

// PatchType describes the type of a binary patch, if any. Only bsdiff is supported.
type PatchType string

const (
	PATCHTYPE_BSDIFF PatchType = "bsdiff"
	PATCHTYPE_NONE             = ""
)

type Update struct {
	// empty string means "path of the current executable"
	TargetPath string

	// type of patch to apply. PATCHTYPE_NONE means "not a patch"
	PatchType

	// sha256 checksum of the new binary to verify against
	Checksum []byte

	// public key to use for signature verification
	PublicKey *rsa.PublicKey

	// signature to use for signature verification
	Signature []byte

	// configurable http client can be passed to download
	HTTPClient *http.Client
}

func (u *Update) getPath() (string, error) {
	if u.TargetPath == "" {
		return osext.Executable()
	} else {
		return u.TargetPath, nil
	}
}

// New creates a new Update object.
// A default update object assumes the complete binary
// content will be used for the update (not a patch) and that
// the intended target is the running executable.
//
// Use this as the start of a chain of calls on the Update
// object to build up your configuration. Example:
//
//     up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
//
func New() *Update {
	return &Update{
		TargetPath: "",
		PatchType:  PATCHTYPE_NONE,
	}
}

// Target configures the update to update the file at the given path.
// The empty string means 'the executable file of the running program'.
func (u *Update) Target(path string) *Update {
	u.TargetPath = path
	return u
}

// ApplyPatch configures the update to treat the contents of the update
// as a patch to apply to the existing target. You must specify the
// format of the patch. Only PATCHTYPE_BSDIFF is supported at the moment.
func (u *Update) ApplyPatch(patchType PatchType) *Update {
	u.PatchType = patchType
	return u
}

// VerifyChecksum configures the update to verify that the
// update has the given sha256 checksum.
func (u *Update) VerifyChecksum(checksum []byte) *Update {
	u.Checksum = checksum
	return u
}

// VerifySignature configures the update to verify the given
// signature of the update. You must also call one of the
// VerifySignatureWith* functions to specify a public key
// to use for verification.
func (u *Update) VerifySignature(signature []byte) *Update {
	u.Signature = signature
	return u
}

// VerifySignatureWith configures the update to use the given RSA
// public key to verify the update's signature. You must also call
// VerifySignature() with a signature to check.
//
// You'll probably want to use VerifySignatureWithPEM instead of
// parsing the public key yourself.
func (u *Update) VerifySignatureWith(publicKey *rsa.PublicKey) *Update {
	u.PublicKey = publicKey
	return u
}

// VerifySignatureWithPEM configures the update to use the given PEM-formatted
// RSA public key to verify the update's signature. You must also call
// VerifySignature() with a signature to check.
//
// A PEM formatted public key typically begins with
//     -----BEGIN PUBLIC KEY-----
func (u *Update) VerifySignatureWithPEM(publicKeyPEM []byte) (*Update, error) {
	block, _ := pem.Decode(publicKeyPEM)
	if block == nil {
		return u, fmt.Errorf("Couldn't parse PEM data")
	}

	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return u, err
	}

	var ok bool
	u.PublicKey, ok = pub.(*rsa.PublicKey)
	if !ok {
		return u, fmt.Errorf("Public key isn't an RSA public key")
	}

	return u, nil
}
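
// NOTE: exampleSignedUpdateFromURL is an illustrative sketch added to this
// documentation and is not part of the original go-update API. It shows one
// way the builder methods above could be combined: verify both a sha256
// checksum and an RSA signature (supplied as a PEM public key) before
// downloading the new binary from a hypothetical release URL.
func exampleSignedUpdateFromURL(publicKeyPEM, checksum, signature []byte, url string) (err error, errRecover error) {
	up, err := New().
		VerifyChecksum(checksum).
		VerifySignature(signature).
		VerifySignatureWithPEM(publicKeyPEM)
	if err != nil {
		// the PEM data could not be parsed into an RSA public key
		return err, nil
	}
	// download, verify and swap the running executable in one call
	return up.FromUrl(url)
}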

// FromUrl updates the target with the contents of the given URL.
func (u *Update) FromUrl(url string) (err error, errRecover error) {
	target := new(download.MemoryTarget)
	err = download.New(url, target, u.HTTPClient).Get()
	if err != nil {
		return
	}

	return u.FromStream(target)
}

// FromFile updates the target with the contents of the given file.
func (u *Update) FromFile(path string) (err error, errRecover error) {
	// open the new updated contents
	fp, err := os.Open(path)
	if err != nil {
		return
	}
	defer fp.Close()

	// do the update
	return u.FromStream(fp)
}
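
// NOTE: examplePatchFromFile is an illustrative sketch, not part of the
// original library. It assumes a bsdiff patch has already been downloaded to
// patchPath and that checksum is the sha256 of the fully patched binary, so
// verification happens after the patch has been applied.
func examplePatchFromFile(patchPath string, checksum []byte) (err error, errRecover error) {
	return New().
		ApplyPatch(PATCHTYPE_BSDIFF).
		VerifyChecksum(checksum).
		FromFile(patchPath)
}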

// FromStream updates the target file with the contents of the supplied io.Reader.
//
// FromStream performs the following actions to ensure a safe cross-platform update:
//
// 1. If configured, applies the contents of the io.Reader as a binary patch.
//
// 2. If configured, computes the sha256 checksum and verifies it matches.
//
// 3. If configured, verifies the RSA signature with a public key.
//
// 4. Creates a new file, /path/to/.target.new with mode 0755 with the contents of the updated file
//
// 5. Renames /path/to/target to /path/to/.target.old
//
// 6. Renames /path/to/.target.new to /path/to/target
//
// 7. If the rename is successful, deletes /path/to/.target.old, returns no error
//
// 8. If the rename fails, attempts to rename /path/to/.target.old back to /path/to/target
// If this operation fails, it is reported in the errRecover return value so as not to
// mask the original error that caused the recovery attempt.
//
// On Windows, the removal of /path/to/.target.old always fails, so instead
// we just hide the old file.
func (u *Update) FromStream(updateWith io.Reader) (err error, errRecover error) {
	updatePath, err := u.getPath()
	if err != nil {
		return
	}

	var newBytes []byte
	// apply a patch if requested
	switch u.PatchType {
	case PATCHTYPE_BSDIFF:
		newBytes, err = applyPatch(updateWith, updatePath)
		if err != nil {
			return
		}
	case PATCHTYPE_NONE:
		// no patch to apply, go on through
		newBytes, err = ioutil.ReadAll(updateWith)
		if err != nil {
			return
		}
	default:
		err = fmt.Errorf("Unrecognized patch type: %s", u.PatchType)
		return
	}

	// verify checksum if requested
	if u.Checksum != nil {
		if err = verifyChecksum(newBytes, u.Checksum); err != nil {
			return
		}
	}

	// verify signature if requested
	if u.Signature != nil || u.PublicKey != nil {
		if u.Signature == nil {
			err = fmt.Errorf("No signature to verify!")
			return
		}

		if u.PublicKey == nil {
			err = fmt.Errorf("No public key specified to verify signature")
			return
		}

		if err = verifySignature(newBytes, u.Signature, u.PublicKey); err != nil {
			return
		}
	}

	// get the directory the executable exists in
	updateDir := filepath.Dir(updatePath)
	filename := filepath.Base(updatePath)

	// copy the contents of newBytes to the new executable file
	newPath := filepath.Join(updateDir, fmt.Sprintf(".%s.new", filename))
	fp, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		return
	}
	defer fp.Close()
	_, err = io.Copy(fp, bytes.NewReader(newBytes))
	if err != nil {
		return
	}

	// if we don't call fp.Close(), windows won't let us move the new executable
	// because the file will still be "in use"
	fp.Close()

	// this is where we'll move the executable to so that we can swap in the updated replacement
	oldPath := filepath.Join(updateDir, fmt.Sprintf(".%s.old", filename))

	// delete any existing old exec file - this is necessary on Windows for two reasons:
	// 1. after a successful update, Windows can't remove the .old file because the process is still running
	// 2. windows rename operations fail if the destination file already exists
	_ = os.Remove(oldPath)

	// move the existing executable to a new file in the same directory
	err = os.Rename(updatePath, oldPath)
	if err != nil {
		return
	}

	// move the new executable in to become the new program
	err = os.Rename(newPath, updatePath)

	if err != nil {
		// rename unsuccessful; try to roll back by restoring the old binary
		errRecover = os.Rename(oldPath, updatePath)
	} else {
		// rename successful, remove the old binary
		errRemove := os.Remove(oldPath)

		// windows has trouble with removing old binaries, so hide it instead
		if errRemove != nil {
			_ = hideFile(oldPath)
		}
	}

	return
}

// CanUpdate determines whether the process has the correct permissions to
// perform the requested update. If the update can proceed, it returns nil, otherwise
// it returns the error that would occur if an update were attempted.
func (u *Update) CanUpdate() (err error) {
	// get the directory the file exists in
	path, err := u.getPath()
	if err != nil {
		return
	}

	fileDir := filepath.Dir(path)
	fileName := filepath.Base(path)

	// attempt to open a file in the file's directory
	newPath := filepath.Join(fileDir, fmt.Sprintf(".%s.new", fileName))
	fp, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
	if err != nil {
		return
	}
	fp.Close()

	_ = os.Remove(newPath)
	return
}
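
// NOTE: exampleCheckThenUpdate is an illustrative sketch, not part of the
// original library. It demonstrates using CanUpdate to fail fast (for example,
// when the target directory is not writable) before any download is started.
func exampleCheckThenUpdate(u *Update, url string) (err error, errRecover error) {
	if err = u.CanUpdate(); err != nil {
		return err, nil
	}
	return u.FromUrl(url)
}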

func applyPatch(patch io.Reader, updatePath string) ([]byte, error) {
	// open the file to update
	old, err := os.Open(updatePath)
	if err != nil {
		return nil, err
	}
	defer old.Close()

	// apply the patch
	applied := new(bytes.Buffer)
	if err = binarydist.Patch(old, applied, patch); err != nil {
		return nil, err
	}

	return applied.Bytes(), nil
}

func verifyChecksum(updated []byte, expectedChecksum []byte) error {
	checksum, err := ChecksumForBytes(updated)
	if err != nil {
		return err
	}

	if !bytes.Equal(expectedChecksum, checksum) {
		return fmt.Errorf("Updated file has wrong checksum. Expected: %x, got: %x", expectedChecksum, checksum)
	}

	return nil
}

// ChecksumForFile returns the sha256 checksum for the given file
func ChecksumForFile(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return ChecksumForReader(f)
}

// ChecksumForReader returns the sha256 checksum for the entire
// contents of the given reader.
func ChecksumForReader(rd io.Reader) ([]byte, error) {
	h := sha256.New()
	if _, err := io.Copy(h, rd); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

// ChecksumForBytes returns the sha256 checksum for the given bytes
func ChecksumForBytes(source []byte) ([]byte, error) {
	return ChecksumForReader(bytes.NewReader(source))
}

func verifySignature(source, signature []byte, publicKey *rsa.PublicKey) error {
	checksum, err := ChecksumForBytes(source)
	if err != nil {
		return err
	}

	return rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, checksum, signature)
}
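
// NOTE: exampleSignForUpdate is an illustrative, publisher-side sketch and is
// not part of the original library. It produces a signature in exactly the
// form verifySignature expects: an RSA PKCS#1 v1.5 signature over the sha256
// checksum of the new binary. The entropy argument would normally be
// crypto/rand.Reader (not imported by this file, so it is passed in here).
func exampleSignForUpdate(entropy io.Reader, priv *rsa.PrivateKey, newBinary []byte) ([]byte, error) {
	checksum, err := ChecksumForBytes(newBinary)
	if err != nil {
		return nil, err
	}
	// sign the checksum so the client can verify it with the matching public key
	return rsa.SignPKCS1v15(entropy, priv, crypto.SHA256, checksum)
}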