package home

import (
	"archive/tar"
	"archive/zip"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/AdguardTeam/AdGuardHome/util"
	"github.com/AdguardTeam/golibs/log"
)
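// The version.json document fetched from versionCheckURL is expected to
// contain the keys referenced below.  A rough, hypothetical example (the
// URLs and version numbers are made up for illustration only):
//
//	{
//	  "version": "v0.100.0",
//	  "announcement": "AdGuard Home v0.100.0 is now available!",
//	  "announcement_url": "https://example.org/release-notes",
//	  "selfupdate_min_version": "v0.0",
//	  "download_linux_amd64": "https://example.org/AdGuardHome_linux_amd64.tar.gz",
//	  "download_linux_armv5": "https://example.org/AdGuardHome_linux_armv5.tar.gz"
//	}
//
// getVersionResp() converts it into the JSON object served to the UI:
// "new_version", "announcement", "announcement_url" and "can_autoupdate".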
// Convert version.json data to our JSON response
func getVersionResp(data []byte) []byte {
	versionJSON := make(map[string]interface{})
	err := json.Unmarshal(data, &versionJSON)
	if err != nil {
		log.Error("version.json: %s", err)
		return []byte{}
	}

	ret := make(map[string]interface{})
	ret["can_autoupdate"] = false

	var ok1, ok2, ok3 bool
	ret["new_version"], ok1 = versionJSON["version"].(string)
	ret["announcement"], ok2 = versionJSON["announcement"].(string)
	ret["announcement_url"], ok3 = versionJSON["announcement_url"].(string)
	selfUpdateMinVersion, ok4 := versionJSON["selfupdate_min_version"].(string)
	if !ok1 || !ok2 || !ok3 || !ok4 {
		log.Error("version.json: invalid data")
		return []byte{}
	}

	// the key is download_linux_arm or download_linux_arm64 for regular ARM versions
	dloadName := fmt.Sprintf("download_%s_%s", runtime.GOOS, runtime.GOARCH)
	if runtime.GOARCH == "arm" && ARMVersion == "5" {
		// the key is download_linux_armv5 for ARMv5
		dloadName = fmt.Sprintf("download_%s_%sv%s", runtime.GOOS, runtime.GOARCH, ARMVersion)
	}
	_, ok := versionJSON[dloadName]
	if ok && ret["new_version"] != versionString && versionString >= selfUpdateMinVersion {
		canUpdate := true

		tlsConf := tlsConfigSettings{}
		Context.tls.WriteDiskConfig(&tlsConf)
		if runtime.GOOS != "windows" &&
			((tlsConf.Enabled && (tlsConf.PortHTTPS < 1024 || tlsConf.PortDNSOverTLS < 1024)) ||
				config.BindPort < 1024 ||
				config.DNS.Port < 1024) {
			// On UNIX, if we're running under a regular user,
			// but with CAP_NET_BIND_SERVICE set on the binary file,
			// and we're listening on ports <1024,
			// we won't be able to restart after we replace the binary file,
			// because we'll lose the CAP_NET_BIND_SERVICE capability.
			canUpdate, _ = util.HaveAdminRights()
		}
		ret["can_autoupdate"] = canUpdate
	}

	d, _ := json.Marshal(ret)
	return d
}
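// getVersionJSONRequest is the request body accepted by handleGetVersionJSON.
// A hypothetical example: sending {"recheck_now": true} forces a fresh
// download of version.json instead of serving the cached copy.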
type getVersionJSONRequest struct {
	RecheckNow bool `json:"recheck_now"`
}
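// The handler below serves the cached version data when it is fresh enough
// (versionCheckPeriod) and "recheck_now" wasn't requested; otherwise it
// downloads version.json (retrying up to 3 times on i/o timeouts), caches
// the raw body, and responds with the result of getVersionResp().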
// Get the latest available version from the Internet
func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
	if Context.disableUpdate {
		return
	}

	req := getVersionJSONRequest{}
	err := json.NewDecoder(r.Body).Decode(&req)
	if err != nil {
		httpError(w, http.StatusBadRequest, "JSON parse: %s", err)
		return
	}

	now := time.Now()
	if !req.RecheckNow {
		Context.controlLock.Lock()
		cached := now.Sub(config.versionCheckLastTime) <= versionCheckPeriod && len(config.versionCheckJSON) != 0
		data := config.versionCheckJSON
		Context.controlLock.Unlock()

		if cached {
			log.Tracef("Returning cached data")
			w.Header().Set("Content-Type", "application/json")
			w.Write(getVersionResp(data))
			return
		}
	}

	var resp *http.Response
	for i := 0; i != 3; i++ {
		log.Tracef("Downloading data from %s", versionCheckURL)
		resp, err = Context.client.Get(versionCheckURL)
		if resp != nil && resp.Body != nil {
			defer resp.Body.Close()
		}
		if err != nil && strings.HasSuffix(err.Error(), "i/o timeout") {
			// This case may happen while we're restarting the DNS server
			// https://github.com/AdguardTeam/AdGuardHome/issues/934
			continue
		}
		break
	}
	if err != nil {
		httpError(w, http.StatusBadGateway, "Couldn't get version check json from %s: %T %s\n", versionCheckURL, err, err)
		return
	}

	// read the body entirely
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		httpError(w, http.StatusBadGateway, "Couldn't read response body from %s: %s", versionCheckURL, err)
		return
	}

	Context.controlLock.Lock()
	config.versionCheckLastTime = now
	config.versionCheckJSON = body
	Context.controlLock.Unlock()

	w.Header().Set("Content-Type", "application/json")
	_, err = w.Write(getVersionResp(body))
	if err != nil {
		httpError(w, http.StatusInternalServerError, "Couldn't write body: %s", err)
	}
}

// Copy file on disk
func copyFile(src, dst string) error {
	d, e := ioutil.ReadFile(src)
	if e != nil {
		return e
	}
	e = ioutil.WriteFile(dst, d, 0644)
	if e != nil {
		return e
	}
	return nil
}
type updateInfo struct {
	pkgURL           string // URL for the new package
	pkgName          string // Full path to package file
	newVer           string // New version string
	updateDir        string // Full path to the directory containing unpacked files from the new package
	backupDir        string // Full path to backup directory
	configName       string // Full path to the current configuration file
	updateConfigName string // Full path to the configuration file to check by the new binary
	curBinName       string // Full path to the current executable file
	bkpBinName       string // Full path to the current executable file in backup directory
	newBinName       string // Full path to the new executable file
}
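// A sketch of the on-disk layout that getUpdateInfo() prepares, assuming a
// hypothetical working directory /opt/AdGuardHome and a new version v0.100.0:
//
//	/opt/AdGuardHome/AdGuardHome                      - curBinName
//	/opt/AdGuardHome/agh-backup/                      - backupDir (bkpBinName, old AdGuardHome.yaml)
//	/opt/AdGuardHome/agh-update-v0.100.0/             - updateDir (downloaded package, unpacked files)
//	/opt/AdGuardHome/agh-update-v0.100.0/AdGuardHome/AdGuardHome - newBinName (for .tar.gz packages)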
// Fill in updateInfo object
func getUpdateInfo(jsonData []byte) (*updateInfo, error) {
	var u updateInfo

	workDir := Context.workDir

	versionJSON := make(map[string]interface{})
	err := json.Unmarshal(jsonData, &versionJSON)
	if err != nil {
		return nil, fmt.Errorf("JSON parse: %s", err)
	}

	// use the two-value type assertions so that a missing or non-string key
	// leads to the "invalid JSON" error below instead of a panic
	u.pkgURL, _ = versionJSON[fmt.Sprintf("download_%s_%s", runtime.GOOS, runtime.GOARCH)].(string)
	u.newVer, _ = versionJSON["version"].(string)
	if len(u.pkgURL) == 0 || len(u.newVer) == 0 {
		return nil, fmt.Errorf("invalid JSON")
	}

	if u.newVer == versionString {
		return nil, fmt.Errorf("no need to update")
	}

	u.updateDir = filepath.Join(workDir, fmt.Sprintf("agh-update-%s", u.newVer))
	u.backupDir = filepath.Join(workDir, "agh-backup")

	_, pkgFileName := filepath.Split(u.pkgURL)
	if len(pkgFileName) == 0 {
		return nil, fmt.Errorf("invalid JSON")
	}
	u.pkgName = filepath.Join(u.updateDir, pkgFileName)

	u.configName = config.getConfigFilename()
	u.updateConfigName = filepath.Join(u.updateDir, "AdGuardHome", "AdGuardHome.yaml")
	if strings.HasSuffix(pkgFileName, ".zip") {
		u.updateConfigName = filepath.Join(u.updateDir, "AdGuardHome.yaml")
	}

	binName := "AdGuardHome"
	if runtime.GOOS == "windows" {
		binName = "AdGuardHome.exe"
	}
	u.curBinName = filepath.Join(workDir, binName)
	if !util.FileExists(u.curBinName) {
		return nil, fmt.Errorf("executable file %s doesn't exist", u.curBinName)
	}
	u.bkpBinName = filepath.Join(u.backupDir, binName)
	u.newBinName = filepath.Join(u.updateDir, "AdGuardHome", binName)
	if strings.HasSuffix(pkgFileName, ".zip") {
		u.newBinName = filepath.Join(u.updateDir, binName)
	}

	return &u, nil
}
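// Both unpack helpers below return the archive-relative names of the files
// they wrote; doUpdate() later feeds that list to copySupportingFiles().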
// Unpack all files from .zip file to the specified directory
// Existing files are overwritten
// Return the list of files (not directories) written
func zipFileUnpack(zipfile, outdir string) ([]string, error) {
	r, err := zip.OpenReader(zipfile)
	if err != nil {
		return nil, fmt.Errorf("zip.OpenReader(): %s", err)
	}
	defer r.Close()

	var files []string
	var err2 error
	var zr io.ReadCloser
	for _, zf := range r.File {
		zr, err = zf.Open()
		if err != nil {
			err2 = fmt.Errorf("zip file Open(): %s", err)
			break
		}

		fi := zf.FileInfo()
		if len(fi.Name()) == 0 {
			continue
		}

		fn := filepath.Join(outdir, fi.Name())

		if fi.IsDir() {
			err = os.Mkdir(fn, fi.Mode())
			if err != nil && !os.IsExist(err) {
				err2 = fmt.Errorf("os.Mkdir(): %s", err)
				break
			}
			log.Tracef("created directory %s", fn)
			continue
		}

		f, err := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
		if err != nil {
			err2 = fmt.Errorf("os.OpenFile(): %s", err)
			break
		}
		_, err = io.Copy(f, zr)
		if err != nil {
			f.Close()
			err2 = fmt.Errorf("io.Copy(): %s", err)
			break
		}
		f.Close()

		log.Tracef("created file %s", fn)
		files = append(files, fi.Name())
	}

	if zr != nil {
		// zr stays nil if the archive contained no entries
		zr.Close()
	}
	return files, err2
}

// Unpack all files from .tar.gz file to the specified directory
// Existing files are overwritten
// Return the list of files (not directories) written
func targzFileUnpack(tarfile, outdir string) ([]string, error) {
	f, err := os.Open(tarfile)
	if err != nil {
		return nil, fmt.Errorf("os.Open(): %s", err)
	}
	defer f.Close()

	gzReader, err := gzip.NewReader(f)
	if err != nil {
		return nil, fmt.Errorf("gzip.NewReader(): %s", err)
	}

	var files []string
	var err2 error
	tarReader := tar.NewReader(gzReader)
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			err2 = nil
			break
		}
		if err != nil {
			err2 = fmt.Errorf("tarReader.Next(): %s", err)
			break
		}
		if len(header.Name) == 0 {
			continue
		}

		fn := filepath.Join(outdir, header.Name)

		if header.Typeflag == tar.TypeDir {
			err = os.Mkdir(fn, os.FileMode(header.Mode&0777))
			if err != nil && !os.IsExist(err) {
				err2 = fmt.Errorf("os.Mkdir(%s): %s", fn, err)
				break
			}
			log.Tracef("created directory %s", fn)
			continue
		} else if header.Typeflag != tar.TypeReg {
			log.Tracef("%s: unknown file type %d, skipping", header.Name, header.Typeflag)
			continue
		}

		f, err := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(header.Mode&0777))
		if err != nil {
			err2 = fmt.Errorf("os.OpenFile(%s): %s", fn, err)
			break
		}
		_, err = io.Copy(f, tarReader)
		if err != nil {
			f.Close()
			err2 = fmt.Errorf("io.Copy(): %s", err)
			break
		}
		f.Close()

		log.Tracef("created file %s", fn)
		files = append(files, header.Name)
	}

	gzReader.Close()
	return files, err2
}

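// copySupportingFiles copies every file from the list except the binary and
// AdGuardHome.yaml.  With useSrcNameOnly/useDstNameOnly the directory part of
// an entry is dropped on that side; e.g. for a hypothetical entry
// "AdGuardHome/README.md", srcdir=<updateDir>, dstdir=<workDir>,
// useDstNameOnly=true copies <updateDir>/AdGuardHome/README.md to
// <workDir>/README.md.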
func copySupportingFiles(files []string, srcdir, dstdir string, useSrcNameOnly, useDstNameOnly bool) error {
	for _, f := range files {
		_, name := filepath.Split(f)
		if name == "AdGuardHome" || name == "AdGuardHome.exe" || name == "AdGuardHome.yaml" {
			continue
		}

		src := filepath.Join(srcdir, f)
		if useSrcNameOnly {
			src = filepath.Join(srcdir, name)
		}

		dst := filepath.Join(dstdir, f)
		if useDstNameOnly {
			dst = filepath.Join(dstdir, name)
		}

		err := copyFile(src, dst)
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		log.Tracef("Copied: %s -> %s", src, dst)
	}
	return nil
}
// Download package file and save it to disk
func getPackageFile(u *updateInfo) error {
	resp, err := Context.client.Get(u.pkgURL)
	if err != nil {
		return fmt.Errorf("HTTP request failed: %s", err)
	}
	if resp != nil && resp.Body != nil {
		defer resp.Body.Close()
	}

	log.Tracef("Reading HTTP body")
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("ioutil.ReadAll() failed: %s", err)
	}

	log.Tracef("Saving package to file")
	err = ioutil.WriteFile(u.pkgName, body, 0644)
	if err != nil {
		return fmt.Errorf("ioutil.WriteFile() failed: %s", err)
	}
	return nil
}
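// The update itself proceeds in the order visible in the function body below:
// download the package into updateDir, unpack it, run the new binary with
// --check-config against a copy of the current configuration, back up the
// configuration and supporting files into backupDir, copy the new supporting
// files into the working directory, swap the executables, and finally remove
// the package and updateDir.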
// Perform an update procedure
func doUpdate(u *updateInfo) error {
	log.Info("Updating from %s to %s. URL:%s Package:%s",
		versionString, u.newVer, u.pkgURL, u.pkgName)

	_ = os.Mkdir(u.updateDir, 0755)

	var err error
	err = getPackageFile(u)
	if err != nil {
		return err
	}

	log.Tracef("Unpacking the package")
	_, file := filepath.Split(u.pkgName)
	var files []string
	if strings.HasSuffix(file, ".zip") {
		files, err = zipFileUnpack(u.pkgName, u.updateDir)
		if err != nil {
			return fmt.Errorf("zipFileUnpack() failed: %s", err)
		}
	} else if strings.HasSuffix(file, ".tar.gz") {
		files, err = targzFileUnpack(u.pkgName, u.updateDir)
		if err != nil {
			return fmt.Errorf("targzFileUnpack() failed: %s", err)
		}
	} else {
		return fmt.Errorf("unknown package extension")
	}

	log.Tracef("Checking configuration")
	err = copyFile(u.configName, u.updateConfigName)
	if err != nil {
		return fmt.Errorf("copyFile() failed: %s", err)
	}
	cmd := exec.Command(u.newBinName, "--check-config")
	err = cmd.Run()
	if err != nil || cmd.ProcessState.ExitCode() != 0 {
		return fmt.Errorf("exec.Command(): %s %d", err, cmd.ProcessState.ExitCode())
	}

	log.Tracef("Backing up the current configuration")
	_ = os.Mkdir(u.backupDir, 0755)
	err = copyFile(u.configName, filepath.Join(u.backupDir, "AdGuardHome.yaml"))
	if err != nil {
		return fmt.Errorf("copyFile() failed: %s", err)
	}

	// ./README.md -> backup/README.md
	err = copySupportingFiles(files, Context.workDir, u.backupDir, true, true)
	if err != nil {
		return fmt.Errorf("copySupportingFiles(%s, %s) failed: %s",
			Context.workDir, u.backupDir, err)
	}

	// update/[AdGuardHome/]README.md -> ./README.md
	err = copySupportingFiles(files, u.updateDir, Context.workDir, false, true)
	if err != nil {
		return fmt.Errorf("copySupportingFiles(%s, %s) failed: %s",
			u.updateDir, Context.workDir, err)
	}

	log.Tracef("Renaming: %s -> %s", u.curBinName, u.bkpBinName)
	err = os.Rename(u.curBinName, u.bkpBinName)
	if err != nil {
		return err
	}
	if runtime.GOOS == "windows" {
		// rename fails with a "File in use" error
		err = copyFile(u.newBinName, u.curBinName)
	} else {
		err = os.Rename(u.newBinName, u.curBinName)
	}
	if err != nil {
		return err
	}
	log.Tracef("Renamed: %s -> %s", u.newBinName, u.curBinName)

	_ = os.Remove(u.pkgName)
	_ = os.RemoveAll(u.updateDir)
	return nil
}
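// finishUpdate stops the program and hands control to the new binary: for a
// Windows service it schedules "net stop/start", for regular Windows it
// spawns the new executable as a child process, and on Unix it replaces the
// current process image via syscall.Exec().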
// Complete an update procedure
func finishUpdate(u *updateInfo) {
	log.Info("Stopping all tasks")
	cleanup()
	cleanupAlways()

	if runtime.GOOS == "windows" {
		if Context.runningAsService {
			// Note:
			// we can't restart the service via the "kardianos/service" package - it kills the process first
			// we can't start a new instance - Windows doesn't allow it
			cmd := exec.Command("cmd", "/c", "net stop AdGuardHome & net start AdGuardHome")
			err := cmd.Start()
			if err != nil {
				log.Fatalf("exec.Command() failed: %s", err)
			}
			os.Exit(0)
		}

		cmd := exec.Command(u.curBinName, os.Args[1:]...)
		log.Info("Restarting: %v", cmd.Args)
		cmd.Stdin = os.Stdin
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		err := cmd.Start()
		if err != nil {
			log.Fatalf("exec.Command() failed: %s", err)
		}
		os.Exit(0)
	} else {
		log.Info("Restarting: %v", os.Args)
		err := syscall.Exec(u.curBinName, os.Args, os.Environ())
		if err != nil {
			log.Fatalf("syscall.Exec() failed: %s", err)
		}
		// Unreachable code
	}
}
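// The handler below requires a previous successful version check (it reuses
// config.versionCheckJSON), performs the update synchronously, writes and
// flushes the OK response, and only then restarts the process in a separate
// goroutine via finishUpdate().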
// Perform an update procedure to the latest available version
func handleUpdate(w http.ResponseWriter, r *http.Request) {
	if len(config.versionCheckJSON) == 0 {
		httpError(w, http.StatusBadRequest, "/update request isn't allowed now")
		return
	}

	u, err := getUpdateInfo(config.versionCheckJSON)
	if err != nil {
		httpError(w, http.StatusInternalServerError, "%s", err)
		return
	}

	err = doUpdate(u)
	if err != nil {
		httpError(w, http.StatusInternalServerError, "%s", err)
		return
	}

	returnOK(w)
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}

	go finishUpdate(u)
}