add backup/restore REST API

Nicola Murino 2019-12-27 23:12:44 +01:00
parent f49c280a7f
commit ae094d3479
26 changed files with 673 additions and 29 deletions


@ -34,18 +34,22 @@ Regularly the test cases are manually executed and pass on Windows. Other UNIX v
## Requirements
- Go 1.12 or higher.
- Go 1.12 or higher as build only dependency.
- A suitable SQL server or key/value store to use as data provider: PostgreSQL 9.4+ or MySQL 5.6+ or SQLite 3.x or bbolt 1.3.x
## Installation
Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
Binary releases for Linux, macOS and Windows are available; please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.
Sample Dockerfiles for [Debian](https://www.debian.org "Debian") and [Alpine](https://alpinelinux.org "Alpine") are available inside the source tree [docker](./docker "docker") directory.
Alternately you can install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```
$ go get -u github.com/drakkan/sftpgo
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
Make sure [Git](https://git-scm.com/downloads) is installed on your machine and in your system's `PATH`.
SFTPGo depends on [go-sqlite3](https://github.com/mattn/go-sqlite3), which is a CGO package and so requires a `C` compiler at build time.
On Linux and macOS a compiler is either easy to install or already installed; on Windows you need to download [MinGW-w64](https://sourceforge.net/projects/mingw-w64/files/) and build SFTPGo from its command prompt.
@ -194,6 +198,7 @@ The `sftpgo` configuration file contains the following sections:
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "127.0.0.1"
- `templates_path`, string. Path to the HTML web templates. This can be an absolute path or a path relative to the config dir
- `static_files_path`, string. Path to the static files for the web interface. This can be an absolute path or a path relative to the config dir
- `backups_path`, string. Path to the backup directory. This can be an absolute path or a path relative to the config dir
Here is a full example showing the default config in JSON format:
@ -245,7 +250,8 @@ Here is a full example showing the default config in JSON format:
"bind_port": 8080,
"bind_address": "127.0.0.1",
"templates_path": "templates",
"static_files_path": "static"
"static_files_path": "static",
"backups_path": "backups"
}
}
```
@ -387,7 +393,7 @@ These properties are stored inside the data provider. If you want to use your ex
## REST API
SFTPGo exposes REST API to manage users and quota and to get real time reports for the active connections with possibility of forcibly closing a connection.
SFTPGo exposes a REST API to manage users and quota, to back up and restore users, and to get real-time reports of the active connections, with the ability to forcibly close a connection.
If quota tracking is enabled in the `sftpgo` configuration file, the used size and number of files are updated each time a file is added or removed. If files are added or removed without using SFTP, or if you change `track_quota` from `2` to `1`, you can rescan the user's home dir and update the used quota using the REST API.
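For reference, the new backup/restore endpoints can be driven from any HTTP client. The following minimal Go sketch (endpoint paths and query parameters are taken from this commit; the base URL and file names are assumptions) dumps all users and then restores them, rescanning quota only for users with quota restrictions:
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	base := "http://127.0.0.1:8080/api/v1" // assumed bind address and port

	// Dump all users to <backups_path>/backup.json on the server.
	resp, err := http.Get(base + "/dumpdata?" + url.Values{"output_file": {"backup.json"}}.Encode())
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(resp.StatusCode, string(body))

	// Restore the dump; scan_quota=2 rescans quota only for users with quota restrictions.
	q := url.Values{"input_file": {"/srv/sftpgo/backups/backup.json"}, "scan_quota": {"2"}}
	resp, err = http.Get(base + "/loaddata?" + q.Encode())
	if err != nil {
		panic(err)
	}
	body, _ = ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(resp.StatusCode, string(body))
}
```
Note that `output_file` is resolved against the configured `backups_path`, while `input_file` must be an absolute path on the server running SFTPGo.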


@ -87,6 +87,7 @@ func init() {
BindAddress: "127.0.0.1",
TemplatesPath: "templates",
StaticFilesPath: "static",
BackupsPath: "backups",
},
}


@ -304,6 +304,28 @@ func (p BoltProvider) deleteUser(user User) error {
})
}
func (p BoltProvider) dumpUsers() ([]User, error) {
users := []User{}
var err error
err = p.dbHandle.View(func(tx *bolt.Tx) error {
bucket, _, err := getBuckets(tx)
if err != nil {
return err
}
cursor := bucket.Cursor()
for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
var user User
err = json.Unmarshal(v, &user)
if err != nil {
return err
}
users = append(users, user)
}
return err
})
return users, err
}
func (p BoltProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
users := []User{}
var err error


@ -191,6 +191,7 @@ type Provider interface {
updateUser(user User) error
deleteUser(user User) error
getUsers(limit int, offset int, order string, username string) ([]User, error)
dumpUsers() ([]User, error)
getUserByID(ID int64) (User, error)
updateLastLogin(username string) error
checkAvailability() error
@ -311,6 +312,11 @@ func DeleteUser(p Provider, user User) error {
return err
}
// DumpUsers returns an array with all users including their hashed password
func DumpUsers(p Provider) ([]User, error) {
return p.dumpUsers()
}
// GetUsers returns an array of users respecting limit and offset and filtered by username exact match if not empty
func GetUsers(p Provider, limit int, offset int, order string, username string) ([]User, error) {
return p.getUsers(limit, offset, order, username)


@ -214,6 +214,21 @@ func (p MemoryProvider) deleteUser(user User) error {
return nil
}
func (p MemoryProvider) dumpUsers() ([]User, error) {
users := []User{}
var err error
p.dbHandle.lock.Lock()
defer p.dbHandle.lock.Unlock()
if p.dbHandle.isClosed {
return users, errMemoryProviderClosed
}
for _, username := range p.dbHandle.usernames {
user := p.dbHandle.users[username]
users = append(users, user)
}
return users, err
}
func (p MemoryProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
users := []User{}
var err error


@ -88,6 +88,10 @@ func (p MySQLProvider) deleteUser(user User) error {
return sqlCommonDeleteUser(user, p.dbHandle)
}
func (p MySQLProvider) dumpUsers() ([]User, error) {
return sqlCommonDumpUsers(p.dbHandle)
}
func (p MySQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
}


@ -87,6 +87,10 @@ func (p PGSQLProvider) deleteUser(user User) error {
return sqlCommonDeleteUser(user, p.dbHandle)
}
func (p PGSQLProvider) dumpUsers() ([]User, error) {
return sqlCommonDumpUsers(p.dbHandle)
}
func (p PGSQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
}


@ -200,6 +200,31 @@ func sqlCommonDeleteUser(user User, dbHandle *sql.DB) error {
return err
}
func sqlCommonDumpUsers(dbHandle *sql.DB) ([]User, error) {
users := []User{}
q := getDumpUsersQuery()
stmt, err := dbHandle.Prepare(q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
rows, err := stmt.Query()
if err == nil {
defer rows.Close()
for rows.Next() {
u, err := getUserFromDbRow(nil, rows)
if err == nil {
users = append(users, u)
} else {
break
}
}
}
return users, err
}
func sqlCommonGetUsers(limit int, offset int, order string, username string, dbHandle *sql.DB) ([]User, error) {
users := []User{}
q := getUsersQuery(order, username)


@ -94,6 +94,10 @@ func (p SQLiteProvider) deleteUser(user User) error {
return sqlCommonDeleteUser(user, p.dbHandle)
}
func (p SQLiteProvider) dumpUsers() ([]User, error) {
return sqlCommonDumpUsers(p.dbHandle)
}
func (p SQLiteProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
}


@ -36,6 +36,10 @@ func getUsersQuery(order string, username string) string {
order, sqlPlaceholders[0], sqlPlaceholders[1])
}
func getDumpUsersQuery() string {
return fmt.Sprintf(`SELECT %v FROM %v`, selectUserFields, config.UsersTable)
}
func getUpdateQuotaQuery(reset bool) string {
if reset {
return fmt.Sprintf(`UPDATE %v SET used_quota_size = %v,used_quota_files = %v,last_quota_update = %v


@ -10,7 +10,7 @@ RUN go build -i -ldflags "-s -w -X github.com/drakkan/sftpgo/utils.commit=`git d
FROM alpine:latest
RUN apk add --no-cache ca-certificates su-exec \
&& mkdir -p /data /etc/sftpgo /srv/sftpgo/config /srv/sftpgo/web
&& mkdir -p /data /etc/sftpgo /srv/sftpgo/config /srv/sftpgo/web /srv/sftpgo/backups
# git and rsync are optional, uncomment the next line to add support for them if needed
#RUN apk add --no-cache git rsync
@ -22,7 +22,7 @@ COPY --from=builder /go/src/github.com/drakkan/sftpgo/static /srv/sftpgo/web/sta
COPY docker-entrypoint.sh /bin/entrypoint.sh
RUN chmod +x /bin/entrypoint.sh
VOLUME [ "/data", "/srv/sftpgo/config" ]
VOLUME [ "/data", "/srv/sftpgo/config", "/srv/sftpgo/backups" ]
EXPOSE 2022 8080
ENTRYPOINT ["/bin/entrypoint.sh"]


@ -24,12 +24,14 @@ sudo docker run --name sftpgo \
-e SFTPGO_CONFIG_DIR=/srv/sftpgo/config \
-e SFTPGO_HTTPD__TEMPLATES_PATH=/srv/sftpgo/web/templates \
-e SFTPGO_HTTPD__STATIC_FILES_PATH=/srv/sftpgo/web/static \
-e SFTPGO_HTTPD__BACKUPS_PATH=/srv/sftpgo/backups \
-p 8080:8080 \
-p 2022:2022 \
-e PUID=1003 \
-e GUID=1003 \
-v /home/sftpuser/conf/:/srv/sftpgo/config \
-v /home/sftpuser/data:/data \
-v /home/sftpuser/backups:/srv/sftpgo/backups \
sftpgo
```
The `entrypoint.sh` script fixes the directory permissions and starts the process as the correct user


@ -16,6 +16,7 @@ FROM debian:latest
ARG BASE_DIR=/app
ARG DATA_REL_DIR=data
ARG CONFIG_REL_DIR=config
ARG BACKUP_REL_DIR=backups
ARG USERNAME=sftpgo
ARG GROUPNAME=sftpgo
ARG UID=515
@ -28,9 +29,11 @@ ENV HOME_DIR=${BASE_DIR}/${USERNAME}
ENV DATA_DIR=${BASE_DIR}/${DATA_REL_DIR}
# CONFIG_DIR, this is a volume to persist the daemon private keys, configuration file etc.
ENV CONFIG_DIR=${BASE_DIR}/${CONFIG_REL_DIR}
# BACKUPS_DIR, this is a volume to store backups created using the "dumpdata" REST API
ENV BACKUPS_DIR=${BASE_DIR}/${BACKUP_REL_DIR}
ENV WEB_DIR=${BASE_DIR}/${WEB_REL_PATH}
RUN mkdir -p ${DATA_DIR} ${CONFIG_DIR} ${WEB_DIR}
RUN mkdir -p ${DATA_DIR} ${CONFIG_DIR} ${WEB_DIR} ${BACKUPS_DIR}
RUN groupadd --system -g ${GID} ${GROUPNAME}
RUN useradd --system --create-home --no-log-init --home-dir ${HOME_DIR} --comment "SFTPGo user" --shell /bin/false --gid ${GID} --uid ${UID} ${USERNAME}
@ -43,7 +46,7 @@ COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo bin/sftpgo
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo.json .config/sftpgo/
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/templates ${WEB_DIR}/templates
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/static ${WEB_DIR}/static
RUN chown -R ${UID}:${GID} ${DATA_DIR}
RUN chown -R ${UID}:${GID} ${DATA_DIR} ${BACKUPS_DIR}
# run as non root user
USER ${USERNAME}
@ -51,7 +54,7 @@ USER ${USERNAME}
EXPOSE 2022 8080
# the defined volumes must have write access for the UID and GID defined above
VOLUME [ "$DATA_DIR", "$CONFIG_DIR" ]
VOLUME [ "$DATA_DIR", "$CONFIG_DIR", "$BACKUPS_DIR" ]
# override some default configuration options using env vars
ENV SFTPGO_CONFIG_DIR=${CONFIG_DIR}
@ -61,6 +64,7 @@ ENV SFTPGO_HTTPD__BIND_ADDRESS=""
ENV SFTPGO_HTTPD__TEMPLATES_PATH=${WEB_DIR}/templates
ENV SFTPGO_HTTPD__STATIC_FILES_PATH=${WEB_DIR}/static
ENV SFTPGO_DATA_PROVIDER__USERS_BASE_DIR=${DATA_DIR}
ENV SFTPGO_HTTPD__BACKUPS_PATH=${BACKUPS_DIR}
ENTRYPOINT ["sftpgo"]
CMD ["serve"]


@ -11,10 +11,10 @@ docker build -t="drakkan/sftpgo" .
and you can run the Dockerfile using something like this:
```bash
docker run --name sftpgo -p 8080:8080 -p 2022:2022 --mount type=bind,source=/srv/sftpgo/data,target=/app/data --mount type=bind,source=/srv/sftpgo/config,target=/app/config drakkan/sftpgo
docker run --name sftpgo -p 8080:8080 -p 2022:2022 --mount type=bind,source=/srv/sftpgo/data,target=/app/data --mount type=bind,source=/srv/sftpgo/config,target=/app/config --mount type=bind,source=/srv/sftpgo/backups,target=/app/backups drakkan/sftpgo
```
where `/srv/sftpgo/data` and `/srv/sftpgo/config` are two folders on the host system with write access for UID/GID defined inside the `Dockerfile`. You can choose to create a new user, on the host system, with a matching UID/GID pair or simply do something like:
where `/srv/sftpgo/data`, `/srv/sftpgo/config` and `/srv/sftpgo/backups` are folders on the host system with write access for the UID/GID defined inside the `Dockerfile`. You can create a new user on the host system with a matching UID/GID pair, or simply do something like:
```bash

httpd/api_maintenance.go (new file, 125 lines)

@ -0,0 +1,125 @@
package httpd
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
)
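// dumpData serves GET /api/v1/dumpdata: it asks the data provider for all users
// and writes them, JSON serialized as BackupData, to output_file inside the
// configured backups_path; output_file must be a relative path without "..".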
func dumpData(w http.ResponseWriter, r *http.Request) {
var outputFile string
if _, ok := r.URL.Query()["output_file"]; ok {
outputFile = strings.TrimSpace(r.URL.Query().Get("output_file"))
}
if len(outputFile) == 0 {
sendAPIResponse(w, r, errors.New("Invalid or missing output_file"), "", http.StatusBadRequest)
return
}
if filepath.IsAbs(outputFile) {
sendAPIResponse(w, r, fmt.Errorf("Invalid output_file %#v: it must be a relative path", outputFile), "", http.StatusBadRequest)
return
}
if strings.Contains(outputFile, "..") {
sendAPIResponse(w, r, fmt.Errorf("Invalid output_file %#v", outputFile), "", http.StatusBadRequest)
return
}
outputFile = filepath.Join(backupsPath, outputFile)
logger.Debug(logSender, "", "dumping data to: %#v", outputFile)
users, err := dataprovider.DumpUsers(dataProvider)
if err != nil {
logger.Warn(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
dump, err := json.Marshal(BackupData{
Users: users,
})
if err == nil {
os.MkdirAll(filepath.Dir(outputFile), 0777)
err = ioutil.WriteFile(outputFile, dump, 0666)
}
if err != nil {
logger.Warn(logSender, "", "dumping data error: %v, output file: %#v", err, outputFile)
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
logger.Debug(logSender, "", "dumping data completed, output file: %#v, error: %v", outputFile, err)
sendAPIResponse(w, r, err, "Data saved", http.StatusOK)
}
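// loadData serves GET /api/v1/loaddata: it reads a BackupData JSON dump from the
// absolute path given in input_file (at most 10 MB) and adds or updates the users
// it contains, optionally starting a quota scan depending on scan_quota.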
func loadData(w http.ResponseWriter, r *http.Request) {
var inputFile string
var err error
scanQuota := 0
if _, ok := r.URL.Query()["input_file"]; ok {
inputFile = strings.TrimSpace(r.URL.Query().Get("input_file"))
}
if _, ok := r.URL.Query()["scan_quota"]; ok {
scanQuota, err = strconv.Atoi(r.URL.Query().Get("scan_quota"))
if err != nil {
err = errors.New("Invalid scan_quota")
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
return
}
}
if !filepath.IsAbs(inputFile) {
sendAPIResponse(w, r, fmt.Errorf("Invalid input_file %#v: it must be an absolute path", inputFile), "", http.StatusBadRequest)
return
}
fi, err := os.Stat(inputFile)
if err != nil {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
if fi.Size() > maxRestoreSize {
sendAPIResponse(w, r, err, fmt.Sprintf("Unable to restore input file: %#v size too big: %v/%v", inputFile, fi.Size(),
maxRestoreSize), http.StatusBadRequest)
return
}
content, err := ioutil.ReadFile(inputFile)
if err != nil {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
var dump BackupData
err = json.Unmarshal(content, &dump)
if err != nil {
sendAPIResponse(w, r, err, fmt.Sprintf("Unable to parse input file: %#v", inputFile), http.StatusBadRequest)
return
}
for _, user := range dump.Users {
u, err := dataprovider.UserExists(dataProvider, user.Username)
if err == nil {
user.ID = u.ID
err = dataprovider.UpdateUser(dataProvider, user)
user.Password = "[redacted]"
logger.Debug(logSender, "", "restoring existing user: %+v, dump file: %#v, error: %v", user, inputFile, err)
} else {
err = dataprovider.AddUser(dataProvider, user)
user.Password = "[redacted]"
logger.Debug(logSender, "", "adding new user: %+v, dump file: %#v, error: %v", user, inputFile, err)
}
if err != nil {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
if scanQuota == 1 || (scanQuota == 2 && user.HasQuotaRestrictions()) {
logger.Debug(logSender, "", "starting quota scan for restored user: %#v", user.Username)
doQuotaScan(user)
}
}
logger.Debug(logSender, "", "backup restored, users: %v", len(dump.Users))
sendAPIResponse(w, r, err, "Data restored", http.StatusOK)
}


@ -26,8 +26,16 @@ func startQuotaScan(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, err, "", http.StatusNotFound)
return
}
if sftpd.AddQuotaScan(user.Username) {
if doQuotaScan(user) {
sendAPIResponse(w, r, err, "Scan started", http.StatusCreated)
} else {
sendAPIResponse(w, r, err, "Another scan is already in progress", http.StatusConflict)
}
}
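// doQuotaScan starts an asynchronous quota scan for the given user.
// It returns false if another scan for the same user is already in progress.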
func doQuotaScan(user dataprovider.User) bool {
result := sftpd.AddQuotaScan(user.Username)
if result {
go func() {
numFiles, size, _, err := utils.ScanDirContents(user.HomeDir)
if err != nil {
@ -38,7 +46,6 @@ func startQuotaScan(w http.ResponseWriter, r *http.Request) {
}
sftpd.RemoveQuotaScan(user.Username)
}()
} else {
sendAPIResponse(w, r, err, "Another scan is already in progress", http.StatusConflict)
}
return result
}


@ -8,6 +8,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@ -66,6 +67,9 @@ func getRespStatus(err error) int {
if _, ok := err.(*dataprovider.MethodDisabledError); ok {
return http.StatusForbidden
}
if os.IsNotExist(err) {
return http.StatusBadRequest
}
return http.StatusInternalServerError
}
@ -305,6 +309,62 @@ func GetProviderStatus(expectedStatusCode int) (map[string]interface{}, []byte,
return response, body, err
}
// Dumpdata requests a backup to outputFile.
// outputFile is relative to the configured backups_path
func Dumpdata(outputFile string, expectedStatusCode int) (map[string]interface{}, []byte, error) {
var response map[string]interface{}
var body []byte
url, err := url.Parse(buildURLRelativeToBase(dumpDataPath))
if err != nil {
return response, body, err
}
q := url.Query()
q.Add("output_file", outputFile)
url.RawQuery = q.Encode()
resp, err := getHTTPClient().Get(url.String())
if err != nil {
return response, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &response)
} else {
body, _ = getResponseBody(resp)
}
return response, body, err
}
// Loaddata restores a backup.
// New users are added, existing users are updated. Users are restored one by one and the restore stops if a
// user cannot be added/updated, so a partial restore may occur
func Loaddata(inputFile, scanQuota string, expectedStatusCode int) (map[string]interface{}, []byte, error) {
var response map[string]interface{}
var body []byte
url, err := url.Parse(buildURLRelativeToBase(loadDataPath))
if err != nil {
return response, body, err
}
q := url.Query()
q.Add("input_file", inputFile)
if len(scanQuota) > 0 {
q.Add("scan_quota", scanQuota)
}
url.RawQuery = q.Encode()
resp, err := getHTTPClient().Get(url.String())
if err != nil {
return response, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &response)
} else {
body, _ = getResponseBody(resp)
}
return response, body, err
}
func checkResponse(actual int, expected int) error {
if expected != actual {
return fmt.Errorf("wrong status code: got %v want %v", actual, expected)


@ -24,17 +24,21 @@ const (
userPath = "/api/v1/user"
versionPath = "/api/v1/version"
providerStatusPath = "/api/v1/providerstatus"
dumpDataPath = "/api/v1/dumpdata"
loadDataPath = "/api/v1/loaddata"
metricsPath = "/metrics"
webBasePath = "/web"
webUsersPath = "/web/users"
webUserPath = "/web/user"
webConnectionsPath = "/web/connections"
webStaticFilesPath = "/static"
maxRestoreSize = 10485760 // 10 MB
)
var (
router *chi.Mux
dataProvider dataprovider.Provider
backupsPath string
)
// Conf httpd daemon configuration
@ -47,6 +51,13 @@ type Conf struct {
TemplatesPath string `json:"templates_path" mapstructure:"templates_path"`
// Path to the static files for the web interface. This can be an absolute path or a path relative to the config dir
StaticFilesPath string `json:"static_files_path" mapstructure:"static_files_path"`
// Path to the backup directory. This can be an absolute path or a path relative to the config dir
BackupsPath string `json:"backups_path" mapstructure:"backups_path"`
}
// BackupData defines the structure for the backup/restore files
type BackupData struct {
Users []dataprovider.User `json:"users"`
}
type apiResponse struct {
@ -63,6 +74,10 @@ func SetDataProvider(provider dataprovider.Provider) {
// Initialize the HTTP server
func (c Conf) Initialize(configDir string) error {
logger.Debug(logSender, "", "initializing HTTP server with config %+v", c)
backupsPath = c.BackupsPath
if !filepath.IsAbs(backupsPath) {
backupsPath = filepath.Join(configDir, backupsPath)
}
staticFilesPath := c.StaticFilesPath
if !filepath.IsAbs(staticFilesPath) {
staticFilesPath = filepath.Join(configDir, staticFilesPath)


@ -2,8 +2,10 @@ package httpd_test
import (
"bytes"
"crypto/rand"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
@ -40,6 +42,8 @@ const (
quotaScanPath = "/api/v1/quota_scan"
versionPath = "/api/v1/version"
providerStatusPath = "/api/v1/providerstatus"
dumpDataPath = "/api/v1/dumpdata"
loadDataPath = "/api/v1/loaddata"
metricsPath = "/metrics"
webBasePath = "/web"
webUsersPath = "/web/users"
@ -51,16 +55,13 @@ const (
var (
defaultPerms = []string{dataprovider.PermAny}
homeBasePath string
backupsPath string
testServer *httptest.Server
providerDriverName string
)
func TestMain(m *testing.M) {
if runtime.GOOS == "windows" {
homeBasePath = "C:\\"
} else {
homeBasePath = "/tmp"
}
homeBasePath = os.TempDir()
logfilePath := filepath.Join(configDir, "sftpgo_api_test.log")
logger.InitLogger(logfilePath, 5, 1, 28, false, zerolog.DebugLevel)
config.LoadConfig(configDir, "")
@ -77,6 +78,11 @@ func TestMain(m *testing.M) {
httpdConf.BindPort = 8081
httpd.SetBaseURL("http://127.0.0.1:8081")
httpdConf.BackupsPath = "test_backups"
currentPath, _ := os.Getwd()
backupsPath = filepath.Join(currentPath, "..", httpdConf.BackupsPath)
logger.DebugToConsole("aaa: %v", backupsPath)
os.MkdirAll(backupsPath, 0777)
sftpd.SetDataProvider(dataProvider)
httpd.SetDataProvider(dataProvider)
@ -96,6 +102,7 @@ func TestMain(m *testing.M) {
exitCode := m.Run()
os.Remove(logfilePath)
os.RemoveAll(backupsPath)
os.Exit(exitCode)
}
@ -541,6 +548,22 @@ func TestProviderErrors(t *testing.T) {
if err != nil {
t.Errorf("get provider status with provider closed must fail: %v", err)
}
_, _, err = httpd.Dumpdata("backup.json", http.StatusInternalServerError)
if err != nil {
t.Errorf("get provider status with provider closed must fail: %v", err)
}
user := getTestUser()
user.ID = 1
backupData := httpd.BackupData{}
backupData.Users = append(backupData.Users, user)
backupContent, _ := json.Marshal(backupData)
backupFilePath := filepath.Join(backupsPath, "backup.json")
ioutil.WriteFile(backupFilePath, backupContent, 0666)
_, _, err = httpd.Loaddata(backupFilePath, "", http.StatusInternalServerError)
if err != nil {
t.Errorf("get provider status with provider closed must fail: %v", err)
}
os.Remove(backupFilePath)
config.LoadConfig(configDir, "")
providerConf := config.GetProviderConf()
err = dataprovider.Initialize(providerConf, configDir)
@ -551,6 +574,100 @@ func TestProviderErrors(t *testing.T) {
sftpd.SetDataProvider(dataprovider.GetProvider())
}
func TestDumpdata(t *testing.T) {
_, _, err := httpd.Dumpdata("", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, _, err = httpd.Dumpdata(filepath.Join(backupsPath, "backup.json"), http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, _, err = httpd.Dumpdata("../backup.json", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, _, err = httpd.Dumpdata("backup.json", http.StatusOK)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
os.Remove(filepath.Join(backupsPath, "backup.json"))
if runtime.GOOS != "windows" {
os.Chmod(backupsPath, 0001)
_, _, err = httpd.Dumpdata("bck.json", http.StatusInternalServerError)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
os.Chmod(backupsPath, 0755)
}
}
func TestLoaddata(t *testing.T) {
user := getTestUser()
user.ID = 1
user.Username = "test_user_restore"
backupData := httpd.BackupData{}
backupData.Users = append(backupData.Users, user)
backupContent, _ := json.Marshal(backupData)
backupFilePath := filepath.Join(backupsPath, "backup.json")
ioutil.WriteFile(backupFilePath, backupContent, 0666)
_, _, err := httpd.Loaddata(backupFilePath, "a", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, _, err = httpd.Loaddata("backup.json", "1", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, _, err = httpd.Loaddata(backupFilePath+"a", "1", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if runtime.GOOS != "windows" {
os.Chmod(backupFilePath, 0111)
_, _, err = httpd.Loaddata(backupFilePath, "1", http.StatusInternalServerError)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
os.Chmod(backupFilePath, 0644)
}
// add user from backup
_, _, err = httpd.Loaddata(backupFilePath, "1", http.StatusOK)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// update user from backup
_, _, err = httpd.Loaddata(backupFilePath, "2", http.StatusOK)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
users, _, err := httpd.GetUsers(1, 0, user.Username, http.StatusOK)
if err != nil {
t.Errorf("unable to get users: %v", err)
}
if len(users) != 1 {
t.Error("Unable to get restored user")
}
user = users[0]
_, err = httpd.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove user: %v", err)
}
os.Remove(backupFilePath)
createTestFile(backupFilePath, 10485761)
_, _, err = httpd.Loaddata(backupFilePath, "1", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
os.Remove(backupFilePath)
createTestFile(backupFilePath, 65535)
_, _, err = httpd.Loaddata(backupFilePath, "1", http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
os.Remove(backupFilePath)
}
// test using mock http server
func TestBasicUserHandlingMock(t *testing.T) {
@ -1293,3 +1410,16 @@ func checkResponseCode(t *testing.T, expected, actual int) {
t.Errorf("Expected response code %d. Got %d", expected, actual)
}
}
func createTestFile(path string, size int64) error {
baseDir := filepath.Dir(path)
if _, err := os.Stat(baseDir); os.IsNotExist(err) {
os.MkdirAll(baseDir, 0777)
}
content := make([]byte, size)
_, err := rand.Read(content)
if err != nil {
return err
}
return ioutil.WriteFile(path, content, 0666)
}


@ -176,19 +176,27 @@ func TestApiCallsWithBadURL(t *testing.T) {
u := dataprovider.User{}
_, _, err := UpdateUser(u, http.StatusBadRequest)
if err == nil {
t.Errorf("request with invalid URL must fail")
t.Error("request with invalid URL must fail")
}
_, err = RemoveUser(u, http.StatusNotFound)
if err == nil {
t.Errorf("request with invalid URL must fail")
t.Error("request with invalid URL must fail")
}
_, _, err = GetUsers(1, 0, "", http.StatusBadRequest)
if err == nil {
t.Errorf("request with invalid URL must fail")
t.Error("request with invalid URL must fail")
}
_, err = CloseConnection("non_existent_id", http.StatusNotFound)
if err == nil {
t.Errorf("request with invalid URL must fail")
t.Error("request with invalid URL must fail")
}
_, _, err = Dumpdata("backup.json", http.StatusBadRequest)
if err == nil {
t.Error("request with invalid URL must fail")
}
_, _, err = Loaddata("/tmp/backup.json", "", http.StatusBadRequest)
if err == nil {
t.Error("request with invalid URL must fail")
}
SetBaseURL(oldBaseURL)
}
@ -241,6 +249,14 @@ func TestApiCallToNotListeningServer(t *testing.T) {
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = Dumpdata("backup.json", http.StatusOK)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = Loaddata("/tmp/backup.json", "", http.StatusOK)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
SetBaseURL(oldBaseURL)
}


@ -92,6 +92,14 @@ func initializeRouter(staticFilesPath string) {
deleteUser(w, r)
})
router.Get(dumpDataPath, func(w http.ResponseWriter, r *http.Request) {
dumpData(w, r)
})
router.Get(loadDataPath, func(w http.ResponseWriter, r *http.Request) {
loadData(w, r)
})
router.Get(webUsersPath, func(w http.ResponseWriter, r *http.Request) {
handleGetWebUsers(w, r)
})


@ -2,7 +2,7 @@ openapi: 3.0.1
info:
title: SFTPGo
description: 'SFTPGo REST API'
version: 1.3.0
version: 1.4.0
servers:
- url: /api/v1
@ -529,6 +529,130 @@ paths:
status: 500
message: ""
error: "Error description if any"
/dumpdata:
get:
tags:
- maintenance
summary: Backup SFTPGo data serializing them as JSON
description: The backup is saved to a local file to avoid exposing users' hashed passwords over the network. The output of dumpdata can be used as input for loaddata
operationId: dumpdata
parameters:
- in: query
name: output_file
schema:
type: string
required: true
description: Path for the file to write the JSON serialized data to. This path is relative to the configured "backups_path". If this file already exists it will be overwritten
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref : '#/components/schemas/ApiResponse'
example:
status: 200
message: "Data saved"
error: ""
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
/loaddata:
get:
tags:
- maintenance
summary: Restore SFTPGo data from a JSON backup
description: New users are added, existing users are updated. Users are restored one by one and the restore stops if a user cannot be added/updated, so a partial restore may occur
operationId: loaddata
parameters:
- in: query
name: input_file
schema:
type: string
required: true
description: Path for the file to read the JSON serialized data from. This must be an absolute path on the server running SFTPGo. The max allowed file size is 10MB
- in: query
name: scan_quota
schema:
type: integer
enum:
- 0
- 1
- 2
description: >
Quota scan:
* `0` no quota scan is done, the imported user will have used_quota_size and used_quota_files = 0
* `1` scan quota
* `2` scan quota if the user has quota restrictions
required: false
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref : '#/components/schemas/ApiResponse'
example:
status: 200
message: "Data restored"
error: ""
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
components:
schemas:
Permission:


@ -319,6 +319,42 @@ Output:
}
```
### Backup data
Command:
```
python sftpgo_api_cli.py dumpdata backup.json
```
Output:
```json
{
"error": "",
"message": "Data saved",
"status": 200
}
```
### Restore data
Command:
```
python sftpgo_api_cli.py loaddata /app/data/backups/backup.json --scan-quota 2
```
Output:
```json
{
"error": "",
"message": "Data restored",
"status": 200
}
```
### Colors highlight for Windows command prompt
If your Windows command prompt does not recognize ANSI/VT100 escape sequences you can download [ANSICON](https://github.com/adoxa/ansicon "ANSICON"), extract the proper files for your Windows OS, and install them using `ansicon -i`.


@ -2,6 +2,7 @@
import argparse
from datetime import datetime
import json
import platform
import requests
@ -26,6 +27,8 @@ class SFTPGoApiRequests:
self.activeConnectionsPath = urlparse.urljoin(baseUrl, '/api/v1/connection')
self.versionPath = urlparse.urljoin(baseUrl, '/api/v1/version')
self.providerStatusPath = urlparse.urljoin(baseUrl, '/api/v1/providerstatus')
self.dumpDataPath = urlparse.urljoin(baseUrl, '/api/v1/dumpdata')
self.loadDataPath = urlparse.urljoin(baseUrl, '/api/v1/loaddata')
self.debug = debug
if authType == 'basic':
self.auth = requests.auth.HTTPBasicAuth(authUser, authPassword)
@ -149,6 +152,16 @@ class SFTPGoApiRequests:
r = requests.get(self.providerStatusPath, auth=self.auth, verify=self.verify)
self.printResponse(r)
def dumpData(self, output_file):
r = requests.get(self.dumpDataPath, params={"output_file":output_file}, auth=self.auth,
verify=self.verify)
self.printResponse(r)
def loadData(self, input_file, scan_quota):
r = requests.get(self.loadDataPath, params={"input_file":input_file, "scan_quota":scan_quota},
auth=self.auth, verify=self.verify)
self.printResponse(r)
def validDate(s):
if not s:
@ -210,7 +223,7 @@ if __name__ == '__main__':
parser.set_defaults(secure=True)
parser.add_argument('-t', '--no-color', dest='no_color', action='store_true',
help='Disable color highlight for JSON responses. You need python pygments module 1.5 or above to have highlighted output')
parser.set_defaults(no_color=(pygments is None))
parser.set_defaults(no_color=(pygments is None or platform.system() == "Windows"))
subparsers = parser.add_subparsers(dest='command', help='sub-command --help')
subparsers.required = True
@ -251,6 +264,15 @@ if __name__ == '__main__':
parserGetProviderStatus = subparsers.add_parser('get-provider-status', help='Get data provider status')
parserDumpData = subparsers.add_parser('dumpdata', help='Backup SFTPGo data serializing them as JSON')
parserDumpData.add_argument('output_file', type=str)
parserLoadData = subparsers.add_parser('loaddata', help='Restore SFTPGo data from a JSON backup')
parserLoadData.add_argument('input_file', type=str)
parserLoadData.add_argument('-q', '--scan-quota', type=int, choices=[0, 1, 2], default=0,
help='0 means no quota scan after a user is added/updated. 1 means always scan quota. 2 ' +
'means scan quota if the user has quota restrictions. Default: %(default)s')
args = parser.parse_args()
api = SFTPGoApiRequests(args.debug, args.base_url, args.auth_type, args.auth_user, args.auth_password, args.secure,
@ -283,4 +305,8 @@ if __name__ == '__main__':
api.getVersion()
elif args.command == 'get-provider-status':
api.getProviderStatus()
elif args.command == "dumpdata":
api.dumpData(args.output_file)
elif args.command == "loaddata":
api.loadData(args.input_file, args.scan_quota)


@ -126,12 +126,11 @@ func TestMain(m *testing.M) {
// simply does not execute some code so if it works in atomic mode will
// work in non atomic mode too
sftpdConf.UploadMode = 2
homeBasePath = os.TempDir()
var scriptArgs string
if runtime.GOOS == "windows" {
homeBasePath = "C:\\"
scriptArgs = "%*"
} else {
homeBasePath = "/tmp"
sftpdConf.Actions.ExecuteOn = []string{"download", "upload", "rename", "delete", "ssh_cmd"}
sftpdConf.Actions.Command = "/usr/bin/true"
sftpdConf.Actions.HTTPNotificationURL = "http://127.0.0.1:8080/"


@ -45,6 +45,7 @@
"bind_port": 8080,
"bind_address": "127.0.0.1",
"templates_path": "templates",
"static_files_path": "static"
"static_files_path": "static",
"backups_path": "backups"
}
}