quota: improve size check

get the remaining allowed size when an upload starts and check it against the
uploaded bytes

Fixes #128
Nicola Murino 2020-06-18 22:38:03 +02:00
parent 3ceba7a147
commit e86089a9f3
11 changed files with 298 additions and 48 deletions
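Before the per-file changes, here is a minimal, self-contained sketch of the idea behind this commit: the remaining allowed size is captured once when an upload starts and every received chunk is checked against it. The type and names below are illustrative, not the actual SFTPGo ones.

```go
package main

import (
	"errors"
	"fmt"
)

var errQuotaExceeded = errors.New("denying write due to space limit")

// uploadTransfer mirrors the idea of the Transfer struct: maxWriteSize is the
// remaining allowed size captured when the upload starts (0 means unlimited).
type uploadTransfer struct {
	bytesReceived int64
	maxWriteSize  int64
}

// write accounts the received bytes and fails once the captured limit is exceeded.
func (t *uploadTransfer) write(p []byte) (int, error) {
	t.bytesReceived += int64(len(p))
	if t.maxWriteSize > 0 && t.bytesReceived > t.maxWriteSize {
		return len(p), errQuotaExceeded
	}
	return len(p), nil
}

func main() {
	// assume the quota check at open time left 10 bytes of allowed space
	tr := &uploadTransfer{maxWriteSize: 10}
	_, err := tr.write(make([]byte, 8)) // still within the limit
	fmt.Println(err)                    // <nil>
	_, err = tr.write(make([]byte, 8)) // 16 > 10: the upload is aborted
	fmt.Println(err)                    // denying write due to space limit
}
```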

View file

@ -20,7 +20,7 @@ For each account, the following properties can be configured:
- `upload` upload files is allowed
- `overwrite` overwrite an existing file, while uploading, is allowed. `upload` permission is required to allow file overwrite
- `delete` delete files or directories is allowed
- `rename` rename files or directories is allowed if this permission is granted on target path. You can enable rename in a more controlled way granting `delete` permission on source directory and `upload` permission on target directory
- `rename` rename files or directories is allowed if this permission is granted on the target path. You can enable rename in a more controlled way by granting `delete` permission on the source directory and `upload`/`create_dirs` permissions on the target directory (see the sketch after this list). Please be aware that no per-subdirectory permission is checked when renaming directories
- `create_dirs` create directories is allowed
- `create_symlinks` create symbolic links is allowed
- `chmod` changing file or directory permissions is allowed. On Windows, only the 0200 bit (owner writable) of mode is used; it controls whether the file's read-only attribute is set or cleared. The other bits are currently unused. Use mode 0400 for a read-only file and 0600 for a readable+writable file.
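For a concrete picture of the controlled-rename setup mentioned in the `rename` item, a user's per-path permissions could be laid out as in the sketch below. The map shape mirrors the `Permissions` field used in the tests later in this diff; the paths are purely illustrative.

```go
package main

import "fmt"

func main() {
	// hypothetical per-user permission map: keys are virtual paths,
	// values are the permission names documented above
	permissions := map[string][]string{
		"/": {"list"},
		// files/dirs can be renamed out of /outbox because delete is granted here
		"/outbox": {"list", "delete"},
		// ...and renamed into /archive because upload and create_dirs
		// (the latter needed when the source is a directory) are granted here
		"/archive": {"list", "upload", "create_dirs"},
	}
	fmt.Println(permissions)
}
```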

View file

@ -3,7 +3,7 @@
The `actions` struct inside the "sftpd" configuration section allows you to configure the actions for file operations and SSH commands.
The `hook` can be defined as the absolute path of your program or an HTTP URL.
The `upload` condition includes both uploads to new files and overwrite of existing files. The `ssh_cmd` condition will be triggered after a command is successfully executed via SSH. `scp` will trigger the `download` and `upload` conditions and not `ssh_cmd`.
The `upload` condition includes both uploads to new files and overwrites of existing files. If an upload is aborted due to quota limits, SFTPGo tries to remove the partial file, so if the notification reports a zero size file and a quota exceeded error, the partial file has been deleted. The `ssh_cmd` condition will be triggered after a command is successfully executed via SSH. `scp` will trigger the `download` and `upload` conditions and not `ssh_cmd`.
The notification will indicate if an error is detected, for example when only a partial file was uploaded.
The `pre-delete` action, if defined, will be called just before file deletion. If the external command completes with a zero exit status or the HTTP notification response code is `200` then SFTPGo will assume that the file was already deleted/moved, so it will not try to remove the file and it will not execute the hook defined for the `delete` action.
@ -26,7 +26,7 @@ The external program can also read the following environment variables:
- `SFTPGO_ACTION_FS_PROVIDER`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend
- `SFTPGO_ACTION_BUCKET`, non-empty for S3 and GCS backends
- `SFTPGO_ACTION_ENDPOINT`, non-empty for S3 backend if configured
- `SFTPGO_ACTION_STATUS`, integer. 0 means an error occurred. 1 means no error
- `SFTPGO_ACTION_STATUS`, integer. 0 means a generic error occurred, 1 means no error, 2 means a quota exceeded error (see the sketch below)
Previous global environment variables aren't cleared when the script is called.
The program must finish within 30 seconds.
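For illustration, a hook program consuming these variables might look like the sketch below. It only relies on `SFTPGO_ACTION_STATUS`, the variable whose meaning this commit extends; the other `SFTPGO_ACTION_*` variables can be read the same way.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// only SFTPGO_ACTION_STATUS is inspected here; the remaining
	// SFTPGO_ACTION_* variables listed above are available as well
	switch os.Getenv("SFTPGO_ACTION_STATUS") {
	case "1":
		fmt.Println("operation completed with no error")
	case "2":
		// new with this commit: the upload was aborted for quota limits
		// and SFTPGo tried to remove the partial file
		fmt.Println("operation aborted: quota exceeded")
	default:
		fmt.Println("operation failed with a generic error")
	}
}
```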
@ -42,7 +42,7 @@ If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. Th
- `fs_provider`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend
- `bucket`, not null for S3 and GCS backends
- `endpoint`, not null for S3 backend if configured
- `status`, integer. 0 means an error occurred. 1 means no error
- `status`, integer. 0 means a generic error occurred, 1 means no error, 2 means a quota exceeded error (see the sketch below)
The HTTP request will use the global configuration for HTTP clients.
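Similarly, an HTTP hook endpoint can branch on the new `status` value. The handler below is a minimal sketch that models only the JSON fields listed above (the real notification payload has more fields than are visible in this hunk), and the listen address is a placeholder.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// hookPayload models only the fields shown above; the real notification
// carries additional fields not visible in this hunk.
type hookPayload struct {
	FsProvider int    `json:"fs_provider"`
	Bucket     string `json:"bucket"`
	Endpoint   string `json:"endpoint"`
	Status     int    `json:"status"`
}

func main() {
	http.HandleFunc("/sftpgo/hook", func(w http.ResponseWriter, r *http.Request) {
		var p hookPayload
		if err := json.NewDecoder(r.Body).Decode(&p); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		if p.Status == 2 {
			// quota exceeded: for uploads the partial file has been removed
			log.Printf("quota exceeded notification, provider %v", p.FsProvider)
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```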

View file

@ -21,7 +21,7 @@ SFTPGo supports the following built-in SSH commands:
- `scp`, SFTPGo implements the SCP protocol so we can support it for cloud filesystems too and we can avoid the limitations of the other system commands. SCP between two remote hosts is supported using the `-3` scp option.
- `md5sum`, `sha1sum`, `sha256sum`, `sha384sum`, `sha512sum`. Useful to check message digests for uploaded files.
- `cd`, `pwd`. Some SFTP clients do not support the SFTP SSH_FXP_REALPATH packet type, so they use `cd` and `pwd` SSH commands to get the initial directory. Currently `cd` does nothing and `pwd` always returns the `/` path.
- `sftpgo-copy`. This is a built-in copy implementation. It allows server side copy for files and directories. The first argument is the source file/directory and the second one is the destination file/directory, for example `sftpgo-copy <src> <dst>`. The command will fail if the destination directory exists. Copy for directories spanning virtual folders is not supported. Only local filesystem is supported: recursive copy for Cloud Storage filesystems requires a new request for every file in any case, so a server side copy is not possible.
- `sftpgo-copy`. This is a built-in copy implementation. It allows server side copy for files and directories. The first argument is the source file/directory and the second one is the destination file/directory, for example `sftpgo-copy <src> <dst>`. The command will fail if the destination directory exists. Copy for directories spanning virtual folders is not supported. Only local filesystem is supported: recursive copy for Cloud Storage filesystems requires a new request for every file in any case, so a server side copy is not possible. Please be aware that only the `list` permission for the source path and the `upload` and `create_dirs` (for directories) permissions for the destination path are checked. See the usage sketch after this list.
- `sftpgo-remove`. This is a built-in remove implementation. It allows removing single files and recursively removing directories. The first argument is the file/directory to remove, for example `sftpgo-remove <dst>`. Only local filesystem is supported: recursive remove for Cloud Storage filesystems requires a new request for every file in any case, so a server side remove is not possible.
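As a usage illustration for the built-in commands above (referenced from the `sftpgo-copy` item), the sketch below runs `sftpgo-copy` over SSH with golang.org/x/crypto/ssh; the address, credentials and paths are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// placeholder credentials and address; SFTPGo listens on port 2022 by default
	config := &ssh.ClientConfig{
		User:            "username",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
	}
	client, err := ssh.Dial("tcp", "127.0.0.1:2022", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// server side copy: fails if "/backup" already exists
	out, err := session.CombinedOutput("sftpgo-copy /data /backup")
	fmt.Printf("output: %q, err: %v\n", out, err)
}
```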
The following SSH commands are enabled by default:

View file

@ -531,6 +531,7 @@ func (c Connection) handleSFTPUploadToNewFile(resolvedPath, filePath, requestPat
isFinished: false,
minWriteOffset: 0,
requestPath: requestPath,
maxWriteSize: quotaResult.GetRemainingSize(),
lock: new(sync.Mutex),
}
addTransfer(&transfer)
@ -570,6 +571,9 @@ func (c Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, re
}
initialSize := int64(0)
// if there is a size limit the remaining size cannot be 0 here, since quotaResult.HasSpace
// would have returned false in that case and the upload would have been denied earlier
maxWriteSize := quotaResult.GetRemainingSize()
if pflags.Append && osFlags&os.O_TRUNC == 0 {
c.Log(logger.LevelDebug, logSender, "upload resume requested, file path: %#v initial size: %v", filePath, fileSize)
minWriteOffset = fileSize
@ -587,6 +591,9 @@ func (c Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, re
} else {
initialSize = fileSize
}
if maxWriteSize > 0 {
maxWriteSize += fileSize
}
}
vfs.SetPathPermissions(c.fs, filePath, c.User.GetUID(), c.User.GetGID())
@ -611,12 +618,69 @@ func (c Connection) handleSFTPUploadToExistingFile(pflags sftp.FileOpenFlags, re
minWriteOffset: minWriteOffset,
initialSize: initialSize,
requestPath: requestPath,
maxWriteSize: maxWriteSize,
lock: new(sync.Mutex),
}
addTransfer(&transfer)
return &transfer, nil
}
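The two upload hunks above differ in how the write limit is derived: for a brand new file the limit is simply the remaining quota size, while when an existing file is truncated and rewritten the size of the replaced file is added back, since those bytes are already counted in the used quota. A standalone sketch of that arithmetic, with illustrative names:

```go
package main

import "fmt"

// maxWriteSizeFor mirrors the logic above: remaining is what
// quotaResult.GetRemainingSize() returns (0 means no size limit),
// existingSize is the size of the file being overwritten (0 for a new file).
func maxWriteSizeFor(remaining, existingSize int64) int64 {
	maxWriteSize := remaining
	if maxWriteSize > 0 {
		// the overwritten bytes are already part of the used quota,
		// so the upload may rewrite them in addition to the remaining space
		maxWriteSize += existingSize
	}
	return maxWriteSize
}

func main() {
	fmt.Println(maxWriteSizeFor(0, 4096))    // 0: no size limit configured
	fmt.Println(maxWriteSizeFor(1024, 0))    // 1024: new file, plain remaining size
	fmt.Println(maxWriteSizeFor(1024, 4096)) // 5120: overwrite of a 4096 byte file
}
```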
// hasSpaceForCrossRename checks the quota after a rename between different folders
func (c Connection) hasSpaceForCrossRename(quotaResult vfs.QuotaCheckResult, initialSize int64, sourcePath string) bool {
if !quotaResult.HasSpace && initialSize == -1 {
// we are over quota and this is not a file replace
return false
}
fi, err := c.fs.Lstat(sourcePath)
if err != nil {
c.Log(logger.LevelWarn, logSender, "cross rename denied, stat error for path %#v: %v", sourcePath, err)
return false
}
var sizeDiff int64
var filesDiff int
if fi.Mode().IsRegular() {
sizeDiff = fi.Size()
filesDiff = 1
if initialSize != -1 {
sizeDiff -= initialSize
filesDiff = 0
}
} else if fi.IsDir() {
filesDiff, sizeDiff, err = c.fs.GetDirSize(sourcePath)
if err != nil {
c.Log(logger.LevelWarn, logSender, "cross rename denied, error getting size for directory %#v: %v", sourcePath, err)
return false
}
}
if !quotaResult.HasSpace && initialSize != -1 {
// we are over quota, but we are overwriting an existing file, so we check whether the quota size after the rename is within the limit
if quotaResult.QuotaSize == 0 {
return true
}
c.Log(logger.LevelDebug, logSender, "cross rename overwrite, source %#v, used size %v, size to add %v",
sourcePath, quotaResult.UsedSize, sizeDiff)
quotaResult.UsedSize += sizeDiff
return quotaResult.GetRemainingSize() >= 0
}
if quotaResult.QuotaFiles > 0 {
remainingFiles := quotaResult.GetRemainingFiles()
c.Log(logger.LevelDebug, logSender, "cross rename, source %#v remaining file %v to add %v", sourcePath,
remainingFiles, filesDiff)
if remainingFiles < filesDiff {
return false
}
}
if quotaResult.QuotaSize > 0 {
remainingSize := quotaResult.GetRemainingSize()
c.Log(logger.LevelDebug, logSender, "cross rename, source %#v remaining size %v to add %v", sourcePath,
remainingSize, sizeDiff)
if remainingSize < sizeDiff {
return false
}
}
return true
}
func (c Connection) hasSpaceForRename(request *sftp.Request, initialSize int64, sourcePath string) bool {
if dataprovider.GetQuotaTracking() == 0 {
return true
@ -639,24 +703,7 @@ func (c Connection) hasSpaceForRename(request *sftp.Request, initialSize int64,
return true
}
quotaResult := c.hasSpace(true, request.Target)
if !quotaResult.HasSpace {
if initialSize != -1 {
// we are overquota but we are overwriting a file so we check the quota size
quotaResult = c.hasSpace(false, request.Target)
if quotaResult.HasSpace {
// we have enough quota size
return true
}
if fi, err := c.fs.Lstat(sourcePath); err == nil {
if fi.Mode().IsRegular() {
// we have space if we are overwriting a bigger file with a smaller one
return initialSize >= fi.Size()
}
}
}
return false
}
return true
return c.hasSpaceForCrossRename(quotaResult, initialSize, sourcePath)
}
func (c Connection) hasSpace(checkFiles bool, requestPath string) vfs.QuotaCheckResult {
@ -772,6 +819,12 @@ func (c Connection) isRenamePermitted(sourcePath string, request *sftp.Request)
!c.User.HasPerm(dataprovider.PermUpload, path.Dir(request.Target))) {
return false
}
if !c.User.HasPerm(dataprovider.PermRename, path.Dir(request.Target)) &&
!c.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(request.Target)) {
if fi, err := c.fs.Lstat(sourcePath); err == nil && fi.IsDir() {
return false
}
}
return true
}

View file

@ -1042,6 +1042,40 @@ func TestRsyncOptions(t *testing.T) {
assert.EqualError(t, err, errUnsupportedConfig.Error())
}
func TestSpaceForCrossRename(t *testing.T) {
if runtime.GOOS == osWindows {
t.Skip("this test is not available on Windows")
}
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
user := dataprovider.User{
Permissions: permissions,
HomeDir: os.TempDir(),
}
fs, err := user.GetFilesystem("123")
assert.NoError(t, err)
conn := Connection{
User: user,
fs: fs,
}
quotaResult := vfs.QuotaCheckResult{
HasSpace: true,
}
assert.False(t, conn.hasSpaceForCrossRename(quotaResult, -1, filepath.Join(os.TempDir(), "a missing file")))
testDir := filepath.Join(os.TempDir(), "dir")
err = os.MkdirAll(testDir, os.ModePerm)
assert.NoError(t, err)
err = ioutil.WriteFile(filepath.Join(testDir, "afile"), []byte("content"), os.ModePerm)
assert.NoError(t, err)
err = os.Chmod(testDir, 0001)
assert.NoError(t, err)
assert.False(t, conn.hasSpaceForCrossRename(quotaResult, -1, testDir))
err = os.Chmod(testDir, os.ModePerm)
assert.NoError(t, err)
err = os.RemoveAll(testDir)
assert.NoError(t, err)
}
func TestSystemCommandSizeForPath(t *testing.T) {
permissions := make(map[string][]string)
permissions["/"] = []string{dataprovider.PermAny}
@ -1164,7 +1198,7 @@ func TestSystemCommandErrors(t *testing.T) {
lock: new(sync.Mutex)}
destBuff := make([]byte, 65535)
dst := bytes.NewBuffer(destBuff)
_, err = transfer.copyFromReaderToWriter(dst, sshCmd.connection.channel, 0)
_, err = transfer.copyFromReaderToWriter(dst, sshCmd.connection.channel)
assert.EqualError(t, err, readErr.Error())
mockSSHChannel = MockChannel{
@ -1174,7 +1208,8 @@ func TestSystemCommandErrors(t *testing.T) {
WriteError: nil,
}
sshCmd.connection.channel = &mockSSHChannel
_, err = transfer.copyFromReaderToWriter(dst, sshCmd.connection.channel, 1)
transfer.maxWriteSize = 1
_, err = transfer.copyFromReaderToWriter(dst, sshCmd.connection.channel)
assert.EqualError(t, err, errQuotaExceeded.Error())
mockSSHChannel = MockChannel{
@ -1185,9 +1220,10 @@ func TestSystemCommandErrors(t *testing.T) {
ShortWriteErr: true,
}
sshCmd.connection.channel = &mockSSHChannel
_, err = transfer.copyFromReaderToWriter(sshCmd.connection.channel, dst, 0)
_, err = transfer.copyFromReaderToWriter(sshCmd.connection.channel, dst)
assert.EqualError(t, err, io.ErrShortWrite.Error())
_, err = transfer.copyFromReaderToWriter(sshCmd.connection.channel, dst, -1)
transfer.maxWriteSize = -1
_, err = transfer.copyFromReaderToWriter(sshCmd.connection.channel, dst)
assert.EqualError(t, err, errQuotaExceeded.Error())
err = os.RemoveAll(homeDir)
assert.NoError(t, err)
@ -2032,9 +2068,21 @@ func TestRenamePermission(t *testing.T) {
assert.True(t, conn.isRenamePermitted("", request))
request = sftp.NewRequest("Rename", "/dir3/testfile")
request.Target = "/dir2/testfile"
// delete is granted on Source and Upload on Target, this is enough
// delete is granted on the source and upload on the target; the target is a file, so this is enough
assert.True(t, conn.isRenamePermitted("", request))
request = sftp.NewRequest("Rename", "/dir2/testfile")
request.Target = "/dir3/testfile"
assert.False(t, conn.isRenamePermitted("", request))
tmpDir := filepath.Join(os.TempDir(), "dir")
err = os.Mkdir(tmpDir, os.ModePerm)
assert.NoError(t, err)
request.Filepath = "/dir"
request.Target = "/dir2/dir"
// the source is a dir and the target has no createDirs perm
assert.False(t, conn.isRenamePermitted(tmpDir, request))
conn.User.Permissions["/dir2"] = []string{dataprovider.PermUpload, dataprovider.PermCreateDirs}
// the source is a dir and the target has createDirs perm
assert.True(t, conn.isRenamePermitted(tmpDir, request))
err = os.RemoveAll(tmpDir)
assert.NoError(t, err)
}

View file

@ -188,7 +188,7 @@ func (c *scpCommand) getUploadFileData(sizeToRead int64, transfer *Transfer) err
}
func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead int64, isNewFile bool, fileSize int64, requestPath string) error {
quotaResult := c.connection.hasSpace(true, requestPath)
quotaResult := c.connection.hasSpace(isNewFile, requestPath)
if !quotaResult.HasSpace {
err := fmt.Errorf("denying file write due to quota limits")
c.connection.Log(logger.LevelWarn, logSenderSCP, "error uploading file: %#v, err: %v", filePath, err)
@ -204,6 +204,7 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
}
initialSize := int64(0)
maxWriteSize := quotaResult.GetRemainingSize()
if !isNewFile {
if vfs.IsLocalOsFs(c.connection.fs) {
vfolder, err := c.connection.User.GetVirtualFolderForPath(path.Dir(requestPath))
@ -218,6 +219,9 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
} else {
initialSize = fileSize
}
if maxWriteSize > 0 {
maxWriteSize += fileSize
}
}
vfs.SetPathPermissions(c.connection.fs, filePath, c.connection.User.GetUID(), c.connection.User.GetGID())
@ -242,6 +246,7 @@ func (c *scpCommand) handleUploadFile(resolvedPath, filePath string, sizeToRead
minWriteOffset: 0,
initialSize: initialSize,
requestPath: requestPath,
maxWriteSize: maxWriteSize,
lock: new(sync.Mutex),
}
addTransfer(&transfer)

View file

@ -170,7 +170,9 @@ func newActionNotification(user dataprovider.User, operation, filePath, target,
} else if user.FsConfig.Provider == 2 {
bucket = user.FsConfig.GCSConfig.Bucket
}
if err != nil {
if err == errQuotaExceeded {
status = 2
} else if err != nil {
status = 0
}
return actionNotification{

View file

@ -1970,6 +1970,16 @@ func TestQuotaLimits(t *testing.T) {
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
testFileSize1 := int64(131072)
testFileName1 := "test_file1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
err = createTestFile(testFilePath1, testFileSize1)
assert.NoError(t, err)
testFileSize2 := int64(32768)
testFileName2 := "test_file2.dat" //nolint:goconst
testFilePath2 := filepath.Join(homeBasePath, testFileName2)
err = createTestFile(testFilePath2, testFileSize2)
assert.NoError(t, err)
// test quota files
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
@ -1994,9 +2004,37 @@ func TestQuotaLimits(t *testing.T) {
assert.Error(t, err, "user is over quota size, upload must fail")
err = client.Rename(testFileName, testFileName+".quota")
assert.NoError(t, err)
err = client.Rename(testFileName+".quota", testFileName)
assert.NoError(t, err)
}
// now test quota limits while uploading the current file, we have 1 byte remaining
user.QuotaSize = testFileSize + 1
user.QuotaFiles = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK)
assert.NoError(t, err)
client, err = getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
defer client.Close()
err = sftpUploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.Error(t, err)
_, err = client.Stat(testFileName1)
assert.Error(t, err)
// overwriting an existing file will work if the resulting size is less than or equal to the current one
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath2, testFileName, testFileSize2, client)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath1, testFileName, testFileSize1, client)
assert.Error(t, err)
_, err := client.Stat(testFileName)
assert.Error(t, err)
}
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
assert.NoError(t, err)
err = os.Remove(testFilePath2)
assert.NoError(t, err)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
@ -2204,7 +2242,7 @@ func TestVirtualFoldersQuotaLimit(t *testing.T) {
err := createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
u2 := getTestUser(usePubKey)
u2.QuotaSize = testFileSize - 1
u2.QuotaSize = testFileSize + 1
u2.VirtualFolders = append(u2.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: mappedPath1,
@ -2219,7 +2257,7 @@ func TestVirtualFoldersQuotaLimit(t *testing.T) {
},
VirtualPath: vdirPath2,
QuotaFiles: 0,
QuotaSize: testFileSize - 1,
QuotaSize: testFileSize + 1,
})
users := []dataprovider.User{u1, u2}
for _, u := range users {
@ -2238,10 +2276,16 @@ func TestVirtualFoldersQuotaLimit(t *testing.T) {
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
assert.Error(t, err)
_, err = client.Stat(testFileName)
assert.Error(t, err)
err = sftpUploadFile(testFilePath, path.Join(vdirPath1, testFileName+"1"), testFileSize, client)
assert.Error(t, err)
_, err = client.Stat(path.Join(vdirPath1, testFileName+"1"))
assert.Error(t, err)
err = sftpUploadFile(testFilePath, path.Join(vdirPath2, testFileName+"1"), testFileSize, client)
assert.Error(t, err)
_, err = client.Stat(path.Join(vdirPath2, testFileName+"1"))
assert.Error(t, err)
err = client.Remove(path.Join(vdirPath1, testFileName))
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
@ -2300,6 +2344,8 @@ func TestVirtualFoldersQuotaRenameOverwrite(t *testing.T) {
vdirPath1 := "/vdir1"
mappedPath2 := filepath.Join(os.TempDir(), "vdir2")
vdirPath2 := "/vdir2"
mappedPath3 := filepath.Join(os.TempDir(), "vdir3")
vdirPath3 := "/vdir3"
u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: mappedPath1,
@ -2314,12 +2360,22 @@ func TestVirtualFoldersQuotaRenameOverwrite(t *testing.T) {
},
VirtualPath: vdirPath2,
QuotaFiles: 0,
QuotaSize: testFileSize + testFileSize1 - 1,
QuotaSize: testFileSize + testFileSize1 + 1,
})
u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: mappedPath3,
},
VirtualPath: vdirPath3,
QuotaFiles: 2,
QuotaSize: testFileSize * 2,
})
err = os.MkdirAll(mappedPath1, os.ModePerm)
assert.NoError(t, err)
err = os.MkdirAll(mappedPath2, os.ModePerm)
assert.NoError(t, err)
err = os.MkdirAll(mappedPath3, os.ModePerm)
assert.NoError(t, err)
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
client, err := getSftpClient(user, usePubKey)
@ -2337,6 +2393,10 @@ func TestVirtualFoldersQuotaRenameOverwrite(t *testing.T) {
assert.NoError(t, err)
err = sftpUploadFile(testFilePath1, testFileName1, testFileSize1, client)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, path.Join(vdirPath3, testFileName), testFileSize, client)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, path.Join(vdirPath3, testFileName+"1"), testFileSize, client)
assert.NoError(t, err)
err = client.Rename(testFileName, path.Join(vdirPath1, testFileName+".rename"))
assert.Error(t, err)
// we overwrite an existing file and we have unlimited size
@ -2352,6 +2412,41 @@ func TestVirtualFoldersQuotaRenameOverwrite(t *testing.T) {
// we have no space and we try to overwrite a smaller file with a bigger one, this should fail
err = client.Rename(testFileName, path.Join(vdirPath2, testFileName1))
assert.Error(t, err)
fi, err := client.Stat(path.Join(vdirPath1, testFileName1))
if assert.NoError(t, err) {
assert.Equal(t, testFileSize1, fi.Size())
}
// we are over quota inside vdir3: files 2/2 and size 262144/262144
err = client.Rename(path.Join(vdirPath1, testFileName1), path.Join(vdirPath3, testFileName1+".rename"))
assert.Error(t, err)
// we overwrite an existing file and we have enough size
err = client.Rename(path.Join(vdirPath1, testFileName1), path.Join(vdirPath3, testFileName))
assert.NoError(t, err)
testFileName2 := "test_file2.dat"
testFilePath2 := filepath.Join(homeBasePath, testFileName2)
err = createTestFile(testFilePath2, testFileSize+testFileSize1)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath2, testFileName2, testFileSize+testFileSize1, client)
assert.NoError(t, err)
// we overwrite an existing file but we don't have enough quota size
err = client.Rename(testFileName2, path.Join(vdirPath3, testFileName))
assert.Error(t, err)
err = os.Remove(testFilePath2)
assert.NoError(t, err)
// now remove a file from vdir3, create a dir with 2 files and try to rename it into vdir3
// this will fail since the rename would result in 3 files inside vdir3 and the quota limits
// only allow 2 files in total there
err = client.Remove(path.Join(vdirPath3, testFileName+"1"))
assert.NoError(t, err)
aDir := "a dir"
err = client.Mkdir(aDir)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath1, path.Join(aDir, testFileName1), testFileSize1, client)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath1, path.Join(aDir, testFileName1+"1"), testFileSize1, client)
assert.NoError(t, err)
err = client.Rename(aDir, path.Join(vdirPath3, aDir))
assert.Error(t, err)
}
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
@ -2359,12 +2454,16 @@ func TestVirtualFoldersQuotaRenameOverwrite(t *testing.T) {
assert.NoError(t, err)
_, err = httpd.RemoveFolder(vfs.BaseVirtualFolder{MappedPath: mappedPath2}, http.StatusOK)
assert.NoError(t, err)
_, err = httpd.RemoveFolder(vfs.BaseVirtualFolder{MappedPath: mappedPath3}, http.StatusOK)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
err = os.RemoveAll(mappedPath1)
assert.NoError(t, err)
err = os.RemoveAll(mappedPath2)
assert.NoError(t, err)
err = os.RemoveAll(mappedPath3)
assert.NoError(t, err)
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
@ -3795,7 +3894,7 @@ func TestVFolderQuotaSize(t *testing.T) {
u := getTestUser(usePubKey)
testFileSize := int64(131072)
u.QuotaFiles = 1
u.QuotaSize = testFileSize - 1
u.QuotaSize = testFileSize + 1
mappedPath1 := filepath.Join(os.TempDir(), "vdir1")
vdirPath1 := "/vpath1"
mappedPath2 := filepath.Join(os.TempDir(), "vdir2")
@ -3884,7 +3983,7 @@ func TestVFolderQuotaSize(t *testing.T) {
},
VirtualPath: vdirPath2,
QuotaFiles: 10,
QuotaSize: testFileSize*2 - 1,
QuotaSize: testFileSize*2 + 1,
})
user1, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
@ -5822,7 +5921,7 @@ func TestGitQuotaVirtualFolders(t *testing.T) {
repoName := "testrepo"
u := getTestUser(usePubKey)
u.QuotaFiles = 1
u.QuotaSize = 1
u.QuotaSize = 131072
mappedPath := filepath.Join(os.TempDir(), "repo")
u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
@ -5841,11 +5940,10 @@ func TestGitQuotaVirtualFolders(t *testing.T) {
// we upload a file so the user is over quota
defer client.Close()
testFileName := "test_file.dat"
testFileSize := int64(131072)
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
err = createTestFile(testFilePath, u.QuotaSize)
assert.NoError(t, err)
err = sftpUploadFile(testFilePath, testFileName, testFileSize, client)
err = sftpUploadFile(testFilePath, testFileName, u.QuotaSize, client)
assert.NoError(t, err)
err = os.Remove(testFilePath)
assert.NoError(t, err)
@ -6437,21 +6535,49 @@ func TestSCPQuotaSize(t *testing.T) {
testFileSize := int64(65535)
u := getTestUser(usePubKey)
u.QuotaFiles = 1
u.QuotaSize = testFileSize - 1
u.QuotaSize = testFileSize + 1
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
testFileName := "test_file.dat"
testFilePath := filepath.Join(homeBasePath, testFileName)
err = createTestFile(testFilePath, testFileSize)
assert.NoError(t, err)
testFileSize1 := int64(131072)
testFileName1 := "test_file1.dat"
testFilePath1 := filepath.Join(homeBasePath, testFileName1)
err = createTestFile(testFilePath1, testFileSize1)
assert.NoError(t, err)
testFileSize2 := int64(32768)
testFileName2 := "test_file2.dat"
testFilePath2 := filepath.Join(homeBasePath, testFileName2)
err = createTestFile(testFilePath2, testFileSize2)
assert.NoError(t, err)
remoteUpPath := fmt.Sprintf("%v@127.0.0.1:%v", user.Username, path.Join("/", testFileName))
err = scpUpload(testFilePath, remoteUpPath, true, false)
assert.NoError(t, err)
err = scpUpload(testFilePath, remoteUpPath+".quota", true, false)
assert.Error(t, err, "user is over quota scp upload must fail")
// now test quota limits while uploading the current file, we have 1 byte remaining
user.QuotaSize = testFileSize + 1
user.QuotaFiles = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK)
assert.NoError(t, err)
err = scpUpload(testFilePath2, remoteUpPath+".quota", true, false)
assert.Error(t, err, "user is over quota scp upload must fail")
// overwriting an existing file will work if the resulting size is less than or equal to the current one
err = scpUpload(testFilePath1, remoteUpPath, true, false)
assert.Error(t, err)
err = scpUpload(testFilePath2, remoteUpPath, true, false)
assert.NoError(t, err)
err = scpUpload(testFilePath, remoteUpPath, true, false)
assert.NoError(t, err)
err = os.Remove(testFilePath)
assert.NoError(t, err)
err = os.Remove(testFilePath1)
assert.NoError(t, err)
err = os.Remove(testFilePath2)
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
_, err = httpd.RemoveUser(user, http.StatusOK)

View file

@ -365,11 +365,12 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
transferError: nil,
isFinished: false,
minWriteOffset: 0,
maxWriteSize: remainingQuotaSize,
lock: new(sync.Mutex),
}
addTransfer(&transfer)
defer removeTransfer(&transfer) //nolint:errcheck
w, e := transfer.copyFromReaderToWriter(stdin, c.connection.channel, remainingQuotaSize)
w, e := transfer.copyFromReaderToWriter(stdin, c.connection.channel)
c.connection.Log(logger.LevelDebug, logSenderSSH, "command: %#v, copy from remote command to sdtin ended, written: %v, "+
"initial remaining quota: %v, err: %v", c.connection.command, w, remainingQuotaSize, e)
if e != nil {
@ -397,7 +398,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
}
addTransfer(&transfer)
defer removeTransfer(&transfer) //nolint:errcheck
w, e := transfer.copyFromReaderToWriter(c.connection.channel, stdout, 0)
w, e := transfer.copyFromReaderToWriter(c.connection.channel, stdout)
c.connection.Log(logger.LevelDebug, logSenderSSH, "command: %#v, copy from sdtout to remote command ended, written: %v err: %v",
c.connection.command, w, e)
if e != nil {
@ -426,7 +427,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
}
addTransfer(&transfer)
defer removeTransfer(&transfer) //nolint:errcheck
w, e := transfer.copyFromReaderToWriter(c.connection.channel.Stderr(), stderr, 0)
w, e := transfer.copyFromReaderToWriter(c.connection.channel.Stderr(), stderr)
c.connection.Log(logger.LevelDebug, logSenderSSH, "command: %#v, copy from sdterr to remote command ended, written: %v err: %v",
c.connection.command, w, e)
// os.ErrClosed means that the command is finished so we don't need to do anything

View file

@ -49,6 +49,7 @@ type Transfer struct {
isNewFile bool
isFinished bool
requestPath string
maxWriteSize int64
}
// TransferError is called if there is an unexpected error.
@ -108,6 +109,9 @@ func (t *Transfer) WriteAt(p []byte, off int64) (n int, err error) {
}
t.lock.Lock()
t.bytesReceived += int64(written)
if e == nil && t.maxWriteSize > 0 && t.bytesReceived > t.maxWriteSize {
e = errQuotaExceeded
}
t.lock.Unlock()
if e != nil {
t.TransferError(e)
@ -136,7 +140,17 @@ func (t *Transfer) Close() error {
numFiles = 1
}
metrics.TransferCompleted(t.bytesSent, t.bytesReceived, t.transferType, t.transferError)
if t.transferType == transferUpload && t.file != nil && t.file.Name() != t.path {
if t.transferError == errQuotaExceeded && t.file != nil {
// if the quota is exceeded we try to remove the partial file for uploads to the local filesystem
err = os.Remove(t.file.Name())
if err == nil {
numFiles--
t.bytesReceived = 0
t.minWriteOffset = 0
}
logger.Warn(logSender, t.connectionID, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
t.file.Name(), err)
} else if t.transferType == transferUpload && t.file != nil && t.file.Name() != t.path {
if t.transferError == nil || uploadMode == uploadModeAtomicWithResume {
err = os.Rename(t.file.Name(), t.path)
logger.Debug(logSender, t.connectionID, "atomic upload completed, rename: %#v -> %#v, error: %v",
@ -148,6 +162,7 @@ func (t *Transfer) Close() error {
if err == nil {
numFiles--
t.bytesReceived = 0
t.minWriteOffset = 0
}
}
}
@ -231,10 +246,10 @@ func (t *Transfer) handleThrottle() {
// used for ssh commands.
// It reads from src until EOF, so it does not treat an EOF from Read as an error to be reported.
// EOF from Write is reported as an error
func (t *Transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader, maxWriteSize int64) (int64, error) {
func (t *Transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader) (int64, error) {
var written int64
var err error
if maxWriteSize < 0 {
if t.maxWriteSize < 0 {
return 0, errQuotaExceeded
}
buf := make([]byte, 32768)
@ -250,7 +265,7 @@ func (t *Transfer) copyFromReaderToWriter(dst io.Writer, src io.Reader, maxWrite
} else {
t.bytesReceived = written
}
if maxWriteSize > 0 && written > maxWriteSize {
if t.maxWriteSize > 0 && written > t.maxWriteSize {
err = errQuotaExceeded
break
}
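In copyFromReaderToWriter, used by the SSH system commands, the same field acts as a byte budget: a negative maxWriteSize means the user was already over quota when the command started, zero means no limit, and a positive value is the allowed amount. The sketch below is a reduced model of that contract, not the actual implementation.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

var errQuotaExceeded = errors.New("denying write due to space limit")

// copyWithBudget models copyFromReaderToWriter:
// budget < 0  -> already over quota, fail immediately
// budget == 0 -> no size limit
// budget > 0  -> abort once more than budget bytes have been written
func copyWithBudget(dst io.Writer, src io.Reader, budget int64) (int64, error) {
	if budget < 0 {
		return 0, errQuotaExceeded
	}
	var written int64
	buf := make([]byte, 32768)
	for {
		n, rerr := src.Read(buf)
		if n > 0 {
			w, werr := dst.Write(buf[:n])
			written += int64(w)
			if werr != nil {
				return written, werr
			}
			if budget > 0 && written > budget {
				return written, errQuotaExceeded
			}
		}
		if rerr == io.EOF {
			return written, nil
		}
		if rerr != nil {
			return written, rerr
		}
	}
}

func main() {
	var dst bytes.Buffer
	_, err := copyWithBudget(&dst, bytes.NewReader(make([]byte, 100)), 50)
	fmt.Println(err) // denying write due to space limit
}
```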

View file

@ -64,7 +64,7 @@ func (q *QuotaCheckResult) GetRemainingSize() int64 {
return 0
}
// GetRemainigFiles returns the remaining allowed files
// GetRemainingFiles returns the remaining allowed files
func (q *QuotaCheckResult) GetRemainingFiles() int {
if q.QuotaFiles > 0 {
return q.QuotaFiles - q.UsedFiles