add a dedicated struct to store encrypted credentials

GCS credentials are now encrypted as well, both on disk and inside the
data provider.

The data provider is automatically migrated, and loaddata will accept the
old format too, but you should upgrade to the new format to avoid future
issues.
This commit is contained in:
Nicola Murino 2020-11-22 21:53:04 +01:00
parent ac435b7890
commit dccc583b5d
No known key found for this signature in database
GPG key ID: 2F1FB59433D5A8CB
33 changed files with 1512 additions and 314 deletions

View file

@ -143,10 +143,13 @@ Please take a look at the usage below to customize the serving parameters`,
FsConfig: dataprovider.Filesystem{
Provider: dataprovider.FilesystemProvider(portableFsProvider),
S3Config: vfs.S3FsConfig{
Bucket: portableS3Bucket,
Region: portableS3Region,
AccessKey: portableS3AccessKey,
AccessSecret: portableS3AccessSecret,
Bucket: portableS3Bucket,
Region: portableS3Region,
AccessKey: portableS3AccessKey,
AccessSecret: vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: portableS3AccessSecret,
},
Endpoint: portableS3Endpoint,
StorageClass: portableS3StorageClass,
KeyPrefix: portableS3KeyPrefix,
@ -154,16 +157,22 @@ Please take a look at the usage below to customize the serving parameters`,
UploadConcurrency: portableS3ULConcurrency,
},
GCSConfig: vfs.GCSFsConfig{
Bucket: portableGCSBucket,
Credentials: portableGCSCredentials,
Bucket: portableGCSBucket,
Credentials: vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: string(portableGCSCredentials),
},
AutomaticCredentials: portableGCSAutoCredentials,
StorageClass: portableGCSStorageClass,
KeyPrefix: portableGCSKeyPrefix,
},
AzBlobConfig: vfs.AzBlobFsConfig{
Container: portableAzContainer,
AccountName: portableAzAccountName,
AccountKey: portableAzAccountKey,
Container: portableAzContainer,
AccountName: portableAzAccountName,
AccountKey: vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: portableAzAccountKey,
},
Endpoint: portableAzEndpoint,
AccessTier: portableAzAccessTier,
SASURL: portableAzSASURL,

View file

@ -19,7 +19,7 @@ import (
)
const (
boltDatabaseVersion = 4
boltDatabaseVersion = 5
)
var (
@ -35,28 +35,6 @@ type BoltProvider struct {
dbHandle *bolt.DB
}
type compatUserV2 struct {
ID int64 `json:"id"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
PublicKeys []string `json:"public_keys,omitempty"`
HomeDir string `json:"home_dir"`
UID int `json:"uid"`
GID int `json:"gid"`
MaxSessions int `json:"max_sessions"`
QuotaSize int64 `json:"quota_size"`
QuotaFiles int `json:"quota_files"`
Permissions []string `json:"permissions"`
UsedQuotaSize int64 `json:"used_quota_size"`
UsedQuotaFiles int `json:"used_quota_files"`
LastQuotaUpdate int64 `json:"last_quota_update"`
UploadBandwidth int64 `json:"upload_bandwidth"`
DownloadBandwidth int64 `json:"download_bandwidth"`
ExpirationDate int64 `json:"expiration_date"`
LastLogin int64 `json:"last_login"`
Status int `json:"status"`
}
func init() {
version.AddFeature("+bolt")
}
@ -425,7 +403,8 @@ func (p BoltProvider) getUserWithUsername(username string) ([]User, error) {
var user User
user, err := p.userExists(username)
if err == nil {
users = append(users, HideUserSensitiveData(&user))
user.HideConfidentialData()
users = append(users, user)
return users, nil
}
if _, ok := err.(*RecordNotFoundError); ok {
@ -465,7 +444,8 @@ func (p BoltProvider) getUsers(limit int, offset int, order string, username str
}
user, err := joinUserAndFolders(v, folderBucket)
if err == nil {
users = append(users, HideUserSensitiveData(&user))
user.HideConfidentialData()
users = append(users, user)
}
if len(users) >= limit {
break
@ -479,7 +459,8 @@ func (p BoltProvider) getUsers(limit int, offset int, order string, username str
}
user, err := joinUserAndFolders(v, folderBucket)
if err == nil {
users = append(users, HideUserSensitiveData(&user))
user.HideConfidentialData()
users = append(users, user)
}
if len(users) >= limit {
break
@ -718,28 +699,46 @@ func (p BoltProvider) migrateDatabase() error {
}
switch dbVersion.Version {
case 1:
err = updateDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updateDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateDatabaseFrom3To4(p.dbHandle)
return updateBoltDatabaseFromV1(p.dbHandle)
case 2:
err = updateDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateDatabaseFrom3To4(p.dbHandle)
return updateBoltDatabaseFromV2(p.dbHandle)
case 3:
return updateDatabaseFrom3To4(p.dbHandle)
return updateBoltDatabaseFromV3(p.dbHandle)
case 4:
return updateBoltDatabaseFromV4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
// updateBoltDatabaseFromV1 migrates a version 1 bolt database, chaining
// every intermediate schema upgrade up to the latest version.
func updateBoltDatabaseFromV1(dbHandle *bolt.DB) error {
	if err := updateDatabaseFrom1To2(dbHandle); err != nil {
		return err
	}
	return updateBoltDatabaseFromV2(dbHandle)
}

// updateBoltDatabaseFromV2 migrates a version 2 bolt database, chaining
// every intermediate schema upgrade up to the latest version.
func updateBoltDatabaseFromV2(dbHandle *bolt.DB) error {
	if err := updateDatabaseFrom2To3(dbHandle); err != nil {
		return err
	}
	return updateBoltDatabaseFromV3(dbHandle)
}

// updateBoltDatabaseFromV3 migrates a version 3 bolt database, chaining
// every intermediate schema upgrade up to the latest version.
func updateBoltDatabaseFromV3(dbHandle *bolt.DB) error {
	if err := updateDatabaseFrom3To4(dbHandle); err != nil {
		return err
	}
	return updateBoltDatabaseFromV4(dbHandle)
}

// updateBoltDatabaseFromV4 migrates a version 4 bolt database to the
// latest version.
func updateBoltDatabaseFromV4(dbHandle *bolt.DB) error {
	return updateDatabaseFrom4To5(dbHandle)
}
// itob returns an 8-byte big endian representation of v.
func itob(v int64) []byte {
b := make([]byte, 8)
@ -847,6 +846,27 @@ func removeUserFromFolderMapping(folder vfs.VirtualFolder, user User, bucket *bo
return err
}
// updateV4BoltUser validates the given migrated user and persists it in
// the users bucket, overwriting the existing record with the same
// username. It fails with RecordNotFoundError if the user is missing.
func updateV4BoltUser(dbHandle *bolt.DB, user User) error {
	if err := validateUser(&user); err != nil {
		return err
	}
	return dbHandle.Update(func(tx *bolt.Tx) error {
		bucket, _, err := getBuckets(tx)
		if err != nil {
			return err
		}
		key := []byte(user.Username)
		if existing := bucket.Get(key); existing == nil {
			return &RecordNotFoundError{err: fmt.Sprintf("username %v does not exist", user.Username)}
		}
		serialized, err := json.Marshal(user)
		if err != nil {
			return err
		}
		return bucket.Put(key, serialized)
	})
}
func getBuckets(tx *bolt.Tx) (*bolt.Bucket, *bolt.Bucket, error) {
var err error
bucket := tx.Bucket(usersBucket)
@ -1007,6 +1027,46 @@ func updateDatabaseFrom3To4(dbHandle *bolt.DB) error {
return err
}
// updateDatabaseFrom4To5 migrates the bolt database from schema version 4
// to version 5: each stored user is decoded using the v4 compatibility
// layout, its filesystem configuration is converted to the new format
// (credentials wrapped in vfs.Secret) and the user is written back, then
// the database version is bumped to 5.
func updateDatabaseFrom4To5(dbHandle *bolt.DB) error {
	logger.InfoToConsole("updating bolt database version: 4 -> 5")
	providerLog(logger.LevelInfo, "updating bolt database version: 4 -> 5")
	users := []User{}
	// collect the converted users inside a read-only transaction first;
	// the writes happen later, each in its own update transaction
	err := dbHandle.View(func(tx *bolt.Tx) error {
		bucket, _, err := getBuckets(tx)
		if err != nil {
			return err
		}
		cursor := bucket.Cursor()
		for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
			var compatUser compatUserV4
			err = json.Unmarshal(v, &compatUser)
			if err != nil {
				// a record that does not match the v4 layout is skipped:
				// it was probably migrated already
				logger.WarnToConsole("failed to unmarshal v4 user %#v, is it already migrated?", string(k))
				continue
			}
			fsConfig, err := convertFsConfigFromV4(compatUser.FsConfig, compatUser.Username)
			if err != nil {
				return err
			}
			users = append(users, createUserFromV4(compatUser, fsConfig))
		}
		return nil
	})
	if err != nil {
		return err
	}
	for _, user := range users {
		err = updateV4BoltUser(dbHandle, user)
		if err != nil {
			return err
		}
		providerLog(logger.LevelInfo, "filesystem config updated for user %#v", user.Username)
	}
	return updateBoltDatabaseVersion(dbHandle, 5)
}
func getBoltAvailableUsernames(dbHandle *bolt.DB) ([]string, error) {
usernames := []string{}
err := dbHandle.View(func(tx *bolt.Tx) error {

222
dataprovider/compat.go Normal file
View file

@ -0,0 +1,222 @@
package dataprovider
import (
"fmt"
"io/ioutil"
"path/filepath"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/vfs"
)
// compatUserV2 is the user layout used by schema version 2. It is kept
// only to migrate old databases and backups to the current format.
type compatUserV2 struct {
	ID                int64    `json:"id"`
	Username          string   `json:"username"`
	Password          string   `json:"password,omitempty"`
	PublicKeys        []string `json:"public_keys,omitempty"`
	HomeDir           string   `json:"home_dir"`
	UID               int      `json:"uid"`
	GID               int      `json:"gid"`
	MaxSessions       int      `json:"max_sessions"`
	QuotaSize         int64    `json:"quota_size"`
	QuotaFiles        int      `json:"quota_files"`
	Permissions       []string `json:"permissions"`
	UsedQuotaSize     int64    `json:"used_quota_size"`
	UsedQuotaFiles    int      `json:"used_quota_files"`
	LastQuotaUpdate   int64    `json:"last_quota_update"`
	UploadBandwidth   int64    `json:"upload_bandwidth"`
	DownloadBandwidth int64    `json:"download_bandwidth"`
	ExpirationDate    int64    `json:"expiration_date"`
	LastLogin         int64    `json:"last_login"`
	Status            int      `json:"status"`
}
// compatS3FsConfigV4 is the S3 filesystem configuration layout used by
// schema version 4, where the access secret was a plain string.
type compatS3FsConfigV4 struct {
	Bucket            string `json:"bucket,omitempty"`
	KeyPrefix         string `json:"key_prefix,omitempty"`
	Region            string `json:"region,omitempty"`
	AccessKey         string `json:"access_key,omitempty"`
	AccessSecret      string `json:"access_secret,omitempty"`
	Endpoint          string `json:"endpoint,omitempty"`
	StorageClass      string `json:"storage_class,omitempty"`
	UploadPartSize    int64  `json:"upload_part_size,omitempty"`
	UploadConcurrency int    `json:"upload_concurrency,omitempty"`
}

// compatGCSFsConfigV4 is the GCS filesystem configuration layout used by
// schema version 4, where the credentials were stored as raw bytes.
// CredentialFile is never serialized (json:"-").
type compatGCSFsConfigV4 struct {
	Bucket               string `json:"bucket,omitempty"`
	KeyPrefix            string `json:"key_prefix,omitempty"`
	CredentialFile       string `json:"-"`
	Credentials          []byte `json:"credentials,omitempty"`
	AutomaticCredentials int    `json:"automatic_credentials,omitempty"`
	StorageClass         string `json:"storage_class,omitempty"`
}

// compatAzBlobFsConfigV4 is the Azure Blob filesystem configuration
// layout used by schema version 4, where the account key was a plain
// string.
type compatAzBlobFsConfigV4 struct {
	Container         string `json:"container,omitempty"`
	AccountName       string `json:"account_name,omitempty"`
	AccountKey        string `json:"account_key,omitempty"`
	Endpoint          string `json:"endpoint,omitempty"`
	SASURL            string `json:"sas_url,omitempty"`
	KeyPrefix         string `json:"key_prefix,omitempty"`
	UploadPartSize    int64  `json:"upload_part_size,omitempty"`
	UploadConcurrency int    `json:"upload_concurrency,omitempty"`
	UseEmulator       bool   `json:"use_emulator,omitempty"`
	AccessTier        string `json:"access_tier,omitempty"`
}

// compatFilesystemV4 groups the version 4 filesystem configurations for
// all the supported storage providers.
type compatFilesystemV4 struct {
	Provider     FilesystemProvider     `json:"provider"`
	S3Config     compatS3FsConfigV4     `json:"s3config,omitempty"`
	GCSConfig    compatGCSFsConfigV4    `json:"gcsconfig,omitempty"`
	AzBlobConfig compatAzBlobFsConfigV4 `json:"azblobconfig,omitempty"`
}

// compatUserV4 is the user layout used by schema version 4. It differs
// from the current User only in the filesystem configuration format.
type compatUserV4 struct {
	ID                int64               `json:"id"`
	Status            int                 `json:"status"`
	Username          string              `json:"username"`
	ExpirationDate    int64               `json:"expiration_date"`
	Password          string              `json:"password,omitempty"`
	PublicKeys        []string            `json:"public_keys,omitempty"`
	HomeDir           string              `json:"home_dir"`
	VirtualFolders    []vfs.VirtualFolder `json:"virtual_folders,omitempty"`
	UID               int                 `json:"uid"`
	GID               int                 `json:"gid"`
	MaxSessions       int                 `json:"max_sessions"`
	QuotaSize         int64               `json:"quota_size"`
	QuotaFiles        int                 `json:"quota_files"`
	Permissions       map[string][]string `json:"permissions"`
	UsedQuotaSize     int64               `json:"used_quota_size"`
	UsedQuotaFiles    int                 `json:"used_quota_files"`
	LastQuotaUpdate   int64               `json:"last_quota_update"`
	UploadBandwidth   int64               `json:"upload_bandwidth"`
	DownloadBandwidth int64               `json:"download_bandwidth"`
	LastLogin         int64               `json:"last_login"`
	Filters           UserFilters         `json:"filters"`
	FsConfig          compatFilesystemV4  `json:"filesystem"`
}

// backupDataV4Compat is the backup/dump layout used by schema version 4,
// accepted by ParseDumpData for backward compatibility.
type backupDataV4Compat struct {
	Users   []compatUserV4          `json:"users"`
	Folders []vfs.BaseVirtualFolder `json:"folders"`
}
// createUserFromV4 builds an up to date User from a version 4 user and
// the already converted filesystem configuration.
func createUserFromV4(u compatUserV4, fsConfig Filesystem) User {
	return User{
		ID:                u.ID,
		Status:            u.Status,
		Username:          u.Username,
		ExpirationDate:    u.ExpirationDate,
		Password:          u.Password,
		PublicKeys:        u.PublicKeys,
		HomeDir:           u.HomeDir,
		VirtualFolders:    u.VirtualFolders,
		UID:               u.UID,
		GID:               u.GID,
		MaxSessions:       u.MaxSessions,
		QuotaSize:         u.QuotaSize,
		QuotaFiles:        u.QuotaFiles,
		Permissions:       u.Permissions,
		UsedQuotaSize:     u.UsedQuotaSize,
		UsedQuotaFiles:    u.UsedQuotaFiles,
		LastQuotaUpdate:   u.LastQuotaUpdate,
		UploadBandwidth:   u.UploadBandwidth,
		DownloadBandwidth: u.DownloadBandwidth,
		LastLogin:         u.LastLogin,
		Filters:           u.Filters,
		FsConfig:          fsConfig,
	}
}
// getCGSCredentialsFromV4 returns the GCS credentials stored inside a
// version 4 configuration as a plain text Secret.
// Credentials embedded in the config take precedence; otherwise they are
// read from the configured credential file. If neither is available an
// empty secret and a nil error are returned.
// NOTE: the "CGS" in the name is a historical typo, kept because callers
// elsewhere reference it.
func getCGSCredentialsFromV4(config compatGCSFsConfigV4) (vfs.Secret, error) {
	var secret vfs.Secret
	if len(config.Credentials) > 0 {
		secret.Status = vfs.SecretStatusPlain
		secret.Payload = string(config.Credentials)
		return secret, nil
	}
	if config.CredentialFile != "" {
		creds, err := ioutil.ReadFile(config.CredentialFile)
		if err != nil {
			return secret, err
		}
		secret.Status = vfs.SecretStatusPlain
		secret.Payload = string(creds)
	}
	return secret, nil
}
// convertFsConfigFromV4 converts a version 4 filesystem configuration to
// the current Filesystem format, wrapping plain text credentials in
// vfs.Secret values. username is used for logging and to build the
// default GCS credentials file path.
func convertFsConfigFromV4(compatFs compatFilesystemV4, username string) (Filesystem, error) {
	fsConfig := Filesystem{
		Provider:     compatFs.Provider,
		S3Config:     vfs.S3FsConfig{},
		AzBlobConfig: vfs.AzBlobFsConfig{},
		GCSConfig:    vfs.GCSFsConfig{},
	}
	switch compatFs.Provider {
	case S3FilesystemProvider:
		fsConfig.S3Config = vfs.S3FsConfig{
			Bucket:            compatFs.S3Config.Bucket,
			KeyPrefix:         compatFs.S3Config.KeyPrefix,
			Region:            compatFs.S3Config.Region,
			AccessKey:         compatFs.S3Config.AccessKey,
			AccessSecret:      vfs.Secret{},
			Endpoint:          compatFs.S3Config.Endpoint,
			StorageClass:      compatFs.S3Config.StorageClass,
			UploadPartSize:    compatFs.S3Config.UploadPartSize,
			UploadConcurrency: compatFs.S3Config.UploadConcurrency,
		}
		// an empty access secret stays an empty Secret, not a plain one
		if compatFs.S3Config.AccessSecret != "" {
			secret, err := vfs.GetSecretFromCompatString(compatFs.S3Config.AccessSecret)
			if err != nil {
				providerLog(logger.LevelError, "unable to convert v4 filesystem for user %#v: %v", username, err)
				return fsConfig, err
			}
			fsConfig.S3Config.AccessSecret = secret
		}
	case AzureBlobFilesystemProvider:
		fsConfig.AzBlobConfig = vfs.AzBlobFsConfig{
			Container:         compatFs.AzBlobConfig.Container,
			AccountName:       compatFs.AzBlobConfig.AccountName,
			AccountKey:        vfs.Secret{},
			Endpoint:          compatFs.AzBlobConfig.Endpoint,
			SASURL:            compatFs.AzBlobConfig.SASURL,
			KeyPrefix:         compatFs.AzBlobConfig.KeyPrefix,
			UploadPartSize:    compatFs.AzBlobConfig.UploadPartSize,
			UploadConcurrency: compatFs.AzBlobConfig.UploadConcurrency,
			UseEmulator:       compatFs.AzBlobConfig.UseEmulator,
			AccessTier:        compatFs.AzBlobConfig.AccessTier,
		}
		if compatFs.AzBlobConfig.AccountKey != "" {
			secret, err := vfs.GetSecretFromCompatString(compatFs.AzBlobConfig.AccountKey)
			if err != nil {
				providerLog(logger.LevelError, "unable to convert v4 filesystem for user %#v: %v", username, err)
				return fsConfig, err
			}
			fsConfig.AzBlobConfig.AccountKey = secret
		}
	case GCSFilesystemProvider:
		fsConfig.GCSConfig = vfs.GCSFsConfig{
			Bucket:               compatFs.GCSConfig.Bucket,
			KeyPrefix:            compatFs.GCSConfig.KeyPrefix,
			CredentialFile:       compatFs.GCSConfig.CredentialFile,
			AutomaticCredentials: compatFs.GCSConfig.AutomaticCredentials,
			StorageClass:         compatFs.GCSConfig.StorageClass,
		}
		// NOTE(review): the default credential file path is set on the local
		// compat copy only, and after fsConfig.GCSConfig has been built, so
		// the returned config keeps the original CredentialFile value while
		// the default path is used just to load the credentials below —
		// presumably intentional, confirm against the migration callers.
		if compatFs.GCSConfig.AutomaticCredentials == 0 {
			compatFs.GCSConfig.CredentialFile = filepath.Join(credentialsDirPath, fmt.Sprintf("%v_gcs_credentials.json",
				username))
		}
		secret, err := getCGSCredentialsFromV4(compatFs.GCSConfig)
		if err != nil {
			providerLog(logger.LevelError, "unable to convert v4 filesystem for user %#v: %v", username, err)
			return fsConfig, err
		}
		fsConfig.GCSConfig.Credentials = secret
	}
	return fsConfig, nil
}

View file

@ -59,6 +59,9 @@ const (
BoltDataProviderName = "bolt"
// MemoryDataProviderName name for memory provider
MemoryDataProviderName = "memory"
// DumpVersion defines the version for the dump.
// For restore/load we support the current version and the previous one
DumpVersion = 5
argonPwdPrefix = "$argon2id$"
bcryptPwdPrefix = "$2a$"
@ -265,6 +268,7 @@ type Config struct {
type BackupData struct {
Users []User `json:"users"`
Folders []vfs.BaseVirtualFolder `json:"folders"`
Version int `json:"version"`
}
type keyboardAuthHookRequest struct {
@ -384,10 +388,8 @@ func Initialize(cnf Config, basePath string) error {
if err = validateHooks(); err != nil {
return err
}
if !cnf.PreferDatabaseCredentials {
if err = validateCredentialsDir(basePath); err != nil {
return err
}
if err = validateCredentialsDir(basePath, cnf.PreferDatabaseCredentials); err != nil {
return err
}
err = createProvider(basePath)
if err != nil {
@ -689,6 +691,7 @@ func GetFolders(limit, offset int, order, folderPath string) ([]vfs.BaseVirtualF
// DumpData returns all users and folders
func DumpData() (BackupData, error) {
var data BackupData
data.Version = DumpVersion
users, err := provider.dumpUsers()
if err != nil {
return data, err
@ -702,6 +705,33 @@ func DumpData() (BackupData, error) {
return data, err
}
// ParseDumpData tries to parse data as BackupData in the current format;
// if that fails it falls back to the version 4 layout and converts it.
func ParseDumpData(data []byte) (BackupData, error) {
	var dump BackupData
	if err := json.Unmarshal(data, &dump); err == nil {
		return dump, nil
	}
	// the current format did not match, try to parse as version 4
	dump = BackupData{}
	var dumpCompat backupDataV4Compat
	if err := json.Unmarshal(data, &dumpCompat); err != nil {
		return dump, err
	}
	logger.WarnToConsole("You are loading data from an old format, please update to the latest supported one. We only support the current and the previous format.")
	providerLog(logger.LevelWarn, "You are loading data from an old format, please update to the latest supported one. We only support the current and the previous format.")
	dump.Folders = dumpCompat.Folders
	for _, compatUser := range dumpCompat.Users {
		fsConfig, err := convertFsConfigFromV4(compatUser.FsConfig, compatUser.Username)
		if err != nil {
			return dump, err
		}
		dump.Users = append(dump.Users, createUserFromV4(compatUser, fsConfig))
	}
	return dump, nil
}
// GetProviderStatus returns an error if the provider is not available
func GetProviderStatus() error {
return provider.checkAvailability()
@ -1038,17 +1068,35 @@ func saveGCSCredentials(user *User) error {
if user.FsConfig.Provider != GCSFilesystemProvider {
return nil
}
if len(user.FsConfig.GCSConfig.Credentials) == 0 {
if user.FsConfig.GCSConfig.Credentials.Payload == "" {
return nil
}
if config.PreferDatabaseCredentials {
if user.FsConfig.GCSConfig.Credentials.IsPlain() {
user.FsConfig.GCSConfig.Credentials.AdditionalData = user.Username
err := user.FsConfig.GCSConfig.Credentials.Encrypt()
if err != nil {
return err
}
}
return nil
}
err := ioutil.WriteFile(user.getGCSCredentialsFilePath(), user.FsConfig.GCSConfig.Credentials, 0600)
if user.FsConfig.GCSConfig.Credentials.IsPlain() {
user.FsConfig.GCSConfig.Credentials.AdditionalData = user.Username
err := user.FsConfig.GCSConfig.Credentials.Encrypt()
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt GCS credentials: %v", err)}
}
}
creds, err := json.Marshal(user.FsConfig.GCSConfig.Credentials)
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not marshal GCS credentials: %v", err)}
}
err = ioutil.WriteFile(user.getGCSCredentialsFilePath(), creds, 0600)
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not save GCS credentials: %v", err)}
}
user.FsConfig.GCSConfig.Credentials = nil
user.FsConfig.GCSConfig.Credentials = vfs.Secret{}
return nil
}
@ -1058,38 +1106,38 @@ func validateFilesystemConfig(user *User) error {
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate s3config: %v", err)}
}
if user.FsConfig.S3Config.AccessSecret != "" {
vals := strings.Split(user.FsConfig.S3Config.AccessSecret, "$")
if !strings.HasPrefix(user.FsConfig.S3Config.AccessSecret, "$aes$") || len(vals) != 4 {
accessSecret, err := utils.EncryptData(user.FsConfig.S3Config.AccessSecret)
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt s3 access secret: %v", err)}
}
user.FsConfig.S3Config.AccessSecret = accessSecret
if user.FsConfig.S3Config.AccessSecret.IsPlain() {
user.FsConfig.S3Config.AccessSecret.AdditionalData = user.Username
err = user.FsConfig.S3Config.AccessSecret.Encrypt()
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt s3 access secret: %v", err)}
}
}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
return nil
} else if user.FsConfig.Provider == GCSFilesystemProvider {
err := vfs.ValidateGCSFsConfig(&user.FsConfig.GCSConfig, user.getGCSCredentialsFilePath())
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate GCS config: %v", err)}
}
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
return nil
} else if user.FsConfig.Provider == AzureBlobFilesystemProvider {
err := vfs.ValidateAzBlobFsConfig(&user.FsConfig.AzBlobConfig)
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not validate Azure Blob config: %v", err)}
}
if user.FsConfig.AzBlobConfig.AccountKey != "" {
vals := strings.Split(user.FsConfig.AzBlobConfig.AccountKey, "$")
if !strings.HasPrefix(user.FsConfig.AzBlobConfig.AccountKey, "$aes$") || len(vals) != 4 {
accountKey, err := utils.EncryptData(user.FsConfig.AzBlobConfig.AccountKey)
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt Azure blob account key: %v", err)}
}
user.FsConfig.AzBlobConfig.AccountKey = accountKey
if user.FsConfig.AzBlobConfig.AccountKey.IsPlain() {
user.FsConfig.AzBlobConfig.AccountKey.AdditionalData = user.Username
err = user.FsConfig.AzBlobConfig.AccountKey.Encrypt()
if err != nil {
return &ValidationError{err: fmt.Sprintf("could not encrypt Azure blob account key: %v", err)}
}
}
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
return nil
}
user.FsConfig.Provider = LocalFilesystemProvider
@ -1321,19 +1369,6 @@ func comparePbkdf2PasswordAndHash(password, hashedPassword string) (bool, error)
return subtle.ConstantTimeCompare(df, expected) == 1, nil
}
// HideUserSensitiveData hides user sensitive data
func HideUserSensitiveData(user *User) User {
user.Password = ""
if user.FsConfig.Provider == S3FilesystemProvider {
user.FsConfig.S3Config.AccessSecret = utils.RemoveDecryptionKey(user.FsConfig.S3Config.AccessSecret)
} else if user.FsConfig.Provider == GCSFilesystemProvider {
user.FsConfig.GCSConfig.Credentials = nil
} else if user.FsConfig.Provider == AzureBlobFilesystemProvider {
user.FsConfig.AzBlobConfig.AccountKey = utils.RemoveDecryptionKey(user.FsConfig.AzBlobConfig.AccountKey)
}
return *user
}
func addCredentialsToUser(user *User) error {
if user.FsConfig.Provider != GCSFilesystemProvider {
return nil
@ -1343,7 +1378,7 @@ func addCredentialsToUser(user *User) error {
}
// Don't read from file if credentials have already been set
if len(user.FsConfig.GCSConfig.Credentials) > 0 {
if user.FsConfig.GCSConfig.Credentials.IsValid() {
return nil
}
@ -1351,8 +1386,7 @@ func addCredentialsToUser(user *User) error {
if err != nil {
return err
}
user.FsConfig.GCSConfig.Credentials = cred
return nil
return json.Unmarshal(cred, &user.FsConfig.GCSConfig.Credentials)
}
func getSSLMode() string {
@ -1396,12 +1430,18 @@ func startAvailabilityTimer() {
}()
}
func validateCredentialsDir(basePath string) error {
func validateCredentialsDir(basePath string, preferDbCredentials bool) error {
if filepath.IsAbs(config.CredentialsPath) {
credentialsDirPath = config.CredentialsPath
} else {
credentialsDirPath = filepath.Join(basePath, config.CredentialsPath)
}
// if we want to store credentials inside the database just stop here
// we just populate credentialsDirPath to be able to use existing users
// with credential files
if preferDbCredentials {
return nil
}
fi, err := os.Stat(credentialsDirPath)
if err == nil {
if !fi.IsDir() {
@ -2013,7 +2053,7 @@ func executeAction(operation string, user User) {
q := url.Query()
q.Add("action", operation)
url.RawQuery = q.Encode()
HideUserSensitiveData(&user)
user.HideConfidentialData()
userAsJSON, err := json.Marshal(user)
if err != nil {
return

View file

@ -1,7 +1,6 @@
package dataprovider
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
@ -300,7 +299,8 @@ func (p MemoryProvider) getUsers(limit int, offset int, order string, username s
if offset == 0 {
user, err := p.userExistsInternal(username)
if err == nil {
users = append(users, HideUserSensitiveData(&user))
user.HideConfidentialData()
users = append(users, user)
}
}
return users, err
@ -313,7 +313,8 @@ func (p MemoryProvider) getUsers(limit int, offset int, order string, username s
continue
}
user := p.dbHandle.users[username]
users = append(users, HideUserSensitiveData(&user))
user.HideConfidentialData()
users = append(users, user)
if len(users) >= limit {
break
}
@ -326,7 +327,8 @@ func (p MemoryProvider) getUsers(limit int, offset int, order string, username s
}
username := p.dbHandle.usernames[i]
user := p.dbHandle.users[username]
users = append(users, HideUserSensitiveData(&user))
user.HideConfidentialData()
users = append(users, user)
if len(users) >= limit {
break
}
@ -624,8 +626,7 @@ func (p MemoryProvider) reloadConfig() error {
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err
}
var dump BackupData
err = json.Unmarshal(content, &dump)
dump, err := ParseDumpData(content)
if err != nil {
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err

View file

@ -210,28 +210,46 @@ func (p MySQLProvider) migrateDatabase() error {
}
switch dbVersion.Version {
case 1:
err = updateMySQLDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updateMySQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateMySQLDatabaseFrom3To4(p.dbHandle)
return updateMySQLDatabaseFromV1(p.dbHandle)
case 2:
err = updateMySQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateMySQLDatabaseFrom3To4(p.dbHandle)
return updateMySQLDatabaseFromV2(p.dbHandle)
case 3:
return updateMySQLDatabaseFrom3To4(p.dbHandle)
return updateMySQLDatabaseFromV3(p.dbHandle)
case 4:
return updateMySQLDatabaseFromV4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
// updateMySQLDatabaseFromV1 migrates a version 1 MySQL database, chaining
// every intermediate schema upgrade up to the latest version.
func updateMySQLDatabaseFromV1(dbHandle *sql.DB) error {
	if err := updateMySQLDatabaseFrom1To2(dbHandle); err != nil {
		return err
	}
	return updateMySQLDatabaseFromV2(dbHandle)
}

// updateMySQLDatabaseFromV2 migrates a version 2 MySQL database, chaining
// every intermediate schema upgrade up to the latest version.
func updateMySQLDatabaseFromV2(dbHandle *sql.DB) error {
	if err := updateMySQLDatabaseFrom2To3(dbHandle); err != nil {
		return err
	}
	return updateMySQLDatabaseFromV3(dbHandle)
}

// updateMySQLDatabaseFromV3 migrates a version 3 MySQL database, chaining
// every intermediate schema upgrade up to the latest version.
func updateMySQLDatabaseFromV3(dbHandle *sql.DB) error {
	if err := updateMySQLDatabaseFrom3To4(dbHandle); err != nil {
		return err
	}
	return updateMySQLDatabaseFromV4(dbHandle)
}

// updateMySQLDatabaseFromV4 migrates a version 4 MySQL database to the
// latest version.
func updateMySQLDatabaseFromV4(dbHandle *sql.DB) error {
	return updateMySQLDatabaseFrom4To5(dbHandle)
}
func updateMySQLDatabaseFrom1To2(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 1 -> 2")
providerLog(logger.LevelInfo, "updating database version: 1 -> 2")
@ -249,3 +267,7 @@ func updateMySQLDatabaseFrom2To3(dbHandle *sql.DB) error {
func updateMySQLDatabaseFrom3To4(dbHandle *sql.DB) error {
return sqlCommonUpdateDatabaseFrom3To4(mysqlV4SQL, dbHandle)
}
// updateMySQLDatabaseFrom4To5 upgrades the MySQL schema from version 4 to
// version 5 using the provider-agnostic SQL migration.
func updateMySQLDatabaseFrom4To5(dbHandle *sql.DB) error {
	return sqlCommonUpdateDatabaseFrom4To5(dbHandle)
}

View file

@ -209,28 +209,46 @@ func (p PGSQLProvider) migrateDatabase() error {
}
switch dbVersion.Version {
case 1:
err = updatePGSQLDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updatePGSQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updatePGSQLDatabaseFrom3To4(p.dbHandle)
return updatePGSQLDatabaseFromV1(p.dbHandle)
case 2:
err = updatePGSQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updatePGSQLDatabaseFrom3To4(p.dbHandle)
return updatePGSQLDatabaseFromV2(p.dbHandle)
case 3:
return updatePGSQLDatabaseFrom3To4(p.dbHandle)
return updatePGSQLDatabaseFromV3(p.dbHandle)
case 4:
return updatePGSQLDatabaseFromV4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
// updatePGSQLDatabaseFromV1 migrates a version 1 PostgreSQL database,
// chaining every intermediate schema upgrade up to the latest version.
func updatePGSQLDatabaseFromV1(dbHandle *sql.DB) error {
	if err := updatePGSQLDatabaseFrom1To2(dbHandle); err != nil {
		return err
	}
	return updatePGSQLDatabaseFromV2(dbHandle)
}

// updatePGSQLDatabaseFromV2 migrates a version 2 PostgreSQL database,
// chaining every intermediate schema upgrade up to the latest version.
func updatePGSQLDatabaseFromV2(dbHandle *sql.DB) error {
	if err := updatePGSQLDatabaseFrom2To3(dbHandle); err != nil {
		return err
	}
	return updatePGSQLDatabaseFromV3(dbHandle)
}

// updatePGSQLDatabaseFromV3 migrates a version 3 PostgreSQL database,
// chaining every intermediate schema upgrade up to the latest version.
func updatePGSQLDatabaseFromV3(dbHandle *sql.DB) error {
	if err := updatePGSQLDatabaseFrom3To4(dbHandle); err != nil {
		return err
	}
	return updatePGSQLDatabaseFromV4(dbHandle)
}

// updatePGSQLDatabaseFromV4 migrates a version 4 PostgreSQL database to
// the latest version.
func updatePGSQLDatabaseFromV4(dbHandle *sql.DB) error {
	return updatePGSQLDatabaseFrom4To5(dbHandle)
}
func updatePGSQLDatabaseFrom1To2(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 1 -> 2")
providerLog(logger.LevelInfo, "updating database version: 1 -> 2")
@ -248,3 +266,7 @@ func updatePGSQLDatabaseFrom2To3(dbHandle *sql.DB) error {
func updatePGSQLDatabaseFrom3To4(dbHandle *sql.DB) error {
return sqlCommonUpdateDatabaseFrom3To4(pgsqlV4SQL, dbHandle)
}
// updatePGSQLDatabaseFrom4To5 upgrades the PostgreSQL schema from version
// 4 to version 5 using the provider-agnostic SQL migration.
func updatePGSQLDatabaseFrom4To5(dbHandle *sql.DB) error {
	return sqlCommonUpdateDatabaseFrom4To5(dbHandle)
}

View file

@ -14,7 +14,7 @@ import (
)
const (
sqlDatabaseVersion = 4
sqlDatabaseVersion = 5
initialDBVersionSQL = "INSERT INTO {{schema_version}} (version) VALUES (1);"
defaultSQLQueryTimeout = 10 * time.Second
longSQLQueryTimeout = 60 * time.Second
@ -354,7 +354,8 @@ func sqlCommonGetUsers(limit int, offset int, order string, username string, dbH
if err != nil {
return users, err
}
users = append(users, HideUserSensitiveData(&u))
u.HideConfidentialData()
users = append(users, u)
}
}
err = rows.Err()
@ -940,3 +941,88 @@ func sqlCommonUpdateDatabaseFrom3To4(sqlV4 string, dbHandle *sql.DB) error {
}
return err
}
// sqlCommonUpdateDatabaseFrom4To5 migrates a SQL database from schema
// version 4 to version 5: the filesystem configuration of every user is
// decoded using the v4 compatibility layout, converted to the new format
// (credentials wrapped in vfs.Secret) and written back, then the schema
// version is bumped to 5.
func sqlCommonUpdateDatabaseFrom4To5(dbHandle *sql.DB) error {
	logger.InfoToConsole("updating database version: 4 -> 5")
	providerLog(logger.LevelInfo, "updating database version: 4 -> 5")
	ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
	defer cancel()
	q := getCompatV4FsConfigQuery()
	stmt, err := dbHandle.PrepareContext(ctx, q)
	if err != nil {
		providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
		return err
	}
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx)
	if err != nil {
		return err
	}
	defer rows.Close()
	// collect the converted users first, the updates are executed after
	// the result set is fully consumed
	users := []User{}
	for rows.Next() {
		var compatUser compatUserV4
		var fsConfigString sql.NullString
		err = rows.Scan(&compatUser.ID, &compatUser.Username, &fsConfigString)
		if err != nil {
			return err
		}
		if fsConfigString.Valid {
			err = json.Unmarshal([]byte(fsConfigString.String), &compatUser.FsConfig)
			if err != nil {
				// a record that does not match the v4 layout is skipped:
				// it was probably migrated already
				logger.WarnToConsole("failed to unmarshal v4 user %#v, is it already migrated?", compatUser.Username)
				continue
			}
			fsConfig, err := convertFsConfigFromV4(compatUser.FsConfig, compatUser.Username)
			if err != nil {
				return err
			}
			users = append(users, createUserFromV4(compatUser, fsConfig))
		}
	}
	if err := rows.Err(); err != nil {
		return err
	}
	for _, user := range users {
		err = sqlCommonUpdateV4User(dbHandle, user)
		if err != nil {
			return err
		}
		providerLog(logger.LevelInfo, "filesystem config updated for user %#v", user.Username)
	}
	// the version update runs with its own, shorter timeout
	ctxVersion, cancelVersion := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
	defer cancelVersion()
	return sqlCommonUpdateDatabaseVersion(ctxVersion, dbHandle, 5)
}
// sqlCommonUpdateV4User re-validates the filesystem configuration of the
// given migrated user (encrypting any plain secret), stores the GCS
// credentials if required and persists the new filesystem configuration
// JSON for the user's row.
func sqlCommonUpdateV4User(dbHandle *sql.DB, user User) error {
	err := validateFilesystemConfig(&user)
	if err != nil {
		return err
	}
	err = saveGCSCredentials(&user)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
	defer cancel()
	q := updateCompatV4FsConfigQuery()
	stmt, err := dbHandle.PrepareContext(ctx, q)
	if err != nil {
		providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
		return err
	}
	defer stmt.Close()
	fsConfig, err := user.GetFsConfigAsJSON()
	if err != nil {
		return err
	}
	_, err = stmt.ExecContext(ctx, string(fsConfig), user.ID)
	return err
}

View file

@ -232,28 +232,46 @@ func (p SQLiteProvider) migrateDatabase() error {
}
switch dbVersion.Version {
case 1:
err = updateSQLiteDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updateSQLiteDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateSQLiteDatabaseFrom3To4(p.dbHandle)
return updateSQLiteDatabaseFromV1(p.dbHandle)
case 2:
err = updateSQLiteDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateSQLiteDatabaseFrom3To4(p.dbHandle)
return updateSQLiteDatabaseFromV2(p.dbHandle)
case 3:
return updateSQLiteDatabaseFrom3To4(p.dbHandle)
return updateSQLiteDatabaseFromV3(p.dbHandle)
case 4:
return updateSQLiteDatabaseFromV4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
// updateSQLiteDatabaseFromV1 upgrades a version 1 database to the latest
// schema: it applies the 1 -> 2 step and then chains the remaining
// migrations starting from version 2.
func updateSQLiteDatabaseFromV1(dbHandle *sql.DB) error {
	if err := updateSQLiteDatabaseFrom1To2(dbHandle); err != nil {
		return err
	}
	return updateSQLiteDatabaseFromV2(dbHandle)
}
// updateSQLiteDatabaseFromV2 upgrades a version 2 database to the latest
// schema: it applies the 2 -> 3 step and then chains the remaining
// migrations starting from version 3.
func updateSQLiteDatabaseFromV2(dbHandle *sql.DB) error {
	if err := updateSQLiteDatabaseFrom2To3(dbHandle); err != nil {
		return err
	}
	return updateSQLiteDatabaseFromV3(dbHandle)
}
// updateSQLiteDatabaseFromV3 upgrades a version 3 database to the latest
// schema: it applies the 3 -> 4 step and then chains the remaining
// migrations starting from version 4.
func updateSQLiteDatabaseFromV3(dbHandle *sql.DB) error {
	if err := updateSQLiteDatabaseFrom3To4(dbHandle); err != nil {
		return err
	}
	return updateSQLiteDatabaseFromV4(dbHandle)
}
// updateSQLiteDatabaseFromV4 upgrades a version 4 database to the latest
// schema; currently this is just the 4 -> 5 step.
func updateSQLiteDatabaseFromV4(dbHandle *sql.DB) error {
	return updateSQLiteDatabaseFrom4To5(dbHandle)
}
func updateSQLiteDatabaseFrom1To2(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 1 -> 2")
providerLog(logger.LevelInfo, "updating database version: 1 -> 2")
@ -271,3 +289,7 @@ func updateSQLiteDatabaseFrom2To3(dbHandle *sql.DB) error {
// updateSQLiteDatabaseFrom3To4 applies the 3 -> 4 schema migration using the
// SQLite specific statements and the shared SQL migration logic.
func updateSQLiteDatabaseFrom3To4(dbHandle *sql.DB) error {
	return sqlCommonUpdateDatabaseFrom3To4(sqliteV4SQL, dbHandle)
}
// updateSQLiteDatabaseFrom4To5 applies the 4 -> 5 schema migration which
// converts the stored filesystem configs to the new encrypted secret format.
func updateSQLiteDatabaseFrom4To5(dbHandle *sql.DB) error {
	return sqlCommonUpdateDatabaseFrom4To5(dbHandle)
}

View file

@ -184,3 +184,11 @@ func getUpdateDBVersionQuery() string {
// getCompatVirtualFoldersQuery returns the query used to read the legacy
// virtual folders column for all users during migration.
func getCompatVirtualFoldersQuery() string {
	return fmt.Sprintf(`SELECT id,username,virtual_folders FROM %v`, sqlTableUsers)
}
// getCompatV4FsConfigQuery returns the query used to read the v4 filesystem
// configs for all users, so they can be converted to the new format.
func getCompatV4FsConfigQuery() string {
	return fmt.Sprintf(`SELECT id,username,filesystem FROM %v`, sqlTableUsers)
}
// updateCompatV4FsConfigQuery returns the query used to write the converted
// filesystem config back for a single user, identified by its database ID.
func updateCompatV4FsConfigQuery() string {
	return fmt.Sprintf(`UPDATE %v SET filesystem=%v WHERE id=%v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1])
}

View file

@ -230,6 +230,19 @@ func (u *User) GetFilesystem(connectionID string) (vfs.Fs, error) {
return vfs.NewOsFs(connectionID, u.GetHomeDir(), u.VirtualFolders), nil
}
// HideConfidentialData clears the user password and redacts the secret
// stored in the filesystem configuration of the active provider, so the
// user can be safely serialized in API responses.
func (u *User) HideConfidentialData() {
	u.Password = ""
	switch {
	case u.FsConfig.Provider == S3FilesystemProvider:
		u.FsConfig.S3Config.AccessSecret.Hide()
	case u.FsConfig.Provider == GCSFilesystemProvider:
		u.FsConfig.GCSConfig.Credentials.Hide()
	case u.FsConfig.Provider == AzureBlobFilesystemProvider:
		u.FsConfig.AzBlobConfig.AccountKey.Hide()
	}
}
// GetPermissionsForPath returns the permissions for the given path.
// The path must be an SFTP path
func (u *User) GetPermissionsForPath(p string) []string {
@ -809,6 +822,7 @@ func (u *User) getACopy() User {
UploadPartSize: u.FsConfig.AzBlobConfig.UploadPartSize,
UploadConcurrency: u.FsConfig.AzBlobConfig.UploadConcurrency,
UseEmulator: u.FsConfig.AzBlobConfig.UseEmulator,
AccessTier: u.FsConfig.AzBlobConfig.AccessTier,
},
}

View file

@ -79,15 +79,24 @@ Please take a look [here](../docs/full-configuration.md#environment-variables) t
Alternately you can mount your custom configuration file to `/var/lib/sftpgo` or `/var/lib/sftpgo/.config/sftpgo`.
### Loading initial data
Initial data can be loaded in the following ways:
- via the `--loaddata-from` flag or the `SFTPGO_LOADDATA_FROM` environment variable
- by providing a dump file to the memory provider
Please take a look [here](../docs/full-configuration.md) for more details.
### Running as an arbitrary user
The SFTPGo image runs using `1000` as UID/GID by default. If you know the permissions of your data and/or configuration directory are already set appropriately, or you need to run SFTPGo with a specific UID/GID, it is possible to invoke this image with `--user` set to any value (other than `root/0`) in order to achieve the desired access/configuration:
```shell
$ ls -lnd data
drwxr-xr-x 2 1100 11000 6 6 nov 09.09 data
drwxr-xr-x 2 1100 1100 6 7 nov 09.09 data
$ ls -lnd config
drwxr-xr-x 2 1100 11000 6 6 nov 09.19 config
drwxr-xr-x 2 1100 1100 6 7 nov 09.19 config
```
With the above directory permissions, you can start a SFTPGo instance like this:

View file

@ -115,7 +115,7 @@ The configuration file contains the following sections:
- `max_size`, integer. Maximum number of users to cache. 0 means unlimited. Default: 50.
- **"data_provider"**, the configuration for the data provider
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`, `memory`
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the users dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file so quota usage and last login will not be persisted
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the provider dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file so quota usage and last login will not be persisted
- `host`, string. Database host. Leave empty for drivers `sqlite`, `bolt` and `memory`
- `port`, integer. Database port. Leave empty for drivers `sqlite`, `bolt` and `memory`
- `username`, string. Database user. Leave empty for drivers `sqlite`, `bolt` and `memory`

View file

@ -58,7 +58,10 @@ Output:
"provider": 1,
"s3config": {
"access_key": "accesskey",
"access_secret": "$aes$6c088ba12b0b261247c8cf331c46d9260b8e58002957d89ad1c0495e3af665cd0227",
"access_secret": {
"payload": "ac46cec75466ba77e47f536436783b729ca5bbbb53252fda0de51f785a6da11ffb03",
"status": "AES-256-GCM"
},
"bucket": "test",
"endpoint": "http://127.0.0.1:9000",
"key_prefix": "vfolder/",

View file

@ -238,23 +238,30 @@ class SFTPGoApiRequests:
az_upload_concurrency, az_key_prefix, az_use_emulator, az_access_tier):
fs_config = {'provider':0}
if fs_provider == 'S3':
secret = {}
if s3_access_secret:
secret.update({"status":"Plain", "payload":s3_access_secret})
s3config = {'bucket':s3_bucket, 'region':s3_region, 'access_key':s3_access_key, 'access_secret':
s3_access_secret, 'endpoint':s3_endpoint, 'storage_class':s3_storage_class, 'key_prefix':
secret, 'endpoint':s3_endpoint, 'storage_class':s3_storage_class, 'key_prefix':
s3_key_prefix, 'upload_part_size':s3_upload_part_size, 'upload_concurrency':s3_upload_concurrency}
fs_config.update({'provider':1, 's3config':s3config})
elif fs_provider == 'GCS':
gcsconfig = {'bucket':gcs_bucket, 'key_prefix':gcs_key_prefix, 'storage_class':gcs_storage_class}
gcsconfig = {'bucket':gcs_bucket, 'key_prefix':gcs_key_prefix, 'storage_class':gcs_storage_class,
'credentials':{}}
if gcs_automatic_credentials == "automatic":
gcsconfig.update({'automatic_credentials':1})
else:
gcsconfig.update({'automatic_credentials':0})
if gcs_credentials_file:
with open(gcs_credentials_file) as creds:
gcsconfig.update({'credentials':base64.b64encode(creds.read().encode('UTF-8')).decode('UTF-8'),
'automatic_credentials':0})
secret = {"status":"Plain", "payload":creds.read()}
gcsconfig.update({'credentials':secret, 'automatic_credentials':0})
fs_config.update({'provider':2, 'gcsconfig':gcsconfig})
elif fs_provider == "AzureBlob":
azureconfig = {'container':az_container, 'account_name':az_account_name, 'account_key':az_account_key,
secret = {}
if az_account_key:
secret.update({"status":"Plain", "payload":az_account_key})
azureconfig = {'container':az_container, 'account_name':az_account_name, 'account_key':secret,
'sas_url':az_sas_url, 'endpoint':az_endpoint, 'upload_part_size':az_upload_part_size,
'upload_concurrency':az_upload_concurrency, 'key_prefix':az_key_prefix, 'use_emulator':
az_use_emulator, 'access_tier':az_access_tier}
@ -609,7 +616,7 @@ def addCommonUserArguments(parser):
help='Denied IP/Mask in CIDR notation. For example "192.168.2.0/24" or "2001:db8::/32". Default: %(default)s')
parser.add_argument('--denied-patterns', type=str, nargs='*', default=[], help='Denied file patterns case insensitive. '
+'The format is /dir::pattern1,pattern2. For example: "/somedir::*.jpg,*.png" "/otherdir/subdir::a*b?.zip,*.rar". ' +
'You have to set both denied and allowed patterns to update existing values or none to preserve them.' +
' You have to set both denied and allowed patterns to update existing values or none to preserve them.' +
' If you only set allowed or denied patterns the missing one is assumed to be an empty list. Default: %(default)s')
parser.add_argument('--allowed-patterns', type=str, nargs='*', default=[], help='Allowed file patterns case insensitive. '
+'The format is /dir::pattern1,pattern2. For example: "/somedir::*.jpg,a*b?.png" "/otherdir/subdir::*.zip,*.rar". ' +

View file

@ -876,7 +876,10 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = []byte(`{ "type": "service_account" }`)
u.FsConfig.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: `{ "type": "service_account" }`,
}
providerConf := config.GetProviderConf()
providerConf.PreferDatabaseCredentials = true
@ -897,9 +900,12 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.GCSConfig.Credentials.Status)
assert.NotEmpty(t, user.FsConfig.GCSConfig.Credentials.Payload)
assert.Empty(t, user.FsConfig.GCSConfig.Credentials.AdditionalData)
assert.Empty(t, user.FsConfig.GCSConfig.Credentials.Key)
_, err = os.Stat(credentialsFile)
assert.Error(t, err)
assert.NoFileExists(t, credentialsFile)
client, err := getFTPClient(user, false)
if assert.NoError(t, err) {
@ -922,7 +928,10 @@ func TestLoginInvalidFs(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = []byte("invalid JSON for credentials")
u.FsConfig.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: "invalid JSON for credentials",
}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)

View file

@ -96,8 +96,7 @@ func loadData(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, err, "", getRespStatus(err))
return
}
var dump dataprovider.BackupData
err = json.Unmarshal(content, &dump)
dump, err := dataprovider.ParseDumpData(content)
if err != nil {
sendAPIResponse(w, r, err, fmt.Sprintf("Unable to parse input file: %#v", inputFile), http.StatusBadRequest)
return

View file

@ -11,7 +11,7 @@ import (
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)
func getUsers(w http.ResponseWriter, r *http.Request) {
@ -67,7 +67,8 @@ func getUserByID(w http.ResponseWriter, r *http.Request) {
}
user, err := dataprovider.GetUserByID(userID)
if err == nil {
render.JSON(w, r, dataprovider.HideUserSensitiveData(&user))
user.HideConfidentialData()
render.JSON(w, r, user)
} else {
sendAPIResponse(w, r, err, "", getRespStatus(err))
}
@ -81,11 +82,29 @@ func addUser(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
return
}
switch user.FsConfig.Provider {
case dataprovider.S3FilesystemProvider:
if user.FsConfig.S3Config.AccessSecret.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid access_secret"), "", http.StatusBadRequest)
return
}
case dataprovider.GCSFilesystemProvider:
if user.FsConfig.GCSConfig.Credentials.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid credentials"), "", http.StatusBadRequest)
return
}
case dataprovider.AzureBlobFilesystemProvider:
if user.FsConfig.AzBlobConfig.AccountKey.IsRedacted() {
sendAPIResponse(w, r, errors.New("invalid account_key"), "", http.StatusBadRequest)
return
}
}
err = dataprovider.AddUser(user)
if err == nil {
user, err = dataprovider.UserExists(user.Username)
if err == nil {
render.JSON(w, r, dataprovider.HideUserSensitiveData(&user))
user.HideConfidentialData()
render.JSON(w, r, user)
} else {
sendAPIResponse(w, r, err, "", getRespStatus(err))
}
@ -117,15 +136,22 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
return
}
currentPermissions := user.Permissions
currentS3AccessSecret := ""
currentAzAccountKey := ""
var currentS3AccessSecret vfs.Secret
var currentAzAccountKey vfs.Secret
var currentGCSCredentials vfs.Secret
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
currentS3AccessSecret = user.FsConfig.S3Config.AccessSecret
}
if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider {
currentAzAccountKey = user.FsConfig.AzBlobConfig.AccountKey
}
if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
currentGCSCredentials = user.FsConfig.GCSConfig.Credentials
}
user.Permissions = make(map[string][]string)
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
err = render.DecodeJSON(r.Body, &user)
if err != nil {
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
@ -135,7 +161,7 @@ func updateUser(w http.ResponseWriter, r *http.Request) {
if len(user.Permissions) == 0 {
user.Permissions = currentPermissions
}
updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey)
updateEncryptedSecrets(&user, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials)
if user.ID != userID {
sendAPIResponse(w, r, err, "user ID in request body does not match user ID in path parameter", http.StatusBadRequest)
@ -181,18 +207,21 @@ func disconnectUser(username string) {
}
}
func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, currentAzAccountKey string) {
// we use the new access secret if different from the old one and not empty
func updateEncryptedSecrets(user *dataprovider.User, currentS3AccessSecret, currentAzAccountKey, currentGCSCredentials vfs.Secret) {
// we use the new access secret if plain or empty, otherwise the old value
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
if utils.RemoveDecryptionKey(currentS3AccessSecret) == user.FsConfig.S3Config.AccessSecret ||
(user.FsConfig.S3Config.AccessSecret == "" && user.FsConfig.S3Config.AccessKey != "") {
if !user.FsConfig.S3Config.AccessSecret.IsPlain() && !user.FsConfig.S3Config.AccessSecret.IsEmpty() {
user.FsConfig.S3Config.AccessSecret = currentS3AccessSecret
}
}
if user.FsConfig.Provider == dataprovider.AzureBlobFilesystemProvider {
if utils.RemoveDecryptionKey(currentAzAccountKey) == user.FsConfig.AzBlobConfig.AccountKey ||
(user.FsConfig.AzBlobConfig.AccountKey == "" && user.FsConfig.AzBlobConfig.AccountName != "") {
if !user.FsConfig.AzBlobConfig.AccountKey.IsPlain() && !user.FsConfig.AzBlobConfig.AccountKey.IsEmpty() {
user.FsConfig.AzBlobConfig.AccountKey = currentAzAccountKey
}
}
if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
if !user.FsConfig.GCSConfig.Credentials.IsPlain() && !user.FsConfig.GCSConfig.Credentials.IsEmpty() {
user.FsConfig.GCSConfig.Credentials = currentGCSCredentials
}
}
}

View file

@ -707,28 +707,19 @@ func compareAzBlobConfig(expected *dataprovider.User, actual *dataprovider.User)
return nil
}
func checkEncryptedSecret(expectedAccessSecret, actualAccessSecret string) error {
if len(expectedAccessSecret) > 0 {
vals := strings.Split(expectedAccessSecret, "$")
if strings.HasPrefix(expectedAccessSecret, "$aes$") && len(vals) == 4 {
expectedAccessSecret = utils.RemoveDecryptionKey(expectedAccessSecret)
if expectedAccessSecret != actualAccessSecret {
return fmt.Errorf("secret mismatch, expected: %v", expectedAccessSecret)
}
} else {
// here we check that actualAccessSecret is aes encrypted without the nonce
parts := strings.Split(actualAccessSecret, "$")
if !strings.HasPrefix(actualAccessSecret, "$aes$") || len(parts) != 3 {
return errors.New("invalid secret")
}
if len(parts) == len(vals) {
if expectedAccessSecret != actualAccessSecret {
return errors.New("encrypted secret mismatch")
}
}
func checkEncryptedSecret(expected, actual vfs.Secret) error {
if expected.IsPlain() && actual.IsEncrypted() {
if actual.Payload == "" {
return errors.New("invalid secret payload")
}
if actual.AdditionalData != "" {
return errors.New("invalid secret additional data")
}
if actual.Key != "" {
return errors.New("invalid secret key")
}
} else {
if expectedAccessSecret != actualAccessSecret {
if expected.Status != actual.Status || expected.Payload != actual.Payload {
return errors.New("secret mismatch")
}
}

View file

@ -26,6 +26,7 @@ import (
_ "github.com/mattn/go-sqlite3"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/config"
@ -190,7 +191,7 @@ func TestMain(m *testing.M) {
defer testServer.Close()
exitCode := m.Run()
os.Remove(logfilePath) //nolint:errcheck
//os.Remove(logfilePath) //nolint:errcheck
os.RemoveAll(backupsPath) //nolint:errcheck
os.RemoveAll(credentialsPath) //nolint:errcheck
os.Remove(certPath) //nolint:errcheck
@ -438,12 +439,16 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
u.FsConfig.S3Config.Bucket = "testbucket"
u.FsConfig.S3Config.Region = "eu-west-1"
u.FsConfig.S3Config.AccessKey = "access-key"
u.FsConfig.S3Config.AccessSecret = "access-secret"
u.FsConfig.S3Config.AccessSecret.Payload = "access-secret"
u.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusRedacted
u.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000/path?a=b"
u.FsConfig.S3Config.StorageClass = "Standard" //nolint:goconst
u.FsConfig.S3Config.KeyPrefix = "/adir/subdir/"
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.S3Config.KeyPrefix = ""
u.FsConfig.S3Config.UploadPartSize = 3
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
@ -463,16 +468,20 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
u.FsConfig.GCSConfig.Bucket = "abucket"
u.FsConfig.GCSConfig.StorageClass = "Standard"
u.FsConfig.GCSConfig.KeyPrefix = "/somedir/subdir/"
u.FsConfig.GCSConfig.Credentials = []byte("test")
u.FsConfig.GCSConfig.Credentials.Payload = "test" //nolint:goconst
u.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusRedacted
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusPlain
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.GCSConfig.KeyPrefix = "somedir/subdir/" //nolint:goconst
u.FsConfig.GCSConfig.Credentials = nil
u.FsConfig.GCSConfig.Credentials = vfs.Secret{}
u.FsConfig.GCSConfig.AutomaticCredentials = 0
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.GCSConfig.Credentials = invalidBase64{}
u.FsConfig.GCSConfig.Credentials.Payload = "invalid"
u.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusAES256GCM
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
@ -488,10 +497,14 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
u.FsConfig.AzBlobConfig.Container = "container"
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.AzBlobConfig.AccountKey = "key"
u.FsConfig.AzBlobConfig.AccountKey.Payload = "key"
u.FsConfig.AzBlobConfig.AccountKey.Status = vfs.SecretStatusRedacted
u.FsConfig.AzBlobConfig.KeyPrefix = "/amedir/subdir/"
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.AzBlobConfig.AccountKey.Status = vfs.SecretStatusPlain
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
assert.NoError(t, err)
u.FsConfig.AzBlobConfig.KeyPrefix = "amedir/subdir/"
u.FsConfig.AzBlobConfig.UploadPartSize = -1
_, _, err = httpd.AddUser(u, http.StatusBadRequest)
@ -1000,19 +1013,35 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.Bucket = "test" //nolint:goconst
user.FsConfig.S3Config.Region = "us-east-1" //nolint:goconst
user.FsConfig.S3Config.AccessKey = "Server-Access-Key"
user.FsConfig.S3Config.AccessSecret = "Server-Access-Secret"
user.FsConfig.S3Config.AccessSecret.Payload = "Server-Access-Secret"
user.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
user.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000"
user.FsConfig.S3Config.UploadPartSize = 8
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user, body, err := httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err, string(body))
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.S3Config.AccessSecret.Status)
assert.NotEmpty(t, user.FsConfig.S3Config.AccessSecret.Payload)
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.AdditionalData)
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.Key)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
user.Password = defaultPassword
user.ID = 0
secret, _ := utils.EncryptData("Server-Access-Secret")
secret := vfs.Secret{
Payload: "Server-Access-Secret",
Status: vfs.SecretStatusAES256GCM,
}
user.FsConfig.S3Config.AccessSecret = secret
_, _, err = httpd.AddUser(user, http.StatusOK)
assert.Error(t, err)
user.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
user, _, err = httpd.AddUser(user, http.StatusOK)
assert.NoError(t, err)
initialSecretPayload := user.FsConfig.S3Config.AccessSecret.Payload
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.S3Config.AccessSecret.Status)
assert.NotEmpty(t, initialSecretPayload)
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.AdditionalData)
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.Key)
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "test-bucket"
user.FsConfig.S3Config.Region = "us-east-1" //nolint:goconst
@ -1022,29 +1051,31 @@ func TestUserS3Config(t *testing.T) {
user.FsConfig.S3Config.UploadConcurrency = 5
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user.FsConfig.Provider = dataprovider.LocalFilesystemProvider
user.FsConfig.S3Config.Bucket = ""
user.FsConfig.S3Config.Region = ""
user.FsConfig.S3Config.AccessKey = ""
user.FsConfig.S3Config.AccessSecret = ""
user.FsConfig.S3Config.Endpoint = ""
user.FsConfig.S3Config.KeyPrefix = ""
user.FsConfig.S3Config.UploadPartSize = 0
user.FsConfig.S3Config.UploadConcurrency = 0
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.S3Config.AccessSecret.Status)
assert.Equal(t, initialSecretPayload, user.FsConfig.S3Config.AccessSecret.Payload)
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.AdditionalData)
assert.Empty(t, user.FsConfig.S3Config.AccessSecret.Key)
// test user without access key and access secret (shared config state)
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "testbucket"
user.FsConfig.S3Config.Region = "us-east-1"
user.FsConfig.S3Config.AccessKey = ""
user.FsConfig.S3Config.AccessSecret = ""
user.FsConfig.S3Config.AccessSecret = vfs.Secret{}
user.FsConfig.S3Config.Endpoint = ""
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
user.FsConfig.S3Config.UploadPartSize = 6
user.FsConfig.S3Config.UploadConcurrency = 4
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
user, body, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err, string(body))
assert.True(t, user.FsConfig.S3Config.AccessSecret.IsEmpty())
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
user.Password = defaultPassword
user.ID = 0
// shared credential test for add instead of update
user, _, err = httpd.AddUser(user, http.StatusOK)
assert.NoError(t, err)
assert.True(t, user.FsConfig.S3Config.AccessSecret.IsEmpty())
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
}
@ -1058,36 +1089,69 @@ func TestUserGCSConfig(t *testing.T) {
assert.NoError(t, err)
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
user.FsConfig.GCSConfig.Bucket = "test"
user.FsConfig.GCSConfig.Credentials = []byte("fake credentials")
user.FsConfig.GCSConfig.Credentials.Payload = "fake credentials" //nolint:goconst
user.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusPlain
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
credentialFile := filepath.Join(credentialsPath, fmt.Sprintf("%v_gcs_credentials.json", user.Username))
assert.FileExists(t, credentialFile)
creds, err := ioutil.ReadFile(credentialFile)
assert.NoError(t, err)
secret := &vfs.Secret{}
err = json.Unmarshal(creds, secret)
assert.NoError(t, err)
err = secret.Decrypt()
assert.NoError(t, err)
assert.Equal(t, "fake credentials", secret.Payload)
user.FsConfig.GCSConfig.Credentials.Payload = "fake encrypted credentials"
user.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusAES256GCM
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
assert.FileExists(t, credentialFile)
creds, err = ioutil.ReadFile(credentialFile)
assert.NoError(t, err)
secret = &vfs.Secret{}
err = json.Unmarshal(creds, secret)
assert.NoError(t, err)
err = secret.Decrypt()
assert.NoError(t, err)
assert.Equal(t, "fake credentials", secret.Payload)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
user.Password = defaultPassword
user.ID = 0
user.FsConfig.GCSConfig.Credentials = []byte("fake credentials")
user.FsConfig.GCSConfig.Credentials.Payload = "fake credentials"
user.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusAES256GCM
_, _, err = httpd.AddUser(user, http.StatusOK)
assert.Error(t, err)
user.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusPlain
user, body, err := httpd.AddUser(user, http.StatusOK)
assert.NoError(t, err, string(body))
err = os.RemoveAll(credentialsPath)
assert.NoError(t, err)
err = os.MkdirAll(credentialsPath, 0700)
assert.NoError(t, err)
user.FsConfig.GCSConfig.Credentials = nil
user.FsConfig.GCSConfig.Credentials = vfs.Secret{}
user.FsConfig.GCSConfig.AutomaticCredentials = 1
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
assert.NoFileExists(t, credentialFile)
user.FsConfig.GCSConfig = vfs.GCSFsConfig{}
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
user.FsConfig.S3Config.Bucket = "test1"
user.FsConfig.S3Config.Region = "us-east-1"
user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
user.FsConfig.S3Config.AccessSecret = "secret"
user.FsConfig.S3Config.AccessSecret.Payload = "secret"
user.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
user.FsConfig.S3Config.Endpoint = "http://localhost:9000"
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user.FsConfig.S3Config = vfs.S3FsConfig{}
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
user.FsConfig.GCSConfig.Bucket = "test1"
user.FsConfig.GCSConfig.Credentials = []byte("fake credentials")
user.FsConfig.GCSConfig.Credentials.Payload = "fake credentials"
user.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusPlain
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
@ -1101,42 +1165,248 @@ func TestUserAzureBlobConfig(t *testing.T) {
user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider
user.FsConfig.AzBlobConfig.Container = "test"
user.FsConfig.AzBlobConfig.AccountName = "Server-Account-Name"
user.FsConfig.AzBlobConfig.AccountKey = "Server-Account-Key"
user.FsConfig.AzBlobConfig.AccountKey.Payload = "Server-Account-Key"
user.FsConfig.AzBlobConfig.AccountKey.Status = vfs.SecretStatusPlain
user.FsConfig.AzBlobConfig.Endpoint = "http://127.0.0.1:9000"
user.FsConfig.AzBlobConfig.UploadPartSize = 8
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
initialPayload := user.FsConfig.AzBlobConfig.AccountKey.Payload
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.AzBlobConfig.AccountKey.Status)
assert.NotEmpty(t, initialPayload)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.Key)
user.FsConfig.AzBlobConfig.AccountKey.Status = vfs.SecretStatusAES256GCM
user.FsConfig.AzBlobConfig.AccountKey.AdditionalData = "data"
user.FsConfig.AzBlobConfig.AccountKey.Key = "fake key"
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.AzBlobConfig.AccountKey.Status)
assert.Equal(t, initialPayload, user.FsConfig.AzBlobConfig.AccountKey.Payload)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.Key)
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
user.Password = defaultPassword
user.ID = 0
secret, _ := utils.EncryptData("Server-Account-Key")
secret := vfs.Secret{
Payload: "Server-Account-Key",
Status: vfs.SecretStatusAES256GCM,
}
user.FsConfig.AzBlobConfig.AccountKey = secret
_, _, err = httpd.AddUser(user, http.StatusOK)
assert.Error(t, err)
user.FsConfig.AzBlobConfig.AccountKey = vfs.Secret{
Payload: "Server-Account-Key-Test",
Status: vfs.SecretStatusPlain,
}
user, _, err = httpd.AddUser(user, http.StatusOK)
assert.NoError(t, err)
initialPayload = user.FsConfig.AzBlobConfig.AccountKey.Payload
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.AzBlobConfig.AccountKey.Status)
assert.NotEmpty(t, initialPayload)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.Key)
user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider
user.FsConfig.AzBlobConfig.Container = "test-container"
user.FsConfig.AzBlobConfig.AccountKey = "Server-Account-Key1"
user.FsConfig.AzBlobConfig.Endpoint = "http://localhost:9001"
user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir"
user.FsConfig.AzBlobConfig.UploadConcurrency = 5
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
user.FsConfig.Provider = dataprovider.LocalFilesystemProvider
user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{}
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.AzBlobConfig.AccountKey.Status)
assert.NotEmpty(t, initialPayload)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
assert.Empty(t, user.FsConfig.AzBlobConfig.AccountKey.Key)
// test user without access key and access secret (sas)
user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider
user.FsConfig.AzBlobConfig.SASURL = "https://myaccount.blob.core.windows.net/pictures/profile.jpg?sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d"
user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir"
user.FsConfig.AzBlobConfig.AccountName = ""
user.FsConfig.AzBlobConfig.AccountKey = vfs.Secret{}
user.FsConfig.AzBlobConfig.UploadPartSize = 6
user.FsConfig.AzBlobConfig.UploadConcurrency = 4
user, _, err = httpd.UpdateUser(user, http.StatusOK, "")
assert.NoError(t, err)
assert.True(t, user.FsConfig.AzBlobConfig.AccountKey.IsEmpty())
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
user.Password = defaultPassword
user.ID = 0
// sas test for add instead of update
user, _, err = httpd.AddUser(user, http.StatusOK)
assert.NoError(t, err)
assert.True(t, user.FsConfig.AzBlobConfig.AccountKey.IsEmpty())
_, err = httpd.RemoveUser(user, http.StatusOK)
assert.NoError(t, err)
}
// TestUserHiddenFields verifies that sensitive fields (password, secret keys,
// additional data) are hidden in the HTTP API responses while the full
// encrypted values are still stored inside the data provider.
// It reinitializes the provider with PreferDatabaseCredentials=true so GCS
// credentials are kept in the database instead of on disk, and restores the
// original provider configuration at the end.
func TestUserHiddenFields(t *testing.T) {
	// reinitialize the data provider with database-stored credentials
	err := dataprovider.Close()
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	providerConf := config.GetProviderConf()
	providerConf.PreferDatabaseCredentials = true
	err = dataprovider.Initialize(providerConf, configDir)
	assert.NoError(t, err)
	// sensitive data must be hidden but not deleted from the dataprovider
	usernames := []string{"user1", "user2", "user3"}
	// user1: S3 backend with a plain text access secret
	u1 := getTestUser()
	u1.Username = usernames[0]
	u1.FsConfig.Provider = dataprovider.S3FilesystemProvider
	u1.FsConfig.S3Config.Bucket = "test"
	u1.FsConfig.S3Config.Region = "us-east-1"
	u1.FsConfig.S3Config.AccessKey = "S3-Access-Key"
	u1.FsConfig.S3Config.AccessSecret.Payload = "S3-Access-Secret"
	u1.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
	user1, _, err := httpd.AddUser(u1, http.StatusOK)
	assert.NoError(t, err)
	// user2: GCS backend with plain text credentials
	u2 := getTestUser()
	u2.Username = usernames[1]
	u2.FsConfig.Provider = dataprovider.GCSFilesystemProvider
	u2.FsConfig.GCSConfig.Bucket = "test"
	u2.FsConfig.GCSConfig.Credentials.Payload = "fake credentials"
	u2.FsConfig.GCSConfig.Credentials.Status = vfs.SecretStatusPlain
	user2, _, err := httpd.AddUser(u2, http.StatusOK)
	assert.NoError(t, err)
	// user3: Azure Blob backend with a plain text account key
	u3 := getTestUser()
	u3.Username = usernames[2]
	u3.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider
	u3.FsConfig.AzBlobConfig.Container = "test"
	u3.FsConfig.AzBlobConfig.AccountName = "Server-Account-Name"
	u3.FsConfig.AzBlobConfig.AccountKey.Payload = "Server-Account-Key"
	u3.FsConfig.AzBlobConfig.AccountKey.Status = vfs.SecretStatusPlain
	user3, _, err := httpd.AddUser(u3, http.StatusOK)
	assert.NoError(t, err)
	users, _, err := httpd.GetUsers(0, 0, "", http.StatusOK)
	assert.NoError(t, err)
	assert.GreaterOrEqual(t, len(users), 3)
	// passwords must never be returned by the search API
	for _, username := range usernames {
		users, _, err = httpd.GetUsers(0, 0, username, http.StatusOK)
		assert.NoError(t, err)
		if assert.Len(t, users, 1) {
			user := users[0]
			assert.Empty(t, user.Password)
		}
	}
	// the API must return the secret status/payload but hide Key and
	// AdditionalData, which are needed for decryption
	user1, _, err = httpd.GetUserByID(user1.ID, http.StatusOK)
	assert.NoError(t, err)
	assert.Empty(t, user1.Password)
	assert.Empty(t, user1.FsConfig.S3Config.AccessSecret.Key)
	assert.Empty(t, user1.FsConfig.S3Config.AccessSecret.AdditionalData)
	assert.NotEmpty(t, user1.FsConfig.S3Config.AccessSecret.Status)
	assert.NotEmpty(t, user1.FsConfig.S3Config.AccessSecret.Payload)
	user2, _, err = httpd.GetUserByID(user2.ID, http.StatusOK)
	assert.NoError(t, err)
	assert.Empty(t, user2.Password)
	assert.Empty(t, user2.FsConfig.GCSConfig.Credentials.Key)
	assert.Empty(t, user2.FsConfig.GCSConfig.Credentials.AdditionalData)
	assert.NotEmpty(t, user2.FsConfig.GCSConfig.Credentials.Status)
	assert.NotEmpty(t, user2.FsConfig.GCSConfig.Credentials.Payload)
	user3, _, err = httpd.GetUserByID(user3.ID, http.StatusOK)
	assert.NoError(t, err)
	assert.Empty(t, user3.Password)
	assert.Empty(t, user3.FsConfig.AzBlobConfig.AccountKey.Key)
	assert.Empty(t, user3.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
	assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.Status)
	assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.Payload)
	// finally check that we have all the data inside the data provider:
	// reading the users directly must expose the complete encrypted secrets,
	// and decrypting them must restore the original plain text payloads
	user1, err = dataprovider.GetUserByID(user1.ID)
	assert.NoError(t, err)
	assert.NotEmpty(t, user1.Password)
	assert.NotEmpty(t, user1.FsConfig.S3Config.AccessSecret.Key)
	assert.NotEmpty(t, user1.FsConfig.S3Config.AccessSecret.AdditionalData)
	assert.NotEmpty(t, user1.FsConfig.S3Config.AccessSecret.Status)
	assert.NotEmpty(t, user1.FsConfig.S3Config.AccessSecret.Payload)
	err = user1.FsConfig.S3Config.AccessSecret.Decrypt()
	assert.NoError(t, err)
	assert.Equal(t, vfs.SecretStatusPlain, user1.FsConfig.S3Config.AccessSecret.Status)
	assert.Equal(t, u1.FsConfig.S3Config.AccessSecret.Payload, user1.FsConfig.S3Config.AccessSecret.Payload)
	assert.Empty(t, user1.FsConfig.S3Config.AccessSecret.Key)
	assert.Empty(t, user1.FsConfig.S3Config.AccessSecret.AdditionalData)
	user2, err = dataprovider.GetUserByID(user2.ID)
	assert.NoError(t, err)
	assert.NotEmpty(t, user2.Password)
	assert.NotEmpty(t, user2.FsConfig.GCSConfig.Credentials.Key)
	assert.NotEmpty(t, user2.FsConfig.GCSConfig.Credentials.AdditionalData)
	assert.NotEmpty(t, user2.FsConfig.GCSConfig.Credentials.Status)
	assert.NotEmpty(t, user2.FsConfig.GCSConfig.Credentials.Payload)
	err = user2.FsConfig.GCSConfig.Credentials.Decrypt()
	assert.NoError(t, err)
	assert.Equal(t, vfs.SecretStatusPlain, user2.FsConfig.GCSConfig.Credentials.Status)
	assert.Equal(t, u2.FsConfig.GCSConfig.Credentials.Payload, user2.FsConfig.GCSConfig.Credentials.Payload)
	assert.Empty(t, user2.FsConfig.GCSConfig.Credentials.Key)
	assert.Empty(t, user2.FsConfig.GCSConfig.Credentials.AdditionalData)
	user3, err = dataprovider.GetUserByID(user3.ID)
	assert.NoError(t, err)
	assert.NotEmpty(t, user3.Password)
	assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.Key)
	assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
	assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.Status)
	assert.NotEmpty(t, user3.FsConfig.AzBlobConfig.AccountKey.Payload)
	err = user3.FsConfig.AzBlobConfig.AccountKey.Decrypt()
	assert.NoError(t, err)
	assert.Equal(t, vfs.SecretStatusPlain, user3.FsConfig.AzBlobConfig.AccountKey.Status)
	assert.Equal(t, u3.FsConfig.AzBlobConfig.AccountKey.Payload, user3.FsConfig.AzBlobConfig.AccountKey.Payload)
	assert.Empty(t, user3.FsConfig.AzBlobConfig.AccountKey.Key)
	assert.Empty(t, user3.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
	// cleanup
	_, err = httpd.RemoveUser(user1, http.StatusOK)
	assert.NoError(t, err)
	_, err = httpd.RemoveUser(user2, http.StatusOK)
	assert.NoError(t, err)
	_, err = httpd.RemoveUser(user3, http.StatusOK)
	assert.NoError(t, err)
	// restore the default provider configuration (credentials on disk)
	err = dataprovider.Close()
	assert.NoError(t, err)
	err = config.LoadConfig(configDir, "")
	assert.NoError(t, err)
	providerConf = config.GetProviderConf()
	providerConf.CredentialsPath = credentialsPath
	err = os.RemoveAll(credentialsPath)
	assert.NoError(t, err)
	err = dataprovider.Initialize(providerConf, configDir)
	assert.NoError(t, err)
}
// TestSecretObject verifies the plain -> encrypted -> plain round trip of a
// vfs.Secret and the conversion from the legacy "$aes$..." string format.
func TestSecretObject(t *testing.T) {
	obj := vfs.Secret{
		Status:         vfs.SecretStatusPlain,
		Payload:        "test data",
		AdditionalData: "username",
	}
	require.True(t, obj.IsValid())
	// encrypting must switch the status and populate payload and key
	require.NoError(t, obj.Encrypt())
	require.Equal(t, vfs.SecretStatusAES256GCM, obj.Status)
	require.NotEmpty(t, obj.Payload)
	require.NotEmpty(t, obj.Key)
	require.True(t, obj.IsValid())
	// decrypting must restore the original plain text and drop the key
	require.NoError(t, obj.Decrypt())
	require.Equal(t, vfs.SecretStatusPlain, obj.Status)
	require.Equal(t, "test data", obj.Payload)
	require.Empty(t, obj.Key)
	// a secret stored in the old single-string format must convert to a
	// plain text Secret
	legacy := "$aes$5b97e3a3324a2f53e2357483383367c0$0ed3132b584742ab217866219da633266782b69b13e50ebc6ddfb7c4fbf2f2a414c6d5f813"
	converted, err := vfs.GetSecretFromCompatString(legacy)
	require.NoError(t, err)
	require.True(t, converted.IsValid())
	require.Equal(t, vfs.SecretStatusPlain, converted.Status)
	require.Equal(t, "test data", converted.Payload)
	require.Empty(t, converted.Key)
}
func TestUpdateUserNoCredentials(t *testing.T) {
@ -2727,7 +2997,8 @@ func TestWebUserS3Mock(t *testing.T) {
user.FsConfig.S3Config.Bucket = "test"
user.FsConfig.S3Config.Region = "eu-west-1"
user.FsConfig.S3Config.AccessKey = "access-key"
user.FsConfig.S3Config.AccessSecret = "access-secret"
user.FsConfig.S3Config.AccessSecret.Payload = "access-secret"
user.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
user.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000/path?a=b"
user.FsConfig.S3Config.StorageClass = "Standard"
user.FsConfig.S3Config.KeyPrefix = "somedir/subdir/"
@ -2753,7 +3024,7 @@ func TestWebUserS3Mock(t *testing.T) {
form.Set("s3_bucket", user.FsConfig.S3Config.Bucket)
form.Set("s3_region", user.FsConfig.S3Config.Region)
form.Set("s3_access_key", user.FsConfig.S3Config.AccessKey)
form.Set("s3_access_secret", user.FsConfig.S3Config.AccessSecret)
form.Set("s3_access_secret", user.FsConfig.S3Config.AccessSecret.Payload)
form.Set("s3_storage_class", user.FsConfig.S3Config.StorageClass)
form.Set("s3_endpoint", user.FsConfig.S3Config.Endpoint)
form.Set("s3_key_prefix", user.FsConfig.S3Config.KeyPrefix)
@ -2800,9 +3071,46 @@ func TestWebUserS3Mock(t *testing.T) {
assert.Equal(t, updateUser.FsConfig.S3Config.UploadPartSize, user.FsConfig.S3Config.UploadPartSize)
assert.Equal(t, updateUser.FsConfig.S3Config.UploadConcurrency, user.FsConfig.S3Config.UploadConcurrency)
assert.Equal(t, 2, len(updateUser.Filters.FileExtensions))
if !strings.HasPrefix(updateUser.FsConfig.S3Config.AccessSecret, "$aes$") {
t.Error("s3 access secret is not encrypted")
}
assert.Equal(t, vfs.SecretStatusAES256GCM, updateUser.FsConfig.S3Config.AccessSecret.Status)
assert.NotEmpty(t, updateUser.FsConfig.S3Config.AccessSecret.Payload)
assert.Empty(t, updateUser.FsConfig.S3Config.AccessSecret.Key)
assert.Empty(t, updateUser.FsConfig.S3Config.AccessSecret.AdditionalData)
// now check that a redacted password is not saved
form.Set("s3_access_secret", "[**redacted**] ")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusSeeOther, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
users = nil
err = render.DecodeJSON(rr.Body, &users)
assert.NoError(t, err)
assert.Equal(t, 1, len(users))
lastUpdatedUser := users[0]
assert.Equal(t, vfs.SecretStatusAES256GCM, lastUpdatedUser.FsConfig.S3Config.AccessSecret.Status)
assert.Equal(t, updateUser.FsConfig.S3Config.AccessSecret.Payload, lastUpdatedUser.FsConfig.S3Config.AccessSecret.Payload)
assert.Empty(t, lastUpdatedUser.FsConfig.S3Config.AccessSecret.Key)
assert.Empty(t, lastUpdatedUser.FsConfig.S3Config.AccessSecret.AdditionalData)
// now clear credentials
form.Set("s3_access_key", "")
form.Set("s3_access_secret", "")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusSeeOther, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
users = nil
err = render.DecodeJSON(rr.Body, &users)
assert.NoError(t, err)
assert.Equal(t, 1, len(users))
assert.True(t, users[0].FsConfig.S3Config.AccessSecret.IsEmpty())
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
@ -2908,7 +3216,8 @@ func TestWebUserAzureBlobMock(t *testing.T) {
user.FsConfig.Provider = dataprovider.AzureBlobFilesystemProvider
user.FsConfig.AzBlobConfig.Container = "container"
user.FsConfig.AzBlobConfig.AccountName = "aname"
user.FsConfig.AzBlobConfig.AccountKey = "access-skey"
user.FsConfig.AzBlobConfig.AccountKey.Payload = "access-skey"
user.FsConfig.AzBlobConfig.AccountKey.Status = vfs.SecretStatusPlain
user.FsConfig.AzBlobConfig.Endpoint = "http://127.0.0.1:9000/path?b=c"
user.FsConfig.AzBlobConfig.KeyPrefix = "somedir/subdir/"
user.FsConfig.AzBlobConfig.UploadPartSize = 5
@ -2933,7 +3242,7 @@ func TestWebUserAzureBlobMock(t *testing.T) {
form.Set("fs_provider", "3")
form.Set("az_container", user.FsConfig.AzBlobConfig.Container)
form.Set("az_account_name", user.FsConfig.AzBlobConfig.AccountName)
form.Set("az_account_key", user.FsConfig.AzBlobConfig.AccountKey)
form.Set("az_account_key", user.FsConfig.AzBlobConfig.AccountKey.Payload)
form.Set("az_sas_url", user.FsConfig.AzBlobConfig.SASURL)
form.Set("az_endpoint", user.FsConfig.AzBlobConfig.Endpoint)
form.Set("az_key_prefix", user.FsConfig.AzBlobConfig.KeyPrefix)
@ -2980,9 +3289,29 @@ func TestWebUserAzureBlobMock(t *testing.T) {
assert.Equal(t, updateUser.FsConfig.AzBlobConfig.UploadPartSize, user.FsConfig.AzBlobConfig.UploadPartSize)
assert.Equal(t, updateUser.FsConfig.AzBlobConfig.UploadConcurrency, user.FsConfig.AzBlobConfig.UploadConcurrency)
assert.Equal(t, 2, len(updateUser.Filters.FileExtensions))
if !strings.HasPrefix(updateUser.FsConfig.AzBlobConfig.AccountKey, "$aes$") {
t.Error("azure account secret is not encrypted")
}
assert.Equal(t, vfs.SecretStatusAES256GCM, updateUser.FsConfig.AzBlobConfig.AccountKey.Status)
assert.NotEmpty(t, updateUser.FsConfig.AzBlobConfig.AccountKey.Payload)
assert.Empty(t, updateUser.FsConfig.AzBlobConfig.AccountKey.Key)
assert.Empty(t, updateUser.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
// now check that a redacted password is not saved
form.Set("az_account_key", "[**redacted**] ")
b, contentType, _ = getMultipartFormData(form, "", "")
req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
req.Header.Set("Content-Type", contentType)
rr = executeRequest(req)
checkResponseCode(t, http.StatusSeeOther, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASC&username="+user.Username, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
users = nil
err = render.DecodeJSON(rr.Body, &users)
assert.NoError(t, err)
assert.Equal(t, 1, len(users))
lastUpdatedUser := users[0]
assert.Equal(t, vfs.SecretStatusAES256GCM, lastUpdatedUser.FsConfig.AzBlobConfig.AccountKey.Status)
assert.Equal(t, updateUser.FsConfig.AzBlobConfig.AccountKey.Payload, lastUpdatedUser.FsConfig.AzBlobConfig.AccountKey.Payload)
assert.Empty(t, lastUpdatedUser.FsConfig.AzBlobConfig.AccountKey.Key)
assert.Empty(t, lastUpdatedUser.FsConfig.AzBlobConfig.AccountKey.AdditionalData)
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
@ -3211,9 +3540,3 @@ func getMultipartFormData(values url.Values, fileFieldName, filePath string) (by
err := w.Close()
return b, w.FormDataContentType(), err
}
// invalidBase64 is a test helper type whose custom JSON marshaling
// deliberately produces output that is not valid JSON, used to
// exercise JSON encoding error paths.
type invalidBase64 []byte

// MarshalJSON implements json.Marshaler and returns malformed JSON on purpose.
func (b invalidBase64) MarshalJSON() ([]byte, error) {
	return []byte(`not base64`), nil
}

View file

@ -332,24 +332,36 @@ func TestCompareUserFsConfig(t *testing.T) {
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.S3Config.AccessKey = ""
actual.FsConfig.S3Config.AccessSecret = "access secret"
actual.FsConfig.S3Config.AccessSecret.Payload = "access secret"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
secret, _ := utils.EncryptData("access secret")
actual.FsConfig.S3Config.AccessSecret = ""
expected.FsConfig.S3Config.AccessSecret = secret
actual.FsConfig.S3Config.AccessSecret.Payload = ""
expected.FsConfig.S3Config.AccessSecret.Payload = secret
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.S3Config.AccessSecret = utils.RemoveDecryptionKey(secret)
actual.FsConfig.S3Config.AccessSecret = utils.RemoveDecryptionKey(secret) + "a"
expected.FsConfig.S3Config.AccessSecret.Payload = "test"
actual.FsConfig.S3Config.AccessSecret.Payload = ""
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.S3Config.AccessSecret = "test"
actual.FsConfig.S3Config.AccessSecret = ""
expected.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusPlain
actual.FsConfig.S3Config.AccessSecret.Status = vfs.SecretStatusAES256GCM
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.S3Config.AccessSecret = ""
actual.FsConfig.S3Config.AccessSecret = ""
actual.FsConfig.S3Config.AccessSecret.Payload = "payload"
actual.FsConfig.S3Config.AccessSecret.AdditionalData = "data"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
actual.FsConfig.S3Config.AccessSecret.AdditionalData = ""
actual.FsConfig.S3Config.AccessSecret.Key = "key"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.S3Config.AccessSecret.Status = ""
expected.FsConfig.S3Config.AccessSecret.Payload = ""
actual.FsConfig.S3Config.AccessSecret.Status = ""
actual.FsConfig.S3Config.AccessSecret.Payload = ""
actual.FsConfig.S3Config.AccessSecret.AdditionalData = ""
actual.FsConfig.S3Config.AccessSecret.Key = ""
expected.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000/"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
@ -403,10 +415,10 @@ func TestCompareUserAzureConfig(t *testing.T) {
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.AzBlobConfig.AccountName = ""
expected.FsConfig.AzBlobConfig.AccountKey = "akey"
expected.FsConfig.AzBlobConfig.AccountKey.Payload = "akey"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)
expected.FsConfig.AzBlobConfig.AccountKey = ""
expected.FsConfig.AzBlobConfig.AccountKey.Payload = ""
expected.FsConfig.AzBlobConfig.Endpoint = "endpt"
err = compareUserFsConfig(expected, actual)
assert.Error(t, err)

View file

@ -2,7 +2,7 @@ openapi: 3.0.3
info:
title: SFTPGo
description: 'SFTPGo REST API'
version: 2.0.3
version: 2.1.0
servers:
- url: /api/v1
@ -11,6 +11,7 @@ security:
paths:
/healthz:
get:
security: []
servers:
- url : /
tags:
@ -956,6 +957,22 @@ components:
nullable: true
description: maximum allowed size, as bytes, for a single file upload. The upload will be aborted if/when the size of the file being sent exceeds this limit. 0 means unlimited. This restriction does not apply for SSH system commands such as `git` and `rsync`
description: Additional restrictions
Secret:
type: object
properties:
status:
type: string
enum:
- Plain
- AES-256-GCM
- Redacted
description: Set to "Plain" to add or update an existing secret, set to "Redacted" to preserve the existing value
payload:
type: string
key:
type: string
additional_data:
type: string
S3Config:
type: object
properties:
@ -968,8 +985,7 @@ components:
access_key:
type: string
access_secret:
type: string
description: the access secret is stored encrypted (AES-256-GCM)
$ref: '#/components/schemas/Secret'
endpoint:
type: string
description: optional endpoint
@ -997,9 +1013,7 @@ components:
type: string
minLength: 1
credentials:
type: string
format: byte
description: Google Cloud Storage JSON credentials base64 encoded. This field must be populated only when adding/updating a user. It will be always omitted, since there are sensitive data, when you search/get users. The credentials will be stored in the configured "credentials_path"
$ref: '#/components/schemas/Secret'
automatic_credentials:
type: integer
nullable: true
@ -1019,7 +1033,7 @@ components:
required:
- bucket
nullable: true
description: Google Cloud Storage configuration details
description: Google Cloud Storage configuration details. The "credentials" field must be populated only when adding/updating a user. It will be always omitted, since there are sensitive data, when you search/get users
AzureBlobFsConfig:
type: object
properties:
@ -1029,8 +1043,7 @@ components:
type: string
description: Storage Account Name, leave blank to use SAS URL
account_key:
type: string
description: Storage Account Key leave blank to use SAS URL. The access key is stored encrypted (AES-256-GCM)
$ref: '#/components/schemas/Secret'
sas_url:
type: string
description: Shared access signature URL, leave blank if using account/key

View file

@ -39,6 +39,7 @@ const (
page500Body = "The server is unable to fulfill your request."
defaultQueryLimit = 500
webDateTimeFormat = "2006-01-02 15:04:05" // YYYY-MM-DD HH:MM:SS
redactedSecret = "[**redacted**]"
)
var (
@ -81,7 +82,6 @@ type connectionsPage struct {
type userPage struct {
basePage
IsAdd bool
User dataprovider.User
RootPerms []string
Error string
@ -89,6 +89,10 @@ type userPage struct {
ValidSSHLoginMethods []string
ValidProtocols []string
RootDirPerms []string
RedactedSecret string
IsAdd bool
IsS3SecretEnc bool
IsAzSecretEnc bool
}
type folderPage struct {
@ -210,6 +214,9 @@ func renderAddUserPage(w http.ResponseWriter, user dataprovider.User, error stri
ValidSSHLoginMethods: dataprovider.ValidSSHLoginMethods,
ValidProtocols: dataprovider.ValidProtocols,
RootDirPerms: user.GetPermissionsForPath("/"),
IsS3SecretEnc: user.FsConfig.S3Config.AccessSecret.IsEncrypted(),
IsAzSecretEnc: user.FsConfig.AzBlobConfig.AccountKey.IsEncrypted(),
RedactedSecret: redactedSecret,
}
renderTemplate(w, templateUser, data)
}
@ -224,6 +231,9 @@ func renderUpdateUserPage(w http.ResponseWriter, user dataprovider.User, error s
ValidSSHLoginMethods: dataprovider.ValidSSHLoginMethods,
ValidProtocols: dataprovider.ValidProtocols,
RootDirPerms: user.GetPermissionsForPath("/"),
IsS3SecretEnc: user.FsConfig.S3Config.AccessSecret.IsEncrypted(),
IsAzSecretEnc: user.FsConfig.AzBlobConfig.AccountKey.IsEncrypted(),
RedactedSecret: redactedSecret,
}
renderTemplate(w, templateUser, data)
}
@ -420,6 +430,20 @@ func getFiltersFromUserPostFields(r *http.Request) dataprovider.UserFilters {
return filters
}
func getSecretFromFormField(r *http.Request, field string) vfs.Secret {
secret := vfs.Secret{
Payload: r.Form.Get(field),
Status: vfs.SecretStatusPlain,
}
if strings.TrimSpace(secret.Payload) == redactedSecret {
secret.Status = vfs.SecretStatusRedacted
}
if strings.TrimSpace(secret.Payload) == "" {
secret.Status = ""
}
return secret
}
func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, error) {
var fs dataprovider.Filesystem
provider, err := strconv.Atoi(r.Form.Get("fs_provider"))
@ -431,7 +455,7 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er
fs.S3Config.Bucket = r.Form.Get("s3_bucket")
fs.S3Config.Region = r.Form.Get("s3_region")
fs.S3Config.AccessKey = r.Form.Get("s3_access_key")
fs.S3Config.AccessSecret = r.Form.Get("s3_access_secret")
fs.S3Config.AccessSecret = getSecretFromFormField(r, "s3_access_secret")
fs.S3Config.Endpoint = r.Form.Get("s3_endpoint")
fs.S3Config.StorageClass = r.Form.Get("s3_storage_class")
fs.S3Config.KeyPrefix = r.Form.Get("s3_key_prefix")
@ -468,12 +492,15 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er
}
return fs, err
}
fs.GCSConfig.Credentials = fileBytes
fs.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: string(fileBytes),
}
fs.GCSConfig.AutomaticCredentials = 0
} else if fs.Provider == dataprovider.AzureBlobFilesystemProvider {
fs.AzBlobConfig.Container = r.Form.Get("az_container")
fs.AzBlobConfig.AccountName = r.Form.Get("az_account_name")
fs.AzBlobConfig.AccountKey = r.Form.Get("az_account_key")
fs.AzBlobConfig.AccountKey = getSecretFromFormField(r, "az_account_key")
fs.AzBlobConfig.SASURL = r.Form.Get("az_sas_url")
fs.AzBlobConfig.Endpoint = r.Form.Get("az_endpoint")
fs.AzBlobConfig.KeyPrefix = r.Form.Get("az_key_prefix")
@ -655,6 +682,12 @@ func handleWebUpdateUserPost(w http.ResponseWriter, r *http.Request) {
if len(updatedUser.Password) == 0 {
updatedUser.Password = user.Password
}
if !updatedUser.FsConfig.S3Config.AccessSecret.IsPlain() && !updatedUser.FsConfig.S3Config.AccessSecret.IsEmpty() {
updatedUser.FsConfig.S3Config.AccessSecret = user.FsConfig.S3Config.AccessSecret
}
if !updatedUser.FsConfig.AzBlobConfig.AccountKey.IsPlain() && !updatedUser.FsConfig.AzBlobConfig.AccountKey.IsEmpty() {
updatedUser.FsConfig.AzBlobConfig.AccountKey = user.FsConfig.AzBlobConfig.AccountKey
}
err = dataprovider.UpdateUser(updatedUser)
if err == nil {
if len(r.Form.Get("disconnect")) > 0 {

View file

@ -2,7 +2,6 @@
package service
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
@ -207,9 +206,7 @@ func (s *Service) loadInitialData() error {
if err != nil {
return fmt.Errorf("unable to read input file %#v: %v", s.LoadDataFrom, err)
}
var dump dataprovider.BackupData
err = json.Unmarshal(content, &dump)
dump, err := dataprovider.ParseDumpData(content)
if err != nil {
return fmt.Errorf("unable to parse file to restore %#v: %v", s.LoadDataFrom, err)
}

View file

@ -1312,7 +1312,10 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
u := getTestUser(usePubKey)
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "testbucket"
u.FsConfig.GCSConfig.Credentials = []byte(`{ "type": "service_account" }`)
u.FsConfig.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: `{ "type": "service_account" }`,
}
providerConf := config.GetProviderConf()
providerConf.PreferDatabaseCredentials = true
@ -1333,9 +1336,12 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.GCSConfig.Credentials.Status)
assert.NotEmpty(t, user.FsConfig.GCSConfig.Credentials.Payload)
assert.Empty(t, user.FsConfig.GCSConfig.Credentials.AdditionalData)
assert.Empty(t, user.FsConfig.GCSConfig.Credentials.Key)
_, err = os.Stat(credentialsFile)
assert.Error(t, err)
assert.NoFileExists(t, credentialsFile)
client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err) {
@ -1358,7 +1364,10 @@ func TestLoginInvalidFs(t *testing.T) {
u := getTestUser(usePubKey)
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = []byte("invalid JSON for credentials")
u.FsConfig.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: "invalid JSON for credentials",
}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)

View file

@ -337,7 +337,7 @@
<label for="idS3AccessSecret" class="col-sm-2 col-form-label">Access Secret</label>
<div class="col-sm-3">
<input type="text" class="form-control" id="idS3AccessSecret" name="s3_access_secret" placeholder=""
value="{{.User.FsConfig.S3Config.AccessSecret}}" maxlength="1000">
value="{{if .IsS3SecretEnc}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.S3Config.AccessSecret.Payload}}{{end}}" maxlength="1000">
</div>
</div>
@ -345,7 +345,7 @@
<label for="idS3StorageClass" class="col-sm-2 col-form-label">Storage Class</label>
<div class="col-sm-3">
<input type="text" class="form-control" id="idS3StorageClass" name="s3_storage_class" placeholder=""
value="{{.User.FsConfig.S3Config.StorageClass}}" maxlength="1000">
value="{{.User.FsConfig.S3Config.StorageClass}}" maxlength="255">
</div>
<div class="col-sm-2"></div>
<label for="idS3Endpoint" class="col-sm-2 col-form-label">Endpoint</label>
@ -448,7 +448,7 @@
<label for="idAzAccountKey" class="col-sm-2 col-form-label">Account Key</label>
<div class="col-sm-10">
<input type="text" class="form-control" id="idAzAccountKey" name="az_account_key" placeholder=""
value="{{.User.FsConfig.AzBlobConfig.AccountKey}}" maxlength="255">
value="{{if .IsAzSecretEnc}}{{.RedactedSecret}}{{else}}{{.User.FsConfig.AzBlobConfig.AccountKey.Payload}}{{end}}" maxlength="1000">
</div>
</div>

View file

@ -193,6 +193,9 @@ func DecryptData(data string) (string, error) {
return result, err
}
nonceSize := gcm.NonceSize()
if len(encrypted) < nonceSize {
return result, errors.New("malformed ciphertext")
}
nonce, ciphertext := encrypted[:nonceSize], encrypted[nonceSize:]
plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
if err != nil {

View file

@ -24,7 +24,6 @@ import (
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/metrics"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
)
@ -61,12 +60,11 @@ func NewAzBlobFs(connectionID, localTempDir string, config AzBlobFsConfig) (Fs,
if err := ValidateAzBlobFsConfig(&fs.config); err != nil {
return fs, err
}
if fs.config.AccountKey != "" {
accountKey, err := utils.DecryptData(fs.config.AccountKey)
if fs.config.AccountKey.IsEncrypted() {
err := fs.config.AccountKey.Decrypt()
if err != nil {
return fs, err
}
fs.config.AccountKey = accountKey
}
fs.setConfigDefaults()
@ -106,7 +104,7 @@ func NewAzBlobFs(connectionID, localTempDir string, config AzBlobFsConfig) (Fs,
return fs, nil
}
credential, err := azblob.NewSharedKeyCredential(fs.config.AccountName, fs.config.AccountKey)
credential, err := azblob.NewSharedKeyCredential(fs.config.AccountName, fs.config.AccountKey.Payload)
if err != nil {
return fs, fmt.Errorf("invalid credentials: %v", err)
}

View file

@ -4,9 +4,11 @@ package vfs
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
@ -60,10 +62,28 @@ func NewGCSFs(connectionID, localTempDir string, config GCSFsConfig) (Fs, error)
ctx := context.Background()
if fs.config.AutomaticCredentials > 0 {
fs.svc, err = storage.NewClient(ctx)
} else if len(fs.config.Credentials) > 0 {
fs.svc, err = storage.NewClient(ctx, option.WithCredentialsJSON(fs.config.Credentials))
} else if fs.config.Credentials.IsEncrypted() {
err = fs.config.Credentials.Decrypt()
if err != nil {
return fs, err
}
fs.svc, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(fs.config.Credentials.Payload)))
} else {
fs.svc, err = storage.NewClient(ctx, option.WithCredentialsFile(fs.config.CredentialFile))
var creds []byte
creds, err = ioutil.ReadFile(fs.config.CredentialFile)
if err != nil {
return fs, err
}
secret := &Secret{}
err = json.Unmarshal(creds, secret)
if err != nil {
return fs, err
}
err = secret.Decrypt()
if err != nil {
return fs, err
}
fs.svc, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(secret.Payload)))
}
return fs, err
}

View file

@ -60,13 +60,12 @@ func NewS3Fs(connectionID, localTempDir string, config S3FsConfig) (Fs, error) {
awsConfig.WithRegion(fs.config.Region)
}
if fs.config.AccessSecret != "" {
accessSecret, err := utils.DecryptData(fs.config.AccessSecret)
if fs.config.AccessSecret.IsEncrypted() {
err := fs.config.AccessSecret.Decrypt()
if err != nil {
return fs, err
}
fs.config.AccessSecret = accessSecret
awsConfig.Credentials = credentials.NewStaticCredentials(fs.config.AccessKey, fs.config.AccessSecret, "")
awsConfig.Credentials = credentials.NewStaticCredentials(fs.config.AccessKey, fs.config.AccessSecret.Payload, "")
}
if fs.config.Endpoint != "" {

209
vfs/secret.go Normal file
View file

@ -0,0 +1,209 @@
package vfs
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"io"
"github.com/drakkan/sftpgo/utils"
)
// SecretStatus defines the statuses of a Secret object
type SecretStatus = string

const (
	// SecretStatusPlain means the secret is in plain text and must be encrypted
	SecretStatusPlain SecretStatus = "Plain"
	// SecretStatusAES256GCM means the secret is encrypted using AES-256-GCM
	SecretStatusAES256GCM SecretStatus = "AES-256-GCM"
	// SecretStatusRedacted means the secret is redacted
	SecretStatusRedacted SecretStatus = "Redacted"
)

var (
	// errWrongSecretStatus is returned when an operation is attempted on a
	// secret whose status does not allow it (e.g. encrypting a non-plain secret)
	errWrongSecretStatus = errors.New("wrong secret status")
	// errMalformedCiphertext is returned when an encrypted payload cannot be parsed
	errMalformedCiphertext = errors.New("malformed ciphertext")
	// errInvalidSecret is returned for secrets with an empty payload
	errInvalidSecret = errors.New("invalid secret")
	// validSecretStatuses lists every status accepted as user input
	validSecretStatuses = []string{SecretStatusPlain, SecretStatusAES256GCM, SecretStatusRedacted}
)

// Secret defines the struct used to store confidential data
type Secret struct {
	// Status is one of the SecretStatus* constants
	Status SecretStatus `json:"status,omitempty"`
	// Payload is the secret itself: plain text, or hex-encoded ciphertext
	// when Status is SecretStatusAES256GCM
	Payload string `json:"payload,omitempty"`
	// Key is the hex-encoded key used for encryption/decryption
	Key string `json:"key,omitempty"`
	// AdditionalData is mixed into the derived key and used as
	// AEAD additional data during encryption
	AdditionalData string `json:"additional_data,omitempty"`
}
// GetSecretFromCompatString returns a plain text Secret decoded from the
// previous single-string "$aes$..." format. It returns
// errMalformedCiphertext if the string cannot be decrypted.
func GetSecretFromCompatString(secret string) (Secret, error) {
	plain, err := utils.DecryptData(secret)
	if err != nil {
		return Secret{}, errMalformedCiphertext
	}
	return Secret{
		Status:  SecretStatusPlain,
		Payload: plain,
	}, nil
}
// IsEncrypted returns true if the secret is encrypted (status AES-256-GCM).
// NOTE(review): the previous comment claimed this is not a pointer receiver
// "because we don't want to pass a pointer to html template", but the method
// is declared with a pointer receiver — comment updated to match the code.
func (s *Secret) IsEncrypted() bool {
	return s.Status == SecretStatusAES256GCM
}
// IsPlain returns true if the secret payload is stored as plain text.
func (s *Secret) IsPlain() bool {
	switch s.Status {
	case SecretStatusPlain:
		return true
	default:
		return false
	}
}
// IsRedacted returns true if the secret payload has been redacted.
func (s *Secret) IsRedacted() bool {
	switch s.Status {
	case SecretStatusRedacted:
		return true
	default:
		return false
	}
}
// IsEmpty returns true if every field of the secret is unset.
func (s *Secret) IsEmpty() bool {
	return s.Status == "" && s.Payload == "" && s.Key == "" && s.AdditionalData == ""
}
// IsValid returns true if the secret is not empty and valid.
// An encrypted secret must additionally carry a well formed key:
// 32 random bytes, hex encoded, hence 64 characters.
func (s *Secret) IsValid() bool {
	switch {
	case !s.IsValidInput():
		return false
	case s.Status == SecretStatusAES256GCM && len(s.Key) != 64:
		return false
	default:
		return true
	}
}
// IsValidInput returns true if the secret is a valid user input:
// a recognized status and a non-empty payload.
func (s *Secret) IsValidInput() bool {
	return utils.IsStringInSlice(s.Status, validSecretStatuses) && s.Payload != ""
}
// Hide clears the material needed to decrypt the payload (the key and
// the additional data) so the secret can be exposed without leaking
// the means to recover the plain text.
func (s *Secret) Hide() {
	s.AdditionalData = ""
	s.Key = ""
}
// deriveKey is a weak method of deriving a key but it is still better
// than using the key as it is. We should use a KMS in future.
// The derived key is SHA-256(key || AdditionalData || key); appending
// an empty AdditionalData is a no-op, so the unconditional append is
// equivalent to guarding on it.
func (s *Secret) deriveKey(key []byte) []byte {
	combined := make([]byte, 0, 2*len(key)+len(s.AdditionalData))
	combined = append(combined, key...)
	combined = append(combined, s.AdditionalData...)
	combined = append(combined, key...)
	digest := sha256.Sum256(combined)
	return digest[:]
}
// Encrypt encrypts a plain text Secret object in place.
// A fresh random 32 byte key is generated for every call; it is run
// through deriveKey (which mixes in AdditionalData) to obtain the
// AES-256-GCM cipher key. On success:
//   - Payload holds the hex encoded nonce||ciphertext
//   - Key holds the hex encoded random key
//   - Status becomes SecretStatusAES256GCM
// An empty payload returns errInvalidSecret; any status other than
// SecretStatusPlain returns errWrongSecretStatus.
func (s *Secret) Encrypt() error {
	if s.Payload == "" {
		return errInvalidSecret
	}
	switch s.Status {
	case SecretStatusPlain:
		key := make([]byte, 32)
		if _, err := io.ReadFull(rand.Reader, key); err != nil {
			return err
		}
		block, err := aes.NewCipher(s.deriveKey(key))
		if err != nil {
			return err
		}
		gcm, err := cipher.NewGCM(block)
		if err != nil {
			return err
		}
		nonce := make([]byte, gcm.NonceSize())
		if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
			return err
		}
		// AdditionalData, if set, is authenticated but not encrypted
		var aad []byte
		if s.AdditionalData != "" {
			aad = []byte(s.AdditionalData)
		}
		// the nonce is prepended to the ciphertext so Decrypt can recover it
		ciphertext := gcm.Seal(nonce, nonce, []byte(s.Payload), aad)
		s.Key = hex.EncodeToString(key)
		s.Payload = hex.EncodeToString(ciphertext)
		s.Status = SecretStatusAES256GCM
		return nil
	default:
		return errWrongSecretStatus
	}
}
// Decrypt decrypts a Secret object in place, reversing Encrypt.
// Payload is expected to be the hex encoded nonce||ciphertext and Key
// the hex encoded random key produced by Encrypt; the same deriveKey
// transformation recovers the cipher key, so AdditionalData must match
// the value used when encrypting. On success Status becomes
// SecretStatusPlain and Key/AdditionalData are cleared.
// A status other than SecretStatusAES256GCM returns
// errWrongSecretStatus; a payload shorter than the nonce returns
// errMalformedCiphertext.
func (s *Secret) Decrypt() error {
	switch s.Status {
	case SecretStatusAES256GCM:
		encrypted, err := hex.DecodeString(s.Payload)
		if err != nil {
			return err
		}
		key, err := hex.DecodeString(s.Key)
		if err != nil {
			return err
		}
		block, err := aes.NewCipher(s.deriveKey(key))
		if err != nil {
			return err
		}
		gcm, err := cipher.NewGCM(block)
		if err != nil {
			return err
		}
		nonceSize := gcm.NonceSize()
		if len(encrypted) < nonceSize {
			return errMalformedCiphertext
		}
		// split the nonce Encrypt prepended from the actual ciphertext
		nonce, ciphertext := encrypted[:nonceSize], encrypted[nonceSize:]
		var aad []byte
		if s.AdditionalData != "" {
			aad = []byte(s.AdditionalData)
		}
		// Open also verifies the GCM authentication tag and the aad
		plaintext, err := gcm.Open(nil, nonce, ciphertext, aad)
		if err != nil {
			return err
		}
		s.Status = SecretStatusPlain
		s.Payload = string(plaintext)
		s.Key = ""
		s.AdditionalData = ""
		return nil
	default:
		return errWrongSecretStatus
	}
}

View file

@ -113,7 +113,7 @@ type S3FsConfig struct {
KeyPrefix string `json:"key_prefix,omitempty"`
Region string `json:"region,omitempty"`
AccessKey string `json:"access_key,omitempty"`
AccessSecret string `json:"access_secret,omitempty"`
AccessSecret Secret `json:"access_secret,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
StorageClass string `json:"storage_class,omitempty"`
// The buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB,
@ -137,9 +137,10 @@ type GCSFsConfig struct {
// folder. The prefix, if not empty, must not start with "/" and must
// end with "/".
// If empty the whole bucket contents will be available
KeyPrefix string `json:"key_prefix,omitempty"`
CredentialFile string `json:"-"`
Credentials []byte `json:"credentials,omitempty"`
KeyPrefix string `json:"key_prefix,omitempty"`
CredentialFile string `json:"-"`
Credentials Secret `json:"credentials,omitempty"`
// 0 explicit, 1 automatic
AutomaticCredentials int `json:"automatic_credentials,omitempty"`
StorageClass string `json:"storage_class,omitempty"`
}
@ -151,7 +152,7 @@ type AzBlobFsConfig struct {
AccountName string `json:"account_name,omitempty"`
// Storage Account Key leave blank to use SAS URL.
// The access key is stored encrypted (AES-256-GCM)
AccountKey string `json:"account_key,omitempty"`
AccountKey Secret `json:"account_key,omitempty"`
// Optional endpoint. Default is "blob.core.windows.net".
// If you use the emulator the endpoint must include the protocol,
// for example "http://127.0.0.1:10000"
@ -235,20 +236,33 @@ func IsLocalOsFs(fs Fs) bool {
return fs.Name() == osFsName
}
// ValidateS3FsConfig returns nil if the specified s3 config is valid, otherwise an error
func ValidateS3FsConfig(config *S3FsConfig) error {
if len(config.Bucket) == 0 {
return errors.New("bucket cannot be empty")
}
if len(config.Region) == 0 {
return errors.New("region cannot be empty")
}
if len(config.AccessKey) == 0 && len(config.AccessSecret) > 0 {
func checkS3Credentials(config *S3FsConfig) error {
if config.AccessKey == "" && !config.AccessSecret.IsEmpty() {
return errors.New("access_key cannot be empty with access_secret not empty")
}
if len(config.AccessSecret) == 0 && len(config.AccessKey) > 0 {
if config.AccessSecret.IsEmpty() && config.AccessKey != "" {
return errors.New("access_secret cannot be empty with access_key not empty")
}
if config.AccessSecret.IsEncrypted() && !config.AccessSecret.IsValid() {
return errors.New("invalid encrypted access_secret")
}
if !config.AccessSecret.IsEmpty() && !config.AccessSecret.IsValidInput() {
return errors.New("invalid access_secret")
}
return nil
}
// ValidateS3FsConfig returns nil if the specified s3 config is valid, otherwise an error
func ValidateS3FsConfig(config *S3FsConfig) error {
if config.Bucket == "" {
return errors.New("bucket cannot be empty")
}
if config.Region == "" {
return errors.New("region cannot be empty")
}
if err := checkS3Credentials(config); err != nil {
return err
}
if config.KeyPrefix != "" {
if strings.HasPrefix(config.KeyPrefix, "/") {
return errors.New("key_prefix cannot start with /")
@ -281,7 +295,10 @@ func ValidateGCSFsConfig(config *GCSFsConfig, credentialsFilePath string) error
config.KeyPrefix += "/"
}
}
if len(config.Credentials) == 0 && config.AutomaticCredentials == 0 {
if config.Credentials.IsEncrypted() && !config.Credentials.IsValid() {
return errors.New("invalid encrypted credentials")
}
if !config.Credentials.IsValidInput() && config.AutomaticCredentials == 0 {
fi, err := os.Stat(credentialsFilePath)
if err != nil {
return fmt.Errorf("invalid credentials %v", err)
@ -302,8 +319,11 @@ func ValidateAzBlobFsConfig(config *AzBlobFsConfig) error {
if config.Container == "" {
return errors.New("container cannot be empty")
}
if config.AccountName == "" || config.AccountKey == "" {
return errors.New("credentials cannot be empty")
if config.AccountName == "" || !config.AccountKey.IsValidInput() {
return errors.New("credentials cannot be empty or invalid")
}
if config.AccountKey.IsEncrypted() && !config.AccountKey.IsValid() {
return errors.New("invalid encrypted account_key")
}
if config.KeyPrefix != "" {
if strings.HasPrefix(config.KeyPrefix, "/") {

View file

@ -861,7 +861,10 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = []byte(`{ "type": "service_account" }`)
u.FsConfig.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: `{ "type": "service_account" }`,
}
providerConf := config.GetProviderConf()
providerConf.PreferDatabaseCredentials = true
@ -882,9 +885,12 @@ func TestLoginWithDatabaseCredentials(t *testing.T) {
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)
assert.Equal(t, vfs.SecretStatusAES256GCM, user.FsConfig.GCSConfig.Credentials.Status)
assert.NotEmpty(t, user.FsConfig.GCSConfig.Credentials.Payload)
assert.Empty(t, user.FsConfig.GCSConfig.Credentials.AdditionalData)
assert.Empty(t, user.FsConfig.GCSConfig.Credentials.Key)
_, err = os.Stat(credentialsFile)
assert.Error(t, err)
assert.NoFileExists(t, credentialsFile)
client := getWebDavClient(user)
@ -906,7 +912,10 @@ func TestLoginInvalidFs(t *testing.T) {
u := getTestUser()
u.FsConfig.Provider = dataprovider.GCSFilesystemProvider
u.FsConfig.GCSConfig.Bucket = "test"
u.FsConfig.GCSConfig.Credentials = []byte("invalid JSON for credentials")
u.FsConfig.GCSConfig.Credentials = vfs.Secret{
Status: vfs.SecretStatusPlain,
Payload: "invalid JSON for credentials",
}
user, _, err := httpd.AddUser(u, http.StatusOK)
assert.NoError(t, err)