printf: replace %#v with the more explicit %q

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
Nicola Murino 2023-02-27 19:02:43 +01:00
parent a23fdea9e3
commit dba088daed
GPG key ID: 935D2952DEC4EECF
80 changed files with 580 additions and 580 deletions
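The snippet below is not part of the commit; it is a minimal sketch showing why %q reads as the more explicit choice: for plain strings both verbs produce the same escaped, double-quoted output, but %q states the quoting intent directly, while %#v prints the Go-syntax representation of an arbitrary value.

package main

import "fmt"

func main() {
	name := `file "a".txt`

	// For plain strings the two verbs print the same escaped, double-quoted form.
	fmt.Printf("%#v\n", name) // "file \"a\".txt"
	fmt.Printf("%q\n", name)  // "file \"a\".txt"

	// %#v is the Go-syntax representation, so it also applies to arbitrary
	// values; %q is limited to strings, runes, and integers, which makes the
	// intent of the log format string unambiguous.
	type user struct{ Name string }
	fmt.Printf("%#v\n", user{Name: "nick"}) // main.user{Name:"nick"}
}

Because the log statements touched here always pass string arguments, the replacement changes intent, not output.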


@ -153,6 +153,6 @@ func LoadConfig(configDir, configName string) error {
logger.WarnToConsole("error parsing configuration file: %v. Default configuration will be used.", err)
return err
}
logger.Debug(logSender, "", "config file used: '%#v', config loaded: %+v", viper.ConfigFileUsed(), getRedactedConf())
logger.Debug(logSender, "", "config file used: '%q', config loaded: %+v", viper.ConfigFileUsed(), getRedactedConf())
return err
}


@ -118,13 +118,13 @@ func loadCACerts(configDir string) error {
caPath := getConfigPath(ca, configDir)
certs, err := os.ReadFile(caPath)
if err != nil {
logger.Warn(logSender, "", "error loading ca cert %#v: %v", caPath, err)
logger.Warn(logSender, "", "error loading ca cert %q: %v", caPath, err)
return err
}
if !rootCAs.AppendCertsFromPEM(certs) {
logger.Warn(logSender, "", "unable to add ca cert %#v", caPath)
logger.Warn(logSender, "", "unable to add ca cert %q", caPath)
} else {
logger.Debug(logSender, "", "ca cert %#v added to the trusted certificates", caPath)
logger.Debug(logSender, "", "ca cert %q added to the trusted certificates", caPath)
}
}


@ -85,7 +85,7 @@ func checkSFTPGoUserAuth(w http.ResponseWriter, r *http.Request) {
sr, err := l.Search(searchRequest)
if err != nil {
logger.Warn(logSender, middleware.GetReqID(r.Context()), "error searching LDAP user %#v: %v", authReq.Username, err)
logger.Warn(logSender, middleware.GetReqID(r.Context()), "error searching LDAP user %q: %v", authReq.Username, err)
sendAPIResponse(w, r, err, "Error searching LDAP user", http.StatusInternalServerError)
return
}
@ -99,7 +99,7 @@ func checkSFTPGoUserAuth(w http.ResponseWriter, r *http.Request) {
if len(authReq.PublicKey) > 0 {
userKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(authReq.PublicKey))
if err != nil {
logger.Warn(logSender, middleware.GetReqID(r.Context()), "invalid public key for user %#v: %v", authReq.Username, err)
logger.Warn(logSender, middleware.GetReqID(r.Context()), "invalid public key for user %q: %v", authReq.Username, err)
sendAPIResponse(w, r, err, "Invalid public key", http.StatusBadRequest)
return
}
@ -116,7 +116,7 @@ func checkSFTPGoUserAuth(w http.ResponseWriter, r *http.Request) {
}
}
if !authOk {
logger.Warn(logSender, middleware.GetReqID(r.Context()), "public key authentication failed for user: %#v", authReq.Username)
logger.Warn(logSender, middleware.GetReqID(r.Context()), "public key authentication failed for user: %q", authReq.Username)
sendAPIResponse(w, r, nil, "public key authentication failed", http.StatusForbidden)
return
}
@ -125,7 +125,7 @@ func checkSFTPGoUserAuth(w http.ResponseWriter, r *http.Request) {
userdn := sr.Entries[0].DN
err = l.Bind(userdn, authReq.Password)
if err != nil {
logger.Warn(logSender, middleware.GetReqID(r.Context()), "password authentication failed for user: %#v", authReq.Username)
logger.Warn(logSender, middleware.GetReqID(r.Context()), "password authentication failed for user: %q", authReq.Username)
sendAPIResponse(w, r, nil, "password authentication failed", http.StatusForbidden)
return
}
@ -133,7 +133,7 @@ func checkSFTPGoUserAuth(w http.ResponseWriter, r *http.Request) {
user, err := getSFTPGoUser(sr.Entries[0], authReq.Username)
if err != nil {
logger.Warn(logSender, middleware.GetReqID(r.Context()), "get user from LDAP entry failed for username %#v: %v",
logger.Warn(logSender, middleware.GetReqID(r.Context()), "get user from LDAP entry failed for username %q: %v",
authReq.Username, err)
sendAPIResponse(w, r, err, "mapping LDAP user failed", http.StatusInternalServerError)
return


@ -76,7 +76,7 @@ Please take a look at the usage below to customize the options.`,
providerConf.Actions.Hook = ""
providerConf.Actions.ExecuteFor = nil
providerConf.Actions.ExecuteOn = nil
logger.InfoToConsole("Initializing provider: %#v config file: %#v", providerConf.Driver, viper.ConfigFileUsed())
logger.InfoToConsole("Initializing provider: %q config file: %q", providerConf.Driver, viper.ConfigFileUsed())
err = dataprovider.InitializeDatabase(providerConf, configDir)
if err == nil {
logger.InfoToConsole("Data provider successfully initialized/updated")


@ -147,7 +147,7 @@ Please take a look at the usage below to customize the serving parameters`,
_, err := common.NewCertManager(keyPairs, filepath.Clean(defaultConfigDir),
"FTP portable")
if err != nil {
fmt.Printf("Unable to load FTPS key pair, cert file %#v key file %#v error: %v\n",
fmt.Printf("Unable to load FTPS key pair, cert file %q key file %q error: %v\n",
portableFTPSCert, portableFTPSKey, err)
os.Exit(1)
}
@ -163,7 +163,7 @@ Please take a look at the usage below to customize the serving parameters`,
_, err := common.NewCertManager(keyPairs, filepath.Clean(defaultConfigDir),
"WebDAV portable")
if err != nil {
fmt.Printf("Unable to load WebDAV key pair, cert file %#v key file %#v error: %v\n",
fmt.Printf("Unable to load WebDAV key pair, cert file %q key file %q error: %v\n",
portableWebDAVCert, portableWebDAVKey, err)
os.Exit(1)
}
@ -480,7 +480,7 @@ func getFileContents(name string) (string, error) {
return "", err
}
if fi.Size() > 1048576 {
return "", fmt.Errorf("%#v is too big %v/1048576 bytes", name, fi.Size())
return "", fmt.Errorf("%q is too big %v/1048576 bytes", name, fi.Size())
}
contents, err := os.ReadFile(name)
if err != nil {


@ -78,7 +78,7 @@ Command-line flags should be specified in the Subsystem declaration.
}
username := osUser.Username
homedir := osUser.HomeDir
logger.Info(logSender, connectionID, "starting SFTPGo %v as subsystem, user %#v home dir %#v config dir %#v base home dir %#v",
logger.Info(logSender, connectionID, "starting SFTPGo %v as subsystem, user %q home dir %q config dir %q base home dir %q",
version.Get(), username, homedir, configDir, baseHomeDir)
err = config.LoadConfig(configDir, configFile)
if err != nil {
@ -144,7 +144,7 @@ Command-line flags should be specified in the Subsystem declaration.
user.HomeDir = filepath.Clean(homedir)
err = dataprovider.UpdateUser(&user, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
logger.Error(logSender, connectionID, "unable to update user %#v: %v", username, err)
logger.Error(logSender, connectionID, "unable to update user %q: %v", username, err)
os.Exit(1)
}
}
@ -155,19 +155,19 @@ Command-line flags should be specified in the Subsystem declaration.
} else {
user.HomeDir = filepath.Clean(homedir)
}
logger.Debug(logSender, connectionID, "home dir for new user %#v", user.HomeDir)
logger.Debug(logSender, connectionID, "home dir for new user %q", user.HomeDir)
user.Password = connectionID
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
err = dataprovider.AddUser(&user, dataprovider.ActionExecutorSystem, "", "")
if err != nil {
logger.Error(logSender, connectionID, "unable to add user %#v: %v", username, err)
logger.Error(logSender, connectionID, "unable to add user %q: %v", username, err)
os.Exit(1)
}
}
err = user.LoadAndApplyGroupSettings()
if err != nil {
logger.Error(logSender, connectionID, "unable to apply group settings for user %#v: %v", username, err)
logger.Error(logSender, connectionID, "unable to apply group settings for user %q: %v", username, err)
os.Exit(1)
}
err = sftpd.ServeSubSystemConnection(&user, connectionID, os.Stdin, os.Stdout)


@ -38,7 +38,7 @@ var (
fmt.Printf("Error querying service status: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Service status: %#v\r\n", status.String())
fmt.Printf("Service status: %q\r\n", status.String())
}
},
}


@ -96,23 +96,23 @@ func (c Config) Initialize() error {
}
for _, env := range c.Env {
if len(strings.SplitN(env, "=", 2)) != 2 {
return fmt.Errorf("invalid env var %#v", env)
return fmt.Errorf("invalid env var %q", env)
}
}
for idx, cmd := range c.Commands {
if cmd.Path == "" {
return fmt.Errorf("invalid path %#v", cmd.Path)
return fmt.Errorf("invalid path %q", cmd.Path)
}
if cmd.Timeout == 0 {
c.Commands[idx].Timeout = c.Timeout
} else {
if cmd.Timeout < minTimeout || cmd.Timeout > maxTimeout {
return fmt.Errorf("invalid timeout %v for command %#v", cmd.Timeout, cmd.Path)
return fmt.Errorf("invalid timeout %v for command %q", cmd.Timeout, cmd.Path)
}
}
for _, env := range cmd.Env {
if len(strings.SplitN(env, "=", 2)) != 2 {
return fmt.Errorf("invalid env var %#v for command %#v", env, cmd.Path)
return fmt.Errorf("invalid env var %q for command %q", env, cmd.Path)
}
}
// don't validate args, we allow to pass empty arguments


@ -636,7 +636,7 @@ func (c *Configuration) ExecuteStartupHook() error {
var url *url.URL
url, err := url.Parse(c.StartupHook)
if err != nil {
logger.Warn(logSender, "", "Invalid startup hook %#v: %v", c.StartupHook, err)
logger.Warn(logSender, "", "Invalid startup hook %q: %v", c.StartupHook, err)
return err
}
startTime := time.Now()
@ -650,8 +650,8 @@ func (c *Configuration) ExecuteStartupHook() error {
return nil
}
if !filepath.IsAbs(c.StartupHook) {
err := fmt.Errorf("invalid startup hook %#v", c.StartupHook)
logger.Warn(logSender, "", "Invalid startup hook %#v", c.StartupHook)
err := fmt.Errorf("invalid startup hook %q", c.StartupHook)
logger.Warn(logSender, "", "Invalid startup hook %q", c.StartupHook)
return err
}
startTime := time.Now()
@ -677,7 +677,7 @@ func (c *Configuration) executePostDisconnectHook(remoteAddr, protocol, username
var url *url.URL
url, err := url.Parse(c.PostDisconnectHook)
if err != nil {
logger.Warn(protocol, connID, "Invalid post disconnect hook %#v: %v", c.PostDisconnectHook, err)
logger.Warn(protocol, connID, "Invalid post disconnect hook %q: %v", c.PostDisconnectHook, err)
return
}
q := url.Query()
@ -698,7 +698,7 @@ func (c *Configuration) executePostDisconnectHook(remoteAddr, protocol, username
return
}
if !filepath.IsAbs(c.PostDisconnectHook) {
logger.Debug(protocol, connID, "invalid post disconnect hook %#v", c.PostDisconnectHook)
logger.Debug(protocol, connID, "invalid post disconnect hook %q", c.PostDisconnectHook)
return
}
timeout, env, args := command.GetConfig(c.PostDisconnectHook, command.HookPostDisconnect)
@ -735,7 +735,7 @@ func (c *Configuration) ExecutePostConnectHook(ipAddr, protocol string) error {
var url *url.URL
url, err := url.Parse(c.PostConnectHook)
if err != nil {
logger.Warn(protocol, "", "Login from ip %#v denied, invalid post connect hook %#v: %v",
logger.Warn(protocol, "", "Login from ip %q denied, invalid post connect hook %q: %v",
ipAddr, c.PostConnectHook, err)
return err
}
@ -746,19 +746,19 @@ func (c *Configuration) ExecutePostConnectHook(ipAddr, protocol string) error {
resp, err := httpclient.RetryableGet(url.String())
if err != nil {
logger.Warn(protocol, "", "Login from ip %#v denied, error executing post connect hook: %v", ipAddr, err)
logger.Warn(protocol, "", "Login from ip %q denied, error executing post connect hook: %v", ipAddr, err)
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logger.Warn(protocol, "", "Login from ip %#v denied, post connect hook response code: %v", ipAddr, resp.StatusCode)
logger.Warn(protocol, "", "Login from ip %q denied, post connect hook response code: %v", ipAddr, resp.StatusCode)
return errUnexpectedHTTResponse
}
return nil
}
if !filepath.IsAbs(c.PostConnectHook) {
err := fmt.Errorf("invalid post connect hook %#v", c.PostConnectHook)
logger.Warn(protocol, "", "Login from ip %#v denied: %v", ipAddr, err)
err := fmt.Errorf("invalid post connect hook %q", c.PostConnectHook)
logger.Warn(protocol, "", "Login from ip %q denied: %v", ipAddr, err)
return err
}
timeout, env, args := command.GetConfig(c.PostConnectHook, command.HookPostConnect)
@ -1034,7 +1034,7 @@ func (conns *ActiveConnections) checkIdles() {
if idleTime > Config.idleTimeoutAsDuration || (isUnauthenticatedFTPUser && idleTime > Config.idleLoginTimeout) {
defer func(conn ActiveConnection) {
err := conn.Disconnect()
logger.Debug(conn.GetProtocol(), conn.GetID(), "close idle connection, idle time: %v, username: %#v close err: %v",
logger.Debug(conn.GetProtocol(), conn.GetID(), "close idle connection, idle time: %v, username: %q close err: %v",
time.Since(conn.GetLastActivity()), conn.GetUsername(), err)
}(c)
}
@ -1091,7 +1091,7 @@ func (conns *ActiveConnections) checkTransfers() {
for _, c := range conns.connections {
for _, overquotaTransfer := range overquotaTransfers {
if c.GetID() == overquotaTransfer.ConnID {
logger.Info(logSender, c.GetID(), "user %#v is overquota, try to close transfer id %v",
logger.Info(logSender, c.GetID(), "user %q is overquota, try to close transfer id %v",
c.GetUsername(), overquotaTransfer.TransferID)
var err error
if overquotaTransfer.TransferType == TransferDownload {
@ -1234,7 +1234,7 @@ func (c *ConnectionStatus) GetConnectionDuration() string {
func (c *ConnectionStatus) GetConnectionInfo() string {
var result strings.Builder
result.WriteString(fmt.Sprintf("%v. Client: %#v From: %#v", c.Protocol, c.ClientVersion, c.RemoteAddress))
result.WriteString(fmt.Sprintf("%v. Client: %q From: %q", c.Protocol, c.ClientVersion, c.RemoteAddress))
if c.Command == "" {
return result.String()
@ -1242,9 +1242,9 @@ func (c *ConnectionStatus) GetConnectionInfo() string {
switch c.Protocol {
case ProtocolSSH, ProtocolFTP:
result.WriteString(fmt.Sprintf(". Command: %#v", c.Command))
result.WriteString(fmt.Sprintf(". Command: %q", c.Command))
case ProtocolWebDAV:
result.WriteString(fmt.Sprintf(". Method: %#v", c.Command))
result.WriteString(fmt.Sprintf(". Method: %q", c.Command))
}
return result.String()


@ -362,7 +362,7 @@ func (c *BaseConnection) CreateDir(virtualPath string, checkFilePatterns bool) e
}
startTime := time.Now()
if err := fs.Mkdir(fsPath); err != nil {
c.Log(logger.LevelError, "error creating dir: %#v error: %+v", fsPath, err)
c.Log(logger.LevelError, "error creating dir: %q error: %+v", fsPath, err)
return c.GetFsError(fs, err)
}
vfs.SetPathPermissions(fs, fsPath, c.User.GetUID(), c.User.GetGID())
@ -440,7 +440,7 @@ func (c *BaseConnection) IsRemoveDirAllowed(fs vfs.Fs, fsPath, virtualPath strin
return fmt.Errorf("removing virtual folders is not allowed: %w", c.GetPermissionDeniedError())
}
if c.User.HasVirtualFoldersInside(virtualPath) {
c.Log(logger.LevelWarn, "removing a directory with a virtual folder inside is not allowed: %#v", virtualPath)
c.Log(logger.LevelWarn, "removing a directory with a virtual folder inside is not allowed: %q", virtualPath)
return fmt.Errorf("cannot remove directory %q with virtual folders inside: %w", virtualPath, c.GetOpUnsupportedError())
}
if c.User.IsMappedPath(fsPath) {
@ -484,7 +484,7 @@ func (c *BaseConnection) RemoveDir(virtualPath string) error {
startTime := time.Now()
if err := fs.Remove(fsPath, true); err != nil {
c.Log(logger.LevelError, "failed to remove directory %#v: %+v", fsPath, err)
c.Log(logger.LevelError, "failed to remove directory %q: %+v", fsPath, err)
return c.GetFsError(fs, err)
}
elapsed := time.Since(startTime).Nanoseconds() / 1000000
@ -810,11 +810,11 @@ func (c *BaseConnection) CreateSymlink(virtualSourcePath, virtualTargetPath stri
}
ok, policy := c.User.IsFileAllowed(virtualSourcePath)
if !ok && policy == sdk.DenyPolicyHide {
c.Log(logger.LevelError, "symlink source path %#v is not allowed", virtualSourcePath)
c.Log(logger.LevelError, "symlink source path %q is not allowed", virtualSourcePath)
return c.GetNotExistError()
}
if ok, _ = c.User.IsFileAllowed(virtualTargetPath); !ok {
c.Log(logger.LevelError, "symlink target path %#v is not allowed", virtualTargetPath)
c.Log(logger.LevelError, "symlink target path %q is not allowed", virtualTargetPath)
return c.GetPermissionDeniedError()
}
if relativePath != "" {
@ -913,7 +913,7 @@ func (c *BaseConnection) handleChmod(fs vfs.Fs, fsPath, pathForPerms string, att
}
startTime := time.Now()
if err := fs.Chmod(c.getRealFsPath(fsPath), attributes.Mode); err != nil {
c.Log(logger.LevelError, "failed to chmod path %#v, mode: %v, err: %+v", fsPath, attributes.Mode.String(), err)
c.Log(logger.LevelError, "failed to chmod path %q, mode: %v, err: %+v", fsPath, attributes.Mode.String(), err)
return c.GetFsError(fs, err)
}
elapsed := time.Since(startTime).Nanoseconds() / 1000000
@ -931,7 +931,7 @@ func (c *BaseConnection) handleChown(fs vfs.Fs, fsPath, pathForPerms string, att
}
startTime := time.Now()
if err := fs.Chown(c.getRealFsPath(fsPath), attributes.UID, attributes.GID); err != nil {
c.Log(logger.LevelError, "failed to chown path %#v, uid: %v, gid: %v, err: %+v", fsPath, attributes.UID,
c.Log(logger.LevelError, "failed to chown path %q, uid: %v, gid: %v, err: %+v", fsPath, attributes.UID,
attributes.GID, err)
return c.GetFsError(fs, err)
}
@ -955,7 +955,7 @@ func (c *BaseConnection) handleChtimes(fs vfs.Fs, fsPath, pathForPerms string, a
if errors.Is(err, vfs.ErrVfsUnsupported) && Config.SetstatMode == 2 {
return nil
}
c.Log(logger.LevelError, "failed to chtimes for path %#v, access time: %v, modification time: %v, err: %+v",
c.Log(logger.LevelError, "failed to chtimes for path %q, access time: %v, modification time: %v, err: %+v",
fsPath, attributes.Atime, attributes.Mtime, err)
return c.GetFsError(fs, err)
}
@ -1002,7 +1002,7 @@ func (c *BaseConnection) SetStat(virtualPath string, attributes *StatAttributes)
}
startTime := time.Now()
if err = c.truncateFile(fs, fsPath, virtualPath, attributes.Size); err != nil {
c.Log(logger.LevelError, "failed to truncate path %#v, size: %v, err: %+v", fsPath, attributes.Size, err)
c.Log(logger.LevelError, "failed to truncate path %q, size: %v, err: %+v", fsPath, attributes.Size, err)
return c.GetFsError(fs, err)
}
elapsed := time.Since(startTime).Nanoseconds() / 1000000
@ -1020,7 +1020,7 @@ func (c *BaseConnection) truncateFile(fs vfs.Fs, fsPath, virtualPath string, siz
var err error
initialSize, err = c.truncateOpenHandle(fsPath, size)
if err == errNoTransfer {
c.Log(logger.LevelDebug, "file path %#v not found in active transfers, execute trucate by path", fsPath)
c.Log(logger.LevelDebug, "file path %q not found in active transfers, execute trucate by path", fsPath)
var info os.FileInfo
info, err = fs.Stat(fsPath)
if err != nil {
@ -1147,11 +1147,11 @@ func (c *BaseConnection) isRenamePermitted(fsSrc, fsDst vfs.Fs, fsSourcePath, fs
return false
}
if c.User.IsMappedPath(fsSourcePath) && vfs.IsLocalOrCryptoFs(fsSrc) {
c.Log(logger.LevelWarn, "renaming a directory mapped as virtual folder is not allowed: %#v", fsSourcePath)
c.Log(logger.LevelWarn, "renaming a directory mapped as virtual folder is not allowed: %q", fsSourcePath)
return false
}
if c.User.IsMappedPath(fsTargetPath) && vfs.IsLocalOrCryptoFs(fsDst) {
c.Log(logger.LevelWarn, "renaming to a directory mapped as virtual folder is not allowed: %#v", fsTargetPath)
c.Log(logger.LevelWarn, "renaming to a directory mapped as virtual folder is not allowed: %q", fsTargetPath)
return false
}
if virtualSourcePath == "/" || virtualTargetPath == "/" || fsSrc.GetRelativePath(fsSourcePath) == "/" {
@ -1316,7 +1316,7 @@ func (c *BaseConnection) checkUserQuota() (dataprovider.TransferQuota, int, int6
}
usedFiles, usedSize, usedULSize, usedDLSize, err := dataprovider.GetUsedQuota(c.User.Username)
if err != nil {
c.Log(logger.LevelError, "error getting used quota for %#v: %v", c.User.Username, err)
c.Log(logger.LevelError, "error getting used quota for %q: %v", c.User.Username, err)
result.AllowedTotalSize = -1
return result, -1, -1
}
@ -1376,7 +1376,7 @@ func (c *BaseConnection) HasSpace(checkFiles, getUsage bool, requestPath string)
}
}
if err != nil {
c.Log(logger.LevelError, "error getting used quota for %#v request path %#v: %v", c.User.Username, requestPath, err)
c.Log(logger.LevelError, "error getting used quota for %q request path %q: %v", c.User.Username, requestPath, err)
result.HasSpace = false
return result, transferQuota
}


@ -170,7 +170,7 @@ func (c *RetentionCheck) Validate() error {
nothingToDo = false
}
if _, ok := folderPaths[f.Path]; ok {
return util.NewValidationError(fmt.Sprintf("duplicated folder path %#v", f.Path))
return util.NewValidationError(fmt.Sprintf("duplicated folder path %q", f.Path))
}
folderPaths[f.Path] = true
}
@ -191,7 +191,7 @@ func (c *RetentionCheck) Validate() error {
return util.NewValidationError("in order to notify results via hook you must define a data_retention_hook")
}
default:
return util.NewValidationError(fmt.Sprintf("invalid notification %#v", notification))
return util.NewValidationError(fmt.Sprintf("invalid notification %q", notification))
}
}
return nil
@ -215,7 +215,7 @@ func (c *RetentionCheck) getFolderRetention(folderPath string) (dataprovider.Fol
}
}
return dataprovider.FolderRetention{}, fmt.Errorf("unable to find folder retention for %#v", folderPath)
return dataprovider.FolderRetention{}, fmt.Errorf("unable to find folder retention for %q", folderPath)
}
func (c *RetentionCheck) removeFile(virtualPath string, info os.FileInfo) error {
@ -238,7 +238,7 @@ func (c *RetentionCheck) cleanupFolder(folderPath string) error {
if !c.conn.User.HasPerm(dataprovider.PermListItems, folderPath) || !c.conn.User.HasAnyPerm(deleteFilesPerms, folderPath) {
result.Elapsed = time.Since(startTime)
result.Info = "data retention check skipped: no permissions"
c.conn.Log(logger.LevelInfo, "user %#v does not have permissions to check retention on %#v, retention check skipped",
c.conn.Log(logger.LevelInfo, "user %q does not have permissions to check retention on %q, retention check skipped",
c.conn.User.Username, folderPath)
return nil
}
@ -247,27 +247,27 @@ func (c *RetentionCheck) cleanupFolder(folderPath string) error {
if err != nil {
result.Elapsed = time.Since(startTime)
result.Error = "unable to get folder retention"
c.conn.Log(logger.LevelError, "unable to get folder retention for path %#v", folderPath)
c.conn.Log(logger.LevelError, "unable to get folder retention for path %q", folderPath)
return err
}
result.Retention = folderRetention.Retention
if folderRetention.Retention == 0 {
result.Elapsed = time.Since(startTime)
result.Info = "data retention check skipped: retention is set to 0"
c.conn.Log(logger.LevelDebug, "retention check skipped for folder %#v, retention is set to 0", folderPath)
c.conn.Log(logger.LevelDebug, "retention check skipped for folder %q, retention is set to 0", folderPath)
return nil
}
c.conn.Log(logger.LevelDebug, "start retention check for folder %#v, retention: %v hours, delete empty dirs? %v, ignore user perms? %v",
c.conn.Log(logger.LevelDebug, "start retention check for folder %q, retention: %v hours, delete empty dirs? %v, ignore user perms? %v",
folderPath, folderRetention.Retention, folderRetention.DeleteEmptyDirs, folderRetention.IgnoreUserPermissions)
files, err := c.conn.ListDir(folderPath)
if err != nil {
result.Elapsed = time.Since(startTime)
if err == c.conn.GetNotExistError() {
result.Info = "data retention check skipped, folder does not exist"
c.conn.Log(logger.LevelDebug, "folder %#v does not exist, retention check skipped", folderPath)
c.conn.Log(logger.LevelDebug, "folder %q does not exist, retention check skipped", folderPath)
return nil
}
result.Error = fmt.Sprintf("unable to list directory %#v", folderPath)
result.Error = fmt.Sprintf("unable to list directory %q", folderPath)
c.conn.Log(logger.LevelError, result.Error)
return err
}
@ -277,7 +277,7 @@ func (c *RetentionCheck) cleanupFolder(folderPath string) error {
if err := c.cleanupFolder(virtualPath); err != nil {
result.Elapsed = time.Since(startTime)
result.Error = fmt.Sprintf("unable to check folder: %v", err)
c.conn.Log(logger.LevelError, "unable to cleanup folder %#v: %v", virtualPath, err)
c.conn.Log(logger.LevelError, "unable to cleanup folder %q: %v", virtualPath, err)
return err
}
} else {
@ -285,12 +285,12 @@ func (c *RetentionCheck) cleanupFolder(folderPath string) error {
if retentionTime.Before(time.Now()) {
if err := c.removeFile(virtualPath, info); err != nil {
result.Elapsed = time.Since(startTime)
result.Error = fmt.Sprintf("unable to remove file %#v: %v", virtualPath, err)
c.conn.Log(logger.LevelError, "unable to remove file %#v, retention %v: %v",
result.Error = fmt.Sprintf("unable to remove file %q: %v", virtualPath, err)
c.conn.Log(logger.LevelError, "unable to remove file %q, retention %v: %v",
virtualPath, retentionTime, err)
return err
}
c.conn.Log(logger.LevelDebug, "removed file %#v, modification time: %v, retention: %v hours, retention time: %v",
c.conn.Log(logger.LevelDebug, "removed file %q, modification time: %v, retention: %v hours, retention time: %v",
virtualPath, info.ModTime(), folderRetention.Retention, retentionTime)
result.DeletedFiles++
result.DeletedSize += info.Size()
@ -302,7 +302,7 @@ func (c *RetentionCheck) cleanupFolder(folderPath string) error {
c.checkEmptyDirRemoval(folderPath)
}
result.Elapsed = time.Since(startTime)
c.conn.Log(logger.LevelDebug, "retention check completed for folder %#v, deleted files: %v, deleted size: %v bytes",
c.conn.Log(logger.LevelDebug, "retention check completed for folder %q, deleted files: %v, deleted size: %v bytes",
folderPath, result.DeletedFiles, result.DeletedSize)
return nil
@ -325,7 +325,7 @@ func (c *RetentionCheck) checkEmptyDirRemoval(folderPath string) {
files, err := c.conn.ListDir(folderPath)
if err == nil && len(files) == 0 {
err = c.conn.RemoveDir(folderPath)
c.conn.Log(logger.LevelDebug, "tryed to remove empty dir %#v, error: %v", folderPath, err)
c.conn.Log(logger.LevelDebug, "tryed to remove empty dir %q, error: %v", folderPath, err)
}
}
}
@ -340,7 +340,7 @@ func (c *RetentionCheck) Start() error {
for _, folder := range c.Folders {
if folder.Retention > 0 {
if err := c.cleanupFolder(folder.Path); err != nil {
c.conn.Log(logger.LevelError, "retention check failed, unable to cleanup folder %#v", folder.Path)
c.conn.Log(logger.LevelError, "retention check failed, unable to cleanup folder %q", folder.Path)
c.sendNotifications(time.Since(startTime), err)
return err
}
@ -429,7 +429,7 @@ func (c *RetentionCheck) sendHookNotification(elapsed time.Duration, errCheck er
var url *url.URL
url, err := url.Parse(Config.DataRetentionHook)
if err != nil {
c.conn.Log(logger.LevelError, "invalid data retention hook %#v: %v", Config.DataRetentionHook, err)
c.conn.Log(logger.LevelError, "invalid data retention hook %q: %v", Config.DataRetentionHook, err)
return err
}
respCode := 0
@ -444,13 +444,13 @@ func (c *RetentionCheck) sendHookNotification(elapsed time.Duration, errCheck er
}
}
c.conn.Log(logger.LevelDebug, "notified result to URL: %#v, status code: %v, elapsed: %v err: %v",
c.conn.Log(logger.LevelDebug, "notified result to URL: %q, status code: %v, elapsed: %v err: %v",
url.Redacted(), respCode, time.Since(startTime), err)
return err
}
if !filepath.IsAbs(Config.DataRetentionHook) {
err := fmt.Errorf("invalid data retention hook %#v", Config.DataRetentionHook)
err := fmt.Errorf("invalid data retention hook %q", Config.DataRetentionHook)
c.conn.Log(logger.LevelError, "%v", err)
return err
}


@ -819,7 +819,7 @@ func addZipEntry(wr *zipWriterWrapper, conn *BaseConnection, entryPath, baseDir
}
info, err := conn.DoStat(entryPath, 1, false)
if err != nil {
eventManagerLog(logger.LevelError, "unable to add zip entry %#v, stat error: %v", entryPath, err)
eventManagerLog(logger.LevelError, "unable to add zip entry %q, stat error: %v", entryPath, err)
return err
}
entryName, err := getZipEntryName(entryPath, baseDir)


@ -95,7 +95,7 @@ func (r *RateLimiterConfig) validate() error {
r.Protocols = util.RemoveDuplicates(r.Protocols, true)
for _, protocol := range r.Protocols {
if !util.Contains(rateLimiterProtocolValues, protocol) {
return fmt.Errorf("invalid protocol %#v", protocol)
return fmt.Errorf("invalid protocol %q", protocol)
}
}
return nil


@ -272,7 +272,7 @@ func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
}
t.Unlock()
}
t.Connection.Log(logger.LevelDebug, "file %#v truncated to size %v max write size %v new initial size %v err: %v",
t.Connection.Log(logger.LevelDebug, "file %q truncated to size %v max write size %v new initial size %v err: %v",
fsPath, size, t.MaxWriteSize, t.InitialSize, err)
return initialSize, err
}
@ -301,7 +301,7 @@ func (t *BaseTransfer) TransferError(err error) {
t.cancelFn()
}
elapsed := time.Since(t.start).Nanoseconds() / 1000000
t.Connection.Log(logger.LevelError, "Unexpected error for transfer, path: %#v, error: \"%v\" bytes sent: %v, "+
t.Connection.Log(logger.LevelError, "Unexpected error for transfer, path: %q, error: \"%v\" bytes sent: %v, "+
"bytes received: %v transfer running since %v ms", t.fsPath, t.ErrTransfer, t.BytesSent.Load(),
t.BytesReceived.Load(), elapsed)
}
@ -317,7 +317,7 @@ func (t *BaseTransfer) getUploadFileSize() (int64, int, error) {
if t.ErrTransfer != nil && vfs.IsCryptOsFs(t.Fs) {
errDelete := t.Fs.Remove(t.fsPath, false)
if errDelete != nil {
t.Connection.Log(logger.LevelWarn, "error removing partial crypto file %#v: %v", t.fsPath, errDelete)
t.Connection.Log(logger.LevelWarn, "error removing partial crypto file %q: %v", t.fsPath, errDelete)
} else {
fileSize = 0
deletedFiles = 1
@ -337,7 +337,7 @@ func (t *BaseTransfer) checkUploadOutsideHomeDir(err error) int {
return 0
}
err = t.Fs.Remove(t.effectiveFsPath, false)
t.Connection.Log(logger.LevelWarn, "upload in temp path cannot be renamed, delete temporary file: %#v, deletion error: %v",
t.Connection.Log(logger.LevelWarn, "upload in temp path cannot be renamed, delete temporary file: %q, deletion error: %v",
t.effectiveFsPath, err)
// the file is outside the home dir so don't update the quota
t.BytesReceived.Store(0)
@ -368,18 +368,18 @@ func (t *BaseTransfer) Close() error {
t.BytesReceived.Store(0)
t.MinWriteOffset = 0
}
t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %q, deletion error: %v",
t.File.Name(), err)
} else if t.transferType == TransferUpload && t.effectiveFsPath != t.fsPath {
if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
_, _, err = t.Fs.Rename(t.effectiveFsPath, t.fsPath)
t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %#v -> %#v, error: %v",
t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %q -> %q, error: %v",
t.effectiveFsPath, t.fsPath, err)
// the file must be removed if it is uploaded to a path outside the home dir and cannot be renamed
t.checkUploadOutsideHomeDir(err)
} else {
err = t.Fs.Remove(t.effectiveFsPath, false)
t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %#v, deletion error: %v",
t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %q, deletion error: %v",
t.ErrTransfer, t.effectiveFsPath, err)
if err == nil {
t.BytesReceived.Store(0)
@ -415,7 +415,7 @@ func (t *BaseTransfer) Close() error {
t.Connection.ID, t.Connection.protocol, t.Connection.localAddr, t.Connection.remoteAddr, t.ftpMode)
}
if t.ErrTransfer != nil {
t.Connection.Log(logger.LevelError, "transfer error: %v, path: %#v", t.ErrTransfer, t.fsPath)
t.Connection.Log(logger.LevelError, "transfer error: %v, path: %q", t.ErrTransfer, t.fsPath)
if err == nil {
err = t.ErrTransfer
}
@ -479,7 +479,7 @@ func (t *BaseTransfer) getUploadedFiles() int {
func (t *BaseTransfer) updateTimes() {
if !t.aTime.IsZero() && !t.mTime.IsZero() {
err := t.Fs.Chtimes(t.fsPath, t.aTime, t.mTime, true)
t.Connection.Log(logger.LevelDebug, "set times for file %#v, atime: %v, mtime: %v, err: %v",
t.Connection.Log(logger.LevelDebug, "set times for file %q, atime: %v, mtime: %v, err: %v",
t.fsPath, t.aTime, t.mTime, err)
}
}


@ -200,7 +200,7 @@ func (t *baseTransferChecker) getOverquotaTransfers(usersToFetch map[string]bool
// file will be successful
usedDiskQuota += tr.CurrentULSize - tr.TruncatedSize
}
logger.Debug(logSender, "", "username %#v, folder %#v, concurrent transfers: %v, remaining disk quota (bytes): %v, disk quota used in ongoing transfers (bytes): %v",
logger.Debug(logSender, "", "username %q, folder %q, concurrent transfers: %v, remaining disk quota (bytes): %v, disk quota used in ongoing transfers (bytes): %v",
username, folderName, len(transfers), remaningDiskQuota, usedDiskQuota)
if usedDiskQuota > remaningDiskQuota {
for _, tr := range transfers {
@ -221,7 +221,7 @@ func (t *baseTransferChecker) getOverquotaTransfers(usersToFetch map[string]bool
ulSize += tr.CurrentULSize
dlSize += tr.CurrentDLSize
}
logger.Debug(logSender, "", "username %#v, concurrent transfers: %v, quota (bytes) used in ongoing transfers, ul: %v, dl: %v",
logger.Debug(logSender, "", "username %q, concurrent transfers: %v, quota (bytes) used in ongoing transfers, ul: %v, dl: %v",
username, len(transfers), ulSize, dlSize)
for _, tr := range transfers {
if t.isDataTransferExceeded(usersMap[username], tr, ulSize, dlSize) {


@ -732,7 +732,7 @@ func LoadConfig(configDir, configFile string) error {
loadBindingsFromEnv()
loadWebDAVCacheMappingsFromEnv()
resetInvalidConfigs()
logger.Debug(logSender, "", "config file used: '%#v', config loaded: %+v", viper.ConfigFileUsed(), getRedactedGlobalConf())
logger.Debug(logSender, "", "config file used: '%q', config loaded: %+v", viper.ConfigFileUsed(), getRedactedGlobalConf())
return nil
}
@ -759,7 +759,7 @@ func resetInvalidConfigs() {
globalConf.HTTPDConfig.Setup.InstallationCodeHint = defaultInstallCodeHint
}
if globalConf.ProviderConf.UsersBaseDir != "" && !util.IsFileInputValid(globalConf.ProviderConf.UsersBaseDir) {
warn := fmt.Sprintf("invalid users base dir %#v will be ignored", globalConf.ProviderConf.UsersBaseDir)
warn := fmt.Sprintf("invalid users base dir %q will be ignored", globalConf.ProviderConf.UsersBaseDir)
globalConf.ProviderConf.UsersBaseDir = ""
logger.Warn(logSender, "", "Non-fatal configuration error: %v", warn)
logger.WarnToConsole("Non-fatal configuration error: %v", warn)
@ -786,7 +786,7 @@ func resetInvalidConfigs() {
}
if globalConf.Common.DefenderConfig.Enabled && globalConf.Common.DefenderConfig.Driver == common.DefenderDriverProvider {
if !globalConf.ProviderConf.IsDefenderSupported() {
warn := fmt.Sprintf("provider based defender is not supported with data provider %#v, "+
warn := fmt.Sprintf("provider based defender is not supported with data provider %q, "+
"the memory defender implementation will be used. If you want to use the provider defender "+
"implementation please switch to a shared/distributed data provider",
globalConf.ProviderConf.Driver)


@ -97,7 +97,7 @@ func (c *AdminTOTPConfig) validate(username string) error {
return util.NewValidationError("totp: config name is mandatory")
}
if !util.Contains(mfa.GetAvailableTOTPConfigNames(), c.ConfigName) {
return util.NewValidationError(fmt.Sprintf("totp: config name %#v not found", c.ConfigName))
return util.NewValidationError(fmt.Sprintf("totp: config name %q not found", c.ConfigName))
}
if c.Secret.IsEmpty() {
return util.NewValidationError("totp: secret is mandatory")
@ -396,13 +396,13 @@ func (a *Admin) validate() error {
return err
}
if a.Email != "" && !util.IsEmailValid(a.Email) {
return util.NewValidationError(fmt.Sprintf("email %#v is not valid", a.Email))
return util.NewValidationError(fmt.Sprintf("email %q is not valid", a.Email))
}
a.Filters.AllowList = util.RemoveDuplicates(a.Filters.AllowList, false)
for _, IPMask := range a.Filters.AllowList {
_, _, err := net.ParseCIDR(IPMask)
if err != nil {
return util.NewValidationError(fmt.Sprintf("could not parse allow list entry %#v : %v", IPMask, err))
return util.NewValidationError(fmt.Sprintf("could not parse allow list entry %q : %v", IPMask, err))
}
}
@ -462,7 +462,7 @@ func (a *Admin) CanLoginFromIP(ip string) bool {
// CanLogin returns an error if the login is not allowed
func (a *Admin) CanLogin(ip string) error {
if a.Status != 1 {
return fmt.Errorf("admin %#v is disabled", a.Username)
return fmt.Errorf("admin %q is disabled", a.Username)
}
if !a.CanLoginFromIP(ip) {
return fmt.Errorf("login from IP %v not allowed", ip)


@ -182,7 +182,7 @@ func (k *APIKey) validate() error {
// Authenticate tries to authenticate the provided plain key
func (k *APIKey) Authenticate(plainKey string) error {
if k.ExpiresAt > 0 && k.ExpiresAt < util.GetTimeAsMsSinceEpoch(time.Now()) {
return fmt.Errorf("API key %#v is expired, expiration timestamp: %v current timestamp: %v", k.KeyID,
return fmt.Errorf("API key %q is expired, expiration timestamp: %v current timestamp: %v", k.KeyID,
k.ExpiresAt, util.GetTimeAsMsSinceEpoch(time.Now()))
}
if strings.HasPrefix(k.Key, bcryptPwdPrefix) {


@ -73,7 +73,7 @@ func initializeBoltProvider(basePath string) error {
dbPath := config.Name
if !util.IsFileInputValid(dbPath) {
return fmt.Errorf("invalid database path: %#v", dbPath)
return fmt.Errorf("invalid database path: %q", dbPath)
}
if !filepath.IsAbs(dbPath) {
dbPath = filepath.Join(basePath, dbPath)
@ -113,7 +113,7 @@ func (p *BoltProvider) validateUserAndTLSCert(username, protocol string, tlsCert
}
user, err := p.userExists(username, "")
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, err
}
return checkUserAndTLSCertificate(&user, protocol, tlsCert)
@ -122,7 +122,7 @@ func (p *BoltProvider) validateUserAndTLSCert(username, protocol string, tlsCert
func (p *BoltProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
user, err := p.userExists(username, "")
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, err
}
return checkUserAndPass(&user, password, ip, protocol)
@ -131,7 +131,7 @@ func (p *BoltProvider) validateUserAndPass(username, password, ip, protocol stri
func (p *BoltProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
admin, err := p.adminExists(username)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating admin %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating admin %q: %v", username, err)
return admin, ErrInvalidCredentials
}
err = admin.checkUserAndPass(password, ip)
@ -145,7 +145,7 @@ func (p *BoltProvider) validateUserAndPubKey(username string, pubKey []byte, isS
}
user, err := p.userExists(username, "")
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, "", err
}
return checkUserAndPubKey(&user, pubKey, isSSHCert)
@ -159,7 +159,7 @@ func (p *BoltProvider) updateAPIKeyLastUse(keyID string) error {
}
var u []byte
if u = bucket.Get([]byte(keyID)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("key %#v does not exist, unable to update last use", keyID))
return util.NewRecordNotFoundError(fmt.Sprintf("key %q does not exist, unable to update last use", keyID))
}
var apiKey APIKey
err = json.Unmarshal(u, &apiKey)
@ -173,10 +173,10 @@ func (p *BoltProvider) updateAPIKeyLastUse(keyID string) error {
}
err = bucket.Put([]byte(keyID), buf)
if err != nil {
providerLog(logger.LevelWarn, "error updating last use for key %#v: %v", keyID, err)
providerLog(logger.LevelWarn, "error updating last use for key %q: %v", keyID, err)
return err
}
providerLog(logger.LevelDebug, "last use updated for key %#v", keyID)
providerLog(logger.LevelDebug, "last use updated for key %q", keyID)
return nil
})
}
@ -189,7 +189,7 @@ func (p *BoltProvider) setUpdatedAt(username string) {
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update updated at", username))
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist, unable to update updated at", username))
}
var user User
err = json.Unmarshal(u, &user)
@ -203,10 +203,10 @@ func (p *BoltProvider) setUpdatedAt(username string) {
}
err = bucket.Put([]byte(username), buf)
if err == nil {
providerLog(logger.LevelDebug, "updated at set for user %#v", username)
providerLog(logger.LevelDebug, "updated at set for user %q", username)
setLastUserUpdate()
} else {
providerLog(logger.LevelWarn, "error setting updated_at for user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error setting updated_at for user %q: %v", username, err)
}
return err
})
@ -220,7 +220,7 @@ func (p *BoltProvider) updateLastLogin(username string) error {
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update last login", username))
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist, unable to update last login", username))
}
var user User
err = json.Unmarshal(u, &user)
@ -234,9 +234,9 @@ func (p *BoltProvider) updateLastLogin(username string) error {
}
err = bucket.Put([]byte(username), buf)
if err != nil {
providerLog(logger.LevelWarn, "error updating last login for user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error updating last login for user %q: %v", username, err)
} else {
providerLog(logger.LevelDebug, "last login updated for user %#v", username)
providerLog(logger.LevelDebug, "last login updated for user %q", username)
}
return err
})
@ -250,7 +250,7 @@ func (p *BoltProvider) updateAdminLastLogin(username string) error {
}
var a []byte
if a = bucket.Get([]byte(username)); a == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("admin %#v does not exist, unable to update last login", username))
return util.NewRecordNotFoundError(fmt.Sprintf("admin %q does not exist, unable to update last login", username))
}
var admin Admin
err = json.Unmarshal(a, &admin)
@ -264,10 +264,10 @@ func (p *BoltProvider) updateAdminLastLogin(username string) error {
}
err = bucket.Put([]byte(username), buf)
if err == nil {
providerLog(logger.LevelDebug, "last login updated for admin %#v", username)
providerLog(logger.LevelDebug, "last login updated for admin %q", username)
return err
}
providerLog(logger.LevelWarn, "error updating last login for admin %#v: %v", username, err)
providerLog(logger.LevelWarn, "error updating last login for admin %q: %v", username, err)
return err
})
}
@ -280,7 +280,7 @@ func (p *BoltProvider) updateTransferQuota(username string, uploadSize, download
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update transfer quota",
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist, unable to update transfer quota",
username))
}
var user User
@ -301,7 +301,7 @@ func (p *BoltProvider) updateTransferQuota(username string, uploadSize, download
return err
}
err = bucket.Put([]byte(username), buf)
providerLog(logger.LevelDebug, "transfer quota updated for user %#v, ul increment: %v dl increment: %v is reset? %v",
providerLog(logger.LevelDebug, "transfer quota updated for user %q, ul increment: %v dl increment: %v is reset? %v",
username, uploadSize, downloadSize, reset)
return err
})
@ -315,7 +315,7 @@ func (p *BoltProvider) updateQuota(username string, filesAdd int, sizeAdd int64,
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to update quota", username))
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist, unable to update quota", username))
}
var user User
err = json.Unmarshal(u, &user)
@ -335,7 +335,7 @@ func (p *BoltProvider) updateQuota(username string, filesAdd int, sizeAdd int64,
return err
}
err = bucket.Put([]byte(username), buf)
providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
providerLog(logger.LevelDebug, "quota updated for user %q, files increment: %v size increment: %v is reset? %v",
username, filesAdd, sizeAdd, reset)
return err
})
@ -695,7 +695,7 @@ func (p *BoltProvider) updateUser(user *User) error {
}
var u []byte
if u = bucket.Get([]byte(user.Username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", user.Username))
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist", user.Username))
}
var oldUser User
err = json.Unmarshal(u, &oldUser)
@ -789,7 +789,7 @@ func (p *BoltProvider) updateUserPassword(username, password string) error {
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist", username))
}
var user User
err = json.Unmarshal(u, &user)
@ -1247,7 +1247,7 @@ func (p *BoltProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int6
}
var f []byte
if f = bucket.Get([]byte(name)); f == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist, unable to update quota", name))
return util.NewRecordNotFoundError(fmt.Sprintf("folder %q does not exist, unable to update quota", name))
}
var folder vfs.BaseVirtualFolder
err = json.Unmarshal(f, &folder)
@ -1273,7 +1273,7 @@ func (p *BoltProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int6
func (p *BoltProvider) getUsedFolderQuota(name string) (int, int64, error) {
folder, err := p.getFolderByName(name)
if err != nil {
providerLog(logger.LevelError, "unable to get quota for folder %#v error: %v", name, err)
providerLog(logger.LevelError, "unable to get quota for folder %q error: %v", name, err)
return 0, 0, err
}
return folder.UsedQuotaFiles, folder.UsedQuotaSize, err
@ -1396,7 +1396,7 @@ func (p *BoltProvider) groupExists(name string) (Group, error) {
}
g := bucket.Get([]byte(name))
if g == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("group %#v does not exist", name))
return util.NewRecordNotFoundError(fmt.Sprintf("group %q does not exist", name))
}
foldersBucket, err := p.getFoldersBucket(tx)
if err != nil {
@ -1462,7 +1462,7 @@ func (p *BoltProvider) updateGroup(group *Group) error {
}
var g []byte
if g = bucket.Get([]byte(group.Name)); g == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("group %#v does not exist", group.Name))
return util.NewRecordNotFoundError(fmt.Sprintf("group %q does not exist", group.Name))
}
var oldGroup Group
err = json.Unmarshal(g, &oldGroup)
@ -1502,7 +1502,7 @@ func (p *BoltProvider) deleteGroup(group Group) error {
}
var g []byte
if g = bucket.Get([]byte(group.Name)); g == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("group %#v does not exist", group.Name))
return util.NewRecordNotFoundError(fmt.Sprintf("group %q does not exist", group.Name))
}
var oldGroup Group
err = json.Unmarshal(g, &oldGroup)
@ -1510,7 +1510,7 @@ func (p *BoltProvider) deleteGroup(group Group) error {
return err
}
if len(oldGroup.Users) > 0 {
return util.NewValidationError(fmt.Sprintf("the group %#v is referenced, it cannot be removed", oldGroup.Name))
return util.NewValidationError(fmt.Sprintf("the group %q is referenced, it cannot be removed", oldGroup.Name))
}
if len(oldGroup.VirtualFolders) > 0 {
foldersBucket, err := p.getFoldersBucket(tx)
@ -1605,12 +1605,12 @@ func (p *BoltProvider) addAPIKey(apiKey *APIKey) error {
apiKey.LastUseAt = 0
if apiKey.User != "" {
if err := p.userExistsInternal(tx, apiKey.User); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", apiKey.User))
}
}
if apiKey.Admin != "" {
if err := p.adminExistsInternal(tx, apiKey.Admin); err != nil {
return util.NewValidationError(fmt.Sprintf("related admin %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related admin %q does not exists", apiKey.User))
}
}
buf, err := json.Marshal(apiKey)
@ -1650,12 +1650,12 @@ func (p *BoltProvider) updateAPIKey(apiKey *APIKey) error {
apiKey.UpdatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
if apiKey.User != "" {
if err := p.userExistsInternal(tx, apiKey.User); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", apiKey.User))
}
}
if apiKey.Admin != "" {
if err := p.adminExistsInternal(tx, apiKey.Admin); err != nil {
return util.NewValidationError(fmt.Sprintf("related admin %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related admin %q does not exists", apiKey.User))
}
}
buf, err := json.Marshal(apiKey)
@ -1809,7 +1809,7 @@ func (p *BoltProvider) addShare(share *Share) error {
share.UpdatedAt = share.CreatedAt
}
if err := p.userExistsInternal(tx, share.Username); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", share.Username))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", share.Username))
}
buf, err := json.Marshal(share)
if err != nil {
@ -1857,7 +1857,7 @@ func (p *BoltProvider) updateShare(share *Share) error {
share.UpdatedAt = share.CreatedAt
}
if err := p.userExistsInternal(tx, share.Username); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", share.Username))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", share.Username))
}
buf, err := json.Marshal(share)
if err != nil {
@ -1978,7 +1978,7 @@ func (p *BoltProvider) updateShareLastUse(shareID string, numTokens int) error {
}
var u []byte
if u = bucket.Get([]byte(shareID)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("share %#v does not exist, unable to update last use", shareID))
return util.NewRecordNotFoundError(fmt.Sprintf("share %q does not exist, unable to update last use", shareID))
}
var share Share
err = json.Unmarshal(u, &share)
@ -1993,10 +1993,10 @@ func (p *BoltProvider) updateShareLastUse(shareID string, numTokens int) error {
}
err = bucket.Put([]byte(shareID), buf)
if err != nil {
providerLog(logger.LevelWarn, "error updating last use for share %#v: %v", shareID, err)
providerLog(logger.LevelWarn, "error updating last use for share %q: %v", shareID, err)
return err
}
providerLog(logger.LevelDebug, "last use updated for share %#v", shareID)
providerLog(logger.LevelDebug, "last use updated for share %q", shareID)
return nil
})
}
@ -3020,7 +3020,7 @@ func (p *BoltProvider) setFirstDownloadTimestamp(username string) error {
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to set download timestamp",
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist, unable to set download timestamp",
username))
}
var user User
@ -3049,7 +3049,7 @@ func (p *BoltProvider) setFirstUploadTimestamp(username string) error {
}
var u []byte
if u = bucket.Get([]byte(username)); u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist, unable to set upload timestamp",
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist, unable to set upload timestamp",
username))
}
var user User
@ -3285,7 +3285,7 @@ func (p *BoltProvider) groupExistsInternal(name string, bucket *bolt.Bucket) (Gr
var group Group
g := bucket.Get([]byte(name))
if g == nil {
err := util.NewRecordNotFoundError(fmt.Sprintf("group %#v does not exist", name))
err := util.NewRecordNotFoundError(fmt.Sprintf("group %q does not exist", name))
return group, err
}
err := json.Unmarshal(g, &group)
@ -3296,7 +3296,7 @@ func (p *BoltProvider) folderExistsInternal(name string, bucket *bolt.Bucket) (v
var folder vfs.BaseVirtualFolder
f := bucket.Get([]byte(name))
if f == nil {
err := util.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist", name))
err := util.NewRecordNotFoundError(fmt.Sprintf("folder %q does not exist", name))
return folder, err
}
err := json.Unmarshal(f, &folder)
@ -3782,7 +3782,7 @@ func (p *BoltProvider) userExistsInternal(tx *bolt.Tx, username string) error {
}
u := bucket.Get([]byte(username))
if u == nil {
return util.NewRecordNotFoundError(fmt.Sprintf("username %#v does not exist", username))
return util.NewRecordNotFoundError(fmt.Sprintf("username %q does not exist", username))
}
return nil
}


@ -86,27 +86,27 @@ func (cache *usersCache) swap(userRef *User) {
if cachedUser, ok := cache.users[user.Username]; ok {
if cachedUser.User.Password != user.Password {
providerLog(logger.LevelDebug, "current password different from the cached one for user %#v, removing from cache",
providerLog(logger.LevelDebug, "current password different from the cached one for user %q, removing from cache",
user.Username)
// the password changed, the cached user is no longer valid
delete(cache.users, user.Username)
return
}
if err != nil {
providerLog(logger.LevelDebug, "unable to load group settings, for user %#v, removing from cache, err :%v",
providerLog(logger.LevelDebug, "unable to load group settings, for user %q, removing from cache, err :%v",
user.Username, err)
delete(cache.users, user.Username)
return
}
if cachedUser.User.isFsEqual(&user) {
// the updated user has the same fs as the cached one, we can preserve the lock filesystem
providerLog(logger.LevelDebug, "current password and fs unchanged for for user %#v, swap cached one",
providerLog(logger.LevelDebug, "current password and fs unchanged for for user %q, swap cached one",
user.Username)
cachedUser.User = user
cache.users[user.Username] = cachedUser
} else {
// filesystem changed, the cached user is no longer valid
providerLog(logger.LevelDebug, "current fs different from the cached one for user %#v, removing from cache",
providerLog(logger.LevelDebug, "current fs different from the cached one for user %q, removing from cache",
user.Username)
delete(cache.users, user.Username)
}


@ -547,7 +547,7 @@ func (c *EventActionDataRetentionConfig) validate() error {
nothingToDo = false
}
if _, ok := folderPaths[f.Path]; ok {
return util.NewValidationError(fmt.Sprintf("duplicated folder path %#v", f.Path))
return util.NewValidationError(fmt.Sprintf("duplicated folder path %q", f.Path))
}
folderPaths[f.Path] = true
}


@ -156,7 +156,7 @@ func (p *MemoryProvider) validateUserAndTLSCert(username, protocol string, tlsCe
}
user, err := p.userExists(username, "")
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, err
}
return checkUserAndTLSCertificate(&user, protocol, tlsCert)
@ -165,7 +165,7 @@ func (p *MemoryProvider) validateUserAndTLSCert(username, protocol string, tlsCe
func (p *MemoryProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
user, err := p.userExists(username, "")
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, err
}
return checkUserAndPass(&user, password, ip, protocol)
@ -178,7 +178,7 @@ func (p *MemoryProvider) validateUserAndPubKey(username string, pubKey []byte, i
}
user, err := p.userExists(username, "")
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, "", err
}
return checkUserAndPubKey(&user, pubKey, isSSHCert)
@ -187,7 +187,7 @@ func (p *MemoryProvider) validateUserAndPubKey(username string, pubKey []byte, i
func (p *MemoryProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
admin, err := p.adminExists(username)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating admin %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating admin %q: %v", username, err)
return admin, ErrInvalidCredentials
}
err = admin.checkUserAndPass(password, ip)
@ -262,7 +262,7 @@ func (p *MemoryProvider) updateTransferQuota(username string, uploadSize, downlo
}
user, err := p.userExistsInternal(username)
if err != nil {
providerLog(logger.LevelError, "unable to update transfer quota for user %#v error: %v", username, err)
providerLog(logger.LevelError, "unable to update transfer quota for user %q error: %v", username, err)
return err
}
if reset {
@ -273,7 +273,7 @@ func (p *MemoryProvider) updateTransferQuota(username string, uploadSize, downlo
user.UsedDownloadDataTransfer += downloadSize
}
user.LastQuotaUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
providerLog(logger.LevelDebug, "transfer quota updated for user %#v, ul increment: %v dl increment: %v is reset? %v",
providerLog(logger.LevelDebug, "transfer quota updated for user %q, ul increment: %v dl increment: %v is reset? %v",
username, uploadSize, downloadSize, reset)
p.dbHandle.users[user.Username] = user
return nil
@ -287,7 +287,7 @@ func (p *MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int6
}
user, err := p.userExistsInternal(username)
if err != nil {
providerLog(logger.LevelError, "unable to update quota for user %#v error: %v", username, err)
providerLog(logger.LevelError, "unable to update quota for user %q error: %v", username, err)
return err
}
if reset {
@ -298,7 +298,7 @@ func (p *MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int6
user.UsedQuotaFiles += filesAdd
}
user.LastQuotaUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
providerLog(logger.LevelDebug, "quota updated for user %q, files increment: %v size increment: %v is reset? %v",
username, filesAdd, sizeAdd, reset)
p.dbHandle.users[user.Username] = user
return nil
@ -312,7 +312,7 @@ func (p *MemoryProvider) getUsedQuota(username string) (int, int64, int64, int64
}
user, err := p.userExistsInternal(username)
if err != nil {
providerLog(logger.LevelError, "unable to get quota for user %#v error: %v", username, err)
providerLog(logger.LevelError, "unable to get quota for user %q error: %v", username, err)
return 0, 0, 0, 0, err
}
return user.UsedQuotaFiles, user.UsedQuotaSize, user.UsedUploadDataTransfer, user.UsedDownloadDataTransfer, err
@ -700,7 +700,7 @@ func (p *MemoryProvider) addAdmin(admin *Admin) error {
}
_, err = p.adminExistsInternal(admin.Username)
if err == nil {
return fmt.Errorf("admin %#v already exists", admin.Username)
return fmt.Errorf("admin %q already exists", admin.Username)
}
admin.ID = p.getNextAdminID()
admin.CreatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
@ -811,7 +811,7 @@ func (p *MemoryProvider) adminExistsInternal(username string) (Admin, error) {
if val, ok := p.dbHandle.admins[username]; ok {
return val.getACopy(), nil
}
return Admin{}, util.NewRecordNotFoundError(fmt.Sprintf("admin %#v does not exist", username))
return Admin{}, util.NewRecordNotFoundError(fmt.Sprintf("admin %q does not exist", username))
}
func (p *MemoryProvider) dumpAdmins() ([]Admin, error) {
@ -883,7 +883,7 @@ func (p *MemoryProvider) updateFolderQuota(name string, filesAdd int, sizeAdd in
}
folder, err := p.folderExistsInternal(name)
if err != nil {
providerLog(logger.LevelError, "unable to update quota for folder %#v error: %v", name, err)
providerLog(logger.LevelError, "unable to update quota for folder %q error: %v", name, err)
return err
}
if reset {
@ -1005,7 +1005,7 @@ func (p *MemoryProvider) addGroup(group *Group) error {
_, err := p.groupExistsInternal(group.Name)
if err == nil {
return fmt.Errorf("group %#v already exists", group.Name)
return fmt.Errorf("group %q already exists", group.Name)
}
group.ID = p.getNextGroupID()
group.CreatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
@ -1056,7 +1056,7 @@ func (p *MemoryProvider) deleteGroup(group Group) error {
return err
}
if len(g.Users) > 0 {
return util.NewValidationError(fmt.Sprintf("the group %#v is referenced, it cannot be removed", group.Name))
return util.NewValidationError(fmt.Sprintf("the group %q is referenced, it cannot be removed", group.Name))
}
for _, oldFolder := range g.VirtualFolders {
p.removeRelationFromFolderMapping(oldFolder.Name, "", g.Name)
@ -1099,7 +1099,7 @@ func (p *MemoryProvider) getUsedFolderQuota(name string) (int, int64, error) {
}
folder, err := p.folderExistsInternal(name)
if err != nil {
providerLog(logger.LevelError, "unable to get quota for folder %#v error: %v", name, err)
providerLog(logger.LevelError, "unable to get quota for folder %q error: %v", name, err)
return 0, 0, err
}
return folder.UsedQuotaFiles, folder.UsedQuotaSize, err
@ -1419,7 +1419,7 @@ func (p *MemoryProvider) folderExistsInternal(name string) (vfs.BaseVirtualFolde
if val, ok := p.dbHandle.vfolders[name]; ok {
return val, nil
}
return vfs.BaseVirtualFolder{}, util.NewRecordNotFoundError(fmt.Sprintf("folder %#v does not exist", name))
return vfs.BaseVirtualFolder{}, util.NewRecordNotFoundError(fmt.Sprintf("folder %q does not exist", name))
}
func (p *MemoryProvider) getFolders(limit, offset int, order string, minimal bool) ([]vfs.BaseVirtualFolder, error) {
@ -1494,7 +1494,7 @@ func (p *MemoryProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
_, err = p.folderExistsInternal(folder.Name)
if err == nil {
return fmt.Errorf("folder %#v already exists", folder.Name)
return fmt.Errorf("folder %q already exists", folder.Name)
}
folder.ID = p.getNextFolderID()
folder.Users = nil
@ -1598,7 +1598,7 @@ func (p *MemoryProvider) apiKeyExistsInternal(keyID string) (APIKey, error) {
if val, ok := p.dbHandle.apiKeys[keyID]; ok {
return val.getACopy(), nil
}
return APIKey{}, util.NewRecordNotFoundError(fmt.Sprintf("API key %#v does not exist", keyID))
return APIKey{}, util.NewRecordNotFoundError(fmt.Sprintf("API key %q does not exist", keyID))
}
func (p *MemoryProvider) apiKeyExists(keyID string) (APIKey, error) {
@ -1624,16 +1624,16 @@ func (p *MemoryProvider) addAPIKey(apiKey *APIKey) error {
_, err = p.apiKeyExistsInternal(apiKey.KeyID)
if err == nil {
return fmt.Errorf("API key %#v already exists", apiKey.KeyID)
return fmt.Errorf("API key %q already exists", apiKey.KeyID)
}
if apiKey.User != "" {
if _, err := p.userExistsInternal(apiKey.User); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", apiKey.User))
}
}
if apiKey.Admin != "" {
if _, err := p.adminExistsInternal(apiKey.Admin); err != nil {
return util.NewValidationError(fmt.Sprintf("related admin %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related admin %q does not exists", apiKey.User))
}
}
apiKey.CreatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
@ -1662,12 +1662,12 @@ func (p *MemoryProvider) updateAPIKey(apiKey *APIKey) error {
}
if apiKey.User != "" {
if _, err := p.userExistsInternal(apiKey.User); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", apiKey.User))
}
}
if apiKey.Admin != "" {
if _, err := p.adminExistsInternal(apiKey.Admin); err != nil {
return util.NewValidationError(fmt.Sprintf("related admin %#v does not exists", apiKey.User))
return util.NewValidationError(fmt.Sprintf("related admin %q does not exists", apiKey.User))
}
}
apiKey.ID = k.ID
@ -1818,11 +1818,11 @@ func (p *MemoryProvider) updateSharesOrdering() {
func (p *MemoryProvider) shareExistsInternal(shareID, username string) (Share, error) {
if val, ok := p.dbHandle.shares[shareID]; ok {
if username != "" && val.Username != username {
return Share{}, util.NewRecordNotFoundError(fmt.Sprintf("Share %#v does not exist", shareID))
return Share{}, util.NewRecordNotFoundError(fmt.Sprintf("Share %q does not exist", shareID))
}
return val.getACopy(), nil
}
return Share{}, util.NewRecordNotFoundError(fmt.Sprintf("Share %#v does not exist", shareID))
return Share{}, util.NewRecordNotFoundError(fmt.Sprintf("Share %q does not exist", shareID))
}
func (p *MemoryProvider) shareExists(shareID, username string) (Share, error) {
@ -1851,7 +1851,7 @@ func (p *MemoryProvider) addShare(share *Share) error {
return fmt.Errorf("share %q already exists", share.ShareID)
}
if _, err := p.userExistsInternal(share.Username); err != nil {
return util.NewValidationError(fmt.Sprintf("related user %#v does not exists", share.Username))
return util.NewValidationError(fmt.Sprintf("related user %q does not exists", share.Username))
}
if !share.IsRestore {
share.CreatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
@ -2962,7 +2962,7 @@ func (p *MemoryProvider) reloadConfig() error {
providerLog(logger.LevelDebug, "no dump configuration file defined")
return nil
}
providerLog(logger.LevelDebug, "loading dump from file: %#v", p.dbHandle.configFile)
providerLog(logger.LevelDebug, "loading dump from file: %q", p.dbHandle.configFile)
fi, err := os.Stat(p.dbHandle.configFile)
if err != nil {
providerLog(logger.LevelError, "error loading dump: %v", err)
@ -3038,7 +3038,7 @@ func (p *MemoryProvider) restoreDump(dump *BackupData) error {
return err
}
providerLog(logger.LevelDebug, "config loaded from file: %#v", p.dbHandle.configFile)
providerLog(logger.LevelDebug, "config loaded from file: %q", p.dbHandle.configFile)
return nil
}
@ -3098,13 +3098,13 @@ func (p *MemoryProvider) restoreShares(dump *BackupData) error {
share.ID = s.ID
err = UpdateShare(&share, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating share %#v: %v", share.ShareID, err)
providerLog(logger.LevelError, "error updating share %q: %v", share.ShareID, err)
return err
}
} else {
err = AddShare(&share, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding share %#v: %v", share.ShareID, err)
providerLog(logger.LevelError, "error adding share %q: %v", share.ShareID, err)
return err
}
}
@ -3123,13 +3123,13 @@ func (p *MemoryProvider) restoreAPIKeys(dump *BackupData) error {
apiKey.ID = k.ID
err = UpdateAPIKey(&apiKey, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating API key %#v: %v", apiKey.KeyID, err)
providerLog(logger.LevelError, "error updating API key %q: %v", apiKey.KeyID, err)
return err
}
} else {
err = AddAPIKey(&apiKey, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding API key %#v: %v", apiKey.KeyID, err)
providerLog(logger.LevelError, "error adding API key %q: %v", apiKey.KeyID, err)
return err
}
}
@ -3146,13 +3146,13 @@ func (p *MemoryProvider) restoreAdmins(dump *BackupData) error {
admin.ID = a.ID
err = UpdateAdmin(&admin, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating admin %#v: %v", admin.Username, err)
providerLog(logger.LevelError, "error updating admin %q: %v", admin.Username, err)
return err
}
} else {
err = AddAdmin(&admin, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding admin %#v: %v", admin.Username, err)
providerLog(logger.LevelError, "error adding admin %q: %v", admin.Username, err)
return err
}
}
@ -3222,14 +3222,14 @@ func (p *MemoryProvider) restoreGroups(dump *BackupData) error {
group.ID = g.ID
err = UpdateGroup(&group, g.Users, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating group %#v: %v", group.Name, err)
providerLog(logger.LevelError, "error updating group %q: %v", group.Name, err)
return err
}
} else {
group.Users = nil
err = AddGroup(&group, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding group %#v: %v", group.Name, err)
providerLog(logger.LevelError, "error adding group %q: %v", group.Name, err)
return err
}
}
@ -3246,14 +3246,14 @@ func (p *MemoryProvider) restoreFolders(dump *BackupData) error {
folder.ID = f.ID
err = UpdateFolder(&folder, f.Users, f.Groups, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating folder %#v: %v", folder.Name, err)
providerLog(logger.LevelError, "error updating folder %q: %v", folder.Name, err)
return err
}
} else {
folder.Users = nil
err = AddFolder(&folder, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding folder %#v: %v", folder.Name, err)
providerLog(logger.LevelError, "error adding folder %q: %v", folder.Name, err)
return err
}
}
@ -3270,13 +3270,13 @@ func (p *MemoryProvider) restoreUsers(dump *BackupData) error {
user.ID = u.ID
err = UpdateUser(&user, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error updating user %#v: %v", user.Username, err)
providerLog(logger.LevelError, "error updating user %q: %v", user.Username, err)
return err
}
} else {
err = AddUser(&user, ActionExecutorSystem, "", "")
if err != nil {
providerLog(logger.LevelError, "error adding user %#v: %v", user.Username, err)
providerLog(logger.LevelError, "error adding user %q: %v", user.Username, err)
return err
}
}

View file

@ -277,10 +277,10 @@ func registerMySQLCustomTLSConfig() error {
}
rootCrt, err := os.ReadFile(config.RootCert)
if err != nil {
return fmt.Errorf("unable to load root certificate %#v: %v", config.RootCert, err)
return fmt.Errorf("unable to load root certificate %q: %v", config.RootCert, err)
}
if !rootCAs.AppendCertsFromPEM(rootCrt) {
return fmt.Errorf("unable to parse root certificate %#v", config.RootCert)
return fmt.Errorf("unable to parse root certificate %q", config.RootCert)
}
tlsConfig.RootCAs = rootCAs
}
@ -288,7 +288,7 @@ func registerMySQLCustomTLSConfig() error {
clientCert := make([]tls.Certificate, 0, 1)
tlsCert, err := tls.LoadX509KeyPair(config.ClientCert, config.ClientKey)
if err != nil {
return fmt.Errorf("unable to load key pair %#v, %#v: %v", config.ClientCert, config.ClientKey, err)
return fmt.Errorf("unable to load key pair %q, %q: %v", config.ClientCert, config.ClientKey, err)
}
clientCert = append(clientCert, tlsCert)
tlsConfig.Certificates = clientCert
@ -299,7 +299,7 @@ func registerMySQLCustomTLSConfig() error {
if !filepath.IsAbs(config.Host) && !config.DisableSNI {
tlsConfig.ServerName = config.Host
}
providerLog(logger.LevelInfo, "registering custom TLS config, root cert %#v, client cert %#v, client key %#v, disable SNI? %v",
providerLog(logger.LevelInfo, "registering custom TLS config, root cert %q, client cert %q, client key %q, disable SNI? %v",
config.RootCert, config.ClientCert, config.ClientKey, config.DisableSNI)
if err := mysql.RegisterTLSConfig("custom", tlsConfig); err != nil {
return fmt.Errorf("unable to register tls config: %v", err)

View file

@ -224,7 +224,7 @@ func (q *quotaUpdater) storeUsersQuota() {
if size != 0 || files != 0 {
err := provider.updateQuota(username, files, size, false)
if err != nil {
providerLog(logger.LevelWarn, "unable to update quota delayed for user %#v: %v", username, err)
providerLog(logger.LevelWarn, "unable to update quota delayed for user %q: %v", username, err)
continue
}
q.updateUserQuota(username, -files, -size)
@ -238,7 +238,7 @@ func (q *quotaUpdater) storeFoldersQuota() {
if size != 0 || files != 0 {
err := provider.updateFolderQuota(name, files, size, false)
if err != nil {
providerLog(logger.LevelWarn, "unable to update quota delayed for folder %#v: %v", name, err)
providerLog(logger.LevelWarn, "unable to update quota delayed for folder %q: %v", name, err)
continue
}
q.updateFolderQuota(name, -files, -size)
@ -252,7 +252,7 @@ func (q *quotaUpdater) storeUsersTransferQuota() {
if ulSize != 0 || dlSize != 0 {
err := provider.updateTransferQuota(username, ulSize, dlSize, false)
if err != nil {
providerLog(logger.LevelWarn, "unable to update transfer quota delayed for user %#v: %v", username, err)
providerLog(logger.LevelWarn, "unable to update transfer quota delayed for user %q: %v", username, err)
continue
}
q.updateUserTransferQuota(username, -ulSize, -dlSize)

View file

@ -266,7 +266,7 @@ func (s *Share) validate() error {
for _, IPMask := range s.AllowFrom {
_, _, err := net.ParseCIDR(IPMask)
if err != nil {
return util.NewValidationError(fmt.Sprintf("could not parse allow from entry %#v : %v", IPMask, err))
return util.NewValidationError(fmt.Sprintf("could not parse allow from entry %q : %v", IPMask, err))
}
}
return nil

View file

@ -111,7 +111,7 @@ func sqlCommonAddShare(share *Share, dbHandle *sql.DB) error {
user, err := provider.userExists(share.Username, "")
if err != nil {
return util.NewGenericError(fmt.Sprintf("unable to validate user %#v", share.Username))
return util.NewGenericError(fmt.Sprintf("unable to validate user %q", share.Username))
}
paths, err := json.Marshal(share.Paths)
@ -171,7 +171,7 @@ func sqlCommonUpdateShare(share *Share, dbHandle *sql.DB) error {
user, err := provider.userExists(share.Username, "")
if err != nil {
return util.NewGenericError(fmt.Sprintf("unable to validate user %#v", share.Username))
return util.NewGenericError(fmt.Sprintf("unable to validate user %q", share.Username))
}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
@ -415,7 +415,7 @@ func sqlCommonGetAdminByUsername(username string, dbHandle sqlQuerier) (Admin, e
func sqlCommonValidateAdminAndPass(username, password, ip string, dbHandle *sql.DB) (Admin, error) {
admin, err := sqlCommonGetAdminByUsername(username, dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating admin %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating admin %q: %v", username, err)
return admin, ErrInvalidCredentials
}
err = admin.checkUserAndPass(password, ip)
@ -1148,7 +1148,7 @@ func sqlCommonGetUserByUsername(username, role string, dbHandle sqlQuerier) (Use
func sqlCommonValidateUserAndPass(username, password, ip, protocol string, dbHandle *sql.DB) (User, error) {
user, err := sqlCommonGetUserByUsername(username, "", dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, err
}
return checkUserAndPass(&user, password, ip, protocol)
@ -1161,7 +1161,7 @@ func sqlCommonValidateUserAndTLSCertificate(username, protocol string, tlsCert *
}
user, err := sqlCommonGetUserByUsername(username, "", dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, err
}
return checkUserAndTLSCertificate(&user, protocol, tlsCert)
@ -1174,7 +1174,7 @@ func sqlCommonValidateUserAndPubKey(username string, pubKey []byte, isSSHCert bo
}
user, err := sqlCommonGetUserByUsername(username, "", dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v: %v", username, err)
providerLog(logger.LevelWarn, "error authenticating user %q: %v", username, err)
return user, "", err
}
return checkUserAndPubKey(&user, pubKey, isSSHCert)
@ -1828,7 +1828,7 @@ func sqlCommonIsDefenderHostBanned(ip string, dbHandle sqlQuerier) (DefenderEntr
if errors.Is(err, sql.ErrNoRows) {
return host, util.NewRecordNotFoundError("host not found")
}
providerLog(logger.LevelError, "unable to check ban status for host %#v: %v", ip, err)
providerLog(logger.LevelError, "unable to check ban status for host %q: %v", ip, err)
return host, err
}
@ -1848,7 +1848,7 @@ func sqlCommonGetDefenderHostByIP(ip string, from int64, dbHandle sqlQuerier) (D
if errors.Is(err, sql.ErrNoRows) {
return host, util.NewRecordNotFoundError("host not found")
}
providerLog(logger.LevelError, "unable to get host for ip %#v: %v", ip, err)
providerLog(logger.LevelError, "unable to get host for ip %q: %v", ip, err)
return host, err
}
if banTime.Valid && banTime.Int64 > 0 {
@ -1877,10 +1877,10 @@ func sqlCommonDefenderIncrementBanTime(ip string, minutesToAdd int, dbHandle *sq
q := getDefenderIncrementBanTimeQuery()
_, err := dbHandle.ExecContext(ctx, q, minutesToAdd*60000, ip)
if err == nil {
providerLog(logger.LevelDebug, "ban time updated for ip %#v, increment (minutes): %v",
providerLog(logger.LevelDebug, "ban time updated for ip %q, increment (minutes): %v",
ip, minutesToAdd)
} else {
providerLog(logger.LevelError, "error updating ban time for ip %#v: %v", ip, err)
providerLog(logger.LevelError, "error updating ban time for ip %q: %v", ip, err)
}
return err
}
@ -1892,9 +1892,9 @@ func sqlCommonSetDefenderBanTime(ip string, banTime int64, dbHandle *sql.DB) err
q := getDefenderSetBanTimeQuery()
_, err := dbHandle.ExecContext(ctx, q, banTime, ip)
if err == nil {
providerLog(logger.LevelDebug, "ip %#v banned until %v", ip, util.GetTimeFromMsecSinceEpoch(banTime))
providerLog(logger.LevelDebug, "ip %q banned until %v", ip, util.GetTimeFromMsecSinceEpoch(banTime))
} else {
providerLog(logger.LevelError, "error setting ban time for ip %#v: %v", ip, err)
providerLog(logger.LevelError, "error setting ban time for ip %q: %v", ip, err)
}
return err
}
@ -1906,7 +1906,7 @@ func sqlCommonDeleteDefenderHost(ip string, dbHandle sqlQuerier) error {
q := getDeleteDefenderHostQuery()
res, err := dbHandle.ExecContext(ctx, q, ip)
if err != nil {
providerLog(logger.LevelError, "unable to delete defender host %#v: %v", ip, err)
providerLog(logger.LevelError, "unable to delete defender host %q: %v", ip, err)
return err
}
return sqlCommonRequireRowAffected(res)
@ -1935,7 +1935,7 @@ func sqlCommonAddDefenderHost(ctx context.Context, ip string, tx *sql.Tx) error
q := getAddDefenderHostQuery()
_, err := tx.ExecContext(ctx, q, ip, util.GetTimeAsMsSinceEpoch(time.Now()))
if err != nil {
providerLog(logger.LevelError, "unable to add defender host %#v: %v", ip, err)
providerLog(logger.LevelError, "unable to add defender host %q: %v", ip, err)
}
return err
}
@ -1944,7 +1944,7 @@ func sqlCommonAddDefenderEvent(ctx context.Context, ip string, score int, tx *sq
q := getAddDefenderEventQuery()
_, err := tx.ExecContext(ctx, q, util.GetTimeAsMsSinceEpoch(time.Now()), score, ip)
if err != nil {
providerLog(logger.LevelError, "unable to add defender event for %#v: %v", ip, err)
providerLog(logger.LevelError, "unable to add defender event for %q: %v", ip, err)
}
return err
}
@ -2214,8 +2214,8 @@ func getUserFromDbRow(row sqlScanner) (User, error) {
perms := make(map[string][]string)
err = json.Unmarshal(permissions, &perms)
if err != nil {
providerLog(logger.LevelError, "unable to deserialize permissions for user %#v: %v", user.Username, err)
return user, fmt.Errorf("unable to deserialize permissions for user %#v: %v", user.Username, err)
providerLog(logger.LevelError, "unable to deserialize permissions for user %q: %v", user.Username, err)
return user, fmt.Errorf("unable to deserialize permissions for user %q: %v", user.Username, err)
}
user.Permissions = perms
// we can have a empty string or an invalid json in null string
@ -2290,14 +2290,14 @@ func sqlCommonGetFolderByName(ctx context.Context, name string, dbHandle sqlQuer
return folder, err
}
if len(folders) != 1 {
return folder, fmt.Errorf("unable to associate users with folder %#v", name)
return folder, fmt.Errorf("unable to associate users with folder %q", name)
}
folders, err = getVirtualFoldersWithGroups([]vfs.BaseVirtualFolder{folders[0]}, dbHandle)
if err != nil {
return folder, err
}
if len(folders) != 1 {
return folder, fmt.Errorf("unable to associate groups with folder %#v", name)
return folder, fmt.Errorf("unable to associate groups with folder %q", name)
}
return folders[0], nil
}
@ -3107,7 +3107,7 @@ func sqlCommonUpdateFolderQuota(name string, filesAdd int, sizeAdd int64, reset
providerLog(logger.LevelDebug, "quota updated for folder %q, files increment: %d size increment: %d is reset? %t",
name, filesAdd, sizeAdd, reset)
} else {
providerLog(logger.LevelWarn, "error updating quota for folder %#v: %v", name, err)
providerLog(logger.LevelWarn, "error updating quota for folder %q: %v", name, err)
}
return err
}
@ -3893,7 +3893,7 @@ func sqlCommonGetDatabaseVersion(dbHandle sqlQuerier, showInitWarn bool) (schema
q := getDatabaseVersionQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelError, "error preparing database query %#v: %v", q, err)
providerLog(logger.LevelError, "error preparing database query %q: %v", q, err)
if showInitWarn && strings.Contains(err.Error(), sqlTableSchemaVersion) {
logger.WarnToConsole("database query error, did you forgot to run the \"initprovider\" command?")
}

View file

@ -216,7 +216,7 @@ func initializeSQLiteProvider(basePath string) error {
if config.ConnectionString == "" {
dbPath := config.Name
if !util.IsFileInputValid(dbPath) {
return fmt.Errorf("invalid database path: %#v", dbPath)
return fmt.Errorf("invalid database path: %q", dbPath)
}
if !filepath.IsAbs(dbPath) {
dbPath = filepath.Join(basePath, dbPath)

View file

@ -186,14 +186,14 @@ func (u *User) checkDirWithParents(virtualDirPath, connectionID string) error {
}
fs, err := u.GetFilesystemForPath(vPath, connectionID)
if err != nil {
return fmt.Errorf("unable to get fs for path %#v: %w", vPath, err)
return fmt.Errorf("unable to get fs for path %q: %w", vPath, err)
}
if fs.HasVirtualFolders() {
continue
}
fsPath, err := fs.ResolvePath(vPath)
if err != nil {
return fmt.Errorf("unable to resolve path %#v: %w", vPath, err)
return fmt.Errorf("unable to resolve path %q: %w", vPath, err)
}
_, err = fs.Stat(fsPath)
if err == nil {
@ -206,7 +206,7 @@ func (u *User) checkDirWithParents(virtualDirPath, connectionID string) error {
}
vfs.SetPathPermissions(fs, fsPath, u.GetUID(), u.GetGID())
} else {
return fmt.Errorf("unable to stat path %#v: %w", vPath, err)
return fmt.Errorf("unable to stat path %q: %w", vPath, err)
}
}
@ -261,7 +261,7 @@ func (u *User) CheckFsRoot(connectionID string) error {
if u.Filters.StartDirectory != "" {
err = u.checkDirWithParents(u.Filters.StartDirectory, connectionID)
if err != nil {
logger.Warn(logSender, connectionID, "could not create start directory %#v, err: %v",
logger.Warn(logSender, connectionID, "could not create start directory %q, err: %v",
u.Filters.StartDirectory, err)
}
}
@ -274,7 +274,7 @@ func (u *User) CheckFsRoot(connectionID string) error {
// now check intermediary folders
err = u.checkDirWithParents(path.Dir(v.VirtualPath), connectionID)
if err != nil {
logger.Warn(logSender, connectionID, "could not create intermediary dir to %#v, err: %v", v.VirtualPath, err)
logger.Warn(logSender, connectionID, "could not create intermediary dir to %q, err: %v", v.VirtualPath, err)
}
}
return nil
@ -336,10 +336,10 @@ func (u *User) isFsEqual(other *User) bool {
// CheckLoginConditions checks if the user is active and not expired
func (u *User) CheckLoginConditions() error {
if u.Status < 1 {
return fmt.Errorf("user %#v is disabled", u.Username)
return fmt.Errorf("user %q is disabled", u.Username)
}
if u.ExpirationDate > 0 && u.ExpirationDate < util.GetTimeAsMsSinceEpoch(time.Now()) {
return fmt.Errorf("user %#v is expired, expiration timestamp: %v current timestamp: %v", u.Username,
return fmt.Errorf("user %q is expired, expiration timestamp: %v current timestamp: %v", u.Username,
u.ExpirationDate, util.GetTimeAsMsSinceEpoch(time.Now()))
}
return nil
@ -1180,7 +1180,7 @@ func (u *User) GetBandwidthForIP(clientIP, connectionID string) (int64, int64) {
_, ipNet, err := net.ParseCIDR(source)
if err == nil {
if ipNet.Contains(ip) {
logger.Debug(logSender, connectionID, "override bandwidth limit for ip %#v, upload limit: %v KB/s, download limit: %v KB/s",
logger.Debug(logSender, connectionID, "override bandwidth limit for ip %q, upload limit: %v KB/s, download limit: %v KB/s",
clientIP, bwLimit.UploadBandwidth, bwLimit.DownloadBandwidth)
return bwLimit.UploadBandwidth, bwLimit.DownloadBandwidth
}
@ -1203,7 +1203,7 @@ func (u *User) IsLoginFromAddrAllowed(remoteAddr string) bool {
remoteIP := net.ParseIP(util.GetIPFromRemoteAddress(remoteAddr))
// if remoteIP is invalid we allow login, this should never happen
if remoteIP == nil {
logger.Warn(logSender, "", "login allowed for invalid IP. remote address: %#v", remoteAddr)
logger.Warn(logSender, "", "login allowed for invalid IP. remote address: %q", remoteAddr)
return true
}
for _, IPMask := range u.Filters.AllowedIP {
@ -1372,7 +1372,7 @@ func (u *User) GetPermissionsAsString() string {
result := ""
for dir, perms := range u.Permissions {
dirPerms := strings.Join(perms, ", ")
dp := fmt.Sprintf("%#v: %#v", dir, dirPerms)
dp := fmt.Sprintf("%q: %q", dir, dirPerms)
if dir == "/" {
if result != "" {
result = dp + ", " + result
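
The GetPermissionsAsString hunk above builds a human-readable summary of per-directory permissions; because both operands are strings, swapping %#v for %q leaves the rendered summary unchanged. A simplified standalone sketch of that pattern follows (the permissions map is made up, and the real function additionally keeps the "/" entry first):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical permissions map mirroring the shape used above.
	permissions := map[string][]string{
		"/uploads": {"list", "upload", "download"},
	}
	result := ""
	for dir, perms := range permissions {
		dirPerms := strings.Join(perms, ", ")
		// %q renders both operands as quoted strings,
		// e.g. "/uploads": "list, upload, download"
		dp := fmt.Sprintf("%q: %q", dir, dirPerms)
		if result == "" {
			result = dp
		} else {
			result += ", " + dp
		}
	}
	fmt.Println(result)
}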

View file

@ -432,11 +432,11 @@ func GetStatus() ServiceStatus {
func parsePassiveIP(passiveIP string) (string, error) {
ip := net.ParseIP(passiveIP)
if ip == nil {
return "", fmt.Errorf("the provided passive IP %#v is not valid", passiveIP)
return "", fmt.Errorf("the provided passive IP %q is not valid", passiveIP)
}
ip = ip.To4()
if ip == nil {
return "", fmt.Errorf("the provided passive IP %#v is not a valid IPv4 address", passiveIP)
return "", fmt.Errorf("the provided passive IP %q is not a valid IPv4 address", passiveIP)
}
return ip.String(), nil
}

View file

@ -128,12 +128,12 @@ func (c *Connection) Remove(name string) error {
var fi os.FileInfo
if fi, err = fs.Lstat(p); err != nil {
c.Log(logger.LevelError, "failed to remove file %#v: stat error: %+v", p, err)
c.Log(logger.LevelError, "failed to remove file %q: stat error: %+v", p, err)
return c.GetFsError(fs, err)
}
if fi.IsDir() && fi.Mode()&os.ModeSymlink == 0 {
c.Log(logger.LevelError, "cannot remove %#v is not a file/symlink", p)
c.Log(logger.LevelError, "cannot remove %q is not a file/symlink", p)
return c.GetGenericError(nil)
}
return c.RemoveFile(fs, p, name, fi)
@ -338,18 +338,18 @@ func (c *Connection) downloadFile(fs vfs.Fs, fsPath, ftpPath string, offset int6
}
if ok, policy := c.User.IsFileAllowed(ftpPath); !ok {
c.Log(logger.LevelWarn, "reading file %#v is not allowed", ftpPath)
c.Log(logger.LevelWarn, "reading file %q is not allowed", ftpPath)
return nil, c.GetErrorForDeniedFile(policy)
}
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreDownload, fsPath, ftpPath, 0, 0); err != nil {
c.Log(logger.LevelDebug, "download for file %#v denied by pre action: %v", ftpPath, err)
c.Log(logger.LevelDebug, "download for file %q denied by pre action: %v", ftpPath, err)
return nil, c.GetPermissionDeniedError()
}
file, r, cancelFn, err := fs.Open(fsPath, offset)
if err != nil {
c.Log(logger.LevelError, "could not open file %#v for reading: %+v", fsPath, err)
c.Log(logger.LevelError, "could not open file %q for reading: %+v", fsPath, err)
return nil, c.GetFsError(fs, err)
}
@ -363,7 +363,7 @@ func (c *Connection) downloadFile(fs vfs.Fs, fsPath, ftpPath string, offset int6
func (c *Connection) uploadFile(fs vfs.Fs, fsPath, ftpPath string, flags int) (ftpserver.FileTransfer, error) {
if ok, _ := c.User.IsFileAllowed(ftpPath); !ok {
c.Log(logger.LevelWarn, "writing file %#v is not allowed", ftpPath)
c.Log(logger.LevelWarn, "writing file %q is not allowed", ftpPath)
return nil, ftpserver.ErrFileNameNotAllowed
}
@ -381,13 +381,13 @@ func (c *Connection) uploadFile(fs vfs.Fs, fsPath, ftpPath string, flags int) (f
}
if statErr != nil {
c.Log(logger.LevelError, "error performing file stat %#v: %+v", fsPath, statErr)
c.Log(logger.LevelError, "error performing file stat %q: %+v", fsPath, statErr)
return nil, c.GetFsError(fs, statErr)
}
// This happen if we upload a file that has the same name of an existing directory
if stat.IsDir() {
c.Log(logger.LevelError, "attempted to open a directory for writing to: %#v", fsPath)
c.Log(logger.LevelError, "attempted to open a directory for writing to: %q", fsPath)
return nil, c.GetOpUnsupportedError()
}
@ -405,12 +405,12 @@ func (c *Connection) handleFTPUploadToNewFile(fs vfs.Fs, flags int, resolvedPath
return nil, ftpserver.ErrStorageExceeded
}
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, 0, 0); err != nil {
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
return nil, fmt.Errorf("%w, denied by pre-upload action", ftpserver.ErrFileNameNotAllowed)
}
file, w, cancelFn, err := fs.Create(filePath, flags)
if err != nil {
c.Log(logger.LevelError, "error creating file %#v, flags %v: %+v", resolvedPath, flags, err)
c.Log(logger.LevelError, "error creating file %q, flags %v: %+v", resolvedPath, flags, err)
return nil, c.GetFsError(fs, err)
}
@ -450,14 +450,14 @@ func (c *Connection) handleFTPUploadToExistingFile(fs vfs.Fs, flags int, resolve
return nil, err
}
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, fileSize, flags); err != nil {
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
return nil, fmt.Errorf("%w, denied by pre-upload action", ftpserver.ErrFileNameNotAllowed)
}
if common.Config.IsAtomicUploadEnabled() && fs.IsAtomicUploadSupported() {
_, _, err = fs.Rename(resolvedPath, filePath)
if err != nil {
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %+v",
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %q, dest: %q, err: %+v",
resolvedPath, filePath, err)
return nil, c.GetFsError(fs, err)
}
@ -465,14 +465,14 @@ func (c *Connection) handleFTPUploadToExistingFile(fs vfs.Fs, flags int, resolve
file, w, cancelFn, err := fs.Create(filePath, flags)
if err != nil {
c.Log(logger.LevelError, "error opening existing file, flags: %v, source: %#v, err: %+v", flags, filePath, err)
c.Log(logger.LevelError, "error opening existing file, flags: %v, source: %q, err: %+v", flags, filePath, err)
return nil, c.GetFsError(fs, err)
}
initialSize := int64(0)
truncatedSize := int64(0) // bytes truncated and not included in quota
if isResume {
c.Log(logger.LevelDebug, "resuming upload requested, file path: %#v initial size: %v", filePath, fileSize)
c.Log(logger.LevelDebug, "resuming upload requested, file path: %q initial size: %v", filePath, fileSize)
minWriteOffset = fileSize
initialSize = fileSize
if vfs.IsSFTPFs(fs) && fs.IsUploadResumeSupported() {

View file

@ -161,7 +161,7 @@ func (s *Server) ClientConnected(cc ftpserver.ClientContext) (string, error) {
ipAddr := util.GetIPFromRemoteAddress(cc.RemoteAddr().String())
common.Connections.AddClientConnection(ipAddr)
if common.IsBanned(ipAddr, common.ProtocolFTP) {
logger.Log(logger.LevelDebug, common.ProtocolFTP, "", "connection refused, ip %#v is banned", ipAddr)
logger.Log(logger.LevelDebug, common.ProtocolFTP, "", "connection refused, ip %q is banned", ipAddr)
return "Access denied: banned client IP", common.ErrConnectionDenied
}
if err := common.Connections.IsNewConnectionAllowed(ipAddr, common.ProtocolFTP); err != nil {
@ -216,7 +216,7 @@ func (s *Server) AuthUser(cc ftpserver.ClientContext, username, password string)
return nil, err
}
setStartDirectory(user.Filters.StartDirectory, cc)
connection.Log(logger.LevelInfo, "User %#v logged in with %#v from ip %#v", user.Username, loginMethod, ipAddr)
connection.Log(logger.LevelInfo, "User %q logged in with %q from ip %q", user.Username, loginMethod, ipAddr)
dataprovider.UpdateLastLogin(&user)
return connection, nil
}
@ -281,7 +281,7 @@ func (s *Server) VerifyConnection(cc ftpserver.ClientContext, user string, tlsCo
return nil, err
}
setStartDirectory(dbUser.Filters.StartDirectory, cc)
connection.Log(logger.LevelInfo, "User id: %d, logged in with FTP using a TLS certificate, username: %#v, home_dir: %#v remote addr: %#v",
connection.Log(logger.LevelInfo, "User id: %d, logged in with FTP using a TLS certificate, username: %q, home_dir: %q remote addr: %q",
dbUser.ID, dbUser.Username, dbUser.HomeDir, ipAddr)
dataprovider.UpdateLastLogin(&dbUser)
return connection, nil
@ -305,7 +305,7 @@ func (s *Server) buildTLSConfig() {
CipherSuites: s.binding.ciphers,
PreferServerCipherSuites: true,
}
logger.Debug(logSender, "", "configured TLS cipher suites for binding %#v: %v, certID: %v",
logger.Debug(logSender, "", "configured TLS cipher suites for binding %q: %v, certID: %v",
s.binding.GetAddress(), s.binding.ciphers, certID)
if s.binding.isMutualTLSEnabled() {
s.tlsConfig.ClientCAs = certMgr.GetRootCAs()
@ -349,7 +349,7 @@ func (s *Server) verifyTLSConnection(state tls.ConnectionState) error {
caCrt = verifiedChain[len(verifiedChain)-1]
}
if certMgr.IsRevoked(clientCrt, caCrt) {
logger.Debug(logSender, "", "tls handshake error, client certificate %#v has beed revoked", clientCrtName)
logger.Debug(logSender, "", "tls handshake error, client certificate %q has beed revoked", clientCrtName)
return common.ErrCrtRevoked
}
}
@ -361,37 +361,37 @@ func (s *Server) verifyTLSConnection(state tls.ConnectionState) error {
func (s *Server) validateUser(user dataprovider.User, cc ftpserver.ClientContext, loginMethod string) (*Connection, error) {
connectionID := fmt.Sprintf("%v_%v_%v", common.ProtocolFTP, s.ID, cc.ID())
if !filepath.IsAbs(user.HomeDir) {
logger.Warn(logSender, connectionID, "user %#v has an invalid home dir: %#v. Home dir must be an absolute path, login not allowed",
logger.Warn(logSender, connectionID, "user %q has an invalid home dir: %q. Home dir must be an absolute path, login not allowed",
user.Username, user.HomeDir)
return nil, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
return nil, fmt.Errorf("cannot login user with invalid home dir: %q", user.HomeDir)
}
if util.Contains(user.Filters.DeniedProtocols, common.ProtocolFTP) {
logger.Info(logSender, connectionID, "cannot login user %#v, protocol FTP is not allowed", user.Username)
return nil, fmt.Errorf("protocol FTP is not allowed for user %#v", user.Username)
logger.Info(logSender, connectionID, "cannot login user %q, protocol FTP is not allowed", user.Username)
return nil, fmt.Errorf("protocol FTP is not allowed for user %q", user.Username)
}
if !user.IsLoginMethodAllowed(loginMethod, common.ProtocolFTP, nil) {
logger.Info(logSender, connectionID, "cannot login user %#v, %v login method is not allowed",
logger.Info(logSender, connectionID, "cannot login user %q, %v login method is not allowed",
user.Username, loginMethod)
return nil, fmt.Errorf("login method %v is not allowed for user %#v", loginMethod, user.Username)
return nil, fmt.Errorf("login method %v is not allowed for user %q", loginMethod, user.Username)
}
if user.MustSetSecondFactorForProtocol(common.ProtocolFTP) {
logger.Info(logSender, connectionID, "cannot login user %#v, second factor authentication is not set",
logger.Info(logSender, connectionID, "cannot login user %q, second factor authentication is not set",
user.Username)
return nil, fmt.Errorf("second factor authentication is not set for user %#v", user.Username)
return nil, fmt.Errorf("second factor authentication is not set for user %q", user.Username)
}
if user.MaxSessions > 0 {
activeSessions := common.Connections.GetActiveSessions(user.Username)
if activeSessions >= user.MaxSessions {
logger.Info(logSender, connectionID, "authentication refused for user: %#v, too many open sessions: %v/%v",
logger.Info(logSender, connectionID, "authentication refused for user: %q, too many open sessions: %v/%v",
user.Username, activeSessions, user.MaxSessions)
return nil, fmt.Errorf("too many open sessions: %v", activeSessions)
}
}
remoteAddr := cc.RemoteAddr().String()
if !user.IsLoginFromAddrAllowed(remoteAddr) {
logger.Info(logSender, connectionID, "cannot login user %#v, remote address is not allowed: %v",
logger.Info(logSender, connectionID, "cannot login user %q, remote address is not allowed: %v",
user.Username, remoteAddr)
return nil, fmt.Errorf("login for user %#v is not allowed from this address: %v", user.Username, remoteAddr)
return nil, fmt.Errorf("login for user %q is not allowed from this address: %v", user.Username, remoteAddr)
}
err := user.CheckFsRoot(connectionID)
if err != nil {

View file

@ -129,7 +129,7 @@ func (c *Config) loadCACerts(configDir string) (*x509.CertPool, error) {
for _, ca := range c.CACertificates {
if !util.IsFileInputValid(ca) {
return nil, fmt.Errorf("unable to load invalid CA certificate: %#v", ca)
return nil, fmt.Errorf("unable to load invalid CA certificate: %q", ca)
}
if !filepath.IsAbs(ca) {
ca = filepath.Join(configDir, ca)
@ -139,9 +139,9 @@ func (c *Config) loadCACerts(configDir string) (*x509.CertPool, error) {
return nil, fmt.Errorf("unable to load CA certificate: %v", err)
}
if rootCAs.AppendCertsFromPEM(certs) {
logger.Debug(logSender, "", "CA certificate %#v added to the trusted certificates", ca)
logger.Debug(logSender, "", "CA certificate %q added to the trusted certificates", ca)
} else {
return nil, fmt.Errorf("unable to add CA certificate %#v to the trusted cetificates", ca)
return nil, fmt.Errorf("unable to add CA certificate %q to the trusted cetificates", ca)
}
}
return rootCAs, nil
@ -156,10 +156,10 @@ func (c *Config) loadCertificates(configDir string) error {
cert := keyPair.Cert
key := keyPair.Key
if !util.IsFileInputValid(cert) {
return fmt.Errorf("unable to load invalid certificate: %#v", cert)
return fmt.Errorf("unable to load invalid certificate: %q", cert)
}
if !util.IsFileInputValid(key) {
return fmt.Errorf("unable to load invalid key: %#v", key)
return fmt.Errorf("unable to load invalid key: %q", key)
}
if !filepath.IsAbs(cert) {
cert = filepath.Join(configDir, cert)
@ -169,14 +169,14 @@ func (c *Config) loadCertificates(configDir string) error {
}
tlsCert, err := tls.LoadX509KeyPair(cert, key)
if err != nil {
return fmt.Errorf("unable to load key pair %#v, %#v: %v", cert, key, err)
return fmt.Errorf("unable to load key pair %q, %q: %v", cert, key, err)
}
x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err == nil {
logger.Debug(logSender, "", "adding leaf certificate for key pair %q, %q", cert, key)
tlsCert.Leaf = x509Cert
}
logger.Debug(logSender, "", "client certificate %#v and key %#v successfully loaded", cert, key)
logger.Debug(logSender, "", "client certificate %q and key %q successfully loaded", cert, key)
c.customTransport.TLSClientConfig.Certificates = append(c.customTransport.TLSClientConfig.Certificates, tlsCert)
}
return nil

View file

@ -86,7 +86,7 @@ func getIPFromID(r *http.Request) (string, error) {
func validateIPAddress(ip string) error {
if net.ParseIP(ip) == nil {
return fmt.Errorf("ip address %#v is not valid", ip)
return fmt.Errorf("ip address %q is not valid", ip)
}
return nil
}

View file

@ -47,7 +47,7 @@ func getCommonSearchParamsFromRequest(r *http.Request) (eventsearcher.CommonSear
if _, ok := r.URL.Query()["order"]; ok {
order := r.URL.Query().Get("order")
if order != dataprovider.OrderASC && order != dataprovider.OrderDESC {
return c, util.NewValidationError(fmt.Sprintf("invalid order %#v", order))
return c, util.NewValidationError(fmt.Sprintf("invalid order %q", order))
}
if order == dataprovider.OrderASC {
c.Order = 1

View file

@ -49,7 +49,7 @@ func startMetadataCheck(w http.ResponseWriter, r *http.Request) {
return
}
if !common.ActiveMetadataChecks.Add(user.Username, user.Role) {
sendAPIResponse(w, r, err, fmt.Sprintf("Another check is already in progress for user %#v", user.Username),
sendAPIResponse(w, r, err, fmt.Sprintf("Another check is already in progress for user %q", user.Username),
http.StatusConflict)
return
}
@ -63,9 +63,9 @@ func doMetadataCheck(user dataprovider.User) error {
err := user.CheckMetadataConsistency()
if err != nil {
logger.Warn(logSender, "", "error checking metadata for user %#v: %v", user.Username, err)
logger.Warn(logSender, "", "error checking metadata for user %q: %v", user.Username, err)
return err
}
logger.Debug(logSender, "", "metadata check completed for user: %#v", user.Username)
logger.Debug(logSender, "", "metadata check completed for user: %q", user.Username)
return nil
}

View file

@ -68,9 +68,9 @@ func generateTOTPSecret(w http.ResponseWriter, r *http.Request) {
}
var accountName string
if claims.hasUserAudience() {
accountName = fmt.Sprintf("User %#v", claims.Username)
accountName = fmt.Sprintf("User %q", claims.Username)
} else {
accountName = fmt.Sprintf("Admin %#v", claims.Username)
accountName = fmt.Sprintf("Admin %q", claims.Username)
}
var req generateTOTPRequest
@ -257,7 +257,7 @@ func saveUserTOTPConfig(username string, r *http.Request, recoveryCodes []datapr
}
for _, p := range user.Filters.TwoFactorAuthProtocols {
if !util.Contains(user.Filters.TOTPConfig.Protocols, p) {
return util.NewValidationError(fmt.Sprintf("totp: the following protocols are required: %#v",
return util.NewValidationError(fmt.Sprintf("totp: the following protocols are required: %q",
strings.Join(user.Filters.TwoFactorAuthProtocols, ", ")))
}
}

View file

@ -215,7 +215,7 @@ func doStartUserQuotaScan(w http.ResponseWriter, r *http.Request, username strin
return
}
if !common.QuotaScans.AddUserQuotaScan(user.Username, user.Role) {
sendAPIResponse(w, r, nil, fmt.Sprintf("Another scan is already in progress for user %#v", username),
sendAPIResponse(w, r, nil, fmt.Sprintf("Another scan is already in progress for user %q", username),
http.StatusConflict)
return
}
@ -234,7 +234,7 @@ func doStartFolderQuotaScan(w http.ResponseWriter, r *http.Request, name string)
return
}
if !common.QuotaScans.AddVFolderQuotaScan(folder.Name) {
sendAPIResponse(w, r, err, fmt.Sprintf("Another scan is already in progress for folder %#v", name),
sendAPIResponse(w, r, err, fmt.Sprintf("Another scan is already in progress for folder %q", name),
http.StatusConflict)
return
}
@ -246,11 +246,11 @@ func doUserQuotaScan(user dataprovider.User) error {
defer common.QuotaScans.RemoveUserQuotaScan(user.Username)
numFiles, size, err := user.ScanQuota()
if err != nil {
logger.Warn(logSender, "", "error scanning user quota %#v: %v", user.Username, err)
logger.Warn(logSender, "", "error scanning user quota %q: %v", user.Username, err)
return err
}
err = dataprovider.UpdateUserQuota(&user, numFiles, size, true)
logger.Debug(logSender, "", "user quota scanned, user: %#v, error: %v", user.Username, err)
logger.Debug(logSender, "", "user quota scanned, user: %q, error: %v", user.Username, err)
return err
}
@ -262,11 +262,11 @@ func doFolderQuotaScan(folder vfs.BaseVirtualFolder) error {
}
numFiles, size, err := f.ScanQuota()
if err != nil {
logger.Warn(logSender, "", "error scanning folder %#v: %v", folder.Name, err)
logger.Warn(logSender, "", "error scanning folder %q: %v", folder.Name, err)
return err
}
err = dataprovider.UpdateVirtualFolderQuota(&folder, numFiles, size, true)
logger.Debug(logSender, "", "virtual folder %#v scanned, error: %v", folder.Name, err)
logger.Debug(logSender, "", "virtual folder %q scanned, error: %v", folder.Name, err)
return err
}

View file

@ -72,7 +72,7 @@ func startRetentionCheck(w http.ResponseWriter, r *http.Request) {
}
c := common.RetentionChecks.Add(check, &user)
if c == nil {
sendAPIResponse(w, r, err, fmt.Sprintf("Another check is already in progress for user %#v", username),
sendAPIResponse(w, r, err, fmt.Sprintf("Another check is already in progress for user %q", username),
http.StatusConflict)
return
}

View file

@ -237,7 +237,7 @@ func (s *httpdServer) downloadBrowsableSharedFile(w http.ResponseWriter, r *http
return
}
if info.IsDir() {
sendAPIResponse(w, r, nil, fmt.Sprintf("Please set the path to a valid file, %#v is a directory", name),
sendAPIResponse(w, r, nil, fmt.Sprintf("Please set the path to a valid file, %q is a directory", name),
http.StatusBadRequest)
return
}
@ -541,7 +541,7 @@ func getBrowsableSharedPath(share dataprovider.Share, r *http.Request) (string,
return name, nil
}
if name != share.Paths[0] && !strings.HasPrefix(name, share.Paths[0]+"/") {
return "", util.NewValidationError(fmt.Sprintf("Invalid path %#v", r.URL.Query().Get("path")))
return "", util.NewValidationError(fmt.Sprintf("Invalid path %q", r.URL.Query().Get("path")))
}
return name, nil
}

View file

@ -335,7 +335,7 @@ func renderCompressedFiles(w http.ResponseWriter, conn *Connection, baseDir stri
func addZipEntry(wr *zip.Writer, conn *Connection, entryPath, baseDir string) error {
info, err := conn.Stat(entryPath, 1)
if err != nil {
conn.Log(logger.LevelDebug, "unable to add zip entry %#v, stat error: %v", entryPath, err)
conn.Log(logger.LevelDebug, "unable to add zip entry %q, stat error: %v", entryPath, err)
return err
}
entryName, err := getZipEntryName(entryPath, baseDir)
@ -350,12 +350,12 @@ func addZipEntry(wr *zip.Writer, conn *Connection, entryPath, baseDir string) er
Modified: info.ModTime(),
})
if err != nil {
conn.Log(logger.LevelError, "unable to create zip entry %#v: %v", entryPath, err)
conn.Log(logger.LevelError, "unable to create zip entry %q: %v", entryPath, err)
return err
}
contents, err := conn.ReadDir(entryPath)
if err != nil {
conn.Log(logger.LevelDebug, "unable to add zip entry %#v, read dir error: %v", entryPath, err)
conn.Log(logger.LevelDebug, "unable to add zip entry %q, read dir error: %v", entryPath, err)
return err
}
for _, info := range contents {
@ -368,12 +368,12 @@ func addZipEntry(wr *zip.Writer, conn *Connection, entryPath, baseDir string) er
}
if !info.Mode().IsRegular() {
// we only allow regular files
conn.Log(logger.LevelInfo, "skipping zip entry for non regular file %#v", entryPath)
conn.Log(logger.LevelInfo, "skipping zip entry for non regular file %q", entryPath)
return nil
}
reader, err := conn.getFileReader(entryPath, 0, http.MethodGet)
if err != nil {
conn.Log(logger.LevelDebug, "unable to add zip entry %#v, cannot open file: %v", entryPath, err)
conn.Log(logger.LevelDebug, "unable to add zip entry %q, cannot open file: %v", entryPath, err)
return err
}
defer reader.Close()
@ -384,7 +384,7 @@ func addZipEntry(wr *zip.Writer, conn *Connection, entryPath, baseDir string) er
Modified: info.ModTime(),
})
if err != nil {
conn.Log(logger.LevelError, "unable to create zip entry %#v: %v", entryPath, err)
conn.Log(logger.LevelError, "unable to create zip entry %q: %v", entryPath, err)
return err
}
_, err = io.Copy(f, reader)
@ -423,7 +423,7 @@ func downloadFile(w http.ResponseWriter, r *http.Request, connection *Connection
responseStatus := http.StatusOK
if strings.HasPrefix(rangeHeader, "bytes=") {
if strings.Contains(rangeHeader, ",") {
return http.StatusRequestedRangeNotSatisfiable, fmt.Errorf("unsupported range %#v", rangeHeader)
return http.StatusRequestedRangeNotSatisfiable, fmt.Errorf("unsupported range %q", rangeHeader)
}
offset, size, err = parseRangeRequest(rangeHeader[6:], size)
if err != nil {
@ -433,7 +433,7 @@ func downloadFile(w http.ResponseWriter, r *http.Request, connection *Connection
}
reader, err := connection.getFileReader(name, offset, r.Method)
if err != nil {
return getMappedStatusCode(err), fmt.Errorf("unable to read file %#v: %v", name, err)
return getMappedStatusCode(err), fmt.Errorf("unable to read file %q: %v", name, err)
}
defer reader.Close()
@ -451,7 +451,7 @@ func downloadFile(w http.ResponseWriter, r *http.Request, connection *Connection
w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
w.Header().Set("Content-Type", ctype)
if !inline {
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%#v", path.Base(name)))
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", path.Base(name)))
}
w.Header().Set("Accept-Ranges", "bytes")
w.WriteHeader(responseStatus)
@ -566,7 +566,7 @@ func parseRangeRequest(bytesRange string, size int64) (int64, int64, error) {
}
}
if start == -1 && end == 0 {
return 0, 0, fmt.Errorf("unsupported range %#v", bytesRange)
return 0, 0, fmt.Errorf("unsupported range %q", bytesRange)
}
if end > 0 {
@ -579,7 +579,7 @@ func parseRangeRequest(bytesRange string, size int64) (int64, int64, error) {
// we have something like 500-600
size = end - start + 1
if size < 0 {
return 0, 0, fmt.Errorf("unacceptable range %#v", bytesRange)
return 0, 0, fmt.Errorf("unacceptable range %q", bytesRange)
}
}
return start, size, nil
@ -587,7 +587,7 @@ func parseRangeRequest(bytesRange string, size int64) (int64, int64, error) {
// we have something like 500-
size -= start
if size < 0 {
return 0, 0, fmt.Errorf("unacceptable range %#v", bytesRange)
return 0, 0, fmt.Errorf("unacceptable range %q", bytesRange)
}
return start, size, err
}
@ -615,24 +615,24 @@ func updateLoginMetrics(user *dataprovider.User, loginMethod, ip string, err err
func checkHTTPClientUser(user *dataprovider.User, r *http.Request, connectionID string, checkSessions bool) error {
if util.Contains(user.Filters.DeniedProtocols, common.ProtocolHTTP) {
logger.Info(logSender, connectionID, "cannot login user %#v, protocol HTTP is not allowed", user.Username)
return fmt.Errorf("protocol HTTP is not allowed for user %#v", user.Username)
logger.Info(logSender, connectionID, "cannot login user %q, protocol HTTP is not allowed", user.Username)
return fmt.Errorf("protocol HTTP is not allowed for user %q", user.Username)
}
if !isLoggedInWithOIDC(r) && !user.IsLoginMethodAllowed(dataprovider.LoginMethodPassword, common.ProtocolHTTP, nil) {
logger.Info(logSender, connectionID, "cannot login user %#v, password login method is not allowed", user.Username)
return fmt.Errorf("login method password is not allowed for user %#v", user.Username)
logger.Info(logSender, connectionID, "cannot login user %q, password login method is not allowed", user.Username)
return fmt.Errorf("login method password is not allowed for user %q", user.Username)
}
if checkSessions && user.MaxSessions > 0 {
activeSessions := common.Connections.GetActiveSessions(user.Username)
if activeSessions >= user.MaxSessions {
logger.Info(logSender, connectionID, "authentication refused for user: %#v, too many open sessions: %v/%v", user.Username,
logger.Info(logSender, connectionID, "authentication refused for user: %q, too many open sessions: %v/%v", user.Username,
activeSessions, user.MaxSessions)
return fmt.Errorf("too many open sessions: %v", activeSessions)
}
}
if !user.IsLoginFromAddrAllowed(r.RemoteAddr) {
logger.Info(logSender, connectionID, "cannot login user %#v, remote address is not allowed: %v", user.Username, r.RemoteAddr)
return fmt.Errorf("login for user %#v is not allowed from this address: %v", user.Username, r.RemoteAddr)
logger.Info(logSender, connectionID, "cannot login user %q, remote address is not allowed: %v", user.Username, r.RemoteAddr)
return fmt.Errorf("login for user %q is not allowed from this address: %v", user.Username, r.RemoteAddr)
}
return nil
}
@ -649,11 +649,11 @@ func handleForgotPassword(r *http.Request, username string, isAdmin bool) error
if isAdmin {
admin, err = dataprovider.AdminExists(username)
email = admin.Email
subject = fmt.Sprintf("Email Verification Code for admin %#v", username)
subject = fmt.Sprintf("Email Verification Code for admin %q", username)
} else {
user, err = dataprovider.GetUserWithGroupSettings(username, "")
email = user.Email
subject = fmt.Sprintf("Email Verification Code for user %#v", username)
subject = fmt.Sprintf("Email Verification Code for user %q", username)
if err == nil {
if !isUserAllowedToResetPassword(r, &user) {
return util.NewValidationError("you are not allowed to reset your password")
@ -662,7 +662,7 @@ func handleForgotPassword(r *http.Request, username string, isAdmin bool) error
}
if err != nil {
if errors.Is(err, util.ErrNotFound) {
logger.Debug(logSender, middleware.GetReqID(r.Context()), "username %#v does not exists, reset password request silently ignored, is admin? %v",
logger.Debug(logSender, middleware.GetReqID(r.Context()), "username %q does not exists, reset password request silently ignored, is admin? %v",
username, isAdmin)
return nil
}
@ -685,7 +685,7 @@ func handleForgotPassword(r *http.Request, username string, isAdmin bool) error
err, time.Since(startTime))
return util.NewGenericError(fmt.Sprintf("Unable to send confirmation code via email: %v", err))
}
logger.Debug(logSender, middleware.GetReqID(r.Context()), "reset code sent via email to %#v, email: %#v, is admin? %v, elapsed: %v",
logger.Debug(logSender, middleware.GetReqID(r.Context()), "reset code sent via email to %q, email: %q, is admin? %v, elapsed: %v",
username, email, isAdmin, time.Since(startTime))
return resetCodesMgr.Add(c)
}
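
One hunk above where the quoting is load-bearing rather than purely cosmetic is the Content-Disposition header set in downloadFile: %q produces the double-quoted filename form the header expects, and since the argument is a string the rendered header is identical to what %#v produced before. A standalone sketch with a made-up path, not part of the patch:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical request path; only the base name goes into the header.
	name := "/docs/quarterly report.pdf"
	header := fmt.Sprintf("attachment; filename=%q", path.Base(name))
	fmt.Println(header) // attachment; filename="quarterly report.pdf"
}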

View file

@ -398,7 +398,7 @@ func createCSRFToken(ip string) string {
func verifyCSRFToken(tokenString, ip string) error {
token, err := jwtauth.VerifyToken(csrfTokenAuth, tokenString)
if err != nil || token == nil {
logger.Debug(logSender, "", "error validating CSRF token %#v: %v", tokenString, err)
logger.Debug(logSender, "", "error validating CSRF token %q: %v", tokenString, err)
return fmt.Errorf("unable to verify form token: %v", err)
}

View file

@ -108,7 +108,7 @@ func (c *Connection) getFileReader(name string, offset int64, method string) (io
}
if ok, policy := c.User.IsFileAllowed(name); !ok {
c.Log(logger.LevelWarn, "reading file %#v is not allowed", name)
c.Log(logger.LevelWarn, "reading file %q is not allowed", name)
return nil, c.GetErrorForDeniedFile(policy)
}
@ -119,14 +119,14 @@ func (c *Connection) getFileReader(name string, offset int64, method string) (io
if method != http.MethodHead {
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreDownload, p, name, 0, 0); err != nil {
c.Log(logger.LevelDebug, "download for file %#v denied by pre action: %v", name, err)
c.Log(logger.LevelDebug, "download for file %q denied by pre action: %v", name, err)
return nil, c.GetPermissionDeniedError()
}
}
file, r, cancelFn, err := fs.Open(p, offset)
if err != nil {
c.Log(logger.LevelError, "could not open file %#v for reading: %+v", p, err)
c.Log(logger.LevelError, "could not open file %q for reading: %+v", p, err)
return nil, c.GetFsError(fs, err)
}
@ -139,7 +139,7 @@ func (c *Connection) getFileWriter(name string) (io.WriteCloser, error) {
c.UpdateLastActivity()
if ok, _ := c.User.IsFileAllowed(name); !ok {
c.Log(logger.LevelWarn, "writing file %#v is not allowed", name)
c.Log(logger.LevelWarn, "writing file %q is not allowed", name)
return nil, c.GetPermissionDeniedError()
}
@ -161,13 +161,13 @@ func (c *Connection) getFileWriter(name string) (io.WriteCloser, error) {
}
if statErr != nil {
c.Log(logger.LevelError, "error performing file stat %#v: %+v", p, statErr)
c.Log(logger.LevelError, "error performing file stat %q: %+v", p, statErr)
return nil, c.GetFsError(fs, statErr)
}
// This happens if we upload a file that has the same name as an existing directory
if stat.IsDir() {
c.Log(logger.LevelError, "attempted to open a directory for writing to: %#v", p)
c.Log(logger.LevelError, "attempted to open a directory for writing to: %q", p)
return nil, c.GetOpUnsupportedError()
}
@ -178,7 +178,7 @@ func (c *Connection) getFileWriter(name string) (io.WriteCloser, error) {
if common.Config.IsAtomicUploadEnabled() && fs.IsAtomicUploadSupported() {
_, _, err = fs.Rename(p, filePath)
if err != nil {
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %+v",
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %q, dest: %q, err: %+v",
p, filePath, err)
return nil, c.GetFsError(fs, err)
}
@ -195,7 +195,7 @@ func (c *Connection) handleUploadFile(fs vfs.Fs, resolvedPath, filePath, request
}
_, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, fileSize, os.O_TRUNC)
if err != nil {
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
return nil, c.GetPermissionDeniedError()
}
@ -203,7 +203,7 @@ func (c *Connection) handleUploadFile(fs vfs.Fs, resolvedPath, filePath, request
file, w, cancelFn, err := fs.Create(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
c.Log(logger.LevelError, "error opening existing file, source: %#v, err: %+v", filePath, err)
c.Log(logger.LevelError, "error opening existing file, source: %q, err: %+v", filePath, err)
return nil, c.GetFsError(fs, err)
}

View file

@ -808,7 +808,7 @@ func (c *Conf) isWebClientEnabled() bool {
func (c *Conf) checkRequiredDirs(staticFilesPath, templatesPath string) error {
if (c.isWebAdminEnabled() || c.isWebClientEnabled()) && (staticFilesPath == "" || templatesPath == "") {
return fmt.Errorf("required directory is invalid, static file path: %#v template path: %#v",
return fmt.Errorf("required directory is invalid, static file path: %q template path: %q",
staticFilesPath, templatesPath)
}
return nil

View file

@ -118,7 +118,7 @@ func (s *httpdServer) validateJWTPartialToken(w http.ResponseWriter, r *http.Req
return errInvalidToken
}
if !util.Contains(token.Audience(), audience) {
logger.Debug(logSender, "", "the token is not valid for audience %#v", audience)
logger.Debug(logSender, "", "the token is not valid for audience %q", audience)
notFoundFunc(w, r, nil)
return errInvalidToken
}
@ -362,7 +362,7 @@ func checkAPIKeyAuth(tokenAuth *jwtauth.JWTAuth, scope dataprovider.APIKeyScope)
}
keyParams := strings.SplitN(apiKey, ".", 3)
if len(keyParams) < 2 {
logger.Debug(logSender, "", "invalid api key %#v", apiKey)
logger.Debug(logSender, "", "invalid api key %q", apiKey)
sendAPIResponse(w, r, errors.New("the provided api key is not valid"), "", http.StatusBadRequest)
return
}
@ -375,12 +375,12 @@ func checkAPIKeyAuth(tokenAuth *jwtauth.JWTAuth, scope dataprovider.APIKeyScope)
k, err := dataprovider.APIKeyExists(keyID)
if err != nil {
logger.Debug(logSender, "invalid api key %#v: %v", apiKey, err)
logger.Debug(logSender, "invalid api key %q: %v", apiKey, err)
sendAPIResponse(w, r, errors.New("the provided api key is not valid"), "", http.StatusBadRequest)
return
}
if err := k.Authenticate(key); err != nil {
logger.Debug(logSender, "", "unable to authenticate api key %#v: %v", apiKey, err)
logger.Debug(logSender, "", "unable to authenticate api key %q: %v", apiKey, err)
sendAPIResponse(w, r, fmt.Errorf("the provided api key cannot be authenticated"), "", http.StatusUnauthorized)
return
}
@ -389,7 +389,7 @@ func checkAPIKeyAuth(tokenAuth *jwtauth.JWTAuth, scope dataprovider.APIKeyScope)
apiUser = k.Admin
}
if err := authenticateAdminWithAPIKey(apiUser, keyID, tokenAuth, r); err != nil {
logger.Debug(logSender, "", "unable to authenticate admin %#v associated with api key %#v: %v",
logger.Debug(logSender, "", "unable to authenticate admin %q associated with api key %q: %v",
apiUser, apiKey, err)
sendAPIResponse(w, r, fmt.Errorf("the admin associated with the provided api key cannot be authenticated"),
"", http.StatusUnauthorized)
@ -400,7 +400,7 @@ func checkAPIKeyAuth(tokenAuth *jwtauth.JWTAuth, scope dataprovider.APIKeyScope)
apiUser = k.User
}
if err := authenticateUserWithAPIKey(apiUser, keyID, tokenAuth, r); err != nil {
logger.Debug(logSender, "", "unable to authenticate user %#v associated with api key %#v: %v",
logger.Debug(logSender, "", "unable to authenticate user %q associated with api key %q: %v",
apiUser, apiKey, err)
code := http.StatusUnauthorized
if errors.Is(err, common.ErrInternalFailure) {
@ -443,7 +443,7 @@ func authenticateAdminWithAPIKey(username, keyID string, tokenAuth *jwtauth.JWTA
return err
}
if !admin.Filters.AllowAPIKeyAuth {
return fmt.Errorf("API key authentication disabled for admin %#v", admin.Username)
return fmt.Errorf("API key authentication disabled for admin %q", admin.Username)
}
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
if err := admin.CanLogin(ipAddr); err != nil {
@ -485,7 +485,7 @@ func authenticateUserWithAPIKey(username, keyID string, tokenAuth *jwtauth.JWTAu
return err
}
if !user.Filters.AllowAPIKeyAuth {
err := fmt.Errorf("API key authentication disabled for user %#v", user.Username)
err := fmt.Errorf("API key authentication disabled for user %q", user.Username)
updateLoginMetrics(&user, dataprovider.LoginMethodPassword, ipAddr, err)
return err
}

View file

@ -127,7 +127,7 @@ func (o *OIDC) getRedirectURL() string {
url = strings.TrimSuffix(o.RedirectBaseURL, "/")
}
url += webOIDCRedirectPath
logger.Debug(logSender, "", "oidc redirect URL: %#v", url)
logger.Debug(logSender, "", "oidc redirect URL: %q", url)
return url
}
@ -149,7 +149,7 @@ func (o *OIDC) initialize() error {
provider, err := oidc.NewProvider(ctx, o.ConfigURL)
if err != nil {
return fmt.Errorf("oidc: unable to initialize provider for URL %#v: %w", o.ConfigURL, err)
return fmt.Errorf("oidc: unable to initialize provider for URL %q: %w", o.ConfigURL, err)
}
claims := make(map[string]any)
// we cannot get an error here because the response body was already parsed as JSON
@ -159,7 +159,7 @@ func (o *OIDC) initialize() error {
if ok {
if val, ok := endSessionEndPoint.(string); ok {
o.providerLogoutURL = val
logger.Debug(logSender, "", "oidc end session endpoint %#v", o.providerLogoutURL)
logger.Debug(logSender, "", "oidc end session endpoint %q", o.providerLogoutURL)
}
}
o.provider = provider
@ -225,7 +225,7 @@ func (t *oidcToken) parseClaims(claims map[string]any, usernameField, roleField
username, ok := claims[usernameField].(string)
if !ok || username == "" {
logger.Warn(logSender, "", "username field %#v not found, claims fields: %+v", usernameField, getClaimsFields())
logger.Warn(logSender, "", "username field %q not found, claims fields: %+v", usernameField, getClaimsFields())
return errors.New("no username field")
}
t.Username = username
@ -242,10 +242,10 @@ func (t *oidcToken) parseClaims(claims map[string]any, usernameField, roleField
customFields := make(map[string]any)
t.CustomFields = &customFields
}
logger.Debug(logSender, "", "custom field %#v found in token claims", field)
logger.Debug(logSender, "", "custom field %q found in token claims", field)
(*t.CustomFields)[field] = val
} else {
logger.Info(logSender, "", "custom field %#v not found in token claims", field)
logger.Info(logSender, "", "custom field %q not found in token claims", field)
}
}
}
@ -316,7 +316,7 @@ func (t *oidcToken) isExpired() bool {
func (t *oidcToken) refresh(config OAuth2Config, verifier OIDCTokenVerifier, r *http.Request) error {
if t.RefreshToken == "" {
logger.Debug(logSender, "", "refresh token not set, unable to refresh cookie %#v", t.Cookie)
logger.Debug(logSender, "", "refresh token not set, unable to refresh cookie %q", t.Cookie)
return errors.New("refresh token not set")
}
oauth2Token := oauth2.Token{
@ -332,12 +332,12 @@ func (t *oidcToken) refresh(config OAuth2Config, verifier OIDCTokenVerifier, r *
newToken, err := config.TokenSource(ctx, &oauth2Token).Token()
if err != nil {
logger.Debug(logSender, "", "unable to refresh token for cookie %#v: %v", t.Cookie, err)
logger.Debug(logSender, "", "unable to refresh token for cookie %q: %v", t.Cookie, err)
return err
}
rawIDToken, ok := newToken.Extra("id_token").(string)
if !ok {
logger.Debug(logSender, "", "the refreshed token has no id token, cookie %#v", t.Cookie)
logger.Debug(logSender, "", "the refreshed token has no id token, cookie %q", t.Cookie)
return errors.New("the refreshed token has no id token")
}
@ -352,17 +352,17 @@ func (t *oidcToken) refresh(config OAuth2Config, verifier OIDCTokenVerifier, r *
}
idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
logger.Debug(logSender, "", "unable to verify refreshed id token for cookie %#v: %v", t.Cookie, err)
logger.Debug(logSender, "", "unable to verify refreshed id token for cookie %q: %v", t.Cookie, err)
return err
}
if idToken.Nonce != t.Nonce {
logger.Debug(logSender, "", "unable to verify refreshed id token for cookie %#v: nonce mismatch", t.Cookie)
logger.Debug(logSender, "", "unable to verify refreshed id token for cookie %q: nonce mismatch", t.Cookie)
return errors.New("the refreshed token nonce mismatch")
}
claims := make(map[string]any)
err = idToken.Claims(&claims)
if err != nil {
logger.Debug(logSender, "", "unable to get refreshed id token claims for cookie %#v: %v", t.Cookie, err)
logger.Debug(logSender, "", "unable to get refreshed id token claims for cookie %q: %v", t.Cookie, err)
return err
}
sid, ok := claims["sid"].(string)
@ -371,10 +371,10 @@ func (t *oidcToken) refresh(config OAuth2Config, verifier OIDCTokenVerifier, r *
}
err = t.refreshUser(r)
if err != nil {
logger.Debug(logSender, "", "unable to refresh user after token refresh for cookie %#v: %v", t.Cookie, err)
logger.Debug(logSender, "", "unable to refresh user after token refresh for cookie %q: %v", t.Cookie, err)
return err
}
logger.Debug(logSender, "", "oidc token refreshed for user %#v, cookie %#v", t.Username, t.Cookie)
logger.Debug(logSender, "", "oidc token refreshed for user %q, cookie %q", t.Username, t.Cookie)
oidcMgr.addToken(*t)
return nil
@ -474,12 +474,12 @@ func (s *httpdServer) validateOIDCToken(w http.ResponseWriter, r *http.Request,
}
token, err := oidcMgr.getToken(cookie.Value)
if err != nil {
logger.Debug(logSender, "", "error getting oidc token associated with cookie %#v: %v", cookie.Value, err)
logger.Debug(logSender, "", "error getting oidc token associated with cookie %q: %v", cookie.Value, err)
doRedirect()
return oidcToken{}, errInvalidToken
}
if token.isExpired() {
logger.Debug(logSender, "", "oidc token associated with cookie %#v is expired", token.Cookie)
logger.Debug(logSender, "", "oidc token associated with cookie %q is expired", token.Cookie)
if err = token.refresh(s.binding.OIDC.oauth2Config, s.binding.OIDC.verifier, r); err != nil {
setFlashMessage(w, r, "Your OpenID token is expired, please log-in again")
doRedirect()
@ -490,7 +490,7 @@ func (s *httpdServer) validateOIDCToken(w http.ResponseWriter, r *http.Request,
}
if isAdmin {
if !token.isAdmin() {
logger.Debug(logSender, "", "oidc token associated with cookie %#v is not valid for admin users", token.Cookie)
logger.Debug(logSender, "", "oidc token associated with cookie %q is not valid for admin users", token.Cookie)
setFlashMessage(w, r, "Your OpenID token is not valid for the SFTPGo Web Admin UI. Please logout from your OpenID server and log-in as an SFTPGo admin")
doRedirect()
return oidcToken{}, errInvalidToken
@ -498,7 +498,7 @@ func (s *httpdServer) validateOIDCToken(w http.ResponseWriter, r *http.Request,
return token, nil
}
if token.isAdmin() {
logger.Debug(logSender, "", "oidc token associated with cookie %#v is valid for admin users", token.Cookie)
logger.Debug(logSender, "", "oidc token associated with cookie %q is valid for admin users", token.Cookie)
setFlashMessage(w, r, "Your OpenID token is not valid for the SFTPGo Web Client UI. Please logout from your OpenID server and log-in as an SFTPGo user")
doRedirect()
return oidcToken{}, errInvalidToken
@ -735,7 +735,7 @@ func (s *httpdServer) doOIDCFromLogout(idToken string) {
logoutURL.RawQuery = query.Encode()
resp, err := httpclient.RetryableGet(logoutURL.String())
if err != nil {
logger.Warn(logSender, "", "oidc: error calling logout URL %#v: %v", logoutURL.String(), err)
logger.Warn(logSender, "", "oidc: error calling logout URL %q: %v", logoutURL.String(), err)
return
}
defer resp.Body.Close()

View file

@ -114,7 +114,7 @@ func (s *httpdServer) listenAndServe() error {
PreferServerCipherSuites: true,
}
httpServer.TLSConfig = config
logger.Debug(logSender, "", "configured TLS cipher suites for binding %#v: %v, certID: %v",
logger.Debug(logSender, "", "configured TLS cipher suites for binding %q: %v, certID: %v",
s.binding.GetAddress(), httpServer.TLSConfig.CipherSuites, certID)
if s.binding.ClientAuthType == 1 {
httpServer.TLSConfig.ClientCAs = certMgr.GetRootCAs()
@ -144,7 +144,7 @@ func (s *httpdServer) verifyTLSConnection(state tls.ConnectionState) error {
caCrt = verifiedChain[len(verifiedChain)-1]
}
if certMgr.IsRevoked(clientCrt, caCrt) {
logger.Debug(logSender, "", "tls handshake error, client certificate %#v has been revoked", clientCrtName)
logger.Debug(logSender, "", "tls handshake error, client certificate %q has been revoked", clientCrtName)
return common.ErrCrtRevoked
}
}
@ -352,7 +352,7 @@ func (s *httpdServer) handleWebClientTwoFactorRecoveryPost(w http.ResponseWriter
user.Filters.RecoveryCodes[idx].Used = true
err = dataprovider.UpdateUser(&user, dataprovider.ActionExecutorSelf, ipAddr, user.Role)
if err != nil {
logger.Warn(logSender, "", "unable to set the recovery code %#v as used: %v", recoveryCode, err)
logger.Warn(logSender, "", "unable to set the recovery code %q as used: %v", recoveryCode, err)
s.renderClientInternalServerErrorPage(w, r, errors.New("unable to set the recovery code as used"))
return
}
@ -456,7 +456,7 @@ func (s *httpdServer) handleWebAdminTwoFactorRecoveryPost(w http.ResponseWriter,
admin.Filters.RecoveryCodes[idx].Used = true
err = dataprovider.UpdateAdmin(&admin, dataprovider.ActionExecutorSelf, ipAddr, admin.Role)
if err != nil {
logger.Warn(logSender, "", "unable to set the recovery code %#v as used: %v", recoveryCode, err)
logger.Warn(logSender, "", "unable to set the recovery code %q as used: %v", recoveryCode, err)
s.renderInternalServerErrorPage(w, r, errors.New("unable to set the recovery code as used"))
return
}
@ -800,7 +800,7 @@ func (s *httpdServer) getUserToken(w http.ResponseWriter, r *http.Request) {
if user.Filters.TOTPConfig.Enabled && util.Contains(user.Filters.TOTPConfig.Protocols, common.ProtocolHTTP) {
passcode := r.Header.Get(otpHeaderCode)
if passcode == "" {
logger.Debug(logSender, "", "TOTP enabled for user %#v and not passcode provided, authentication refused", user.Username)
logger.Debug(logSender, "", "TOTP enabled for user %q and not passcode provided, authentication refused", user.Username)
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
updateLoginMetrics(&user, dataprovider.LoginMethodPassword, ipAddr, dataprovider.ErrInvalidCredentials)
sendAPIResponse(w, r, dataprovider.ErrInvalidCredentials, http.StatusText(http.StatusUnauthorized),
@ -816,7 +816,7 @@ func (s *httpdServer) getUserToken(w http.ResponseWriter, r *http.Request) {
match, err := mfa.ValidateTOTPPasscode(user.Filters.TOTPConfig.ConfigName, passcode,
user.Filters.TOTPConfig.Secret.GetPayload())
if !match || err != nil {
logger.Debug(logSender, "invalid passcode for user %#v, match? %v, err: %v", user.Username, match, err)
logger.Debug(logSender, "invalid passcode for user %q, match? %v, err: %v", user.Username, match, err)
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
updateLoginMetrics(&user, dataprovider.LoginMethodPassword, ipAddr, dataprovider.ErrInvalidCredentials)
sendAPIResponse(w, r, dataprovider.ErrInvalidCredentials, http.StatusText(http.StatusUnauthorized),
@ -878,7 +878,7 @@ func (s *httpdServer) getToken(w http.ResponseWriter, r *http.Request) {
if admin.Filters.TOTPConfig.Enabled {
passcode := r.Header.Get(otpHeaderCode)
if passcode == "" {
logger.Debug(logSender, "", "TOTP enabled for admin %#v and not passcode provided, authentication refused", admin.Username)
logger.Debug(logSender, "", "TOTP enabled for admin %q and not passcode provided, authentication refused", admin.Username)
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
sendAPIResponse(w, r, dataprovider.ErrInvalidCredentials, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return
@ -892,7 +892,7 @@ func (s *httpdServer) getToken(w http.ResponseWriter, r *http.Request) {
match, err := mfa.ValidateTOTPPasscode(admin.Filters.TOTPConfig.ConfigName, passcode,
admin.Filters.TOTPConfig.Secret.GetPayload())
if !match || err != nil {
logger.Debug(logSender, "invalid passcode for admin %#v, match? %v, err: %v", admin.Username, match, err)
logger.Debug(logSender, "invalid passcode for admin %q, match? %v, err: %v", admin.Username, match, err)
w.Header().Set(common.HTTPAuthenticationHeader, basicRealm)
sendAPIResponse(w, r, dataprovider.ErrInvalidCredentials, http.StatusText(http.StatusUnauthorized),
http.StatusUnauthorized)
@ -971,22 +971,22 @@ func (s *httpdServer) refreshAdminToken(w http.ResponseWriter, r *http.Request,
return
}
if admin.Status != 1 {
logger.Debug(logSender, "", "admin %#v is disabled, unable to refresh cookie", admin.Username)
logger.Debug(logSender, "", "admin %q is disabled, unable to refresh cookie", admin.Username)
return
}
if admin.GetSignature() != tokenClaims.Signature {
logger.Debug(logSender, "", "signature mismatch for admin %#v, unable to refresh cookie", admin.Username)
logger.Debug(logSender, "", "signature mismatch for admin %q, unable to refresh cookie", admin.Username)
return
}
ipAddr := util.GetIPFromRemoteAddress(r.RemoteAddr)
if !admin.CanLoginFromIP(ipAddr) {
logger.Debug(logSender, "", "admin %#v cannot login from %v, unable to refresh cookie", admin.Username, r.RemoteAddr)
logger.Debug(logSender, "", "admin %q cannot login from %v, unable to refresh cookie", admin.Username, r.RemoteAddr)
return
}
tokenClaims.Permissions = admin.Permissions
tokenClaims.Role = admin.Role
tokenClaims.HideUserPageSections = admin.Filters.Preferences.HideUserPageSections
logger.Debug(logSender, "", "cookie refreshed for admin %#v", admin.Username)
logger.Debug(logSender, "", "cookie refreshed for admin %q", admin.Username)
tokenClaims.createAndSetCookie(w, r, s.tokenAuth, tokenAudienceWebAdmin, ipAddr) //nolint:errcheck
}

View file

@ -1362,21 +1362,21 @@ func getDataTransferLimitsFromPostFields(r *http.Request) ([]sdk.DataTransferLim
if ul != "" {
dataUL, err := strconv.ParseInt(ul, 10, 64)
if err != nil {
return result, fmt.Errorf("invalid upload_data_transfer_source%v %#v: %w", idx, ul, err)
return result, fmt.Errorf("invalid upload_data_transfer_source%v %q: %w", idx, ul, err)
}
dtLimit.UploadDataTransfer = dataUL
}
if dl != "" {
dataDL, err := strconv.ParseInt(dl, 10, 64)
if err != nil {
return result, fmt.Errorf("invalid download_data_transfer_source%v %#v: %w", idx, dl, err)
return result, fmt.Errorf("invalid download_data_transfer_source%v %q: %w", idx, dl, err)
}
dtLimit.DownloadDataTransfer = dataDL
}
if total != "" {
dataTotal, err := strconv.ParseInt(total, 10, 64)
if err != nil {
return result, fmt.Errorf("invalid total_data_transfer_source%v %#v: %w", idx, total, err)
return result, fmt.Errorf("invalid total_data_transfer_source%v %q: %w", idx, total, err)
}
dtLimit.TotalDataTransfer = dataTotal
}
@ -1405,14 +1405,14 @@ func getBandwidthLimitsFromPostFields(r *http.Request) ([]sdk.BandwidthLimit, er
if ul != "" {
bandwidthUL, err := strconv.ParseInt(ul, 10, 64)
if err != nil {
return result, fmt.Errorf("invalid upload_bandwidth_source%v %#v: %w", idx, ul, err)
return result, fmt.Errorf("invalid upload_bandwidth_source%v %q: %w", idx, ul, err)
}
bwLimit.UploadBandwidth = bandwidthUL
}
if dl != "" {
bandwidthDL, err := strconv.ParseInt(dl, 10, 64)
if err != nil {
return result, fmt.Errorf("invalid download_bandwidth_source%v %#v: %w", idx, ul, err)
return result, fmt.Errorf("invalid download_bandwidth_source%v %q: %w", idx, ul, err)
}
bwLimit.DownloadBandwidth = bandwidthDL
}
@ -3005,7 +3005,7 @@ func (s *httpdServer) handleWebTemplateFolderPost(w http.ResponseWriter, r *http
for _, tmpl := range foldersFields {
f := getFolderFromTemplate(templateFolder, tmpl)
if err := dataprovider.ValidateFolder(&f); err != nil {
s.renderMessagePage(w, r, "Folder validation error", fmt.Sprintf("Error validating folder %#v", f.Name),
s.renderMessagePage(w, r, "Folder validation error", fmt.Sprintf("Error validating folder %q", f.Name),
http.StatusBadRequest, err, "")
return
}
@ -3095,7 +3095,7 @@ func (s *httpdServer) handleWebTemplateUserPost(w http.ResponseWriter, r *http.R
for _, tmpl := range userTmplFields {
u := getUserFromTemplate(templateUser, tmpl)
if err := dataprovider.ValidateUser(&u); err != nil {
s.renderMessagePage(w, r, "User validation error", fmt.Sprintf("Error validating user %#v", u.Username),
s.renderMessagePage(w, r, "User validation error", fmt.Sprintf("Error validating user %q", u.Username),
http.StatusBadRequest, err, "")
return
}

View file
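
Note, not part of the diff: a minimal standalone sketch (the helper name is hypothetical) of the pattern used in the limit-parsing hunks above, where the raw form value is quoted with %q and the strconv error is wrapped with %w so callers can still match it with errors.Is.

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseLimit mirrors the error construction sketched above: quote the
// offending input with %q and wrap the underlying parse error with %w.
func parseLimit(field, raw string) (int64, error) {
	v, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid %s %q: %w", field, raw, err)
	}
	return v, nil
}

func main() {
	_, err := parseLimit("upload_bandwidth_source1", "abc")
	fmt.Println(err)                               // invalid upload_bandwidth_source1 "abc": strconv.ParseInt: parsing "abc": invalid syntax
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true
}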

@ -960,7 +960,7 @@ func (s *httpdServer) handleClientGetFiles(w http.ResponseWriter, r *http.Reques
info, err = connection.Stat(name, 0)
}
if err != nil {
s.renderFilesPage(w, r, path.Dir(name), fmt.Sprintf("unable to stat file %#v: %v", name, err),
s.renderFilesPage(w, r, path.Dir(name), fmt.Sprintf("unable to stat file %q: %v", name, err),
user, len(s.binding.WebClientIntegrations) > 0)
return
}
@ -1014,17 +1014,17 @@ func (s *httpdServer) handleClientEditFile(w http.ResponseWriter, r *http.Reques
name := connection.User.GetCleanedPath(r.URL.Query().Get("path"))
info, err := connection.Stat(name, 0)
if err != nil {
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to stat file %#v", name), "",
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to stat file %q", name), "",
getRespStatus(err), nil, "")
return
}
if info.IsDir() {
s.renderClientMessagePage(w, r, fmt.Sprintf("The path %#v does not point to a file", name), "",
s.renderClientMessagePage(w, r, fmt.Sprintf("The path %q does not point to a file", name), "",
http.StatusBadRequest, nil, "")
return
}
if info.Size() > httpdMaxEditFileSize {
s.renderClientMessagePage(w, r, fmt.Sprintf("The file size %v for %#v exceeds the maximum allowed size",
s.renderClientMessagePage(w, r, fmt.Sprintf("The file size %v for %q exceeds the maximum allowed size",
util.ByteCountIEC(info.Size()), name), "", http.StatusBadRequest, nil, "")
return
}
@ -1032,7 +1032,7 @@ func (s *httpdServer) handleClientEditFile(w http.ResponseWriter, r *http.Reques
connection.User.CheckFsRoot(connection.ID) //nolint:errcheck
reader, err := connection.getFileReader(name, 0, r.Method)
if err != nil {
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to get a reader for the file %#v", name), "",
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to get a reader for the file %q", name), "",
getRespStatus(err), nil, "")
return
}
@ -1041,7 +1041,7 @@ func (s *httpdServer) handleClientEditFile(w http.ResponseWriter, r *http.Reques
var b bytes.Buffer
_, err = io.Copy(&b, reader)
if err != nil {
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to read the file %#v", name), "", http.StatusInternalServerError,
s.renderClientMessagePage(w, r, fmt.Sprintf("Unable to read the file %q", name), "", http.StatusInternalServerError,
nil, "")
return
}

View file

@ -120,7 +120,7 @@ func (c *Configuration) Initialize() error {
config.Secrets.URL = sdkkms.SchemeLocal + "://"
}
for k, v := range secretProviders {
logger.Info(logSender, "", "secret provider registered for scheme: %#v, encrypted status: %#v",
logger.Info(logSender, "", "secret provider registered for scheme: %q, encrypted status: %q",
k, v.encryptedStatus)
}
return nil
@ -195,7 +195,7 @@ func (s *Secret) UnmarshalJSON(data []byte) error {
return nil
}
}
logger.Error(logSender, "", "no provider registered for status %#v", baseSecret.Status)
logger.Error(logSender, "", "no provider registered for status %q", baseSecret.Status)
return ErrInvalidSecret
}

View file

@ -75,7 +75,7 @@ func InitLogger(logFilePath string, logMaxSize int, logMaxBackups int, logMaxAge
if _, err := os.Stat(logDir); errors.Is(err, fs.ErrNotExist) {
err = os.MkdirAll(logDir, os.ModePerm)
if err != nil {
fmt.Printf("unable to create log dir %#v: %v", logDir, err)
fmt.Printf("unable to create log dir %q: %v", logDir, err)
}
}
rollingLogger = &lumberjack.Logger{

View file

@ -56,7 +56,7 @@ func (c *Config) Initialize() error {
}
if _, ok := totp[totpConfig.Name]; ok {
totpConfigs = nil
return fmt.Errorf("totp: duplicate configuration name %#v", totpConfig.Name)
return fmt.Errorf("totp: duplicate configuration name %q", totpConfig.Name)
}
totp[totpConfig.Name] = true
totpConfigs = append(totpConfigs, &totpConfig)
@ -89,7 +89,7 @@ func ValidateTOTPPasscode(configName, passcode, secret string) (bool, error) {
}
}
return false, fmt.Errorf("totp: no configuration %#v", configName)
return false, fmt.Errorf("totp: no configuration %q", configName)
}
// GenerateTOTPSecret generates a new TOTP secret and QR code for the given username
@ -102,7 +102,7 @@ func GenerateTOTPSecret(configName, username string) (string, string, string, []
}
}
return "", "", "", nil, fmt.Errorf("totp: no configuration %#v", configName)
return "", "", "", nil, fmt.Errorf("totp: no configuration %q", configName)
}
// the ticker cannot be started/stopped from multiple goroutines

View file

@ -66,7 +66,7 @@ func (c *TOTPConfig) validate() error {
case TOTPAlgoSHA512:
c.algo = otp.AlgorithmSHA512
default:
return fmt.Errorf("unsupported totp algo %#v", c.Algo)
return fmt.Errorf("unsupported totp algo %q", c.Algo)
}
return nil
}

View file

@ -107,9 +107,9 @@ func newAuthPlugin(config Config) (*authPlugin, error) {
func (p *authPlugin) initialize() error {
killProcess(p.config.Cmd)
logger.Debug(logSender, "", "create new auth plugin %#v", p.config.Cmd)
logger.Debug(logSender, "", "create new auth plugin %q", p.config.Cmd)
if err := p.config.AuthOptions.validate(); err != nil {
return fmt.Errorf("invalid options for auth plugin %#v: %v", p.config.Cmd, err)
return fmt.Errorf("invalid options for auth plugin %q: %v", p.config.Cmd, err)
}
secureConfig, err := p.config.getSecureConfig()
@ -136,12 +136,12 @@ func (p *authPlugin) initialize() error {
})
rpcClient, err := client.Client()
if err != nil {
logger.Debug(logSender, "", "unable to get rpc client for kms plugin %#v: %v", p.config.Cmd, err)
logger.Debug(logSender, "", "unable to get rpc client for kms plugin %q: %v", p.config.Cmd, err)
return err
}
raw, err := rpcClient.Dispense(auth.PluginName)
if err != nil {
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %#v: %v",
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
auth.PluginName, p.config.Cmd, err)
return err
}

View file

@ -51,7 +51,7 @@ func (p *ipFilterPlugin) cleanup() {
}
func (p *ipFilterPlugin) initialize() error {
logger.Debug(logSender, "", "create new IP filter plugin %#v", p.config.Cmd)
logger.Debug(logSender, "", "create new IP filter plugin %q", p.config.Cmd)
killProcess(p.config.Cmd)
secureConfig, err := p.config.getSecureConfig()
if err != nil {
@ -77,12 +77,12 @@ func (p *ipFilterPlugin) initialize() error {
})
rpcClient, err := client.Client()
if err != nil {
logger.Debug(logSender, "", "unable to get rpc client for plugin %#v: %v", p.config.Cmd, err)
logger.Debug(logSender, "", "unable to get rpc client for plugin %q: %v", p.config.Cmd, err)
return err
}
raw, err := rpcClient.Dispense(ipfilter.PluginName)
if err != nil {
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %#v: %v",
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
ipfilter.PluginName, p.config.Cmd, err)
return err
}

View file

@ -70,9 +70,9 @@ func newKMSPlugin(config Config) (*kmsPlugin, error) {
func (p *kmsPlugin) initialize() error {
killProcess(p.config.Cmd)
logger.Debug(logSender, "", "create new kms plugin %#v", p.config.Cmd)
logger.Debug(logSender, "", "create new kms plugin %q", p.config.Cmd)
if err := p.config.KMSOptions.validate(); err != nil {
return fmt.Errorf("invalid options for kms plugin %#v: %v", p.config.Cmd, err)
return fmt.Errorf("invalid options for kms plugin %q: %v", p.config.Cmd, err)
}
secureConfig, err := p.config.getSecureConfig()
if err != nil {
@ -98,12 +98,12 @@ func (p *kmsPlugin) initialize() error {
})
rpcClient, err := client.Client()
if err != nil {
logger.Debug(logSender, "", "unable to get rpc client for kms plugin %#v: %v", p.config.Cmd, err)
logger.Debug(logSender, "", "unable to get rpc client for kms plugin %q: %v", p.config.Cmd, err)
return err
}
raw, err := rpcClient.Dispense(kmsplugin.PluginName)
if err != nil {
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %#v: %v",
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
kmsplugin.PluginName, p.config.Cmd, err)
return err
}

View file

@ -52,7 +52,7 @@ func (p *metadataPlugin) cleanup() {
func (p *metadataPlugin) initialize() error {
killProcess(p.config.Cmd)
logger.Debug(logSender, "", "create new metadata plugin %#v", p.config.Cmd)
logger.Debug(logSender, "", "create new metadata plugin %q", p.config.Cmd)
secureConfig, err := p.config.getSecureConfig()
if err != nil {
return err
@ -77,12 +77,12 @@ func (p *metadataPlugin) initialize() error {
})
rpcClient, err := client.Client()
if err != nil {
logger.Debug(logSender, "", "unable to get rpc client for plugin %#v: %v", p.config.Cmd, err)
logger.Debug(logSender, "", "unable to get rpc client for plugin %q: %v", p.config.Cmd, err)
return err
}
raw, err := rpcClient.Dispense(metadata.PluginName)
if err != nil {
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %#v: %v",
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
metadata.PluginName, p.config.Cmd, err)
return err
}

View file

@ -133,9 +133,9 @@ func (p *notifierPlugin) cleanup() {
func (p *notifierPlugin) initialize() error {
killProcess(p.config.Cmd)
logger.Debug(logSender, "", "create new notifier plugin %#v", p.config.Cmd)
logger.Debug(logSender, "", "create new notifier plugin %q", p.config.Cmd)
if !p.config.NotifierOptions.hasActions() {
return fmt.Errorf("no actions defined for the notifier plugin %#v", p.config.Cmd)
return fmt.Errorf("no actions defined for the notifier plugin %q", p.config.Cmd)
}
secureConfig, err := p.config.getSecureConfig()
if err != nil {
@ -161,12 +161,12 @@ func (p *notifierPlugin) initialize() error {
})
rpcClient, err := client.Client()
if err != nil {
logger.Debug(logSender, "", "unable to get rpc client for plugin %#v: %v", p.config.Cmd, err)
logger.Debug(logSender, "", "unable to get rpc client for plugin %q: %v", p.config.Cmd, err)
return err
}
raw, err := rpcClient.Dispense(notifier.PluginName)
if err != nil {
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %#v: %v",
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
notifier.PluginName, p.config.Cmd, err)
return err
}
@ -248,7 +248,7 @@ func (p *notifierPlugin) sendQueuedEvents() {
if queueSize == 0 {
return
}
logger.Debug(logSender, "", "check queued events for notifier %#v, events size: %v", p.config.Cmd, queueSize)
logger.Debug(logSender, "", "check queued events for notifier %q, events size: %v", p.config.Cmd, queueSize)
fsEv := p.queue.popFsEvent()
for fsEv != nil {
go func(ev *notifier.FsEvent) {
@ -264,5 +264,5 @@ func (p *notifierPlugin) sendQueuedEvents() {
}(providerEv)
providerEv = p.queue.popProviderEvent()
}
logger.Debug(logSender, "", "queued events sent for notifier %#v, new events size: %v", p.config.Cmd, p.queue.getSize())
logger.Debug(logSender, "", "queued events sent for notifier %q, new events size: %v", p.config.Cmd, p.queue.getSize())
}

View file

@ -233,10 +233,10 @@ func (m *Manager) validateConfigs() error {
switch config.Type {
case kmsplugin.PluginName:
if _, ok := kmsSchemes[config.KMSOptions.Scheme]; ok {
return fmt.Errorf("invalid KMS configuration, duplicated scheme %#v", config.KMSOptions.Scheme)
return fmt.Errorf("invalid KMS configuration, duplicated scheme %q", config.KMSOptions.Scheme)
}
if _, ok := kmsEncryptions[config.KMSOptions.EncryptedStatus]; ok {
return fmt.Errorf("invalid KMS configuration, duplicated encrypted status %#v", config.KMSOptions.EncryptedStatus)
return fmt.Errorf("invalid KMS configuration, duplicated encrypted status %q", config.KMSOptions.EncryptedStatus)
}
kmsSchemes[config.KMSOptions.Scheme] = true
kmsEncryptions[config.KMSOptions.EncryptedStatus] = true
@ -397,7 +397,7 @@ func (m *Manager) IsIPBanned(ip, protocol string) bool {
m.ipFilterLock.RUnlock()
if plugin.exited() {
logger.Warn(logSender, "", "ip filter plugin is not active, cannot check ip %#v", ip)
logger.Warn(logSender, "", "ip filter plugin is not active, cannot check ip %q", ip)
return false
}
@ -629,10 +629,10 @@ func (m *Manager) restartNotifierPlugin(config Config, idx int) {
if m.closed.Load() {
return
}
logger.Info(logSender, "", "try to restart crashed notifier plugin %#v, idx: %v", config.Cmd, idx)
logger.Info(logSender, "", "try to restart crashed notifier plugin %q, idx: %v", config.Cmd, idx)
plugin, err := newNotifierPlugin(config)
if err != nil {
logger.Error(logSender, "", "unable to restart notifier plugin %#v, err: %v", config.Cmd, err)
logger.Error(logSender, "", "unable to restart notifier plugin %q, err: %v", config.Cmd, err)
return
}
@ -647,10 +647,10 @@ func (m *Manager) restartKMSPlugin(config Config, idx int) {
if m.closed.Load() {
return
}
logger.Info(logSender, "", "try to restart crashed kms plugin %#v, idx: %v", config.Cmd, idx)
logger.Info(logSender, "", "try to restart crashed kms plugin %q, idx: %v", config.Cmd, idx)
plugin, err := newKMSPlugin(config)
if err != nil {
logger.Error(logSender, "", "unable to restart kms plugin %#v, err: %v", config.Cmd, err)
logger.Error(logSender, "", "unable to restart kms plugin %q, err: %v", config.Cmd, err)
return
}
@ -663,10 +663,10 @@ func (m *Manager) restartAuthPlugin(config Config, idx int) {
if m.closed.Load() {
return
}
logger.Info(logSender, "", "try to restart crashed auth plugin %#v, idx: %v", config.Cmd, idx)
logger.Info(logSender, "", "try to restart crashed auth plugin %q, idx: %v", config.Cmd, idx)
plugin, err := newAuthPlugin(config)
if err != nil {
logger.Error(logSender, "", "unable to restart auth plugin %#v, err: %v", config.Cmd, err)
logger.Error(logSender, "", "unable to restart auth plugin %q, err: %v", config.Cmd, err)
return
}
@ -679,10 +679,10 @@ func (m *Manager) restartSearcherPlugin(config Config) {
if m.closed.Load() {
return
}
logger.Info(logSender, "", "try to restart crashed searcher plugin %#v", config.Cmd)
logger.Info(logSender, "", "try to restart crashed searcher plugin %q", config.Cmd)
plugin, err := newSearcherPlugin(config)
if err != nil {
logger.Error(logSender, "", "unable to restart searcher plugin %#v, err: %v", config.Cmd, err)
logger.Error(logSender, "", "unable to restart searcher plugin %q, err: %v", config.Cmd, err)
return
}
@ -695,10 +695,10 @@ func (m *Manager) restartMetadaterPlugin(config Config) {
if m.closed.Load() {
return
}
logger.Info(logSender, "", "try to restart crashed metadater plugin %#v", config.Cmd)
logger.Info(logSender, "", "try to restart crashed metadater plugin %q", config.Cmd)
plugin, err := newMetadaterPlugin(config)
if err != nil {
logger.Error(logSender, "", "unable to restart metadater plugin %#v, err: %v", config.Cmd, err)
logger.Error(logSender, "", "unable to restart metadater plugin %q, err: %v", config.Cmd, err)
return
}
@ -711,10 +711,10 @@ func (m *Manager) restartIPFilterPlugin(config Config) {
if m.closed.Load() {
return
}
logger.Info(logSender, "", "try to restart crashed IP filter plugin %#v", config.Cmd)
logger.Info(logSender, "", "try to restart crashed IP filter plugin %q", config.Cmd)
plugin, err := newIPFilterPlugin(config)
if err != nil {
logger.Error(logSender, "", "unable to restart IP filter plugin %#v, err: %v", config.Cmd, err)
logger.Error(logSender, "", "unable to restart IP filter plugin %q, err: %v", config.Cmd, err)
return
}

View file

@ -52,7 +52,7 @@ func (p *searcherPlugin) cleanup() {
func (p *searcherPlugin) initialize() error {
killProcess(p.config.Cmd)
logger.Debug(logSender, "", "create new searcher plugin %#v", p.config.Cmd)
logger.Debug(logSender, "", "create new searcher plugin %q", p.config.Cmd)
secureConfig, err := p.config.getSecureConfig()
if err != nil {
return err
@ -77,12 +77,12 @@ func (p *searcherPlugin) initialize() error {
})
rpcClient, err := client.Client()
if err != nil {
logger.Debug(logSender, "", "unable to get rpc client for plugin %#v: %v", p.config.Cmd, err)
logger.Debug(logSender, "", "unable to get rpc client for plugin %q: %v", p.config.Cmd, err)
return err
}
raw, err := rpcClient.Dispense(eventsearcher.PluginName)
if err != nil {
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %#v: %v",
logger.Debug(logSender, "", "unable to get plugin %v from rpc client for command %q: %v",
eventsearcher.PluginName, p.config.Cmd, err)
return err
}

View file

@ -74,7 +74,7 @@ func registerAWSContainer(disableAWSInstallationCode bool) error {
if err != nil {
return fmt.Errorf("unable to register API operation for AWSMarketplace Metering: %w", err)
}
logger.Debug(logSender, "", "API operation for AWSMarketplace Metering registered, token %#v",
logger.Debug(logSender, "", "API operation for AWSMarketplace Metering registered, token %q",
util.GetStringFromPointer(result.Signature))
return nil
}
@ -88,7 +88,7 @@ func getAWSConfig(ctx context.Context) (aws.Config, error) {
svc := imds.NewFromConfig(cfg)
region, err := svc.GetRegion(ctx, &imds.GetRegionInput{})
if err == nil {
logger.Debug(logSender, "", "AWS region from imds %#v", region.Region)
logger.Debug(logSender, "", "AWS region from imds %q", region.Region)
cfg.Region = region.Region
} else {
logger.Warn(logSender, "", "unable to get region from imds, continuing anyway, error: %v", err)
@ -124,7 +124,7 @@ func setInstallationCode(cfg aws.Config) error {
if err != nil {
return fmt.Errorf("unable to update installation code: %w", err)
}
logger.Debug(logSender, "", "installation code updated, secret name %#v, arn %#v, version id %#v",
logger.Debug(logSender, "", "installation code updated, secret name %q, arn %q, version id %q",
util.GetStringFromPointer(result.Name), util.GetStringFromPointer(result.ARN),
util.GetStringFromPointer(result.VersionId))
} else {
@ -138,7 +138,7 @@ func setInstallationCode(cfg aws.Config) error {
if err != nil {
return fmt.Errorf("unable to create installation code: %w", err)
}
logger.Debug(logSender, "", "installation code set, secret name %#v, arn %#v, version id %#v",
logger.Debug(logSender, "", "installation code set, secret name %q, arn %q, version id %q",
util.GetStringFromPointer(result.Name), util.GetStringFromPointer(result.ARN),
util.GetStringFromPointer(result.VersionId))
}

View file

@ -92,7 +92,7 @@ func (s *Service) initLogger() {
func (s *Service) Start(disableAWSInstallationCode bool) error {
s.initLogger()
logger.Info(logSender, "", "starting SFTPGo %v, config dir: %v, config file: %v, log max size: %v log max backups: %v "+
"log max age: %v log level: %v, log compress: %v, log utc time: %v, load data from: %#v, grace time: %d secs",
"log max age: %v log level: %v, log compress: %v, log utc time: %v, load data from: %q, grace time: %d secs",
version.GetAsString(), s.ConfigDir, s.ConfigFile, s.LogMaxSize, s.LogMaxBackups, s.LogMaxAge, s.LogLevel,
s.LogCompress, s.LogUTCTime, s.LoadDataFrom, graceTime)
// in portable mode we don't read configuration from file
@ -307,7 +307,7 @@ func (s *Service) LoadInitialData() error {
return nil
}
if !filepath.IsAbs(s.LoadDataFrom) {
return fmt.Errorf("invalid input_file %#v, it must be an absolute path", s.LoadDataFrom)
return fmt.Errorf("invalid input_file %q, it must be an absolute path", s.LoadDataFrom)
}
if s.LoadDataMode < 0 || s.LoadDataMode > 1 {
return fmt.Errorf("invalid loaddata-mode %v", s.LoadDataMode)
@ -317,7 +317,7 @@ func (s *Service) LoadInitialData() error {
}
info, err := os.Stat(s.LoadDataFrom)
if err != nil {
return fmt.Errorf("unable to stat file %#v: %w", s.LoadDataFrom, err)
return fmt.Errorf("unable to stat file %q: %w", s.LoadDataFrom, err)
}
if info.Size() > httpd.MaxRestoreSize {
return fmt.Errorf("unable to restore input file %q size too big: %d/%d bytes",
@ -325,26 +325,26 @@ func (s *Service) LoadInitialData() error {
}
content, err := os.ReadFile(s.LoadDataFrom)
if err != nil {
return fmt.Errorf("unable to read input file %#v: %w", s.LoadDataFrom, err)
return fmt.Errorf("unable to read input file %q: %w", s.LoadDataFrom, err)
}
dump, err := dataprovider.ParseDumpData(content)
if err != nil {
return fmt.Errorf("unable to parse file to restore %#v: %w", s.LoadDataFrom, err)
return fmt.Errorf("unable to parse file to restore %q: %w", s.LoadDataFrom, err)
}
err = s.restoreDump(&dump)
if err != nil {
return err
}
logger.Info(logSender, "", "data loaded from file %#v mode: %v", s.LoadDataFrom, s.LoadDataMode)
logger.InfoToConsole("data loaded from file %#v mode: %v", s.LoadDataFrom, s.LoadDataMode)
logger.Info(logSender, "", "data loaded from file %q mode: %v", s.LoadDataFrom, s.LoadDataMode)
logger.InfoToConsole("data loaded from file %q mode: %v", s.LoadDataFrom, s.LoadDataMode)
if s.LoadDataClean {
err = os.Remove(s.LoadDataFrom)
if err == nil {
logger.Info(logSender, "", "file %#v deleted after successful load", s.LoadDataFrom)
logger.InfoToConsole("file %#v deleted after successful load", s.LoadDataFrom)
logger.Info(logSender, "", "file %q deleted after successful load", s.LoadDataFrom)
logger.InfoToConsole("file %q deleted after successful load", s.LoadDataFrom)
} else {
logger.Warn(logSender, "", "unable to delete file %#v after successful load: %v", s.LoadDataFrom, err)
logger.WarnToConsole("unable to delete file %#v after successful load: %v", s.LoadDataFrom, err)
logger.Warn(logSender, "", "unable to delete file %q after successful load: %v", s.LoadDataFrom, err)
logger.WarnToConsole("unable to delete file %q after successful load: %v", s.LoadDataFrom, err)
}
}
return nil

View file
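
Note, not part of the diff: a small standalone sketch (the function name is hypothetical) of the input_file validation shown in the service hunks above: a relative path is rejected up front and the offending value is quoted with %q.

package main

import (
	"fmt"
	"path/filepath"
)

// checkLoadDataPath stands in for the check above: the restore file must be
// given as an absolute path.
func checkLoadDataPath(p string) error {
	if !filepath.IsAbs(p) {
		return fmt.Errorf("invalid input_file %q, it must be an absolute path", p)
	}
	return nil
}

func main() {
	fmt.Println(checkLoadDataPath("backup.json"))             // invalid input_file "backup.json", it must be an absolute path
	fmt.Println(checkLoadDataPath("/srv/sftpgo/backup.json")) // <nil>
}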

@ -117,7 +117,7 @@ func (s *Service) StartPortableMode(sftpdPort, ftpPort, webdavPort int, enabledS
return err
}
logger.InfoToConsole("Portable mode ready, user: %#v, password: %#v, public keys: %v, directory: %#v, "+
logger.InfoToConsole("Portable mode ready, user: %q, password: %q, public keys: %v, directory: %q, "+
"permissions: %+v, enabled ssh commands: %v file patterns filters: %+v %v", s.PortableUser.Username,
printablePassword, s.PortableUser.PublicKeys, s.getPortableDirToServe(), s.PortableUser.Permissions,
sftpdConf.EnabledSSHCommands, s.PortableUser.Filters.FilePatterns, s.getServiceOptionalInfoString())

View file

@ -47,7 +47,7 @@ type scpCommand struct {
func (c *scpCommand) handle() (err error) {
defer func() {
if r := recover(); r != nil {
logger.Error(logSender, "", "panic in handle scp command: %#v stack trace: %v", r, string(debug.Stack()))
logger.Error(logSender, "", "panic in handle scp command: %q stack trace: %v", r, string(debug.Stack()))
err = common.ErrGenericFailure
}
}()
@ -59,7 +59,7 @@ func (c *scpCommand) handle() (err error) {
destPath := c.getDestPath()
commandType := c.getCommandType()
c.connection.Log(logger.LevelDebug, "handle scp command, args: %v user: %v command type: %v, dest path: %#v",
c.connection.Log(logger.LevelDebug, "handle scp command, args: %v user: %v command type: %v, dest path: %q",
c.args, c.connection.User.Username, commandType, destPath)
if commandType == "-t" {
// -t means "to", so upload
@ -95,8 +95,8 @@ func (c *scpCommand) handleRecursiveUpload() error {
for {
fs, err := c.connection.User.GetFilesystemForPath(destPath, c.connection.ID)
if err != nil {
c.connection.Log(logger.LevelError, "error uploading file %#v: %+v", destPath, err)
c.sendErrorMessage(nil, fmt.Errorf("unable to get fs for path %#v", destPath))
c.connection.Log(logger.LevelError, "error uploading file %q: %+v", destPath, err)
c.sendErrorMessage(nil, fmt.Errorf("unable to get fs for path %q", destPath))
return err
}
command, err := c.getNextUploadProtocolMessage()
@ -127,15 +127,15 @@ func (c *scpCommand) handleRecursiveUpload() error {
destPath = path.Join(destPath, name)
fs, err = c.connection.User.GetFilesystemForPath(destPath, c.connection.ID)
if err != nil {
c.connection.Log(logger.LevelError, "error uploading file %#v: %+v", destPath, err)
c.sendErrorMessage(nil, fmt.Errorf("unable to get fs for path %#v", destPath))
c.connection.Log(logger.LevelError, "error uploading file %q: %+v", destPath, err)
c.sendErrorMessage(nil, fmt.Errorf("unable to get fs for path %q", destPath))
return err
}
err = c.handleCreateDir(fs, destPath)
if err != nil {
return err
}
c.connection.Log(logger.LevelDebug, "received start dir command, num dirs: %v destPath: %#v", numDirs, destPath)
c.connection.Log(logger.LevelDebug, "received start dir command, num dirs: %v destPath: %q", numDirs, destPath)
} else if strings.HasPrefix(command, "C") {
err = c.handleUpload(c.getFileUploadDestPath(fs, destPath, name), sizeToRead)
if err != nil {
@ -155,12 +155,12 @@ func (c *scpCommand) handleCreateDir(fs vfs.Fs, dirPath string) error {
p, err := fs.ResolvePath(dirPath)
if err != nil {
c.connection.Log(logger.LevelError, "error creating dir: %#v, invalid file path, err: %v", dirPath, err)
c.connection.Log(logger.LevelError, "error creating dir: %q, invalid file path, err: %v", dirPath, err)
c.sendErrorMessage(fs, err)
return err
}
if !c.connection.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(dirPath)) {
c.connection.Log(logger.LevelError, "error creating dir: %#v, permission denied", dirPath)
c.connection.Log(logger.LevelError, "error creating dir: %q, permission denied", dirPath)
c.sendErrorMessage(fs, common.ErrPermissionDenied)
return common.ErrPermissionDenied
}
@ -232,14 +232,14 @@ func (c *scpCommand) handleUploadFile(fs vfs.Fs, resolvedPath, filePath string,
diskQuota, transferQuota := c.connection.HasSpace(isNewFile, false, requestPath)
if !diskQuota.HasSpace || !transferQuota.HasUploadSpace() {
err := fmt.Errorf("denying file write due to quota limits")
c.connection.Log(logger.LevelError, "error uploading file: %#v, err: %v", filePath, err)
c.connection.Log(logger.LevelError, "error uploading file: %q, err: %v", filePath, err)
c.sendErrorMessage(nil, err)
return err
}
_, err := common.ExecutePreAction(c.connection.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath,
fileSize, os.O_TRUNC)
if err != nil {
c.connection.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
c.connection.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
err = c.connection.GetPermissionDeniedError()
c.sendErrorMessage(fs, err)
return err
@ -249,7 +249,7 @@ func (c *scpCommand) handleUploadFile(fs vfs.Fs, resolvedPath, filePath string,
file, w, cancelFn, err := fs.Create(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
c.connection.Log(logger.LevelError, "error creating file %#v: %v", resolvedPath, err)
c.connection.Log(logger.LevelError, "error creating file %q: %v", resolvedPath, err)
c.sendErrorMessage(fs, err)
return err
}
@ -290,13 +290,13 @@ func (c *scpCommand) handleUpload(uploadFilePath string, sizeToRead int64) error
fs, p, err := c.connection.GetFsAndResolvedPath(uploadFilePath)
if err != nil {
c.connection.Log(logger.LevelError, "error uploading file: %#v, err: %v", uploadFilePath, err)
c.connection.Log(logger.LevelError, "error uploading file: %q, err: %v", uploadFilePath, err)
c.sendErrorMessage(nil, err)
return err
}
if ok, _ := c.connection.User.IsFileAllowed(uploadFilePath); !ok {
c.connection.Log(logger.LevelWarn, "writing file %#v is not allowed", uploadFilePath)
c.connection.Log(logger.LevelWarn, "writing file %q is not allowed", uploadFilePath)
c.sendErrorMessage(fs, c.connection.GetPermissionDeniedError())
return common.ErrPermissionDenied
}
@ -308,7 +308,7 @@ func (c *scpCommand) handleUpload(uploadFilePath string, sizeToRead int64) error
stat, statErr := fs.Lstat(p)
if (statErr == nil && stat.Mode()&os.ModeSymlink != 0) || fs.IsNotExist(statErr) {
if !c.connection.User.HasPerm(dataprovider.PermUpload, path.Dir(uploadFilePath)) {
c.connection.Log(logger.LevelWarn, "cannot upload file: %#v, permission denied", uploadFilePath)
c.connection.Log(logger.LevelWarn, "cannot upload file: %q, permission denied", uploadFilePath)
c.sendErrorMessage(fs, common.ErrPermissionDenied)
return common.ErrPermissionDenied
}
@ -316,20 +316,20 @@ func (c *scpCommand) handleUpload(uploadFilePath string, sizeToRead int64) error
}
if statErr != nil {
c.connection.Log(logger.LevelError, "error performing file stat %#v: %v", p, statErr)
c.connection.Log(logger.LevelError, "error performing file stat %q: %v", p, statErr)
c.sendErrorMessage(fs, statErr)
return statErr
}
if stat.IsDir() {
c.connection.Log(logger.LevelError, "attempted to open a directory for writing to: %#v", p)
err = fmt.Errorf("attempted to open a directory for writing: %#v", p)
c.connection.Log(logger.LevelError, "attempted to open a directory for writing to: %q", p)
err = fmt.Errorf("attempted to open a directory for writing: %q", p)
c.sendErrorMessage(fs, err)
return err
}
if !c.connection.User.HasPerm(dataprovider.PermOverwrite, uploadFilePath) {
c.connection.Log(logger.LevelWarn, "cannot overwrite file: %#v, permission denied", uploadFilePath)
c.connection.Log(logger.LevelWarn, "cannot overwrite file: %q, permission denied", uploadFilePath)
c.sendErrorMessage(fs, common.ErrPermissionDenied)
return common.ErrPermissionDenied
}
@ -337,7 +337,7 @@ func (c *scpCommand) handleUpload(uploadFilePath string, sizeToRead int64) error
if common.Config.IsAtomicUploadEnabled() && fs.IsAtomicUploadSupported() {
_, _, err = fs.Rename(p, filePath)
if err != nil {
c.connection.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %v",
c.connection.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %q, dest: %q, err: %v",
p, filePath, err)
c.sendErrorMessage(fs, err)
return err
@ -381,7 +381,7 @@ func (c *scpCommand) sendDownloadProtocolMessages(virtualDirPath string, stat os
func (c *scpCommand) handleRecursiveDownload(fs vfs.Fs, dirPath, virtualPath string, stat os.FileInfo) error {
var err error
if c.isRecursive() {
c.connection.Log(logger.LevelDebug, "recursive download, dir path %#v virtual path %#v", dirPath, virtualPath)
c.connection.Log(logger.LevelDebug, "recursive download, dir path %q virtual path %q", dirPath, virtualPath)
err = c.sendDownloadProtocolMessages(virtualPath, stat)
if err != nil {
return err
@ -505,14 +505,14 @@ func (c *scpCommand) handleDownload(filePath string) error {
var stat os.FileInfo
if stat, err = fs.Stat(p); err != nil {
c.connection.Log(logger.LevelError, "error downloading file: %#v->%#v, err: %v", filePath, p, err)
c.connection.Log(logger.LevelError, "error downloading file: %q->%q, err: %v", filePath, p, err)
c.sendErrorMessage(fs, err)
return err
}
if stat.IsDir() {
if !c.connection.User.HasPerm(dataprovider.PermDownload, filePath) {
c.connection.Log(logger.LevelWarn, "error downloading dir: %#v, permission denied", filePath)
c.connection.Log(logger.LevelWarn, "error downloading dir: %q, permission denied", filePath)
c.sendErrorMessage(fs, common.ErrPermissionDenied)
return common.ErrPermissionDenied
}
@ -521,26 +521,26 @@ func (c *scpCommand) handleDownload(filePath string) error {
}
if !c.connection.User.HasPerm(dataprovider.PermDownload, path.Dir(filePath)) {
c.connection.Log(logger.LevelWarn, "error downloading dir: %#v, permission denied", filePath)
c.connection.Log(logger.LevelWarn, "error downloading dir: %q, permission denied", filePath)
c.sendErrorMessage(fs, common.ErrPermissionDenied)
return common.ErrPermissionDenied
}
if ok, policy := c.connection.User.IsFileAllowed(filePath); !ok {
c.connection.Log(logger.LevelWarn, "reading file %#v is not allowed", filePath)
c.connection.Log(logger.LevelWarn, "reading file %q is not allowed", filePath)
c.sendErrorMessage(fs, c.connection.GetErrorForDeniedFile(policy))
return common.ErrPermissionDenied
}
if _, err := common.ExecutePreAction(c.connection.BaseConnection, common.OperationPreDownload, p, filePath, 0, 0); err != nil {
c.connection.Log(logger.LevelDebug, "download for file %#v denied by pre action: %v", filePath, err)
c.connection.Log(logger.LevelDebug, "download for file %q denied by pre action: %v", filePath, err)
c.sendErrorMessage(fs, common.ErrPermissionDenied)
return common.ErrPermissionDenied
}
file, r, cancelFn, err := fs.Open(p, 0)
if err != nil {
c.connection.Log(logger.LevelError, "could not open file %#v for reading: %v", p, err)
c.connection.Log(logger.LevelError, "could not open file %q for reading: %v", p, err)
c.sendErrorMessage(fs, err)
return err
}
@ -685,7 +685,7 @@ func (c *scpCommand) getNextUploadProtocolMessage() (string, error) {
func (c *scpCommand) createDir(fs vfs.Fs, dirPath string) error {
err := fs.Mkdir(dirPath)
if err != nil {
c.connection.Log(logger.LevelError, "error creating dir %#v: %v", dirPath, err)
c.connection.Log(logger.LevelError, "error creating dir %q: %v", dirPath, err)
c.sendErrorMessage(fs, err)
return err
}
@ -725,7 +725,7 @@ func (c *scpCommand) parseUploadMessage(fs vfs.Fs, command string) (int64, strin
return size, name, err
}
} else {
err = fmt.Errorf("unable to split upload message: %#v", command)
err = fmt.Errorf("unable to split upload message: %q", command)
c.connection.Log(logger.LevelError, "error: %v", err)
c.sendErrorMessage(fs, err)
return size, name, err

View file

@ -571,7 +571,7 @@ func (c *Configuration) configureKeyboardInteractiveAuth(serverConfig *ssh.Serve
func canAcceptConnection(ip string) bool {
if common.IsBanned(ip, common.ProtocolSSH) {
logger.Log(logger.LevelDebug, common.ProtocolSSH, "", "connection refused, ip %#v is banned", ip)
logger.Log(logger.LevelDebug, common.ProtocolSSH, "", "connection refused, ip %q is banned", ip)
return false
}
if err := common.Connections.IsNewConnectionAllowed(ip, common.ProtocolSSH); err != nil {
@ -592,7 +592,7 @@ func canAcceptConnection(ip string) bool {
func (c *Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.ServerConfig) {
defer func() {
if r := recover(); r != nil {
logger.Error(logSender, "", "panic in AcceptInboundConnection: %#v stack trace: %v", r, string(debug.Stack()))
logger.Error(logSender, "", "panic in AcceptInboundConnection: %q stack trace: %v", r, string(debug.Stack()))
}
}()
@ -710,7 +710,7 @@ func (c *Configuration) AcceptInboundConnection(conn net.Conn, config *ssh.Serve
func (c *Configuration) handleSftpConnection(channel ssh.Channel, connection *Connection) {
defer func() {
if r := recover(); r != nil {
logger.Error(logSender, "", "panic in handleSftpConnection: %#v stack trace: %v", r, string(debug.Stack()))
logger.Error(logSender, "", "panic in handleSftpConnection: %q stack trace: %v", r, string(debug.Stack()))
}
}()
if err := common.Connections.Add(connection); err != nil {
@ -784,37 +784,37 @@ func loginUser(user *dataprovider.User, loginMethod, publicKey string, conn ssh.
connectionID = hex.EncodeToString(conn.SessionID())
}
if !filepath.IsAbs(user.HomeDir) {
logger.Warn(logSender, connectionID, "user %#v has an invalid home dir: %#v. Home dir must be an absolute path, login not allowed",
logger.Warn(logSender, connectionID, "user %q has an invalid home dir: %q. Home dir must be an absolute path, login not allowed",
user.Username, user.HomeDir)
return nil, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
return nil, fmt.Errorf("cannot login user with invalid home dir: %q", user.HomeDir)
}
if util.Contains(user.Filters.DeniedProtocols, common.ProtocolSSH) {
logger.Info(logSender, connectionID, "cannot login user %#v, protocol SSH is not allowed", user.Username)
return nil, fmt.Errorf("protocol SSH is not allowed for user %#v", user.Username)
logger.Info(logSender, connectionID, "cannot login user %q, protocol SSH is not allowed", user.Username)
return nil, fmt.Errorf("protocol SSH is not allowed for user %q", user.Username)
}
if user.MaxSessions > 0 {
activeSessions := common.Connections.GetActiveSessions(user.Username)
if activeSessions >= user.MaxSessions {
logger.Info(logSender, "", "authentication refused for user: %#v, too many open sessions: %v/%v", user.Username,
logger.Info(logSender, "", "authentication refused for user: %q, too many open sessions: %v/%v", user.Username,
activeSessions, user.MaxSessions)
return nil, fmt.Errorf("too many open sessions: %v", activeSessions)
}
}
if !user.IsLoginMethodAllowed(loginMethod, common.ProtocolSSH, conn.PartialSuccessMethods()) {
logger.Info(logSender, connectionID, "cannot login user %#v, login method %#v is not allowed",
logger.Info(logSender, connectionID, "cannot login user %q, login method %q is not allowed",
user.Username, loginMethod)
return nil, fmt.Errorf("login method %#v is not allowed for user %#v", loginMethod, user.Username)
return nil, fmt.Errorf("login method %q is not allowed for user %q", loginMethod, user.Username)
}
if user.MustSetSecondFactorForProtocol(common.ProtocolSSH) {
logger.Info(logSender, connectionID, "cannot login user %#v, second factor authentication is not set",
logger.Info(logSender, connectionID, "cannot login user %q, second factor authentication is not set",
user.Username)
return nil, fmt.Errorf("second factor authentication is not set for user %#v", user.Username)
return nil, fmt.Errorf("second factor authentication is not set for user %q", user.Username)
}
remoteAddr := conn.RemoteAddr().String()
if !user.IsLoginFromAddrAllowed(remoteAddr) {
logger.Info(logSender, connectionID, "cannot login user %#v, remote address is not allowed: %v",
logger.Info(logSender, connectionID, "cannot login user %q, remote address is not allowed: %v",
user.Username, remoteAddr)
return nil, fmt.Errorf("login for user %#v is not allowed from this address: %v", user.Username, remoteAddr)
return nil, fmt.Errorf("login for user %q is not allowed from this address: %v", user.Username, remoteAddr)
}
json, err := json.Marshal(user)
@ -843,8 +843,8 @@ func (c *Configuration) checkSSHCommands() {
if util.Contains(supportedSSHCommands, command) {
sshCommands = append(sshCommands, command)
} else {
logger.Warn(logSender, "", "unsupported ssh command: %#v ignored", command)
logger.WarnToConsole("unsupported ssh command: %#v ignored", command)
logger.Warn(logSender, "", "unsupported ssh command: %q ignored", command)
logger.WarnToConsole("unsupported ssh command: %q ignored", command)
}
}
c.EnabledSSHCommands = sshCommands
@ -860,7 +860,7 @@ func (c *Configuration) checkFolderPrefix() {
}
if c.FolderPrefix != "" {
c.EnabledSSHCommands = nil
logger.Debug(logSender, "", "folder prefix %#v configured, SSH commands are disabled", c.FolderPrefix)
logger.Debug(logSender, "", "folder prefix %q configured, SSH commands are disabled", c.FolderPrefix)
}
}
@ -870,8 +870,8 @@ func (c *Configuration) generateDefaultHostKeys(configDir string) error {
for _, k := range defaultHostKeys {
autoFile := filepath.Join(configDir, k)
if _, err = os.Stat(autoFile); errors.Is(err, fs.ErrNotExist) {
logger.Info(logSender, "", "No host keys configured and %#v does not exist; try to create a new host key", autoFile)
logger.InfoToConsole("No host keys configured and %#v does not exist; try to create a new host key", autoFile)
logger.Info(logSender, "", "No host keys configured and %q does not exist; try to create a new host key", autoFile)
logger.InfoToConsole("No host keys configured and %q does not exist; try to create a new host key", autoFile)
if k == defaultPrivateRSAKeyName {
err = util.GenerateRSAKeys(autoFile)
} else if k == defaultPrivateECDSAKeyName {
@ -880,8 +880,8 @@ func (c *Configuration) generateDefaultHostKeys(configDir string) error {
err = util.GenerateEd25519Keys(autoFile)
}
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", autoFile, err)
logger.WarnToConsole("error creating host key %#v: %v", autoFile, err)
logger.Warn(logSender, "", "error creating host key %q: %v", autoFile, err)
logger.WarnToConsole("error creating host key %q: %v", autoFile, err)
return err
}
}
@ -899,35 +899,35 @@ func (c *Configuration) checkHostKeyAutoGeneration(configDir string) error {
keyName := filepath.Base(k)
switch keyName {
case defaultPrivateRSAKeyName:
logger.Info(logSender, "", "try to create non-existent host key %#v", k)
logger.InfoToConsole("try to create non-existent host key %#v", k)
logger.Info(logSender, "", "try to create non-existent host key %q", k)
logger.InfoToConsole("try to create non-existent host key %q", k)
err = util.GenerateRSAKeys(k)
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", k, err)
logger.WarnToConsole("error creating host key %#v: %v", k, err)
logger.Warn(logSender, "", "error creating host key %q: %v", k, err)
logger.WarnToConsole("error creating host key %q: %v", k, err)
return err
}
case defaultPrivateECDSAKeyName:
logger.Info(logSender, "", "try to create non-existent host key %#v", k)
logger.InfoToConsole("try to create non-existent host key %#v", k)
logger.Info(logSender, "", "try to create non-existent host key %q", k)
logger.InfoToConsole("try to create non-existent host key %q", k)
err = util.GenerateECDSAKeys(k)
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", k, err)
logger.WarnToConsole("error creating host key %#v: %v", k, err)
logger.Warn(logSender, "", "error creating host key %q: %v", k, err)
logger.WarnToConsole("error creating host key %q: %v", k, err)
return err
}
case defaultPrivateEd25519KeyName:
logger.Info(logSender, "", "try to create non-existent host key %#v", k)
logger.InfoToConsole("try to create non-existent host key %#v", k)
logger.Info(logSender, "", "try to create non-existent host key %q", k)
logger.InfoToConsole("try to create non-existent host key %q", k)
err = util.GenerateEd25519Keys(k)
if err != nil {
logger.Warn(logSender, "", "error creating host key %#v: %v", k, err)
logger.WarnToConsole("error creating host key %#v: %v", k, err)
logger.Warn(logSender, "", "error creating host key %q: %v", k, err)
logger.WarnToConsole("error creating host key %q: %v", k, err)
return err
}
default:
logger.Warn(logSender, "", "non-existent host key %#v will not be created", k)
logger.WarnToConsole("non-existent host key %#v will not be created", k)
logger.Warn(logSender, "", "non-existent host key %q will not be created", k)
logger.WarnToConsole("non-existent host key %q will not be created", k)
}
}
}
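
The host key auto-generation above goes through the project's util helpers (GenerateRSAKeys, GenerateECDSAKeys, GenerateEd25519Keys), whose exact output format is not shown in this diff. A rough, hedged equivalent for a single Ed25519 host key using only the standard library and golang.org/x/crypto/ssh (the file name and the PKCS#8/PEM encoding are assumptions):

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// generateEd25519HostKey writes a PKCS#8 PEM private key and the matching
// authorized_keys-format public key next to it.
func generateEd25519HostKey(privPath string) error {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return err
	}
	der, err := x509.MarshalPKCS8PrivateKey(priv)
	if err != nil {
		return err
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})
	if err := os.WriteFile(privPath, pemBytes, 0o600); err != nil {
		return err
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		return err
	}
	return os.WriteFile(privPath+".pub", ssh.MarshalAuthorizedKey(sshPub), 0o644)
}

func main() {
	if err := generateEd25519HostKey("id_ed25519"); err != nil {
		fmt.Println("error creating host key:", err)
		return
	}
	fmt.Println("host key created")
}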
@ -1036,8 +1036,8 @@ func (c *Configuration) loadHostCertificates(configDir string) ([]*ssh.Certifica
for _, certPath := range c.HostCertificates {
certPath = strings.TrimSpace(certPath)
if !util.IsFileInputValid(certPath) {
logger.Warn(logSender, "", "unable to load invalid host certificate %#v", certPath)
logger.WarnToConsole("unable to load invalid host certificate %#v", certPath)
logger.Warn(logSender, "", "unable to load invalid host certificate %q", certPath)
logger.WarnToConsole("unable to load invalid host certificate %q", certPath)
continue
}
if !filepath.IsAbs(certPath) {
@ -1045,18 +1045,18 @@ func (c *Configuration) loadHostCertificates(configDir string) ([]*ssh.Certifica
}
certBytes, err := os.ReadFile(certPath)
if err != nil {
return certs, fmt.Errorf("unable to load host certificate %#v: %w", certPath, err)
return certs, fmt.Errorf("unable to load host certificate %q: %w", certPath, err)
}
parsed, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
if err != nil {
return nil, fmt.Errorf("unable to parse host certificate %#v: %w", certPath, err)
return nil, fmt.Errorf("unable to parse host certificate %q: %w", certPath, err)
}
cert, ok := parsed.(*ssh.Certificate)
if !ok {
return nil, fmt.Errorf("the file %#v is not an SSH certificate", certPath)
return nil, fmt.Errorf("the file %q is not an SSH certificate", certPath)
}
if cert.CertType != ssh.HostCert {
return nil, fmt.Errorf("the file %#v is not an host certificate", certPath)
return nil, fmt.Errorf("the file %q is not an host certificate", certPath)
}
certs = append(certs, cert)
}
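
The host certificate loading above can be reproduced in isolation with golang.org/x/crypto/ssh; a minimal sketch mirroring the same three checks (parseable, an SSH certificate, of host type), with an invented certificate path:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// loadHostCert parses a single host certificate in authorized_keys format.
func loadHostCert(path string) (*ssh.Certificate, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("unable to load host certificate %q: %w", path, err)
	}
	parsed, _, _, _, err := ssh.ParseAuthorizedKey(data)
	if err != nil {
		return nil, fmt.Errorf("unable to parse host certificate %q: %w", path, err)
	}
	cert, ok := parsed.(*ssh.Certificate)
	if !ok {
		return nil, fmt.Errorf("the file %q is not an SSH certificate", path)
	}
	if cert.CertType != ssh.HostCert {
		return nil, fmt.Errorf("the file %q is not a host certificate", path)
	}
	return cert, nil
}

func main() {
	// "id_rsa-cert.pub" is a placeholder path.
	cert, err := loadHostCert("id_rsa-cert.pub")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("host certificate for key ID %q, valid principals: %v\n", cert.KeyId, cert.ValidPrincipals)
}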
@ -1067,8 +1067,8 @@ func (c *Configuration) initializeCertChecker(configDir string) error {
for _, keyPath := range c.TrustedUserCAKeys {
keyPath = strings.TrimSpace(keyPath)
if !util.IsFileInputValid(keyPath) {
logger.Warn(logSender, "", "unable to load invalid trusted user CA key %#v", keyPath)
logger.WarnToConsole("unable to load invalid trusted user CA key %#v", keyPath)
logger.Warn(logSender, "", "unable to load invalid trusted user CA key %q", keyPath)
logger.WarnToConsole("unable to load invalid trusted user CA key %q", keyPath)
continue
}
if !filepath.IsAbs(keyPath) {
@ -1076,14 +1076,14 @@ func (c *Configuration) initializeCertChecker(configDir string) error {
}
keyBytes, err := os.ReadFile(keyPath)
if err != nil {
logger.Warn(logSender, "", "error loading trusted user CA key %#v: %v", keyPath, err)
logger.WarnToConsole("error loading trusted user CA key %#v: %v", keyPath, err)
logger.Warn(logSender, "", "error loading trusted user CA key %q: %v", keyPath, err)
logger.WarnToConsole("error loading trusted user CA key %q: %v", keyPath, err)
return err
}
parsedKey, _, _, _, err := ssh.ParseAuthorizedKey(keyBytes)
if err != nil {
logger.Warn(logSender, "", "error parsing trusted user CA key %#v: %v", keyPath, err)
logger.WarnToConsole("error parsing trusted user CA key %#v: %v", keyPath, err)
logger.Warn(logSender, "", "error parsing trusted user CA key %q: %v", keyPath, err)
logger.WarnToConsole("error parsing trusted user CA key %q: %v", keyPath, err)
return err
}
c.parsedUserCAKeys = append(c.parsedUserCAKeys, parsedKey)
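
The trusted user CA keys parsed above are typically fed into an ssh.CertChecker so that user certificates signed by one of them are accepted; a minimal sketch of that wiring (the helper name and CA key path are illustrative, and the revocation callback is any lookup, for example against the revoked user certificates file handled below):

package main

import (
	"bytes"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// newCertChecker accepts user certificates signed by one of the trusted CA
// keys and rejects those reported as revoked.
func newCertChecker(trustedCAs []ssh.PublicKey, isRevoked func(*ssh.Certificate) bool) *ssh.CertChecker {
	return &ssh.CertChecker{
		IsUserAuthority: func(auth ssh.PublicKey) bool {
			for _, ca := range trustedCAs {
				if bytes.Equal(auth.Marshal(), ca.Marshal()) {
					return true
				}
			}
			return false
		},
		IsRevoked: isRevoked,
	}
}

func main() {
	data, err := os.ReadFile("trusted_user_ca.pub") // placeholder path
	if err != nil {
		fmt.Println(err)
		return
	}
	caKey, _, _, _, err := ssh.ParseAuthorizedKey(data)
	if err != nil {
		fmt.Println(err)
		return
	}
	checker := newCertChecker([]ssh.PublicKey{caKey}, func(*ssh.Certificate) bool { return false })
	// In a real server the checker's Authenticate method would back the
	// ssh.ServerConfig PublicKeyCallback.
	_ = checker
}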
@ -1103,7 +1103,7 @@ func (c *Configuration) initializeCertChecker(configDir string) error {
}
if c.RevokedUserCertsFile != "" {
if !util.IsFileInputValid(c.RevokedUserCertsFile) {
return fmt.Errorf("invalid revoked user certificate: %#v", c.RevokedUserCertsFile)
return fmt.Errorf("invalid revoked user certificate: %q", c.RevokedUserCertsFile)
}
if !filepath.IsAbs(c.RevokedUserCertsFile) {
c.RevokedUserCertsFile = filepath.Join(configDir, c.RevokedUserCertsFile)
@ -1250,24 +1250,24 @@ func (r *revokedCertificates) load() error {
if r.filePath == "" {
return nil
}
logger.Debug(logSender, "", "loading revoked user certificate file %#v", r.filePath)
logger.Debug(logSender, "", "loading revoked user certificate file %q", r.filePath)
info, err := os.Stat(r.filePath)
if err != nil {
return fmt.Errorf("unable to load revoked user certificate file %#v: %w", r.filePath, err)
return fmt.Errorf("unable to load revoked user certificate file %q: %w", r.filePath, err)
}
maxSize := int64(1048576 * 5) // 5MB
if info.Size() > maxSize {
return fmt.Errorf("unable to load revoked user certificate file %#v size too big: %v/%v bytes",
return fmt.Errorf("unable to load revoked user certificate file %q size too big: %v/%v bytes",
r.filePath, info.Size(), maxSize)
}
content, err := os.ReadFile(r.filePath)
if err != nil {
return fmt.Errorf("unable to read revoked user certificate file %#v: %w", r.filePath, err)
return fmt.Errorf("unable to read revoked user certificate file %q: %w", r.filePath, err)
}
var certs []string
err = json.Unmarshal(content, &certs)
if err != nil {
return fmt.Errorf("unable to parse revoked user certificate file %#v: %w", r.filePath, err)
return fmt.Errorf("unable to parse revoked user certificate file %q: %w", r.filePath, err)
}
r.mu.Lock()
@ -1277,7 +1277,7 @@ func (r *revokedCertificates) load() error {
for _, fp := range certs {
r.certs[fp] = true
}
logger.Debug(logSender, "", "revoked user certificate file %#v loaded, entries: %v", r.filePath, len(r.certs))
logger.Debug(logSender, "", "revoked user certificate file %q loaded, entries: %v", r.filePath, len(r.certs))
return nil
}
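
The revoked user certificates file parsed above is, judging from the unmarshal into a []string, a JSON array of certificate fingerprints. A minimal sketch of producing and reloading such a file (the file name and fingerprint values are invented):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Invented entries; the real file holds certificate fingerprints
	// (their exact format is not shown in this diff).
	revoked := []string{
		"SHA256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		"SHA256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
	}
	data, err := json.Marshal(revoked)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	if err := os.WriteFile("revoked_certs.json", data, 0o600); err != nil {
		fmt.Println("write error:", err)
		return
	}

	// Reload the file the same way the loader above does: read it, then
	// unmarshal into a string slice and index it into a set.
	content, err := os.ReadFile("revoked_certs.json")
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	var certs []string
	if err := json.Unmarshal(content, &certs); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	set := make(map[string]bool, len(certs))
	for _, fp := range certs {
		set[fp] = true
	}
	fmt.Printf("revoked user certificate file %q loaded, entries: %v\n", "revoked_certs.json", len(set))
}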

View file

@ -7293,7 +7293,7 @@ func TestHashedPasswords(t *testing.T) {
assert.Equal(t, pwd, user.Password)
user.Password = clearPwd
conn, client, err := getSftpClient(user, usePubKey)
if assert.NoError(t, err, "unable to login with password %#v", pwd) {
if assert.NoError(t, err, "unable to login with password %q", pwd) {
assert.NoError(t, checkBasicSFTP(client))
conn.Close()
client.Close()
@ -7318,7 +7318,7 @@ func TestHashedPasswords(t *testing.T) {
// login should still work
user.Password = clearPwd
conn, client, err = getSftpClient(user, usePubKey)
if assert.NoError(t, err, "unable to login with password %#v", pwd) {
if assert.NoError(t, err, "unable to login with password %q", pwd) {
assert.NoError(t, checkBasicSFTP(client))
conn.Close()
client.Close()
@ -11444,7 +11444,7 @@ func getHostKeysFingerprints(hostKeys []string) {
for _, k := range hostKeys {
fp, err := getHostKeyFingerprint(filepath.Join(configDir, k))
if err != nil {
logger.ErrorToConsole("unable to get fingerprint for host key %#v: %v", k, err)
logger.ErrorToConsole("unable to get fingerprint for host key %q: %v", k, err)
os.Exit(1)
}
hostKeyFPs = append(hostKeyFPs, fp)

View file

@ -117,7 +117,7 @@ func processSSHCommand(payload []byte, connection *Connection, enabledSSHCommand
return true
}
} else {
connection.Log(logger.LevelInfo, "ssh command not enabled/supported: %#v", name)
connection.Log(logger.LevelInfo, "ssh command not enabled/supported: %q", name)
}
}
err := connection.CloseFS()
@ -128,7 +128,7 @@ func processSSHCommand(payload []byte, connection *Connection, enabledSSHCommand
func (c *sshCommand) handle() (err error) {
defer func() {
if r := recover(); r != nil {
logger.Error(logSender, "", "panic in handle ssh command: %#v stack trace: %v", r, string(debug.Stack()))
logger.Error(logSender, "", "panic in handle ssh command: %q stack trace: %v", r, string(debug.Stack()))
err = common.ErrGenericFailure
}
}()
@ -227,7 +227,7 @@ func (c *sshCommand) handleHashCommands() error {
} else {
sshPath := c.getDestPath()
if ok, policy := c.connection.User.IsFileAllowed(sshPath); !ok {
c.connection.Log(logger.LevelInfo, "hash not allowed for file %#v", sshPath)
c.connection.Log(logger.LevelInfo, "hash not allowed for file %q", sshPath)
return c.sendErrorResponse(c.connection.GetErrorForDeniedFile(policy))
}
fs, fsPath, err := c.connection.GetFsAndResolvedPath(sshPath)
@ -278,7 +278,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
}
closeCmdOnError := func() {
c.connection.Log(logger.LevelDebug, "kill cmd: %#v and close ssh channel after read or write error",
c.connection.Log(logger.LevelDebug, "kill cmd: %q and close ssh channel after read or write error",
c.connection.command)
killerr := command.cmd.Process.Kill()
closerr := c.connection.channel.Close()
@ -296,7 +296,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
transfer := newTransfer(baseTransfer, nil, nil, nil)
w, e := transfer.copyFromReaderToWriter(stdin, c.connection.channel)
c.connection.Log(logger.LevelDebug, "command: %#v, copy from remote command to sdtin ended, written: %v, "+
c.connection.Log(logger.LevelDebug, "command: %q, copy from remote command to sdtin ended, written: %v, "+
"initial remaining quota: %v, err: %v", c.connection.command, w, remainingQuotaSize, e)
if e != nil {
once.Do(closeCmdOnError)
@ -309,7 +309,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
transfer := newTransfer(baseTransfer, nil, nil, nil)
w, e := transfer.copyFromReaderToWriter(c.connection.channel, stdout)
c.connection.Log(logger.LevelDebug, "command: %#v, copy from sdtout to remote command ended, written: %v err: %v",
c.connection.Log(logger.LevelDebug, "command: %q, copy from sdtout to remote command ended, written: %v err: %v",
c.connection.command, w, e)
if e != nil {
once.Do(closeCmdOnError)
@ -323,7 +323,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
transfer := newTransfer(baseTransfer, nil, nil, nil)
w, e := transfer.copyFromReaderToWriter(c.connection.channel.(ssh.Channel).Stderr(), stderr)
c.connection.Log(logger.LevelDebug, "command: %#v, copy from sdterr to remote command ended, written: %v err: %v",
c.connection.Log(logger.LevelDebug, "command: %q, copy from sdterr to remote command ended, written: %v err: %v",
c.connection.command, w, e)
// os.ErrClosed means that the command is finished so we don't need to do anything
if (e != nil && !errors.Is(e, os.ErrClosed)) || w > 0 {
@ -339,7 +339,7 @@ func (c *sshCommand) executeSystemCommand(command systemCommand) error {
if errSize == nil {
c.updateQuota(sshDestPath, numFiles-initialFiles, dirSize-initialSize)
}
c.connection.Log(logger.LevelDebug, "command %#v finished for path %#v, initial files %v initial size %v "+
c.connection.Log(logger.LevelDebug, "command %q finished for path %q, initial files %v initial size %v "+
"current files %v current size %v size err: %v", c.connection.command, command.fsPath, initialFiles, initialSize,
numFiles, dirSize, errSize)
return c.connection.GetFsError(command.fs, err)
@ -352,21 +352,21 @@ func (c *sshCommand) isSystemCommandAllowed() error {
return nil
}
if c.connection.User.HasVirtualFoldersInside(sshDestPath) {
c.connection.Log(logger.LevelDebug, "command %#v is not allowed, path %#v has virtual folders inside it, user %#v",
c.connection.Log(logger.LevelDebug, "command %q is not allowed, path %q has virtual folders inside it, user %q",
c.command, sshDestPath, c.connection.User.Username)
return errUnsupportedConfig
}
for _, f := range c.connection.User.Filters.FilePatterns {
if f.Path == sshDestPath {
c.connection.Log(logger.LevelDebug,
"command %#v is not allowed inside folders with file patterns filters %#v user %#v",
"command %q is not allowed inside folders with file patterns filters %q user %q",
c.command, sshDestPath, c.connection.User.Username)
return errUnsupportedConfig
}
if len(sshDestPath) > len(f.Path) {
if strings.HasPrefix(sshDestPath, f.Path+"/") || f.Path == "/" {
c.connection.Log(logger.LevelDebug,
"command %#v is not allowed it includes folders with file patterns filters %#v user %#v",
"command %q is not allowed it includes folders with file patterns filters %q user %q",
c.command, sshDestPath, c.connection.User.Username)
return errUnsupportedConfig
}
@ -374,7 +374,7 @@ func (c *sshCommand) isSystemCommandAllowed() error {
if len(sshDestPath) < len(f.Path) {
if strings.HasPrefix(sshDestPath+"/", f.Path) || sshDestPath == "/" {
c.connection.Log(logger.LevelDebug,
"command %#v is not allowed inside folder with file patterns filters %#v user %#v",
"command %q is not allowed inside folder with file patterns filters %q user %q",
c.command, sshDestPath, c.connection.User.Username)
return errUnsupportedConfig
}
@ -416,7 +416,7 @@ func (c *sshCommand) getSystemCommand() (systemCommand, error) {
}
if strings.HasSuffix(sshPath, "/") && !strings.HasSuffix(fsPath, string(os.PathSeparator)) {
fsPath += string(os.PathSeparator)
c.connection.Log(logger.LevelDebug, "path separator added to fsPath %#v", fsPath)
c.connection.Log(logger.LevelDebug, "path separator added to fsPath %q", fsPath)
}
args = args[:len(args)-1]
args = append(args, fsPath)
@ -441,7 +441,7 @@ func (c *sshCommand) getSystemCommand() (systemCommand, error) {
}
}
}
c.connection.Log(logger.LevelDebug, "new system command %#v, with args: %+v fs path %#v quota check path %#v",
c.connection.Log(logger.LevelDebug, "new system command %q, with args: %+v fs path %q quota check path %q",
c.command, args, fsPath, quotaPath)
cmd := exec.Command(c.command, args...)
uid := c.connection.User.GetUID()
@ -507,13 +507,13 @@ func (c *sshCommand) getSizeForPath(fs vfs.Fs, name string) (int, int64, error)
if fs.IsNotExist(err) {
return 0, 0, nil
}
c.connection.Log(logger.LevelDebug, "unable to stat %#v error: %v", name, err)
c.connection.Log(logger.LevelDebug, "unable to stat %q error: %v", name, err)
return 0, 0, err
}
if fi.IsDir() {
files, size, err := fs.GetDirSize(name)
if err != nil {
c.connection.Log(logger.LevelDebug, "unable to get size for dir %#v error: %v", name, err)
c.connection.Log(logger.LevelDebug, "unable to get size for dir %q error: %v", name, err)
}
return files, size, err
} else if fi.Mode().IsRegular() {
@ -600,7 +600,7 @@ func (c *sshCommand) computeHashForFile(fs vfs.Fs, hasher hash.Hash, path string
func parseCommandPayload(command string) (string, []string, error) {
parts, err := shlex.Split(command)
if err == nil && len(parts) == 0 {
err = fmt.Errorf("invalid command: %#v", command)
err = fmt.Errorf("invalid command: %q", command)
}
if err != nil {
return "", []string{}, err

View file

@ -47,7 +47,7 @@ func FindSharedDataPath(name, searchDir string) string {
res := filepath.Join(basePath, name)
_, err := os.Stat(res)
if err == nil {
logger.Debug(logSender, "", "found share data path for name %#v: %#v", name, res)
logger.Debug(logSender, "", "found share data path for name %q: %q", name, res)
return res
}
}

View file

@ -554,7 +554,7 @@ func HTTPListenAndServe(srv *http.Server, address string, port int, isTLS bool,
if filepath.IsAbs(address) && runtime.GOOS != osWindows {
if !IsFileInputValid(address) {
return fmt.Errorf("invalid socket address %#v", address)
return fmt.Errorf("invalid socket address %q", address)
}
err = createDirPathIfMissing(address, os.ModePerm)
if err != nil {

View file

@ -219,7 +219,7 @@ func (fs *AzureBlobFs) Open(name string, offset int64) (File, *pipeat.PipeReader
blockBlob := fs.containerClient.NewBlockBlobClient(name)
err := fs.handleMultipartDownload(ctx, blockBlob, offset, w)
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %+v", name, w.GetWrittenBytes(), err)
fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %+v", name, w.GetWrittenBytes(), err)
metric.AZTransferCompleted(w.GetWrittenBytes(), 1, err)
}()
@ -257,7 +257,7 @@ func (fs *AzureBlobFs) Create(name string, flag int) (File, *PipeWriter, func(),
err := fs.handleMultipartUpload(ctx, r, blockBlob, &headers, metadata)
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %+v", name, r.GetReadedBytes(), err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %q, readed bytes: %v, err: %+v", name, r.GetReadedBytes(), err)
metric.AZTransferCompleted(r.GetReadedBytes(), 0, err)
}()
@ -284,7 +284,7 @@ func (fs *AzureBlobFs) Remove(name string, isDir bool) error {
return err
}
if hasContents {
return fmt.Errorf("cannot remove non empty directory: %#v", name)
return fmt.Errorf("cannot remove non empty directory: %q", name)
}
}
@ -310,7 +310,7 @@ func (fs *AzureBlobFs) Remove(name string, isDir bool) error {
metric.AZDeleteObjectCompleted(err)
if plugin.Handler.HasMetadater() && err == nil && !isDir {
if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %+v", name, errMetadata)
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %q: %+v", name, errMetadata)
}
}
return err

View file

@ -96,7 +96,7 @@ func (fs *CryptFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt,
if isZeroDownload {
w.CloseWithError(err) //nolint:errcheck
f.Close()
fsLog(fs, logger.LevelDebug, "zero bytes download completed, path: %#v", name)
fsLog(fs, logger.LevelDebug, "zero bytes download completed, path: %q", name)
return
}
var n int64
@ -143,7 +143,7 @@ func (fs *CryptFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt,
}
w.CloseWithError(err) //nolint:errcheck
f.Close()
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %v", name, n, err)
}()
return nil, r, nil, nil
@ -199,7 +199,7 @@ func (fs *CryptFs) Create(name string, flag int) (File, *PipeWriter, func(), err
}
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, n, err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %q, readed bytes: %v, err: %v", name, n, err)
}()
return nil, p, nil, nil

View file

@ -154,7 +154,7 @@ func (fs *GCSFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fu
n, err := io.Copy(w, objectReader)
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %+v", name, n, err)
fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %+v", name, n, err)
metric.GCSTransferCompleted(n, 1, err)
}()
return nil, r, cancelFn, nil
@ -215,7 +215,7 @@ func (fs *GCSFs) Create(name string, flag int) (File, *PipeWriter, func(), error
}
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, acl: %#v, readed bytes: %v, err: %+v",
fsLog(fs, logger.LevelDebug, "upload completed, path: %q, acl: %q, readed bytes: %v, err: %+v",
name, fs.config.ACL, n, err)
metric.GCSTransferCompleted(n, 0, err)
}()
@ -242,7 +242,7 @@ func (fs *GCSFs) Remove(name string, isDir bool) error {
return err
}
if hasContents {
return fmt.Errorf("cannot remove non empty directory: %#v", name)
return fmt.Errorf("cannot remove non empty directory: %q", name)
}
if !strings.HasSuffix(name, "/") {
name += "/"
@ -268,7 +268,7 @@ func (fs *GCSFs) Remove(name string, isDir bool) error {
metric.GCSDeleteObjectCompleted(err)
if plugin.Handler.HasMetadater() && err == nil && !isDir {
if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %+v", name, errMetadata)
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %q: %+v", name, errMetadata)
}
}
return err

View file

@ -264,7 +264,7 @@ func NewHTTPFs(connectionID, localTempDir, mountPath string, config HTTPFsConfig
// Name returns the name for the Fs implementation
func (fs *HTTPFs) Name() string {
return fmt.Sprintf("%v %#v", httpFsName, fs.config.Endpoint)
return fmt.Sprintf("%v %q", httpFsName, fs.config.Endpoint)
}
// ConnectionID returns the connection ID associated to this Fs implementation

View file

@ -325,7 +325,7 @@ func (fs *OsFs) ResolvePath(virtualPath string) (string, error) {
// path chain until we hit a directory that _does_ exist and can be validated.
_, err = fs.findFirstExistingDir(r)
if err != nil {
fsLog(fs, logger.LevelError, "error resolving non-existent path %#v", err)
fsLog(fs, logger.LevelError, "error resolving non-existent path %q", err)
}
return r, err
}

View file

@ -235,7 +235,7 @@ func (fs *S3Fs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, fun
Range: streamRange,
})
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %+v", name, n, err)
fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %+v", name, n, err)
metric.S3TransferCompleted(n, 1, err)
}()
return nil, r, cancelFn, nil
@ -278,7 +278,7 @@ func (fs *S3Fs) Create(name string, flag int) (File, *PipeWriter, func(), error)
})
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, acl: %#v, readed bytes: %v, err: %+v",
fsLog(fs, logger.LevelDebug, "upload completed, path: %q, acl: %q, readed bytes: %v, err: %+v",
name, fs.config.ACL, r.GetReadedBytes(), err)
metric.S3TransferCompleted(r.GetReadedBytes(), 0, err)
}()
@ -305,7 +305,7 @@ func (fs *S3Fs) Remove(name string, isDir bool) error {
return err
}
if hasContents {
return fmt.Errorf("cannot remove non empty directory: %#v", name)
return fmt.Errorf("cannot remove non empty directory: %q", name)
}
if !strings.HasSuffix(name, "/") {
name += "/"
@ -321,7 +321,7 @@ func (fs *S3Fs) Remove(name string, isDir bool) error {
metric.S3DeleteObjectCompleted(err)
if plugin.Handler.HasMetadater() && err == nil && !isDir {
if errMetadata := plugin.Handler.RemoveMetadata(fs.getStorageID(), ensureAbsPath(name)); errMetadata != nil {
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %#v: %+v", name, errMetadata)
fsLog(fs, logger.LevelWarn, "unable to remove metadata for path %q: %+v", name, errMetadata)
}
}
return err
@ -533,7 +533,7 @@ func (fs *S3Fs) getFileNamesInPrefix(fsPrefix string) (map[string]bool, error) {
if err != nil {
metric.S3ListObjectsCompleted(err)
if err != nil {
fsLog(fs, logger.LevelError, "unable to get content for prefix %#v: %+v", prefix, err)
fsLog(fs, logger.LevelError, "unable to get content for prefix %q: %+v", prefix, err)
return nil, err
}
return fileNames, err

View file

@ -367,7 +367,7 @@ func (fs *SFTPFs) Open(name string, offset int64) (File, *pipeat.PipeReaderAt, f
n, err := io.Copy(w, f)
w.CloseWithError(err) //nolint:errcheck
f.Close()
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
fsLog(fs, logger.LevelDebug, "download completed, path: %q size: %v, err: %v", name, n, err)
}()
return nil, r, nil, nil
@ -419,7 +419,7 @@ func (fs *SFTPFs) Create(name string, flag int) (File, *PipeWriter, func(), erro
}
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v err truncate: %v",
fsLog(fs, logger.LevelDebug, "upload completed, path: %q, readed bytes: %v, err: %v err truncate: %v",
name, n, err, errTruncate)
}()
@ -677,7 +677,7 @@ func (fs *SFTPFs) ResolvePath(virtualPath string) (string, error) {
validatedPath, err = fs.getRealPath(fsPath)
isNotExist := fs.IsNotExist(err)
if err != nil && !isNotExist {
fsLog(fs, logger.LevelError, "Invalid path resolution, original path %v resolved %#v err: %v",
fsLog(fs, logger.LevelError, "Invalid path resolution, original path %v resolved %q err: %v",
virtualPath, fsPath, err)
return "", err
} else if isNotExist {
@ -690,13 +690,13 @@ func (fs *SFTPFs) ResolvePath(virtualPath string) (string, error) {
validatedPath, err = fs.getRealPath(validatedPath)
}
if err != nil {
fsLog(fs, logger.LevelError, "Invalid path resolution, dir %#v original path %#v resolved %#v err: %v",
fsLog(fs, logger.LevelError, "Invalid path resolution, dir %q original path %q resolved %q err: %v",
validatedPath, virtualPath, fsPath, err)
return "", err
}
}
if err := fs.isSubDir(validatedPath); err != nil {
fsLog(fs, logger.LevelError, "Invalid path resolution, dir %#v original path %#v resolved %#v err: %v",
fsLog(fs, logger.LevelError, "Invalid path resolution, dir %q original path %q resolved %q err: %v",
validatedPath, virtualPath, fsPath, err)
return "", err
}
@ -762,11 +762,11 @@ func (fs *SFTPFs) isSubDir(name string) error {
return nil
}
if len(name) < len(fs.config.Prefix) {
err := fmt.Errorf("path %q is not inside: %#v", name, fs.config.Prefix)
err := fmt.Errorf("path %q is not inside: %q", name, fs.config.Prefix)
return &pathResolutionError{err: err.Error()}
}
if !strings.HasPrefix(name, fs.config.Prefix+"/") {
err := fmt.Errorf("path %q is not inside: %#v", name, fs.config.Prefix)
err := fmt.Errorf("path %q is not inside: %q", name, fs.config.Prefix)
return &pathResolutionError{err: err.Error()}
}
return nil
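
The containment check above guards against prefix collisions such as /data matching /database by requiring the trailing "/". A minimal standalone version of the same boundary check (the function and error wording are illustrative, not the project's):

package main

import (
	"fmt"
	"path"
	"strings"
)

// isInsidePrefix reports whether name is the prefix itself or a path below
// it; the "/" suffix on the prefix prevents "/database" from matching the
// prefix "/data".
func isInsidePrefix(name, prefix string) error {
	name = path.Clean(name)
	prefix = path.Clean(prefix)
	if name == prefix || prefix == "/" {
		return nil
	}
	if len(name) < len(prefix) || !strings.HasPrefix(name, prefix+"/") {
		return fmt.Errorf("path %q is not inside: %q", name, prefix)
	}
	return nil
}

func main() {
	fmt.Println(isInsidePrefix("/srv/sftp/user1/file.txt", "/srv/sftp/user1"))  // <nil>
	fmt.Println(isInsidePrefix("/srv/sftp/user10/file.txt", "/srv/sftp/user1")) // error
}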

View file

@ -620,7 +620,7 @@ func (c *AzBlobFsConfig) validate() error {
return err
}
if !util.Contains(validAzAccessTier, c.AccessTier) {
return fmt.Errorf("invalid access tier %#v, valid values: \"''%v\"", c.AccessTier, strings.Join(validAzAccessTier, ", "))
return fmt.Errorf("invalid access tier %q, valid values: \"''%v\"", c.AccessTier, strings.Join(validAzAccessTier, ", "))
}
return nil
}
@ -890,19 +890,19 @@ func fsMetadataCheck(fs fsMetadataChecker, storageID, keyPrefix string) error {
continue
}
}
fsLog(fs, logger.LevelDebug, "check metadata for folder %#v", folder)
fsLog(fs, logger.LevelDebug, "check metadata for folder %q", folder)
metadataValues, err := plugin.Handler.GetModificationTimes(storageID, folder)
if err != nil {
fsLog(fs, logger.LevelError, "unable to get modification times for folder %#v: %v", folder, err)
fsLog(fs, logger.LevelError, "unable to get modification times for folder %q: %v", folder, err)
return err
}
if len(metadataValues) == 0 {
fsLog(fs, logger.LevelDebug, "no metadata for folder %#v", folder)
fsLog(fs, logger.LevelDebug, "no metadata for folder %q", folder)
continue
}
fileNames, err := fs.getFileNamesInPrefix(fsPrefix)
if err != nil {
fsLog(fs, logger.LevelError, "unable to get content for prefix %#v: %v", fsPrefix, err)
fsLog(fs, logger.LevelError, "unable to get content for prefix %q: %v", fsPrefix, err)
return err
}
// now check if we have metadata for a missing object

View file

@ -169,12 +169,12 @@ func (f *webDavFile) checkFirstRead() error {
return f.Connection.GetReadQuotaExceededError()
}
if ok, policy := f.Connection.User.IsFileAllowed(f.GetVirtualPath()); !ok {
f.Connection.Log(logger.LevelWarn, "reading file %#v is not allowed", f.GetVirtualPath())
f.Connection.Log(logger.LevelWarn, "reading file %q is not allowed", f.GetVirtualPath())
return f.Connection.GetErrorForDeniedFile(policy)
}
_, err := common.ExecutePreAction(f.Connection, common.OperationPreDownload, f.GetFsPath(), f.GetVirtualPath(), 0, 0)
if err != nil {
f.Connection.Log(logger.LevelDebug, "download for file %#v denied by pre action: %v", f.GetVirtualPath(), err)
f.Connection.Log(logger.LevelDebug, "download for file %q denied by pre action: %v", f.GetVirtualPath(), err)
return f.Connection.GetPermissionDeniedError()
}
f.readTryed.Store(true)

View file

@ -170,7 +170,7 @@ func (c *Connection) getFile(fs vfs.Fs, fsPath, virtualPath string) (webdav.File
func (c *Connection) putFile(fs vfs.Fs, fsPath, virtualPath string) (webdav.File, error) {
if ok, _ := c.User.IsFileAllowed(virtualPath); !ok {
c.Log(logger.LevelWarn, "writing file %#v is not allowed", virtualPath)
c.Log(logger.LevelWarn, "writing file %q is not allowed", virtualPath)
return nil, c.GetPermissionDeniedError()
}
@ -188,13 +188,13 @@ func (c *Connection) putFile(fs vfs.Fs, fsPath, virtualPath string) (webdav.File
}
if statErr != nil {
c.Log(logger.LevelError, "error performing file stat %#v: %+v", fsPath, statErr)
c.Log(logger.LevelError, "error performing file stat %q: %+v", fsPath, statErr)
return nil, c.GetFsError(fs, statErr)
}
// This happens if we upload a file that has the same name as an existing directory
if stat.IsDir() {
c.Log(logger.LevelError, "attempted to open a directory for writing to: %#v", fsPath)
c.Log(logger.LevelError, "attempted to open a directory for writing to: %q", fsPath)
return nil, c.GetOpUnsupportedError()
}
@ -212,12 +212,12 @@ func (c *Connection) handleUploadToNewFile(fs vfs.Fs, resolvedPath, filePath, re
return nil, common.ErrQuotaExceeded
}
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath, 0, 0); err != nil {
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
return nil, c.GetPermissionDeniedError()
}
file, w, cancelFn, err := fs.Create(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
c.Log(logger.LevelError, "error creating file %#v: %+v", resolvedPath, err)
c.Log(logger.LevelError, "error creating file %q: %+v", resolvedPath, err)
return nil, c.GetFsError(fs, err)
}
@ -245,7 +245,7 @@ func (c *Connection) handleUploadToExistingFile(fs vfs.Fs, resolvedPath, filePat
}
if _, err := common.ExecutePreAction(c.BaseConnection, common.OperationPreUpload, resolvedPath, requestPath,
fileSize, os.O_TRUNC); err != nil {
c.Log(logger.LevelDebug, "upload for file %#v denied by pre action: %v", requestPath, err)
c.Log(logger.LevelDebug, "upload for file %q denied by pre action: %v", requestPath, err)
return nil, c.GetPermissionDeniedError()
}
@ -256,7 +256,7 @@ func (c *Connection) handleUploadToExistingFile(fs vfs.Fs, resolvedPath, filePat
if common.Config.IsAtomicUploadEnabled() && fs.IsAtomicUploadSupported() {
_, _, err = fs.Rename(resolvedPath, filePath)
if err != nil {
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %#v, dest: %#v, err: %+v",
c.Log(logger.LevelError, "error renaming existing file for atomic upload, source: %q, dest: %q, err: %+v",
resolvedPath, filePath, err)
return nil, c.GetFsError(fs, err)
}
@ -264,7 +264,7 @@ func (c *Connection) handleUploadToExistingFile(fs vfs.Fs, resolvedPath, filePat
file, w, cancelFn, err := fs.Create(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
c.Log(logger.LevelError, "error creating file %#v: %+v", resolvedPath, err)
c.Log(logger.LevelError, "error creating file %q: %+v", resolvedPath, err)
return nil, c.GetFsError(fs, err)
}
initialSize := int64(0)

View file

@ -358,7 +358,7 @@ func TestUserInvalidParams(t *testing.T) {
_, err = server.validateUser(u, req, dataprovider.LoginMethodPassword)
if assert.Error(t, err) {
assert.EqualError(t, err, fmt.Sprintf("cannot login user with invalid home dir: %#v", u.HomeDir))
assert.EqualError(t, err, fmt.Sprintf("cannot login user with invalid home dir: %q", u.HomeDir))
}
req.TLS = &tls.ConnectionState{}

View file

@ -83,7 +83,7 @@ func (s *webDavServer) listenAndServe(compressor *middleware.Compressor) error {
CipherSuites: util.GetTLSCiphersFromNames(s.binding.TLSCipherSuites),
PreferServerCipherSuites: true,
}
logger.Debug(logSender, "", "configured TLS cipher suites for binding %#v: %v, certID: %v",
logger.Debug(logSender, "", "configured TLS cipher suites for binding %q: %v, certID: %v",
s.binding.GetAddress(), httpServer.TLSConfig.CipherSuites, certID)
if s.binding.isMutualTLSEnabled() {
httpServer.TLSConfig.ClientCAs = certMgr.GetRootCAs()
@ -123,7 +123,7 @@ func (s *webDavServer) verifyTLSConnection(state tls.ConnectionState) error {
caCrt = verifiedChain[len(verifiedChain)-1]
}
if certMgr.IsRevoked(clientCrt, caCrt) {
logger.Debug(logSender, "", "tls handshake error, client certificate %#v has been revoked", clientCrtName)
logger.Debug(logSender, "", "tls handshake error, client certificate %q has been revoked", clientCrtName)
return common.ErrCrtRevoked
}
}
@ -155,7 +155,7 @@ func (s *webDavServer) checkRequestMethod(ctx context.Context, r *http.Request,
func (s *webDavServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer func() {
if r := recover(); r != nil {
logger.Error(logSender, "", "panic in ServeHTTP: %#v stack trace: %v", r, string(debug.Stack()))
logger.Error(logSender, "", "panic in ServeHTTP: %q stack trace: %v", r, string(debug.Stack()))
http.Error(w, common.ErrGenericFailure.Error(), http.StatusInternalServerError)
}
}()
@ -325,23 +325,23 @@ func (s *webDavServer) validateUser(user *dataprovider.User, r *http.Request, lo
connectionID := fmt.Sprintf("%v_%v", common.ProtocolWebDAV, connID)
if !filepath.IsAbs(user.HomeDir) {
logger.Warn(logSender, connectionID, "user %#v has an invalid home dir: %#v. Home dir must be an absolute path, login not allowed",
logger.Warn(logSender, connectionID, "user %q has an invalid home dir: %q. Home dir must be an absolute path, login not allowed",
user.Username, user.HomeDir)
return connID, fmt.Errorf("cannot login user with invalid home dir: %#v", user.HomeDir)
return connID, fmt.Errorf("cannot login user with invalid home dir: %q", user.HomeDir)
}
if util.Contains(user.Filters.DeniedProtocols, common.ProtocolWebDAV) {
logger.Info(logSender, connectionID, "cannot login user %#v, protocol DAV is not allowed", user.Username)
return connID, fmt.Errorf("protocol DAV is not allowed for user %#v", user.Username)
logger.Info(logSender, connectionID, "cannot login user %q, protocol DAV is not allowed", user.Username)
return connID, fmt.Errorf("protocol DAV is not allowed for user %q", user.Username)
}
if !user.IsLoginMethodAllowed(loginMethod, common.ProtocolWebDAV, nil) {
logger.Info(logSender, connectionID, "cannot login user %#v, %v login method is not allowed",
logger.Info(logSender, connectionID, "cannot login user %q, %v login method is not allowed",
user.Username, loginMethod)
return connID, fmt.Errorf("login method %v is not allowed for user %#v", loginMethod, user.Username)
return connID, fmt.Errorf("login method %v is not allowed for user %q", loginMethod, user.Username)
}
if !user.IsLoginFromAddrAllowed(r.RemoteAddr) {
logger.Info(logSender, connectionID, "cannot login user %#v, remote address is not allowed: %v",
logger.Info(logSender, connectionID, "cannot login user %q, remote address is not allowed: %v",
user.Username, r.RemoteAddr)
return connID, fmt.Errorf("login for user %#v is not allowed from this address: %v", user.Username, r.RemoteAddr)
return connID, fmt.Errorf("login for user %q is not allowed from this address: %v", user.Username, r.RemoteAddr)
}
return connID, nil
}