add metrics for Azure Blob storage

This commit is contained in:
Nicola Murino 2020-10-26 19:01:17 +01:00
parent f2acde789d
commit e54828a7b8
GPG key ID: 2F1FB59433D5A8CB
10 changed files with 243 additions and 13 deletions

View file

@@ -234,7 +234,7 @@ advertised via multicast DNS, this
flag allows you to put username/password
inside the advertised TXT record`)
portableCmd.Flags().IntVarP(&portableFsProvider, "fs-provider", "f", int(dataprovider.LocalFilesystemProvider), `0 => local filesystem
1 => Amazon S3 compatible
1 => AWS S3 compatible
2 => Google Cloud Storage
3 => Azure Blob Storage`)
portableCmd.Flags().StringVar(&portableS3Bucket, "s3-bucket", "", "")

View file

@@ -125,7 +125,7 @@ type FilesystemProvider int
// supported values for FilesystemProvider
const (
LocalFilesystemProvider FilesystemProvider = iota // Local
S3FilesystemProvider // Amazon S3 compatible
S3FilesystemProvider // AWS S3 compatible
GCSFilesystemProvider // Google Cloud Storage
AzureBlobFilesystemProvider // Azure Blob Storage
)
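
For context, these integer values are what the `--fs-provider` flag and stored user configurations refer to. A minimal sketch, inside the dataprovider package, of dispatching on the enum; the helper name is illustrative, not part of this commit:

// providerName is a hypothetical helper mapping a FilesystemProvider
// value to the label used in the CLI help and the web admin UI
func providerName(p FilesystemProvider) string {
	switch p {
	case LocalFilesystemProvider:
		return "local filesystem"
	case S3FilesystemProvider:
		return "AWS S3 compatible"
	case GCSFilesystemProvider:
		return "Google Cloud Storage"
	case AzureBlobFilesystemProvider:
		return "Azure Blob Storage"
	default:
		return "unknown"
	}
}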

View file

@@ -208,7 +208,7 @@ Click `Add` and fill the user details, the minimum required parameters are:
- `Password` or `Public keys`
- `Permissions`
- `Home Dir` can be empty since we defined a default base dir
- Select `Amazon S3 (Compatible)` as storage and then set `Bucket`, `Region` and optionally a `Key Prefix` if you want to restrict the user to a specific virtual folder in the bucket. The specified virtual folder does not need to be pre-created. You can leave `Access Key` and `Access Secret` empty since we defined global credentials for the `sftpgo` user and we use this system user to run the SFTPGo service.
- Select `AWS S3 (Compatible)` as storage and then set `Bucket`, `Region` and optionally a `Key Prefix` if you want to restrict the user to a specific virtual folder in the bucket. The specified virtual folder does not need to be pre-created. You can leave `Access Key` and `Access Secret` empty since we defined global credentials for the `sftpgo` user and we use this system user to run the SFTPGo service.
You are done! Now you can connect to your SFTPGo instance using any compatible `sftp` client on port `2022`, as in the sketch below.
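
For example, a minimal Go client sketch using the widely used `golang.org/x/crypto/ssh` and `github.com/pkg/sftp` packages; the host, credentials, and the insecure host key callback are placeholders, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User:            "username",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // verify the host key in real deployments
	}
	conn, err := ssh.Dial("tcp", "localhost:2022", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client, err := sftp.NewClient(conn)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// list the virtual root assigned to the user
	entries, err := client.ReadDir("/")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}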

View file

@@ -48,7 +48,7 @@ Flags:
relative to the current directory
(default ".")
-f, --fs-provider int 0 => local filesystem
1 => Amazon S3 compatible
1 => AWS S3 compatible
2 => Google Cloud Storage
3 => Azure Blob Storage
--ftpd-cert string Path to the certificate file for FTPS

View file

@@ -51,7 +51,7 @@ type GCSFsConfig struct {
// SFTPGoFilesystem defines cloud storage filesystem details
type SFTPGoFilesystem struct {
// 0 local filesystem, 1 Amazon S3 compatible, 2 Google Cloud Storage
// 0 local filesystem, 1 AWS S3 compatible, 2 Google Cloud Storage
Provider int `json:"provider"`
S3Config S3FsConfig `json:"s3config,omitempty"`
GCSConfig GCSFsConfig `json:"gcsconfig,omitempty"`

View file

@@ -306,6 +306,18 @@ var (
Help: "The total number of S3 delete object errors",
})
// totalS3HeadObject is the metric that reports the total successful S3 head object requests
totalS3HeadObject = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_s3_head_object",
Help: "The total number of successful S3 head object requests",
})
// totalS3HeadObjectErrors is the metric that reports the total S3 head object errors
totalS3HeadObjectErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_s3_head_object_errors",
Help: "The total number of S3 head object errors",
})
// totalS3HeadBucket is the metric that reports the total successful S3 head bucket requests
totalS3HeadBucket = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_s3_head_bucket",
@@ -354,7 +366,7 @@ var (
Help: "The total GCS download size as bytes, partial downloads are included",
})
// totalS3ListObjects is the metric that reports the total successful GCS list objects requests
// totalGCSListObjects is the metric that reports the total successful GCS list objects requests
totalGCSListObjects = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_gcs_list_objects",
Help: "The total number of successful GCS list objects requests",
@@ -366,7 +378,7 @@ var (
Help: "The total number of successful GCS copy object requests",
})
// totalGCSDeleteObject is the metric that reports the total successful S3 delete object requests
// totalGCSDeleteObject is the metric that reports the total successful GCS delete object requests
totalGCSDeleteObject = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_gcs_delete_object",
Help: "The total number of successful GCS delete object requests",
@@ -390,6 +402,18 @@ var (
Help: "The total number of GCS delete object errors",
})
// totalGCSHeadObject is the metric that reports the total successful GCS head object requests
totalGCSHeadObject = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_gcs_head_object",
Help: "The total number of successful GCS head object requests",
})
// totalGCSHeadObjectErrors is the metric that reports the total GCS head object errors
totalGCSHeadObjectErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_gcs_head_object_errors",
Help: "The total number of GCS head object errors",
})
// totalGCSHeadBucket is the metric that reports the total successful GCS head bucket requests
totalGCSHeadBucket = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_gcs_head_bucket",
@@ -401,6 +425,102 @@ var (
Name: "sftpgo_gcs_head_bucket_errors",
Help: "The total number of GCS head bucket errors",
})
// totalAZUploads is the metric that reports the total number of successful Azure uploads
totalAZUploads = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_uploads_total",
Help: "The total number of successful Azure uploads",
})
// totalAZDownloads is the metric that reports the total number of successful Azure downloads
totalAZDownloads = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_downloads_total",
Help: "The total number of successful Azure downloads",
})
// totalAZUploadErrors is the metric that reports the total number of Azure upload errors
totalAZUploadErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_upload_errors_total",
Help: "The total number of Azure upload errors",
})
// totalAZDownloadErrors is the metric that reports the total number of Azure download errors
totalAZDownloadErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_download_errors_total",
Help: "The total number of Azure download errors",
})
// totalAZUploadSize is the metric that reports the total Azure uploads size as bytes
totalAZUploadSize = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_upload_size",
Help: "The total Azure upload size as bytes, partial uploads are included",
})
// totalAZDownloadSize is the metric that reports the total Azure downloads size as bytes
totalAZDownloadSize = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_download_size",
Help: "The total Azure download size as bytes, partial downloads are included",
})
// totalAZListObjects is the metric that reports the total successful Azure list objects requests
totalAZListObjects = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_list_objects",
Help: "The total number of successful Azure list objects requests",
})
// totalAZCopyObject is the metric that reports the total successful Azure copy object requests
totalAZCopyObject = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_copy_object",
Help: "The total number of successful Azure copy object requests",
})
// totalAZDeleteObject is the metric that reports the total successful Azure delete object requests
totalAZDeleteObject = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_delete_object",
Help: "The total number of successful Azure delete object requests",
})
// totalAZListObjectsErrors is the metric that reports the total Azure list objects errors
totalAZListObjectsErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_list_objects_errors",
Help: "The total number of Azure list objects errors",
})
// totalAZCopyObjectErrors is the metric that reports the total Azure copy object errors
totalAZCopyObjectErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_copy_object_errors",
Help: "The total number of Azure copy object errors",
})
// totalAZDeleteObjectErrors is the metric that reports the total Azure delete object errors
totalAZDeleteObjectErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_delete_object_errors",
Help: "The total number of Azure delete object errors",
})
// totalAZHeadObject is the metric that reports the total successful Azure head object requests
totalAZHeadObject = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_head_object",
Help: "The total number of successful Azure head object requests",
})
// totalAZHeadObjectErrors is the metric that reports the total Azure head object errors
totalAZHeadObjectErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_head_object_errors",
Help: "The total number of Azure head object errors",
})
// totalAZHeadContainer is the metric that reports the total successful Azure head container requests
totalAZHeadContainer = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_head_container",
Help: "The total number of successful Azure head container requests",
})
// totalAZHeadContainerErrors is the metric that reports the total Azure head container errors
totalAZHeadContainerErrors = promauto.NewCounter(prometheus.CounterOpts{
Name: "sftpgo_az_head_container_errors",
Help: "The total number of Azure head container errors",
})
)
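
All of the counters above are created with `promauto`, so they are registered with the default Prometheus registry as a side effect of package initialization. A minimal sketch of how such counters can be scraped over HTTP; the port and path here are illustrative, not SFTPGo's own configuration:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promauto.NewCounter registers with prometheus.DefaultRegisterer,
	// so the default promhttp handler serves every counter defined above
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":9100", nil) //nolint:errcheck
}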
// AddMetricsEndpoint exposes metrics to the specified endpoint
@@ -477,7 +597,16 @@ func S3DeleteObjectCompleted(err error) {
}
}
// S3HeadBucketCompleted updates metrics after an S3 head bucket request terminates
// S3HeadObjectCompleted updates metrics after an S3 head object request terminates
func S3HeadObjectCompleted(err error) {
if err == nil {
totalS3HeadObject.Inc()
} else {
totalS3HeadObjectErrors.Inc()
}
}
// S3HeadBucketCompleted updates metrics after a S3 head bucket request terminates
func S3HeadBucketCompleted(err error) {
if err == nil {
totalS3HeadBucket.Inc()
@@ -534,6 +663,15 @@ func GCSDeleteObjectCompleted(err error) {
}
}
// GCSHeadObjectCompleted updates metrics after a GCS head object request terminates
func GCSHeadObjectCompleted(err error) {
if err == nil {
totalGCSHeadObject.Inc()
} else {
totalGCSHeadObjectErrors.Inc()
}
}
// GCSHeadBucketCompleted updates metrics after a GCS head bucket request terminates
func GCSHeadBucketCompleted(err error) {
if err == nil {
@@ -543,6 +681,72 @@ func GCSHeadBucketCompleted(err error) {
}
}
// AZTransferCompleted updates metrics after an Azure upload or download
func AZTransferCompleted(bytes int64, transferKind int, err error) {
if transferKind == 0 {
// upload
if err == nil {
totalAZUploads.Inc()
} else {
totalAZUploadErrors.Inc()
}
totalAZUploadSize.Add(float64(bytes))
} else {
// download
if err == nil {
totalAZDownloads.Inc()
} else {
totalAZDownloadErrors.Inc()
}
totalAZDownloadSize.Add(float64(bytes))
}
}
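
A hypothetical call site for the function above, where `n` and `err` come from a transfer that just finished; the real call sites appear in the Azure filesystem changes further down:

// 0 selects the upload counters, any other value the download counters
metrics.AZTransferCompleted(n, 0, err)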
// AZListObjectsCompleted updates metrics after an Azure list objects request terminates
func AZListObjectsCompleted(err error) {
if err == nil {
totalAZListObjects.Inc()
} else {
totalAZListObjectsErrors.Inc()
}
}
// AZCopyObjectCompleted updates metrics after an Azure copy object request terminates
func AZCopyObjectCompleted(err error) {
if err == nil {
totalAZCopyObject.Inc()
} else {
totalAZCopyObjectErrors.Inc()
}
}
// AZDeleteObjectCompleted updates metrics after an Azure delete object request terminates
func AZDeleteObjectCompleted(err error) {
if err == nil {
totalAZDeleteObject.Inc()
} else {
totalAZDeleteObjectErrors.Inc()
}
}
// AZHeadObjectCompleted updates metrics after an Azure head object request terminates
func AZHeadObjectCompleted(err error) {
if err == nil {
totalAZHeadObject.Inc()
} else {
totalAZHeadObjectErrors.Inc()
}
}
// AZHeadContainerCompleted updates metrics after an Azure head container request terminates
func AZHeadContainerCompleted(err error) {
if err == nil {
totalAZHeadContainer.Inc()
} else {
totalAZHeadContainerErrors.Inc()
}
}
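
A minimal sketch of exercising these helpers from a test in the same package, assuming `github.com/prometheus/client_golang/prometheus/testutil` is available; not part of this commit:

package metrics

import (
	"errors"
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestAZHeadContainerMetrics(t *testing.T) {
	before := testutil.ToFloat64(totalAZHeadContainer)
	AZHeadContainerCompleted(nil)
	if got := testutil.ToFloat64(totalAZHeadContainer) - before; got != 1 {
		t.Errorf("success counter: want +1, got %v", got)
	}

	beforeErrs := testutil.ToFloat64(totalAZHeadContainerErrors)
	AZHeadContainerCompleted(errors.New("container not found"))
	if got := testutil.ToFloat64(totalAZHeadContainerErrors) - beforeErrs; got != 1 {
		t.Errorf("error counter: want +1, got %v", got)
	}
}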
// SSHCommandCompleted updates metrics after an SSH command terminates
func SSHCommandCompleted(err error) {
if err == nil {

View file

@@ -276,7 +276,7 @@
<div class="col-sm-10">
<select class="form-control" id="idFilesystem" name="fs_provider" onchange="onFilesystemChanged(this.value)">
<option value="0" {{if eq .User.FsConfig.Provider 0 }}selected{{end}}>local</option>
<option value="1" {{if eq .User.FsConfig.Provider 1 }}selected{{end}}>Amazon S3 (Compatible)</option>
<option value="1" {{if eq .User.FsConfig.Provider 1 }}selected{{end}}>AWS S3 (Compatible)</option>
<option value="2" {{if eq .User.FsConfig.Provider 2 }}selected{{end}}>Google Cloud Storage</option>
<option value="3" {{if eq .User.FsConfig.Provider 3 }}selected{{end}}>Azure Blob Storage</option>
</select>

View file

@@ -23,6 +23,7 @@ import (
"github.com/eikenb/pipeat"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/metrics"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
)
@@ -185,11 +186,13 @@ func (fs AzureBlobFs) Stat(name string) (os.FileInfo, error) {
Prefix: prefix,
})
if err != nil {
metrics.AZListObjectsCompleted(err)
return nil, err
}
marker = listBlob.NextMarker
for _, blobPrefix := range listBlob.Segment.BlobPrefixes {
if fs.isEqual(blobPrefix.Name, name) {
metrics.AZListObjectsCompleted(nil)
return NewFileInfo(name, true, 0, time.Now(), false), nil
}
}
@@ -203,11 +206,13 @@ func (fs AzureBlobFs) Stat(name string) (os.FileInfo, error) {
if blobInfo.Properties.ContentLength != nil {
size = *blobInfo.Properties.ContentLength
}
metrics.AZListObjectsCompleted(nil)
return NewFileInfo(name, isDir, size, blobInfo.Properties.LastModified, false), nil
}
}
}
metrics.AZListObjectsCompleted(nil)
return nil, errors.New("404 no such file or directory")
}
@@ -242,6 +247,7 @@ func (fs AzureBlobFs) Open(name string, offset int64) (*os.File, *pipeat.PipeRea
n, err := io.Copy(w, body)
w.CloseWithError(err) //nolint:errcheck
fsLog(fs, logger.LevelDebug, "download completed, path: %#v size: %v, err: %v", name, n, err)
metrics.AZTransferCompleted(n, 1, err)
}()
return nil, r, cancelFn, nil
@@ -284,6 +290,7 @@ func (fs AzureBlobFs) Create(name string, flag int) (*os.File, *PipeWriter, func
r.CloseWithError(err) //nolint:errcheck
p.Done(err)
fsLog(fs, logger.LevelDebug, "upload completed, path: %#v, readed bytes: %v, err: %v", name, r.GetReadedBytes(), err)
metrics.AZTransferCompleted(r.GetReadedBytes(), 0, err)
}()
return nil, p, cancelFn, nil
@@ -322,6 +329,7 @@ func (fs AzureBlobFs) Rename(source, target string) error {
resp, err := dstBlobURL.StartCopyFromURL(ctx, srcURL, md, mac, bac)
if err != nil {
metrics.AZCopyObjectCompleted(err)
return err
}
copyStatus := resp.CopyStatus()
@@ -335,6 +343,7 @@ func (fs AzureBlobFs) Rename(source, target string) error {
// of them before giving up.
nErrors++
if ctx.Err() != nil || nErrors == 3 {
metrics.AZCopyObjectCompleted(err)
return err
}
} else {
@@ -342,8 +351,11 @@ func (fs AzureBlobFs) Rename(source, target string) error {
}
}
if copyStatus != azblob.CopyStatusSuccess {
return fmt.Errorf("Copy failed with status: %s", copyStatus)
err := fmt.Errorf("Copy failed with status: %s", copyStatus)
metrics.AZCopyObjectCompleted(err)
return err
}
metrics.AZCopyObjectCompleted(nil)
return fs.Remove(source, fi.IsDir())
}
@@ -363,6 +375,7 @@ func (fs AzureBlobFs) Remove(name string, isDir bool) error {
defer cancelFn()
_, err := blobBlockURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
metrics.AZDeleteObjectCompleted(err)
return err
}
@@ -444,6 +457,7 @@ func (fs AzureBlobFs) ReadDir(dirname string) ([]os.FileInfo, error) {
Prefix: prefix,
})
if err != nil {
metrics.AZListObjectsCompleted(err)
return nil, err
}
marker = listBlob.NextMarker
@@ -481,6 +495,7 @@ func (fs AzureBlobFs) ReadDir(dirname string) ([]os.FileInfo, error) {
}
}
metrics.AZListObjectsCompleted(nil)
return result, nil
}
@@ -565,6 +580,7 @@ func (fs AzureBlobFs) ScanRootDirContents() (int, int64, error) {
Prefix: fs.config.KeyPrefix,
})
if err != nil {
metrics.AZListObjectsCompleted(err)
return numFiles, size, err
}
marker = listBlob.NextMarker
@@ -585,6 +601,7 @@ func (fs AzureBlobFs) ScanRootDirContents() (int, int64, error) {
}
}
metrics.AZListObjectsCompleted(nil)
return numFiles, size, nil
}
@@ -644,6 +661,7 @@ func (fs AzureBlobFs) Walk(root string, walkFn filepath.WalkFunc) error {
Prefix: prefix,
})
if err != nil {
metrics.AZListObjectsCompleted(err)
return err
}
marker = listBlob.NextMarker
@@ -667,6 +685,7 @@ func (fs AzureBlobFs) Walk(root string, walkFn filepath.WalkFunc) error {
}
}
metrics.AZListObjectsCompleted(nil)
return walkFn(root, NewFileInfo(root, true, 0, time.Now(), false), nil)
}
@@ -695,6 +714,7 @@ func (fs AzureBlobFs) GetMimeType(name string) (string, error) {
blobBlockURL := fs.containerURL.NewBlockBlobURL(name)
response, err := blobBlockURL.GetProperties(ctx, azblob.BlobAccessConditions{})
metrics.AZHeadObjectCompleted(err)
if err != nil {
return "", err
}
@@ -719,6 +739,7 @@ func (fs *AzureBlobFs) checkIfBucketExists() error {
defer cancelFn()
_, err := fs.containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
metrics.AZHeadContainerCompleted(err)
return err
}
@@ -790,7 +811,6 @@ func (fs *AzureBlobFs) handleMultipartUpload(ctx context.Context, reader io.Read
defer cancelFn()
_, err := blockBlobURL.StageBlock(innerCtx, blockID, bufferReader, azblob.LeaseAccessConditions{}, nil)
pool.releaseBuffer(buf)
if err != nil {
errOnce.Do(func() {
poolError = err
@@ -798,6 +818,7 @@ func (fs *AzureBlobFs) handleMultipartUpload(ctx context.Context, reader io.Read
poolCancel()
})
}
pool.releaseBuffer(buf)
<-guard
}(blockID, buf, n)
}
@@ -840,11 +861,13 @@ type bufferAllocator struct {
sync.Mutex
available [][]byte
bufferSize int
finalized bool
}
func newBufferAllocator(size int) *bufferAllocator {
return &bufferAllocator{
bufferSize: size,
finalized: false,
}
}
@@ -871,7 +894,7 @@ func (b *bufferAllocator) releaseBuffer(buf []byte) {
b.Lock()
defer b.Unlock()
if len(buf) != b.bufferSize {
if b.finalized || len(buf) != b.bufferSize {
return
}
@@ -883,4 +906,5 @@ func (b *bufferAllocator) free() {
defer b.Unlock()
b.available = nil
b.finalized = true
}
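
The new finalized flag closes a race in which a worker goroutine could hand a buffer back after free() has already discarded the pool, resurrecting it in available. A sketch of the sequence it guards against, assuming getBuffer is the allocator's acquire method from the surrounding file:

pool := newBufferAllocator(1024 * 1024)
buf := pool.getBuffer()
go func() {
	// ... stage a block using buf ...
	pool.releaseBuffer(buf) // no-op once finalized is set by free()
}()
pool.free() // may run before the goroutine returns its buffer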

View file

@@ -610,6 +610,7 @@ func (fs GCSFs) GetMimeType(name string) (string, error) {
bkt := fs.svc.Bucket(fs.config.Bucket)
obj := bkt.Object(name)
attrs, err := obj.Attrs(ctx)
metrics.GCSHeadObjectCompleted(err)
if err != nil {
return "", err
}

View file

@@ -27,7 +27,7 @@ import (
"github.com/drakkan/sftpgo/version"
)
// S3Fs is a Fs implementation for Amazon S3 compatible object storage.
// S3Fs is a Fs implementation for AWS S3 compatible object storage.
type S3Fs struct {
connectionID string
localTempDir string
@@ -598,6 +598,7 @@ func (fs S3Fs) GetMimeType(name string) (string, error) {
Bucket: aws.String(fs.config.Bucket),
Key: aws.String(name),
})
metrics.S3HeadObjectCompleted(err)
if err != nil {
return "", err
}