// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// What is this module called?
module syncEngine ;
// What does this module require to function?
import core.stdc.stdlib : EXIT_SUCCESS , EXIT_FAILURE , exit ;
import core.thread ;
import core.time ;
// 2018-04-23 02:58:47 +02:00 (stray VCS timestamp artifact — not code)
import std.algorithm ;
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
import std.array ;
import std.concurrency ;
// 2019-01-28 18:54:03 +01:00 (stray VCS timestamp artifact — not code)
import std.conv ;
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
import std.datetime ;
// 2019-05-13 13:52:49 +02:00 (stray VCS timestamp artifact — not code)
import std.encoding ;
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
import std.exception ;
import std.file ;
import std.json ;
import std.parallelism ;
import std.path ;
import std.range ;
import std.regex ;
import std.stdio ;
import std.string ;
import std.uni ;
import std.uri ;
import std.utf ;
// 2019-01-05 19:35:55 +01:00 (stray VCS timestamp artifact — not code)
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// What other modules that we have created do we need to import?
import config ;
import log ;
import util ;
import onedrive ;
import itemdb ;
import clientSideFiltering ;
import progress ;
// 2019-07-13 07:42:35 +02:00 (stray VCS timestamp artifact — not code)
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// Exception raised when a local path and an online path differ only by character case.
// Such a 'case-insensitive match' violates the Microsoft OneDrive API namespace convention,
// so the item cannot be safely created or matched online.
class posixException: Exception {
	// localTargetName:  the local filesystem path element involved in the clash
	// remoteTargetName: the online (OneDrive) path element it case-insensitively matches
	@safe pure this(string localTargetName, string remoteTargetName) {
		// Build a descriptive message identifying both sides of the POSIX case clash
		string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", localTargetName, remoteTargetName);
		super(msg);
	}
}
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// Generic synchronisation failure, carrying the standard Exception file/line context.
class SyncException: Exception {
	// msg:  description of the sync failure
	// file: source file where the exception was raised (defaults to the caller's file)
	// line: source line where the exception was raised (defaults to the caller's line)
	@nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) {
		super(msg, file, line);
	}
}
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
class SyncEngine {
	// Class Variables
	ApplicationConfig appConfig;
	OneDriveApi oneDriveApiInstance;
	ItemDatabase itemDB;
	ClientSideFiltering selectiveSync;
	
	// Array of directory databaseItem.id to skip while applying the changes.
	// These are the 'parent path' ids that are being excluded, so if the parent id is in here, the child needs to be skipped as well
	string[] skippedItems;
	// Array of [driveId, itemId] pairs to delete after the changes have been downloaded
	string[2][] idsToDelete;
	// Array of JSON items which are files or directories that are not 'root', skipped or to be deleted, that need to be processed
	JSONValue[] jsonItemsToProcess;
	// Array of JSON items which are files that are not 'root', skipped or to be deleted, that need to be downloaded
	JSONValue[] fileJSONItemsToDownload;
	// Array of paths that failed to download
	string[] fileDownloadFailures;
	// Array of all OneDrive driveId's that have been seen
	string[] driveIDsArray;
	// List of items we fake created when using --dry-run
	string[2][] idsFaked;
	// List of paths we fake deleted when using --dry-run
	string[] pathFakeDeletedArray;
	// Array of database Parent Item ID, Item ID & Local Path where the content has changed and needs to be uploaded
	string[3][] databaseItemsWhereContentHasChanged;
	// Array of local file paths that need to be uploaded as new items to OneDrive
	string[] newLocalFilesToUploadToOneDrive;
	// Array of local file paths that failed to be uploaded to OneDrive
	string[] fileUploadFailures;
	// List of path names changed online, but not changed locally when using --dry-run
	string[] pathsRenamed;
	// List of paths that were a POSIX case-insensitive match, thus could not be created online
	string[] posixViolationPaths;
	// List of local paths that still exist locally and online after the OneDrive Business Shared Folders
	// feature was used and then disabled; these local paths need to be skipped
	string[] businessSharedFoldersOnlineToSkip;
	
	// Flag that there were upload or download failures listed
	bool syncFailures = false;
	// Is sync_list configured
	bool syncListConfigured = false;
	// Was --dry-run used?
	bool dryRun = false;
	// Was --upload-only used?
	bool uploadOnly = false;
	// Was --remove-source-files used?
	// Flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive
	bool localDeleteAfterUpload = false;
	// Do we configure to disable the download validation routine due to --disable-download-validation
	bool disableDownloadValidation = false;
	// Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only
	bool cleanupLocalFiles = false;
	// Are we performing a --single-directory sync?
	bool singleDirectoryScope = false;
	string singleDirectoryScopeDriveId;
	string singleDirectoryScopeItemId;
	// Is National Cloud Deployments configured?
	bool nationalCloudDeployment = false;
	// Do we configure not to perform a remote file delete if --upload-only & --no-remote-delete configured
	bool noRemoteDelete = false;
	// Is bypass_data_preservation set via config file
	// Local data loss MAY occur in this scenario
	bool bypassDataPreservation = false;
	// Maximum file size upload
	//  https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us
	//  July 2020, maximum file size for all accounts is 100GB
	//  January 2021, maximum file size for all accounts is 250GB
	ulong maxUploadFileSize = 268435456000; // 250GB
	// Threshold after which files will be uploaded using an upload session
	ulong sessionThresholdFileSize = 4 * 2^^20; // 4 MiB
	// File size limit for file operations that the user has configured
	ulong fileSizeLimit;
	// Total data to upload
	ulong totalDataToUpload;
	// How many items have been processed for the active operation
	ulong processedCount;
// Configure this class instance from the application configuration, database and
// selective-sync (client side filtering) objects supplied by the caller.
this(ApplicationConfig appConfig, ItemDatabase itemDB, ClientSideFiltering selectiveSync) {
	// Configure the class variable to consume the application configuration
	this.appConfig = appConfig;
	// Configure the class variable to consume the database configuration
	this.itemDB = itemDB;
	// Configure the class variable to consume the selective sync (skip_dir, skip_file and sync_list) configuration
	this.selectiveSync = selectiveSync;
	
	// Configure the dryRun flag to capture if --dry-run was used
	// Application startup already flagged we are also in a --dry-run state, so no need to output anything else here
	this.dryRun = appConfig.getValueBool("dry_run");
	
	// Configure file size limit: skip_size is configured in MiB, convert to bytes
	if (appConfig.getValueLong("skip_size") != 0) {
		fileSizeLimit = appConfig.getValueLong("skip_size") * 2^^20;
		// Guard against a computed limit of zero; zero would otherwise mean 'skip everything'
		fileSizeLimit = (fileSizeLimit == 0) ? ulong.max : fileSizeLimit;
	}
	
	// Is there a sync_list file present?
	if (exists(appConfig.syncListFilePath)) this.syncListConfigured = true;
	
	// Configure the uploadOnly flag to capture if --upload-only was used
	if (appConfig.getValueBool("upload_only")) {
		log.vdebug("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured");
		this.uploadOnly = appConfig.getValueBool("upload_only");
	}
	
	// Configure the localDeleteAfterUpload flag
	if (appConfig.getValueBool("remove_source_files")) {
		log.vdebug("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured");
		this.localDeleteAfterUpload = appConfig.getValueBool("remove_source_files");
	}
	
	// Configure the disableDownloadValidation flag
	if (appConfig.getValueBool("disable_download_validation")) {
		log.vdebug("Configuring disableDownloadValidation flag to TRUE as --disable-download-validation passed in or configured");
		this.disableDownloadValidation = appConfig.getValueBool("disable_download_validation");
	}
	
	// Do we configure to clean up local files if using --download-only?
	if ((appConfig.getValueBool("download_only")) && (appConfig.getValueBool("cleanup_local_files"))) {
		// --download-only and --cleanup-local-files were passed in
		log.log("WARNING: Application has been configured to cleanup local files that are not present online.");
		log.log("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally.");
		// Set the flag
		this.cleanupLocalFiles = true;
	}
	
	// Do we configure to NOT perform a remote delete if --upload-only & --no-remote-delete configured?
	if ((appConfig.getValueBool("upload_only")) && (appConfig.getValueBool("no_remote_delete"))) {
		// --upload-only and --no-remote-delete were passed in
		log.log("WARNING: Application has been configured NOT to cleanup remote files that are deleted locally.");
		// Set the flag
		this.noRemoteDelete = true;
	}
	
	// Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children?
	if (appConfig.getValueBool("force_children_scan")) {
		log.log("Forcing client to use /children API call rather than /delta API to retrieve objects from the OneDrive API");
		this.nationalCloudDeployment = true;
	}
	
	// Are we forcing the client to bypass any data preservation techniques to NOT rename any local files if there is a conflict?
	// The enabling of this function could lead to data loss
	if (appConfig.getValueBool("bypass_data_preservation")) {
		log.log("WARNING: Application has been configured to bypass local data preservation in the event of file conflict.");
		log.log("WARNING: Local data loss MAY occur in this scenario.");
		this.bypassDataPreservation = true;
	}
	
	// Did the user configure a specific rate limit for the application?
	if (appConfig.getValueLong("rate_limit") > 0) {
		// User configured rate limit
		log.log("User Configured Rate Limit: ", appConfig.getValueLong("rate_limit"));
		// If user provided rate limit is < 131072, flag that this is too low, setting to the minimum of 131072
		if (appConfig.getValueLong("rate_limit") < 131072) {
			// user provided limit too low
			log.log("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. Overriding to default minimum of 131072 (128KB/s)");
			appConfig.setValueLong("rate_limit", 131072);
		}
	}
	
	// Did the user downgrade all HTTP operations to force HTTP 1.1
	if (appConfig.getValueBool("force_http_11")) {
		// User is forcing downgrade to curl to use HTTP 1.1 for all operations
		log.vlog("Downgrading all HTTP operations to HTTP/1.1 due to user configuration");
	} else {
		// Use curl defaults
		log.vdebug("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)");
	}
}
// 2020-06-16 23:57:14 +02:00 (stray VCS timestamp artifact — not code)
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// Initialise the Sync Engine class.
// Creates and initialises the OneDrive API instance, then queries the account's
// default drive and root details. Returns true on success; exits the application
// if the API could not be initialised.
bool initialise() {
	// create a new instance of the OneDrive API
	oneDriveApiInstance = new OneDriveApi(appConfig);
	if (oneDriveApiInstance.initialise()) {
		log.log("Sync Engine Initialised with new Onedrive API instance");
		// Get the default account drive details
		getDefaultDriveDetails();
		getDefaultRootDetails();
		displaySyncEngineDetails();
	} else {
		// API could not be initialised
		log.error("OneDrive API could not be initialised");
		// NOTE(review): exit(-1) yields process status 255; EXIT_FAILURE (1) would be more
		// conventional, but is kept as-is to preserve the observable exit code
		exit(-1);
	}
	return true;
}
// 2022-05-31 21:57:05 +02:00 (stray VCS timestamp artifact — not code)
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// Get Default Drive Details for this Account.
// Queries the OneDrive API for the account's default drive, retrying transient HTTP
// errors (408/429/503/504), and records the account type, default drive id and
// remaining quota into appConfig.
void getDefaultDriveDetails() {
	// Function variables
	JSONValue defaultOneDriveDriveDetails;
	
	// Get Default Drive Details for this Account
	try {
		log.vdebug("Getting Account Default Drive Details");
		defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails();
	} catch (OneDriveException exception) {
		log.vdebug("defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails() generated a OneDriveException");
		string thisFunctionName = getFunctionName!({});
		
		if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) {
			// Handle the 400 | 401 error
			handleClientUnauthorised(exception.httpStatusCode, exception.msg);
		}
		
		// HTTP request returned status code 408,429,503,504
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// Handle the 429
			if (exception.httpStatusCode == 429) {
				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
				handleOneDriveThrottleRequest(oneDriveApiInstance);
				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
			}
			// re-try the specific changes queries
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// 408 - Request Time Out
				// 503 - Service Unavailable
				// 504 - Gateway Timeout
				// Transient error - try again in 30 seconds
				auto errorArray = splitLines(exception.msg);
				log.log(errorArray[0], " when attempting to query Account Default Drive Details - retrying applicable request in 30 seconds");
				log.vdebug("defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails() previously threw an error - retrying");
				// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
				log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
				Thread.sleep(dur!"seconds"(30));
			}
			// re-try original request via a recursive call to this function
			log.vdebug("Retrying Function: getDefaultDriveDetails()");
			getDefaultDriveDetails();
			// The recursive call performed all required processing; do not fall through
			// and re-process the still-empty local JSON value
			return;
		} else {
			// Default operation if not 408,429,503,504 errors
			// display what the error is
			displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
		}
	}
	
	// If the JSON response is a correct JSON object, and has an 'id' we can set these details
	if ((defaultOneDriveDriveDetails.type() == JSONType.object) && (hasId(defaultOneDriveDriveDetails))) {
		log.vdebug("OneDrive Account Default Drive Details: ", defaultOneDriveDriveDetails);
		appConfig.accountType = defaultOneDriveDriveDetails["driveType"].str;
		appConfig.defaultDriveId = defaultOneDriveDriveDetails["id"].str;
		
		// Get the initial remaining size from OneDrive API response JSON
		// This will be updated as we upload data to OneDrive
		if (hasQuota(defaultOneDriveDriveDetails)) {
			if ("remaining" in defaultOneDriveDriveDetails["quota"]) {
				// use the value provided
				appConfig.remainingFreeSpace = defaultOneDriveDriveDetails["quota"]["remaining"].integer;
			}
		}
		
		// In some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero
		if (appConfig.remainingFreeSpace <= 0) {
			// free space is <= 0 .. why?
			// Guard the 'quota' object access with hasQuota() so a JSON response without a
			// 'quota' object cannot throw a JSONException here
			if ((hasQuota(defaultOneDriveDriveDetails)) && ("remaining" in defaultOneDriveDriveDetails["quota"])) {
				if (appConfig.accountType == "personal") {
					// zero space available
					log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online.");
					appConfig.quotaAvailable = false;
				} else {
					// zero space available is being reported, maybe being restricted?
					log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.");
					appConfig.quotaRestricted = true;
				}
			} else {
				// json response was missing a 'remaining' value
				if (appConfig.accountType == "personal") {
					log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online.");
					appConfig.quotaAvailable = false;
				} else {
					// quota details not available
					log.error("ERROR: OneDrive quota information is being restricted. Please fix by speaking to your OneDrive / Office 365 Administrator.");
					appConfig.quotaRestricted = true;
				}
			}
		}
		
		// What did we set based on the data from the JSON
		log.vdebug("appConfig.accountType = ", appConfig.accountType);
		log.vdebug("appConfig.defaultDriveId = ", appConfig.defaultDriveId);
		log.vdebug("appConfig.remainingFreeSpace = ", appConfig.remainingFreeSpace);
		log.vdebug("appConfig.quotaAvailable = ", appConfig.quotaAvailable);
		log.vdebug("appConfig.quotaRestricted = ", appConfig.quotaRestricted);
		
		// DEVELOPMENT SUPPORT HALT
		if (appConfig.accountType == "documentLibrary") {
			writeln("ERROR: SharePoint Account Type is not yet supported. Only 'Personal' and 'Business' account types have been validated and tested at this time ... sorry!");
		}
		
		// Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database
		// Keep the driveIDsArray with unique entries only
		if (!canFind(driveIDsArray, appConfig.defaultDriveId)) {
			// Add this drive id to the array to search with
			driveIDsArray ~= appConfig.defaultDriveId;
		}
	} else {
		// Handle the invalid JSON response
		invalidJSONResponseFromOneDriveAPI();
	}
}
// Get Default Root Details for this Account.
// Queries the OneDrive API for the account's default root item, retrying transient
// HTTP errors (408/429/503/504), records the default root id into appConfig and
// saves the root item into the local database.
void getDefaultRootDetails() {
	// Function variables
	JSONValue defaultOneDriveRootDetails;
	
	// Get Default Root Details for this Account
	try {
		log.vdebug("Getting Account Default Root Details");
		defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails();
	} catch (OneDriveException exception) {
		log.vdebug("defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails() generated a OneDriveException");
		string thisFunctionName = getFunctionName!({});
		
		if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) {
			// Handle the 400 | 401 error
			handleClientUnauthorised(exception.httpStatusCode, exception.msg);
		}
		
		// HTTP request returned status code 408,429,503,504
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// Handle the 429
			if (exception.httpStatusCode == 429) {
				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
				handleOneDriveThrottleRequest(oneDriveApiInstance);
				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
			}
			// re-try the specific changes queries
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// 408 - Request Time Out
				// 503 - Service Unavailable
				// 504 - Gateway Timeout
				// Transient error - try again in 30 seconds
				auto errorArray = splitLines(exception.msg);
				log.log(errorArray[0], " when attempting to query Account Default Root Details - retrying applicable request in 30 seconds");
				log.vdebug("defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails() previously threw an error - retrying");
				// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
				log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
				Thread.sleep(dur!"seconds"(30));
			}
			// re-try original request via a recursive call to this function
			log.vdebug("Retrying Function: getDefaultRootDetails()");
			getDefaultRootDetails();
			// The recursive call performed all required processing; do not fall through
			// and re-process the still-empty local JSON value
			return;
		} else {
			// Default operation if not 408,429,503,504 errors
			// display what the error is
			displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
		}
	}
	
	// If the JSON response is a correct JSON object, and has an 'id' we can set these details
	if ((defaultOneDriveRootDetails.type() == JSONType.object) && (hasId(defaultOneDriveRootDetails))) {
		log.vdebug("OneDrive Account Default Root Details: ", defaultOneDriveRootDetails);
		appConfig.defaultRootId = defaultOneDriveRootDetails["id"].str;
		log.vdebug("appConfig.defaultRootId = ", appConfig.defaultRootId);
		// Save the item to the database, so the account root drive is always going to be present in the DB
		saveItem(defaultOneDriveRootDetails);
	} else {
		// Handle the invalid JSON response
		invalidJSONResponseFromOneDriveAPI();
	}
}
// Perform a sync of the OneDrive Account
// - Query /delta
//	- If singleDirectoryScope or nationalCloudDeployment is used we need to generate a /delta like response
// - Process changes (add, changes, moves, deletes)
// - Process any items to add (download data to local)
// - Detail any files that we failed to download
// - Process any deletes (remove local data)
// - Walk local file system for any differences (new files / data to upload to OneDrive)
void syncOneDriveAccountToLocalDisk(bool performFullScanTrueUp = false) {
	// performFullScanTrueUp value
	log.vdebug("performFullScanTrueUp: ", performFullScanTrueUp);
	
	// Fetch the API response of /delta to track changes on OneDrive
	fetchOneDriveDeltaAPIResponse(null, null, null, performFullScanTrueUp);
	
	// Process any download activities or cleanup actions
	processDownloadActivities();
	
	// If singleDirectoryScope is false, we are not targeting a single directory
	// but if true, the target 'could' be a shared folder - so dont try and scan it again
	if (!singleDirectoryScope) {
		// OneDrive Shared Folder Handling
		if (appConfig.accountType == "personal") {
			// Personal Account Type
			// https://github.com/OneDrive/onedrive-api-docs/issues/764
			
			// Get the Remote Items from the Database
			Item[] remoteItems = itemDB.selectRemoteItems();
			foreach (remoteItem; remoteItems) {
				// Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty
				if (appConfig.getValueString("skip_dir") != "") {
					// The path that needs to be checked needs to include the '/'
					// This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched
					if (selectiveSync.isDirNameExcluded(remoteItem.name)) {
						// This directory name is excluded
						log.vlog("Skipping item - excluded by skip_dir config: ", remoteItem.name);
						continue;
					}
				}
				
				// Directory name is not excluded or skip_dir is not populated
				if (!appConfig.surpressLoggingOutput) {
					log.log("Syncing this OneDrive Personal Shared Folder: ", remoteItem.name);
				}
				// Check this OneDrive Personal Shared Folder for changes
				fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name, performFullScanTrueUp);
				// Process any download activities or cleanup actions for this OneDrive Personal Shared Folder
				processDownloadActivities();
			}
		} else {
			// Is this a Business Account with Sync Business Shared Items enabled?
			if ((appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items"))) {
				// Business Account Shared Items Handling
				// - OneDrive Business Shared Folder
				// - OneDrive Business Shared Files ??
				// - SharePoint Links
				
				// Get the Remote Items from the Database
				Item[] remoteItems = itemDB.selectRemoteItems();
				foreach (remoteItem; remoteItems) {
					// Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty
					if (appConfig.getValueString("skip_dir") != "") {
						// The path that needs to be checked needs to include the '/'
						// This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched
						if (selectiveSync.isDirNameExcluded(remoteItem.name)) {
							// This directory name is excluded
							log.vlog("Skipping item - excluded by skip_dir config: ", remoteItem.name);
							continue;
						}
					}
					
					// Directory name is not excluded or skip_dir is not populated
					if (!appConfig.surpressLoggingOutput) {
						log.log("Syncing this OneDrive Business Shared Folder: ", remoteItem.name);
					}
					log.vdebug("Fetching /delta API response for:");
					log.vdebug(" remoteItem.remoteDriveId: ", remoteItem.remoteDriveId);
					log.vdebug(" remoteItem.remoteId: ", remoteItem.remoteId);
					// Check this OneDrive Business Shared Folder for changes
					fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name, performFullScanTrueUp);
					// Process any download activities or cleanup actions for this OneDrive Business Shared Folder
					processDownloadActivities();
				}
			}
		}
	}
}
// 2020-01-26 22:42:00 +01:00 (stray VCS timestamp artifact — not code)
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// Configure singleDirectoryScope = true if this function is called
// By default, singleDirectoryScope = false
// Searches OneDrive online for the given normalised path (creating it if missing) and
// records the resolved drive id and item id for later scoped sync operations.
// Exits the application if the online lookup did not return a valid JSON object.
void setSingleDirectoryScope(string normalisedSingleDirectoryPath) {
	// Function variables
	Item searchItem;
	JSONValue onlinePathData;
	
	// Set the main flag
	singleDirectoryScope = true;
	
	// What are we doing?
	log.log("The OneDrive Client was asked to search for this directory online and create it if it's not located: ", normalisedSingleDirectoryPath);
	
	// Query the OneDrive API for the specified path online
	// In a --single-directory scenario, we need to traverse the entire path that we are wanting to sync
	// and then check the path element does it exist online, if it does, is it a POSIX match, or if it does not, create the path
	// Once we have searched online, we have the right drive id and item id so that we can downgrade the sync status, then build up
	// any object items from that location
	// This is because, in a --single-directory scenario, any folder in the entire path tree could be a 'case-insensitive match'
	try {
		onlinePathData = queryOneDriveForSpecificPathAndCreateIfMissing(normalisedSingleDirectoryPath, true);
	} catch (posixException e) {
		displayPosixErrorMessage(e.msg);
		log.error("ERROR: Requested directory to search for and potentially create has a 'case-insensitive match' to an existing directory on OneDrive online.");
	}
	
	// Was a valid JSON response provided?
	if (onlinePathData.type() == JSONType.object) {
		// Valid JSON item was returned
		searchItem = makeItem(onlinePathData);
		log.vdebug("searchItem: ", searchItem);
		
		// Is this item a potential Shared Folder?
		// Is this JSON a remote object
		if (isItemRemote(onlinePathData)) {
			// The path we are seeking is remote to our account drive id
			searchItem.driveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str;
			searchItem.id = onlinePathData["remoteItem"]["id"].str;
		}
		
		// Set these items so that these can be used as required
		singleDirectoryScopeDriveId = searchItem.driveId;
		singleDirectoryScopeItemId = searchItem.id;
	} else {
		log.error("\nThe requested --single-directory path to sync has generated an error. Please correct this error and try again.\n");
		exit(EXIT_FAILURE);
	}
}
// 2023-08-27 01:35:51 +02:00 (stray VCS timestamp artifact — not code)
// Query OneDrive API for /delta changes and iterate through items online
// 2023-09-02 04:31:16 +02:00 (stray VCS timestamp artifact — not code)
void fetchOneDriveDeltaAPIResponse ( string driveIdToQuery = null , string itemIdToQuery = null , string sharedFolderName = null , bool performFullScanTrueUp = false ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
string deltaLink = null ;
string deltaLinkAvailable ;
JSONValue deltaChanges ;
ulong responseBundleCount ;
2023-09-10 06:53:31 +02:00
ulong jsonItemsReceived = 0 ;
2023-09-12 07:31:59 +02:00
bool generateSimulatedDeltaResponse = false ;
2023-09-10 06:53:31 +02:00
// Reset jsonItemsToProcess & processedCount
jsonItemsToProcess = [ ] ;
processedCount = 0 ;
2023-08-27 01:35:51 +02:00
// Was a driveId provided as an input
if ( driveIdToQuery . empty ) {
// No provided driveId to query, use the account default
driveIdToQuery = appConfig . defaultDriveId ;
2020-06-27 11:10:37 +02:00
}
2023-08-27 01:35:51 +02:00
// Was an itemId provided as an input
if ( itemIdToQuery . empty ) {
// No provided itemId to query, use the account default
itemIdToQuery = appConfig . defaultRootId ;
}
// What OneDrive API query do we use?
// - Are we running against a National Cloud Deployments that does not support /delta ?
// National Cloud Deployments do not support /delta as a query
// https://docs.microsoft.com/en-us/graph/deployments#supported-features
//
// - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory
2023-09-12 07:31:59 +02:00
//
// - Are we performing a --download-only --cleanup-local-files action?
// - If we are, and we use a normal /delta query, we get all the local 'deleted' objects as well.
// - If the user deletes a folder online, then replaces it online, we download the deletion events and process the new 'upload' via the web iterface ..
// the net effect of this, is that the valid local files we want to keep, are actually deleted ...... not desirable
if ( ( singleDirectoryScope ) | | ( nationalCloudDeployment ) | | ( cleanupLocalFiles ) ) {
// Generate a simulated /delta response so that we correctly capture the current online state, less any 'online' delete and replace activity
2023-08-27 01:35:51 +02:00
generateSimulatedDeltaResponse = true ;
}
// What /delta query do we use?
if ( ! generateSimulatedDeltaResponse ) {
// This should be the majority default pathway application use
// Get the current delta link from the database for this DriveID and RootID
deltaLinkAvailable = itemDB . getDeltaLink ( driveIdToQuery , itemIdToQuery ) ;
if ( ! deltaLinkAvailable . empty ) {
log . vdebug ( "Using stored deltaLink" ) ;
deltaLink = deltaLinkAvailable ;
2021-11-16 09:48:44 +01:00
}
2023-09-02 04:27:10 +02:00
// Do we need to perform a 'performFullScanTrueUp' ?
if ( performFullScanTrueUp ) {
log . log ( "Performing a full scan of online data to ensure consistent local state" ) ;
deltaLink = null ;
}
2023-08-27 01:35:51 +02:00
// Dynamic output for a non-verbose run so that the user knows something is happening
if ( log . verbose < = 1 ) {
2023-09-02 04:27:10 +02:00
if ( ! appConfig . surpressLoggingOutput ) {
2023-09-27 00:42:45 +02:00
log . fileOnly ( "Fetching items from the OneDrive API for Drive ID: " , driveIdToQuery ) ;
2023-09-02 04:27:10 +02:00
write ( "Fetching items from the OneDrive API for Drive ID: " , driveIdToQuery , " ." ) ;
}
2023-08-27 01:35:51 +02:00
} else {
log . vdebug ( "Fetching /delta response from the OneDrive API for driveId: " , driveIdToQuery ) ;
}
2020-10-26 07:23:24 +01:00
2023-08-27 01:35:51 +02:00
for ( ; ; ) {
responseBundleCount + + ;
// Get the /delta changes via the OneDrive API
2023-09-12 00:00:50 +02:00
log . vdebug ( "" ) ;
log . vdebug ( "driveIdToQuery: " , driveIdToQuery ) ;
log . vdebug ( "itemIdToQuery: " , itemIdToQuery ) ;
log . vdebug ( "deltaLink: " , deltaLink ) ;
// getDeltaChangesByItemId has the re-try logic for transient errors
2023-08-27 01:35:51 +02:00
deltaChanges = getDeltaChangesByItemId ( driveIdToQuery , itemIdToQuery , deltaLink ) ;
2023-09-12 00:00:50 +02:00
// If deltaChanges is an invalid JSON object, must exit
if ( deltaChanges . type ( ) ! = JSONType . object ) {
// Handle the invalid JSON response
invalidJSONResponseFromOneDriveAPI ( ) ;
}
2023-08-27 01:35:51 +02:00
ulong nrChanges = count ( deltaChanges [ "value" ] . array ) ;
int changeCount = 0 ;
if ( log . verbose < = 1 ) {
// Dynamic output for a non-verbose run so that the user knows something is happening
2023-09-02 04:27:10 +02:00
if ( ! appConfig . surpressLoggingOutput ) {
write ( "." ) ;
}
2023-08-27 01:35:51 +02:00
} else {
log . vdebug ( "API Response Bundle: " , responseBundleCount , " - Quantity of 'changes|items' in this bundle to process: " , nrChanges ) ;
2020-06-27 11:10:37 +02:00
}
2023-08-27 01:35:51 +02:00
jsonItemsReceived = jsonItemsReceived + nrChanges ;
// This means we are most likely processing 200+ items at the same time as the OneDrive API bundles the JSON items
// into 200+ bundle lots and there is zero way to configure or change this
// The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed
foreach ( onedriveJSONItem ; deltaChanges [ "value" ] . array ) {
// increment change count for this item
changeCount + + ;
// Process the OneDrive object item JSON
processDeltaJSONItem ( onedriveJSONItem , nrChanges , changeCount , responseBundleCount , singleDirectoryScope ) ;
2021-11-16 09:48:44 +01:00
}
2023-08-27 01:35:51 +02:00
// The response may contain either @odata.deltaLink or @odata.nextLink
if ( "@odata.deltaLink" in deltaChanges ) {
deltaLink = deltaChanges [ "@odata.deltaLink" ] . str ;
log . vdebug ( "Setting next deltaLink to (@odata.deltaLink): " , deltaLink ) ;
2021-11-16 09:48:44 +01:00
}
2023-08-27 01:35:51 +02:00
// Update the deltaLink in the database so that we can reuse this
if ( ! deltaLink . empty ) {
log . vdebug ( "Updating completed deltaLink in DB to: " , deltaLink ) ;
itemDB . setDeltaLink ( driveIdToQuery , itemIdToQuery , deltaLink ) ;
2021-11-16 09:48:44 +01:00
}
2023-08-27 01:35:51 +02:00
// Update deltaLink to next changeSet bundle
if ( "@odata.nextLink" in deltaChanges ) {
deltaLink = deltaChanges [ "@odata.nextLink" ] . str ;
// Update deltaLinkAvailable to next changeSet bundle to quantify how many changes we have to process
deltaLinkAvailable = deltaChanges [ "@odata.nextLink" ] . str ;
log . vdebug ( "Setting next deltaLink & deltaLinkAvailable to (@odata.nextLink): " , deltaLink ) ;
2020-06-27 11:10:37 +02:00
}
2023-08-27 01:35:51 +02:00
else break ;
2020-06-27 11:10:37 +02:00
}
2023-08-27 01:35:51 +02:00
2023-09-24 03:07:26 +02:00
// To finish off the JSON processing items, this is needed to reflect this in the log
log . vdebug ( "------------------------------------------------------------------" ) ;
2023-08-27 01:35:51 +02:00
// Log that we have finished querying the /delta API
if ( log . verbose < = 1 ) {
2023-09-02 04:27:10 +02:00
if ( ! appConfig . surpressLoggingOutput ) {
write ( "\n" ) ;
}
2021-07-06 10:11:53 +02:00
} else {
2023-09-24 03:07:26 +02:00
log . vdebug ( "Finished processing /delta JSON response from the OneDrive API" ) ;
2018-03-14 05:43:40 +01:00
}
2023-08-27 01:35:51 +02:00
} else {
// We have to generate our own /delta response
// Log what we are doing so that the user knows something is happening
2023-09-02 05:06:37 +02:00
if ( ! appConfig . surpressLoggingOutput ) {
log . log ( "Generating a /delta compatible JSON response from the OneDrive API ..." ) ;
}
2018-09-24 21:25:40 +02:00
2023-09-27 00:42:45 +02:00
// Why are are generating a /delta response
log . vdebug ( "Why are we generating a /delta response:" ) ;
log . vdebug ( " singleDirectoryScope: " , singleDirectoryScope ) ;
log . vdebug ( " nationalCloudDeployment: " , nationalCloudDeployment ) ;
log . vdebug ( " cleanupLocalFiles: " , cleanupLocalFiles ) ;
2023-08-27 01:35:51 +02:00
// What 'path' are we going to start generating the response for
string pathToQuery ;
// If --single-directory has been called, use the value that has been set
if ( singleDirectoryScope ) {
pathToQuery = appConfig . getValueString ( "single_directory" ) ;
2020-03-19 20:12:47 +01:00
}
2023-08-27 01:35:51 +02:00
// We could also be syncing a Shared Folder of some description
if ( ! sharedFolderName . empty ) {
pathToQuery = sharedFolderName ;
2018-09-24 21:25:40 +02:00
}
2018-11-21 21:03:04 +01:00
2023-08-27 01:35:51 +02:00
// Generate the simulated /delta response
//
// The generated /delta response however contains zero deleted JSON items, so the only way that we can track this, is if the object was in sync
// we have the object in the database, thus, what we need to do is for every DB object in the tree of items, flag 'syncStatus' as 'N', then when we process
// the returned JSON items from the API, we flag the item as back in sync, then we can cleanup any out-of-sync items
//
// The flagging of the local database items to 'N' is handled within the generateDeltaResponse() function
//
// When these JSON items are then processed, if the item exists online, and is in the DB, and that the values match, the DB item is flipped back to 'Y'
// This then allows the application to look for any remaining 'N' values, and delete these as no longer needed locally
deltaChanges = generateDeltaResponse ( pathToQuery ) ;
2023-09-21 21:34:42 +02:00
2023-08-27 01:35:51 +02:00
ulong nrChanges = count ( deltaChanges [ "value" ] . array ) ;
int changeCount = 0 ;
log . vdebug ( "API Response Bundle: " , responseBundleCount , " - Quantity of 'changes|items' in this bundle to process: " , nrChanges ) ;
jsonItemsReceived = jsonItemsReceived + nrChanges ;
2019-08-24 08:26:08 +02:00
2023-08-27 01:35:51 +02:00
// The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed
foreach ( onedriveJSONItem ; deltaChanges [ "value" ] . array ) {
// increment change count for this item
changeCount + + ;
// Process the OneDrive object item JSON
processDeltaJSONItem ( onedriveJSONItem , nrChanges , changeCount , responseBundleCount , singleDirectoryScope ) ;
2019-08-24 08:26:08 +02:00
}
2023-09-24 03:07:26 +02:00
// To finish off the JSON processing items, this is needed to reflect this in the log
log . vdebug ( "------------------------------------------------------------------" ) ;
2023-08-27 01:35:51 +02:00
// Log that we have finished generating our self generated /delta response
2023-09-02 05:06:37 +02:00
if ( ! appConfig . surpressLoggingOutput ) {
log . log ( "Finished processing self generated /delta JSON response from the OneDrive API" ) ;
}
2018-03-14 05:43:40 +01:00
}
2023-08-27 01:35:51 +02:00
// We have JSON items received from the OneDrive API
log . vdebug ( "Number of JSON Objects received from OneDrive API: " , jsonItemsReceived ) ;
log . vdebug ( "Number of JSON Objects already processed (root and deleted items): " , ( jsonItemsReceived - jsonItemsToProcess . length ) ) ;
// We should have now at least processed all the JSON items as returned by the /delta call
// Additionally, we should have a new array, that now contains all the JSON items we need to process that are non 'root' or deleted items
log . vdebug ( "Number of JSON items to process is: " , jsonItemsToProcess . length ) ;
// Lets deal with the JSON items in a batch process
ulong batchSize = 500 ;
ulong batchCount = ( jsonItemsToProcess . length + batchSize - 1 ) / batchSize ;
ulong batchesProcessed = 0 ;
if ( log . verbose = = 0 ) {
// Dynamic output for a non-verbose run so that the user knows something is happening
2023-09-02 04:27:10 +02:00
if ( ! appConfig . surpressLoggingOutput ) {
log . log ( "Processing changes and items received from OneDrive ..." ) ;
}
2023-08-27 01:35:51 +02:00
}
foreach ( batchOfJSONItems ; jsonItemsToProcess . chunks ( batchSize ) ) {
// Chunk the total items to process into 500 lot items
batchesProcessed + + ;
log . vlog ( "Processing OneDrive JSON item batch [" , batchesProcessed , "/" , batchCount , "] to ensure consistent local state" ) ;
2023-09-26 21:16:05 +02:00
processJSONItemsInBatch ( batchOfJSONItems , batchesProcessed , batchCount ) ;
2023-09-24 03:07:26 +02:00
// To finish off the JSON processing items, this is needed to reflect this in the log
log . vdebug ( "------------------------------------------------------------------" ) ;
2023-08-27 01:35:51 +02:00
}
log . vdebug ( "Number of JSON items to process is: " , jsonItemsToProcess . length ) ;
log . vdebug ( "Number of JSON items processed was: " , processedCount ) ;
2023-09-10 06:53:31 +02:00
// Free up memory and items processed as it is pointless now having this data around
jsonItemsToProcess = [ ] ;
2023-08-27 01:35:51 +02:00
// Keep the driveIDsArray with unique entries only
if ( ! canFind ( driveIDsArray , driveIdToQuery ) ) {
// Add this driveId to the array of driveId's we know about
driveIDsArray ~ = driveIdToQuery ;
}
2018-03-14 05:43:40 +01:00
}
2023-08-27 01:35:51 +02:00
// Process a single JSON item from the /delta API response.
//
// Classifies the item as a 'root' item, a deleted item, or a normal change:
// root/deleted/parentless items are handled immediately via
// processRootAndDeletedJSONItems(); everything else is appended to
// jsonItemsToProcess for later batch processing. For business accounts with
// sync_business_shared_items enabled, the item's 'name' may be rewritten from
// the local database record before queueing.
//
// Params:
//   onedriveJSONItem    = the raw JSON item from the API response
//   nrChanges           = total number of items in this response bundle (for logging)
//   changeCount         = this item's position within the bundle (for logging)
//   responseBundleCount = which response bundle this item came from (for logging)
//   singleDirectoryScope = when true, a matching root-like item is treated as 'root'
void processDeltaJSONItem(JSONValue onedriveJSONItem, ulong nrChanges, int changeCount, ulong responseBundleCount, bool singleDirectoryScope) {
	
	// Variables for this foreach loop
	string thisItemId;
	bool itemIsRoot = false;
	bool handleItemAsRootObject = false;
	bool itemIsDeletedOnline = false;
	bool itemHasParentReferenceId = false;
	bool itemIdMatchesDefaultRootId = false;
	bool itemNameExplicitMatchRoot = false;
	string objectParentDriveId;
	// NOTE: an unused local 'itemHasParentReferencePath' was removed here - it was never read or assigned
	
	log.vdebug("------------------------------------------------------------------");
	log.vdebug("Processing OneDrive Item ", changeCount, " of ", nrChanges, " from API Response Bundle ", responseBundleCount);
	log.vdebug("Raw JSON OneDrive Item: ", onedriveJSONItem);
	
	// What is this item's id
	thisItemId = onedriveJSONItem["id"].str;
	// Is this a deleted item - only calculate this once
	itemIsDeletedOnline = isItemDeleted(onedriveJSONItem);
	
	if (!itemIsDeletedOnline) {
		// This is not a deleted item
		log.vdebug("This item is not a OneDrive deletion change");
		// Only calculate this once
		itemIsRoot = isItemRoot(onedriveJSONItem);
		itemHasParentReferenceId = hasParentReferenceId(onedriveJSONItem);
		itemIdMatchesDefaultRootId = (thisItemId == appConfig.defaultRootId);
		itemNameExplicitMatchRoot = (onedriveJSONItem["name"].str == "root");
		objectParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
		
		// Test is this is the OneDrive Users Root?
		// Debug output of change evaluation items
		log.vdebug("defaultRootId = ", appConfig.defaultRootId);
		log.vdebug("'search id' = ", thisItemId);
		log.vdebug("id == defaultRootId = ", itemIdMatchesDefaultRootId);
		log.vdebug("isItemRoot(onedriveJSONItem) = ", itemIsRoot);
		log.vdebug("onedriveJSONItem['name'].str == 'root' = ", itemNameExplicitMatchRoot);
		log.vdebug("itemHasParentReferenceId = ", itemHasParentReferenceId);
		
		if ((itemIdMatchesDefaultRootId || singleDirectoryScope) && itemIsRoot && itemNameExplicitMatchRoot) {
			// This IS a OneDrive Root item or should be classified as such in the case of 'singleDirectoryScope'
			log.vdebug("JSON item will flagged as a 'root' item");
			handleItemAsRootObject = true;
		}
	}
	
	// How do we handle this JSON item from the OneDrive API?
	// Is this a confirmed 'root' item, has no Parent ID, or is a Deleted Item
	if (handleItemAsRootObject || !itemHasParentReferenceId || itemIsDeletedOnline) {
		// Is a root item, has no id in parentReference or is a OneDrive deleted item
		log.vdebug("objectParentDriveId = ", objectParentDriveId);
		log.vdebug("handleItemAsRootObject = ", handleItemAsRootObject);
		log.vdebug("itemHasParentReferenceId = ", itemHasParentReferenceId);
		log.vdebug("itemIsDeletedOnline = ", itemIsDeletedOnline);
		log.vdebug("Handling change immediately as 'root item', or has no parent reference id or is a deleted item");
		// OK ... do something with this JSON post here ....
		processRootAndDeletedJSONItems(onedriveJSONItem, objectParentDriveId, handleItemAsRootObject, itemIsDeletedOnline, itemHasParentReferenceId);
	} else {
		// Do we need to update this RAW JSON from OneDrive?
		if ((objectParentDriveId != appConfig.defaultDriveId) && (appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items"))) {
			// Potentially need to update this JSON data
			log.vdebug("Potentially need to update this source JSON .... need to check the database");
			// Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id
			Item remoteDBItem;
			itemDB.selectByRemoteId(objectParentDriveId, thisItemId, remoteDBItem);
			
			// Is the data that was returned from the database what we are looking for?
			if ((remoteDBItem.remoteDriveId == objectParentDriveId) && (remoteDBItem.remoteId == thisItemId)) {
				// Yes, this is the record we are looking for
				log.vdebug("DB Item response for remoteDBItem: ", remoteDBItem);
				
				// Must compare remoteDBItem.name with remoteItem.name
				if (remoteDBItem.name != onedriveJSONItem["name"].str) {
					// Update JSON Item - the locally renamed shared folder name takes precedence
					string actualOnlineName = onedriveJSONItem["name"].str;
					log.vdebug("Updating source JSON 'name' to that which is the actual local directory");
					log.vdebug("onedriveJSONItem['name'] was: ", onedriveJSONItem["name"].str);
					log.vdebug("Updating onedriveJSONItem['name'] to: ", remoteDBItem.name);
					onedriveJSONItem["name"] = remoteDBItem.name;
					log.vdebug("onedriveJSONItem['name'] now: ", onedriveJSONItem["name"].str);
					// Add the original name to the JSON
					onedriveJSONItem["actualOnlineName"] = actualOnlineName;
				}
			}
		}
		
		// Add this JSON item for further processing
		log.vdebug("Adding this Raw JSON OneDrive Item to jsonItemsToProcess array for further processing");
		jsonItemsToProcess ~= onedriveJSONItem;
	}
}
// Process 'root' and 'deleted' OneDrive JSON items.
//
// Deleted items: if previously seen (present in the local database) they are
// queued in idsToDelete for local removal, otherwise recorded in skippedItems.
// Non-deleted items: saved to the database only when confirmed as a 'root'
// object or when they carry no parentReference id.
//
// Params:
//   onedriveJSONItem         = the raw JSON item from the API response
//   driveId                  = the parent drive id of this object
//   handleItemAsRootObject   = caller has classified this item as a 'root' object
//   itemIsDeletedOnline      = the item is a deletion event
//   itemHasParentReferenceId = the item carries a parentReference id
void processRootAndDeletedJSONItems(JSONValue onedriveJSONItem, string driveId, bool handleItemAsRootObject, bool itemIsDeletedOnline, bool itemHasParentReferenceId) {
	
	if (itemIsDeletedOnline) {
		// Change is to delete an item
		log.vdebug("Handing a OneDrive Deleted Item");
		
		// Use the JSON elements rather can computing a DB struct via makeItem()
		string deletedItemId = onedriveJSONItem["id"].str;
		string deletedItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
		
		// Has this item been seen before?
		Item existingDatabaseItem;
		if (itemDB.selectById(deletedItemDriveId, deletedItemId, existingDatabaseItem)) {
			// Known locally - flag for local deletion
			log.vdebug("Flagging to delete item locally: ", onedriveJSONItem);
			idsToDelete ~= [deletedItemDriveId, deletedItemId];
		} else {
			// Never synced - nothing local to remove, flag to ignore
			log.vdebug("Flagging item to skip: ", onedriveJSONItem);
			skippedItems ~= deletedItemId;
		}
	} else {
		// The JSON item should be considered a 'root' item if:
		// 1. Contains a ["root"] element
		// 2. Has no ["parentReference"]["id"] ... #323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change
		// 2. Has no ["parentReference"]["path"]
		// 3. Was detected by an input flag as to be handled as a root item regardless of actual status
		if (handleItemAsRootObject || !itemHasParentReferenceId) {
			log.vdebug("Handing JSON object as OneDrive 'root' object");
			saveItem(onedriveJSONItem);
		}
	}
}
// Process each of the elements contained in jsonItemsToProcess[]
2023-09-26 21:16:05 +02:00
void processJSONItemsInBatch ( JSONValue [ ] array , ulong batchGroup , ulong batchCount ) {
2023-09-24 03:07:26 +02:00
ulong batchElementCount = array . length ;
2023-08-27 01:35:51 +02:00
foreach ( i , onedriveJSONItem ; array . enumerate ) {
// Use the JSON elements rather can computing a DB struct via makeItem()
2023-09-24 03:07:26 +02:00
ulong elementCount = i + 1 ;
// To show this is the processing for this particular item, start off with this breaker line
log . vdebug ( "------------------------------------------------------------------" ) ;
2023-09-26 21:16:05 +02:00
log . vdebug ( "Processing OneDrive JSON item " , elementCount , " of " , batchElementCount , " as part of JSON Item Batch " , batchGroup , " of " , batchCount ) ;
log . vdebug ( "Raw JSON OneDrive Item: " , onedriveJSONItem ) ;
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
string thisItemId = onedriveJSONItem [ "id" ] . str ;
string thisItemDriveId = onedriveJSONItem [ "parentReference" ] [ "driveId" ] . str ;
string thisItemParentId = onedriveJSONItem [ "parentReference" ] [ "id" ] . str ;
string thisItemName = onedriveJSONItem [ "name" ] . str ;
// Create an empty item struct for an existing DB item
Item existingDatabaseItem ;
// Do we NOT want this item?
bool unwanted = false ; // meaning by default we will WANT this item
// Is this parent is in the database
bool parentInDatabase = false ;
// What is the path of the new item
string newItemPath ;
2023-09-21 21:34:42 +02:00
// Configure the remoteItem - so if it is used, it can be utilised later
Item remoteItem ;
2023-08-27 01:35:51 +02:00
// Check the database for an existing entry for this JSON item
bool existingDBEntry = itemDB . selectById ( thisItemDriveId , thisItemId , existingDatabaseItem ) ;
// Calculate if the Parent Item is in the database so that it can be re-used
parentInDatabase = itemDB . idInLocalDatabase ( thisItemDriveId , thisItemParentId ) ;
// Calculate the path of this JSON item, but we can only do this if the parent is in the database
if ( parentInDatabase ) {
// Calculate this items path
newItemPath = computeItemPath ( thisItemDriveId , thisItemParentId ) ~ "/" ~ thisItemName ;
log . vdebug ( "New Item calculated full path is: " , newItemPath ) ;
} else {
// Parent not in the database
// Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us?
log . vdebug ( "Parent ID is not in DB .. " ) ;
// Why?
if ( thisItemDriveId = = appConfig . defaultDriveId ) {
// Flagging as unwanted
log . vdebug ( "Flagging as unwanted: thisItemDriveId (" , thisItemDriveId , "), thisItemParentId (" , thisItemParentId , ") not in local database" ) ;
if ( skippedItems . find ( thisItemParentId ) . length ! = 0 ) {
log . vdebug ( "Reason: thisItemParentId listed within skippedItems" ) ;
2020-08-08 00:56:00 +02:00
}
2023-08-27 01:35:51 +02:00
unwanted = true ;
2020-08-08 00:56:00 +02:00
} else {
2023-08-27 01:35:51 +02:00
// Edge case as the parent (from another users OneDrive account) will never be in the database - potentially a shared object?
2023-09-21 21:34:42 +02:00
log . vdebug ( "Potential Shared Object Item: " , onedriveJSONItem ) ;
2023-08-27 01:35:51 +02:00
// Format the OneDrive change into a consumable object for the database
2023-09-21 21:34:42 +02:00
remoteItem = makeItem ( onedriveJSONItem ) ;
log . vdebug ( "The reported parentId is not in the database. This potentially is a shared folder as 'remoteItem.driveId' != 'appConfig.defaultDriveId'. Relevant Details: remoteItem.driveId (" , remoteItem . driveId , "), remoteItem.parentId (" , remoteItem . parentId , ")" ) ;
2023-08-27 01:35:51 +02:00
if ( appConfig . accountType = = "personal" ) {
2023-09-21 21:34:42 +02:00
// Personal Account Handling
// Ensure that this item has no parent
log . vdebug ( "Setting remoteItem.parentId to be null" ) ;
2023-08-27 01:35:51 +02:00
remoteItem . parentId = null ;
2023-09-21 21:34:42 +02:00
// Add this record to the local database
log . vdebug ( "Update/Insert local database with remoteItem details with remoteItem.parentId as null: " , remoteItem ) ;
itemDB . upsert ( remoteItem ) ;
2023-08-27 01:35:51 +02:00
} else {
2023-09-21 21:34:42 +02:00
// Business or SharePoint Account Handling
log . vdebug ( "Handling a Business or SharePoint Shared Item JSON object" ) ;
if ( appConfig . accountType = = "business" ) {
// Create a DB Tie Record for this parent object
Item parentItem ;
parentItem . driveId = onedriveJSONItem [ "parentReference" ] [ "driveId" ] . str ;
parentItem . id = onedriveJSONItem [ "parentReference" ] [ "id" ] . str ;
parentItem . name = "root" ;
parentItem . type = ItemType . dir ;
parentItem . mtime = remoteItem . mtime ;
parentItem . parentId = null ;
// Add this parent record to the local database
log . vdebug ( "Insert local database with remoteItem parent details: " , parentItem ) ;
itemDB . upsert ( parentItem ) ;
// Ensure that this item has no parent
log . vdebug ( "Setting remoteItem.parentId to be null" ) ;
remoteItem . parentId = null ;
// Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id
Item remoteDBItem ;
itemDB . selectByRemoteId ( remoteItem . driveId , remoteItem . id , remoteDBItem ) ;
// Must compare remoteDBItem.name with remoteItem.name
if ( ( ! remoteDBItem . name . empty ) & & ( remoteDBItem . name ! = remoteItem . name ) ) {
// Update DB Item
log . vdebug ( "The shared item stored in OneDrive, has a different name to the actual name on the remote drive" ) ;
log . vdebug ( "Updating remoteItem.name JSON data with the actual name being used on account drive and local folder" ) ;
log . vdebug ( "remoteItem.name was: " , remoteItem . name ) ;
log . vdebug ( "Updating remoteItem.name to: " , remoteDBItem . name ) ;
remoteItem . name = remoteDBItem . name ;
log . vdebug ( "Setting remoteItem.remoteName to: " , onedriveJSONItem [ "name" ] . str ) ;
// Update JSON Item
remoteItem . remoteName = onedriveJSONItem [ "name" ] . str ;
log . vdebug ( "Updating source JSON 'name' to that which is the actual local directory" ) ;
log . vdebug ( "onedriveJSONItem['name'] was: " , onedriveJSONItem [ "name" ] . str ) ;
log . vdebug ( "Updating onedriveJSONItem['name'] to: " , remoteDBItem . name ) ;
onedriveJSONItem [ "name" ] = remoteDBItem . name ;
log . vdebug ( "onedriveJSONItem['name'] now: " , onedriveJSONItem [ "name" ] . str ) ;
// Update newItemPath value
newItemPath = computeItemPath ( thisItemDriveId , thisItemParentId ) ~ "/" ~ remoteDBItem . name ;
log . vdebug ( "New Item updated calculated full path is: " , newItemPath ) ;
2023-08-27 01:35:51 +02:00
}
2023-09-21 21:34:42 +02:00
// Add this record to the local database
log . vdebug ( "Update/Insert local database with remoteItem details: " , remoteItem ) ;
itemDB . upsert ( remoteItem ) ;
2021-11-22 07:42:29 +01:00
}
2020-08-08 00:56:00 +02:00
}
}
2020-03-19 20:12:47 +01:00
}
2023-08-27 01:35:51 +02:00
// Check the skippedItems array for the parent id of this JSONItem if this is something we need to skip
if ( ! unwanted ) {
if ( skippedItems . find ( thisItemParentId ) . length ! = 0 ) {
// Flag this JSON item as unwanted
log . vdebug ( "Flagging as unwanted: find(thisItemParentId).length != 0" ) ;
unwanted = true ;
// Is this item id in the database?
if ( existingDBEntry ) {
// item exists in database, most likely moved out of scope for current client configuration
log . vdebug ( "This item was previously synced / seen by the client" ) ;
if ( ( "name" in onedriveJSONItem [ "parentReference" ] ) ! = null ) {
2019-09-09 04:30:59 +02:00
2023-08-27 01:35:51 +02:00
// How is this out of scope?
// is sync_list configured
if ( syncListConfigured ) {
// sync_list configured and in use
if ( selectiveSync . isPathExcludedViaSyncList ( onedriveJSONItem [ "parentReference" ] [ "name" ] . str ) ) {
// Previously synced item is now out of scope as it has been moved out of what is included in sync_list
log . vdebug ( "This previously synced item is now excluded from being synced due to sync_list exclusion" ) ;
}
}
// flag to delete local file as it now is no longer in sync with OneDrive
log . vdebug ( "Flagging to delete item locally: " , onedriveJSONItem ) ;
2023-09-12 07:31:59 +02:00
idsToDelete ~ = [ thisItemDriveId , thisItemId ] ;
2023-08-27 01:35:51 +02:00
}
}
2019-08-24 08:26:08 +02:00
}
2018-06-17 04:02:58 +02:00
}
2019-01-10 19:31:10 +01:00
2023-08-27 01:35:51 +02:00
// Check the item type - if it not an item type that we support, we cant process the JSON item
if ( ! unwanted ) {
if ( isItemFile ( onedriveJSONItem ) ) {
log . vdebug ( "The item we are syncing is a file" ) ;
} else if ( isItemFolder ( onedriveJSONItem ) ) {
log . vdebug ( "The item we are syncing is a folder" ) ;
} else if ( isItemRemote ( onedriveJSONItem ) ) {
log . vdebug ( "The item we are syncing is a remote item" ) ;
} else {
// Why was this unwanted?
if ( newItemPath . empty ) {
// Compute this item path & need the full path for this file
newItemPath = computeItemPath ( thisItemDriveId , thisItemParentId ) ~ "/" ~ thisItemName ;
log . vdebug ( "New Item calculated full path is: " , newItemPath ) ;
2020-06-16 23:57:14 +02:00
}
2023-08-27 01:35:51 +02:00
// Microsoft OneNote container objects present as neither folder or file but has file size
if ( ( ! isItemFile ( onedriveJSONItem ) ) & & ( ! isItemFolder ( onedriveJSONItem ) ) & & ( hasFileSize ( onedriveJSONItem ) ) ) {
// Log that this was skipped as this was a Microsoft OneNote item and unsupported
log . vlog ( "The Microsoft OneNote Notebook '" , newItemPath , "' is not supported by this client" ) ;
2020-06-16 23:57:14 +02:00
} else {
2023-08-27 01:35:51 +02:00
// Log that this item was skipped as unsupported
log . vlog ( "The OneDrive item '" , newItemPath , "' is not supported by this client" ) ;
2020-06-16 23:57:14 +02:00
}
2023-08-27 01:35:51 +02:00
unwanted = true ;
log . vdebug ( "Flagging as unwanted: item type is not supported" ) ;
2018-05-05 09:00:50 +02:00
}
2023-08-27 01:35:51 +02:00
}
// Check if this is excluded by config option: skip_dir
if ( ! unwanted ) {
// Only check path if config is != ""
if ( ! appConfig . getValueString ( "skip_dir" ) . empty ) {
// Is the item a folder?
if ( isItemFolder ( onedriveJSONItem ) ) {
// work out the 'snippet' path where this folder would be created
string simplePathToCheck = "" ;
string complexPathToCheck = "" ;
string matchDisplay = "" ;
if ( hasParentReference ( onedriveJSONItem ) ) {
// we need to workout the FULL path for this item
// simple path
if ( ( "name" in onedriveJSONItem [ "parentReference" ] ) ! = null ) {
simplePathToCheck = onedriveJSONItem [ "parentReference" ] [ "name" ] . str ~ "/" ~ onedriveJSONItem [ "name" ] . str ;
2020-06-16 23:57:14 +02:00
} else {
2023-08-27 01:35:51 +02:00
simplePathToCheck = onedriveJSONItem [ "name" ] . str ;
}
log . vdebug ( "skip_dir path to check (simple): " , simplePathToCheck ) ;
// complex path
if ( parentInDatabase ) {
// build up complexPathToCheck
complexPathToCheck = buildNormalizedPath ( newItemPath ) ;
} else {
log . vdebug ( "Parent details not in database - unable to compute complex path to check" ) ;
2020-05-01 20:05:06 +02:00
}
2023-09-21 21:34:42 +02:00
if ( ! complexPathToCheck . empty ) {
log . vdebug ( "skip_dir path to check (complex): " , complexPathToCheck ) ;
}
2023-08-27 01:35:51 +02:00
} else {
simplePathToCheck = onedriveJSONItem [ "name" ] . str ;
2020-05-01 20:05:06 +02:00
}
2023-08-27 01:35:51 +02:00
// If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder
// then isDirNameExcluded matching will not work
// Clean up 'root:' if present
if ( startsWith ( simplePathToCheck , "root:" ) ) {
log . vdebug ( "Updating simplePathToCheck to remove 'root:'" ) ;
simplePathToCheck = strip ( simplePathToCheck , "root:" ) ;
2020-12-09 04:03:49 +01:00
}
2023-08-27 01:35:51 +02:00
if ( startsWith ( complexPathToCheck , "root:" ) ) {
log . vdebug ( "Updating complexPathToCheck to remove 'root:'" ) ;
complexPathToCheck = strip ( complexPathToCheck , "root:" ) ;
}
// OK .. what checks are we doing?
if ( ( ! simplePathToCheck . empty ) & & ( complexPathToCheck . empty ) ) {
// just a simple check
log . vdebug ( "Performing a simple check only" ) ;
unwanted = selectiveSync . isDirNameExcluded ( simplePathToCheck ) ;
} else {
// simple and complex
log . vdebug ( "Performing a simple then complex path match if required" ) ;
// simple first
log . vdebug ( "Performing a simple check first" ) ;
unwanted = selectiveSync . isDirNameExcluded ( simplePathToCheck ) ;
matchDisplay = simplePathToCheck ;
if ( ! unwanted ) {
log . vdebug ( "Simple match was false, attempting complex match" ) ;
// simple didnt match, perform a complex check
unwanted = selectiveSync . isDirNameExcluded ( complexPathToCheck ) ;
matchDisplay = complexPathToCheck ;
}
}
// result
log . vdebug ( "skip_dir exclude result (directory based): " , unwanted ) ;
if ( unwanted ) {
// This path should be skipped
log . vlog ( "Skipping item - excluded by skip_dir config: " , matchDisplay ) ;
2020-06-16 23:57:14 +02:00
}
2020-05-01 20:05:06 +02:00
}
2023-08-27 01:35:51 +02:00
// Is the item a file?
// We need to check to see if this files path is excluded as well
if ( isItemFile ( onedriveJSONItem ) ) {
2020-06-16 23:57:14 +02:00
2023-08-27 01:35:51 +02:00
string pathToCheck ;
// does the newItemPath start with '/'?
if ( ! startsWith ( newItemPath , "/" ) ) {
// path does not start with '/', but we need to check skip_dir entries with and without '/'
// so always make sure we are checking a path with '/'
pathToCheck = '/' ~ dirName ( newItemPath ) ;
} else {
pathToCheck = dirName ( newItemPath ) ;
2020-06-16 23:57:14 +02:00
}
2023-08-27 01:35:51 +02:00
// perform the check
unwanted = selectiveSync . isDirNameExcluded ( pathToCheck ) ;
// result
log . vdebug ( "skip_dir exclude result (file based): " , unwanted ) ;
if ( unwanted ) {
// this files path should be skipped
log . vlog ( "Skipping item - file path is excluded by skip_dir config: " , newItemPath ) ;
2020-05-01 20:05:06 +02:00
}
}
2019-10-22 20:29:11 +02:00
}
}
2023-08-27 01:35:51 +02:00
// Check if this is excluded by config option: skip_file
if ( ! unwanted ) {
// Is the JSON item a file?
if ( isItemFile ( onedriveJSONItem ) ) {
// skip_file can contain 4 types of entries:
// - wildcard - *.txt
// - text + wildcard - name*.txt
// - full path + combination of any above two - /path/name*.txt
// - full path to file - /path/to/file.txt
// is the parent id in the database?
if ( parentInDatabase ) {
// Compute this item path & need the full path for this file
if ( newItemPath . empty ) {
newItemPath = computeItemPath ( thisItemDriveId , thisItemParentId ) ~ "/" ~ thisItemName ;
log . vdebug ( "New Item calculated full path is: " , newItemPath ) ;
2022-06-24 06:54:32 +02:00
}
2023-08-27 01:35:51 +02:00
// The path that needs to be checked needs to include the '/'
// This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched
// However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks
string exclusionTestPath = "" ;
if ( ! startsWith ( newItemPath , "/" ) ) {
// Add '/' to the path
exclusionTestPath = '/' ~ newItemPath ;
2022-06-24 06:54:32 +02:00
}
2023-08-27 01:35:51 +02:00
log . vdebug ( "skip_file item to check: " , exclusionTestPath ) ;
unwanted = selectiveSync . isFileNameExcluded ( exclusionTestPath ) ;
log . vdebug ( "Result: " , unwanted ) ;
if ( unwanted ) log . vlog ( "Skipping item - excluded by skip_file config: " , thisItemName ) ;
2022-06-24 06:54:32 +02:00
} else {
2023-08-27 01:35:51 +02:00
// parent id is not in the database
unwanted = true ;
log . vlog ( "Skipping file - parent path not present in local database" ) ;
2022-06-24 06:54:32 +02:00
}
}
}
2023-08-27 01:35:51 +02:00
// Check if this is included or excluded by use of sync_list
if ( ! unwanted ) {
// No need to try and process something against a sync_list if it has been configured
if ( syncListConfigured ) {
// Compute the item path if empty - as to check sync_list we need an actual path to check
if ( newItemPath . empty ) {
// Calculate this items path
newItemPath = computeItemPath ( thisItemDriveId , thisItemParentId ) ~ "/" ~ thisItemName ;
log . vdebug ( "New Item calculated full path is: " , newItemPath ) ;
2022-10-12 04:34:42 +02:00
}
2023-08-27 01:35:51 +02:00
// What path are we checking?
log . vdebug ( "sync_list item to check: " , newItemPath ) ;
2022-10-12 04:34:42 +02:00
2023-08-27 01:35:51 +02:00
// Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list
if ( selectiveSync . isPathExcludedViaSyncList ( newItemPath ) ) {
// selective sync advised to skip, however is this a file and are we configured to upload / download files in the root?
if ( ( isItemFile ( onedriveJSONItem ) ) & & ( appConfig . getValueBool ( "sync_root_files" ) ) & & ( rootName ( newItemPath ) = = "" ) ) {
// This is a file
// We are configured to sync all files in the root
// This is a file in the logical root
unwanted = false ;
2018-05-09 08:39:23 +02:00
} else {
2023-08-27 01:35:51 +02:00
// path is unwanted
unwanted = true ;
log . vlog ( "Skipping item - excluded by sync_list config: " , newItemPath ) ;
// flagging to skip this item now, but does this exist in the DB thus needs to be removed / deleted?
if ( existingDBEntry ) {
// flag to delete
2023-09-12 07:31:59 +02:00
log . vlog ( "Flagging item for local delete as item exists in database: " , newItemPath ) ;
2023-08-27 01:35:51 +02:00
idsToDelete ~ = [ thisItemDriveId , thisItemId ] ;
2019-09-09 04:30:59 +02:00
}
2019-08-24 08:26:08 +02:00
}
2018-08-27 02:35:15 +02:00
}
2018-04-23 02:58:47 +02:00
}
2023-08-27 01:35:51 +02:00
}
// Check if the user has configured to skip downloading .files or .folders: skip_dotfiles
if ( ! unwanted ) {
if ( appConfig . getValueBool ( "skip_dotfiles" ) ) {
if ( isDotFile ( newItemPath ) ) {
log . vlog ( "Skipping item - .file or .folder: " , newItemPath ) ;
unwanted = true ;
}
2020-05-01 20:05:06 +02:00
}
2023-08-27 01:35:51 +02:00
}
// Check if this should be skipped due to a --check-for-nosync directive (.nosync)?
if ( ! unwanted ) {
if ( appConfig . getValueBool ( "check_nosync" ) ) {
// need the parent path for this object
string parentPath = dirName ( newItemPath ) ;
// Check for the presence of a .nosync in the parent path
if ( exists ( parentPath ~ "/.nosync" ) ) {
log . vlog ( "Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: " , newItemPath ) ;
unwanted = true ;
}
2022-10-12 04:34:42 +02:00
}
2023-08-27 01:35:51 +02:00
}
// Check if this is excluded by a user set maximum filesize to download
if ( ! unwanted ) {
if ( isItemFile ( onedriveJSONItem ) ) {
if ( fileSizeLimit ! = 0 ) {
if ( onedriveJSONItem [ "size" ] . integer > = fileSizeLimit ) {
log . vlog ( "Skipping item - excluded by skip_size config: " , thisItemName , " (" , onedriveJSONItem [ "size" ] . integer / 2 ^ ^ 20 , " MB)" ) ;
}
}
2020-05-01 20:05:06 +02:00
}
2019-01-06 18:22:09 +01:00
}
2019-07-29 23:02:24 +02:00
2023-08-27 01:35:51 +02:00
// At this point all the applicable checks on this JSON object from OneDrive are complete:
// - skip_file
// - skip_dir
// - sync_list
// - skip_dotfiles
// - check_nosync
// - skip_size
// - We know if this item exists in the DB or not in the DB
// We know if this JSON item is unwanted or not
if ( unwanted ) {
// This JSON item is NOT wanted - it is excluded
log . vdebug ( "Skipping OneDrive change as this is determined to be unwanted" ) ;
// Add to the skippedItems array, but only if it is a directory ... pointless adding 'files' here, as it is the 'id' we check as the parent path which can only be a directory
if ( ! isItemFile ( onedriveJSONItem ) ) {
skippedItems ~ = thisItemId ;
2019-05-09 13:18:49 +02:00
}
2018-05-14 22:59:17 +02:00
} else {
2023-08-27 01:35:51 +02:00
// This JSON item is wanted - we need to process this JSON item further
// Take the JSON item and create a consumable object for eventual database insertion
Item newDatabaseItem = makeItem ( onedriveJSONItem ) ;
if ( existingDBEntry ) {
// The details of this JSON item are already in the DB
// Is the item in the DB the same as the JSON data provided - or is the JSON data advising this is an updated file?
log . vdebug ( "OneDrive change is an update to an existing local item" ) ;
// Compute the existing item path
// NOTE:
// string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.id);
//
// This will calculate the path as follows:
//
// existingItemPath: Document.txt
//
// Whereas above we use the following
//
// newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name;
//
// Which generates the following path:
//
// changedItemPath: ./Document.txt
//
// Need to be consistent here with how 'newItemPath' was calculated
string existingItemPath = computeItemPath ( existingDatabaseItem . driveId , existingDatabaseItem . parentId ) ~ "/" ~ existingDatabaseItem . name ;
// Attempt to apply this changed item
applyPotentiallyChangedItem ( existingDatabaseItem , existingItemPath , newDatabaseItem , newItemPath , onedriveJSONItem ) ;
2019-09-09 04:30:59 +02:00
} else {
2023-08-27 01:35:51 +02:00
// Action this JSON item as a new item as we have no DB record of it
// The actual item may actually exist locally already, meaning that just the database is out-of-date or missing the data due to --resync
// But we also cannot compute the newItemPath as the parental objects may not exist as well
log . vdebug ( "OneDrive change is potentially a new local item" ) ;
2023-09-21 21:34:42 +02:00
// Attempt to apply this potentially new item
2023-08-27 01:35:51 +02:00
applyPotentiallyNewLocalItem ( newDatabaseItem , onedriveJSONItem , newItemPath ) ;
2019-09-09 04:30:59 +02:00
}
2018-05-14 22:59:17 +02:00
}
2023-08-27 01:35:51 +02:00
// Tracking as to if this item was processed
processedCount + + ;
2015-09-19 15:38:43 +02:00
}
2023-08-27 01:35:51 +02:00
}
// Perform the download of any required objects in parallel
void processDownloadActivities() {
	
	// Step 1: local deletions first, so disk space is freed before any new downloads
	if (!idsToDelete.empty) {
		// There are elements that potentially need to be deleted locally
		log.vlog("Items to potentially delete locally: ", idsToDelete.length);
		// In --download-only mode, deletions only happen when --cleanup-local-files was also requested
		bool performLocalDeletes = true;
		if (appConfig.getValueBool("download_only")) {
			performLocalDeletes = cleanupLocalFiles;
			if (performLocalDeletes) {
				log.vlog("Processing local deletion activity as --download-only & --cleanup-local-files configured");
			} else {
				log.vlog("Skipping local deletion activity as --download-only has been used");
			}
		}
		if (performLocalDeletes) {
			processDeleteItems();
		}
		// Release the processed id list memory
		idsToDelete = [];
	}
	
	// Step 2: download any file items queued while processing the /delta data
	if (!fileJSONItemsToDownload.empty) {
		log.vlog("Number of items to download from OneDrive: ", fileJSONItemsToDownload.length);
		downloadOneDriveItems();
		// Release the processed queue memory
		fileJSONItemsToDownload = [];
	}
	
	// Step 3: drop any ids that were flagged as skipped during processing
	if (!skippedItems.empty) {
		// Cleanup array memory
		skippedItems = [];
	}
}
// If the JSON item is not in the database, it is potentially a new item that we need to action
//
// Params:
//   newDatabaseItem  = consumable Item built from the OneDrive JSON, ready for DB insertion
//   onedriveJSONItem = the raw JSON object as returned by the OneDrive API
//   newItemPath      = the computed local path for this item
void applyPotentiallyNewLocalItem(Item newDatabaseItem, JSONValue onedriveJSONItem, string newItemPath) {
	
	// The JSON and Database items being passed in here have passed the following checks:
	// - skip_file
	// - skip_dir
	// - sync_list
	// - skip_dotfiles
	// - check_nosync
	// - skip_size
	// - Is not currently cached in the local database
	// As such, we should not be doing any other checks here to determine if the JSON item is wanted .. it is
	
	if (exists(newItemPath)) {
		// Issue #2209 fix - test if path is a bad symbolic link
		if (isSymlink(newItemPath)) {
			log.vdebug("Path on local disk is a symbolic link ........");
			if (!exists(readLink(newItemPath))) {
				// reading the symbolic link failed
				log.vdebug("Reading the symbolic link target failed ........ ");
				log.logAndNotify("Skipping item - invalid symbolic link: ", newItemPath);
				return;
			}
		}
		
		// Path exists locally, is not a bad symbolic link
		// Test if this item is actually in-sync
		// What is the source of this item data?
		string itemSource = "remote";
		if (isItemSynced(newDatabaseItem, newItemPath, itemSource)) {
			// Item details from OneDrive and local item details in database are in-sync
			log.vdebug("The item to sync is already present on the local filesystem and is in-sync with what is reported online");
			log.vdebug("Update/Insert local database with item details");
			log.vdebug("item details to update/insert: ", newDatabaseItem);
			itemDB.upsert(newDatabaseItem);
			// Nothing further to do - local content matches what is online
			return;
		} else {
			// Item details from OneDrive and local item details in database are NOT in-sync
			log.vdebug("The item to sync exists locally but is NOT in the local database - otherwise this would be handled as changed item");
			// Which object is newer? The local file or the remote file?
			SysTime localModifiedTime = timeLastModified(newItemPath).toUTC();
			SysTime itemModifiedTime = newDatabaseItem.mtime;
			// Reduce time resolution to seconds before comparing
			localModifiedTime.fracSecs = Duration.zero;
			itemModifiedTime.fracSecs = Duration.zero;
			
			// If we need to rename the file, what do we rename it to?
			// e.g. Document.txt -> Document-hostname.txt
			auto ext = extension(newItemPath);
			auto renamedNewItemPath = newItemPath.chomp(ext) ~ "-" ~ deviceName ~ ext;
			
			// Is the local modified time greater than that from OneDrive?
			if (localModifiedTime > itemModifiedTime) {
				// Local file is newer than item on OneDrive based on file modified time
				// Is this item id in the database?
				if (itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.id)) {
					// item id is in the database
					// no local rename
					// no download needed
					log.vlog("Local item modified time is newer based on UTC time conversion - keeping local item as this exists in the local database");
					log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer than OneDrive item and present in the sqlite database");
					// BUG FIX: must return here. Without this early return, control falls
					// through to the 'new item' handling below, which queues this file for
					// download and would overwrite the newer local copy we just decided to keep.
					return;
				} else {
					// item id is not in the database .. maybe a --resync ?
					// file exists locally but is not in the sqlite database - maybe a failed download?
					log.vlog("Local item does not exist in local database - replacing with file from OneDrive - failed download?");
					
					// In a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file
					// To this pint we have passed the following checks:
					// 1. Any client side filtering checks - this determined this is a file that is wanted
					// 2. A file with the exact name exists locally
					// 3. The local modified time > remote modified time
					// 4. The id of the item from OneDrive is not in the database
					// Has the user configured to IGNORE local data protection rules?
					if (bypassDataPreservation) {
						// The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename
						log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", newItemPath);
					} else {
						// local data protection is configured, renaming local file
						log.log("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ", newItemPath, " -> ", renamedNewItemPath);
						// perform the rename action of the local file
						if (!dryRun) {
							// Perform the local rename of the existing local file
							safeRename(newItemPath, renamedNewItemPath, dryRun);
						} else {
							// Expectation here is that there is a new file locally (renamedNewItemPath) however as we don't create this, the "new file" will not be uploaded as it does not exist
							log.vdebug("DRY-RUN: Skipping local file rename");
						}
					}
				}
			} else {
				// Remote file is newer than the existing local item
				log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer
				log.vdebug("localModifiedTime (local file): ", localModifiedTime);
				log.vdebug("itemModifiedTime (OneDrive item): ", itemModifiedTime);
				
				// Has the user configured to IGNORE local data protection rules?
				if (bypassDataPreservation) {
					// The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename
					log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", newItemPath);
				} else {
					// local data protection is configured, renaming local file
					log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", newItemPath, " -> ", renamedNewItemPath);
					// perform the rename action of the local file
					if (!dryRun) {
						// Perform the local rename of the existing local file
						safeRename(newItemPath, renamedNewItemPath, dryRun);
					} else {
						// Expectation here is that there is a new file locally (renamedNewItemPath) however as we don't create this, the "new file" will not be uploaded as it does not exist
						log.vdebug("DRY-RUN: Skipping local file rename");
					}
				}
			}
		}
	}
	
	// Path does not exist locally (should not exist locally if renamed file) - this will be a new file download or new folder creation
	// How to handle this Potentially New Local Item JSON ?
	final switch (newDatabaseItem.type) {
		case ItemType.file:
			// Add to the items to download array for processing
			fileJSONItemsToDownload ~= onedriveJSONItem;
			break;
		case ItemType.dir:
		case ItemType.remote:
			log.log("Creating local directory: ", newItemPath);
			if (!dryRun) {
				try {
					// Create the new directory
					log.vdebug("Requested path does not exist, creating directory structure: ", newItemPath);
					mkdirRecurse(newItemPath);
					// Configure the applicable permissions for the folder
					log.vdebug("Setting directory permissions for: ", newItemPath);
					newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions());
					// Update the time of the folder to match the last modified time as is provided by OneDrive
					// If there are any files then downloaded into this folder, the last modified time will get
					// updated by the local Operating System with the latest timestamp - as this is normal operation
					// as the directory has been modified
					log.vdebug("Setting directory lastModifiedDateTime for: ", newItemPath, " to ", newDatabaseItem.mtime);
					log.vdebug("Calling setTimes() for this file: ", newItemPath);
					setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime);
					// Save the item to the database
					saveItem(onedriveJSONItem);
				} catch (FileException e) {
					// display the error message
					displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
				}
			} else {
				// we dont create the directory, but we need to track that we 'faked it'
				idsFaked ~= [newDatabaseItem.driveId, newDatabaseItem.id];
				// Save the item to the dry-run database
				saveItem(onedriveJSONItem);
			}
			break;
		case ItemType.unknown:
			// Unknown type - we dont action or sync these items
			break;
	}
}
// If the JSON item IS in the database, this will be an update to an existing in-sync item
//
// Params:
//   existingDatabaseItem = the cached DB record for this item
//   existingItemPath     = the computed local path for the cached record
//   changedOneDriveItem  = consumable Item built from the new OneDrive JSON
//   changedItemPath      = the computed local path for the new JSON data
//   onedriveJSONItem     = the raw JSON object as returned by the OneDrive API
void applyPotentiallyChangedItem(Item existingDatabaseItem, string existingItemPath, Item changedOneDriveItem, string changedItemPath, JSONValue onedriveJSONItem) {
	
	// An unchanged eTag means nothing has changed online - just persist the item details
	if (existingDatabaseItem.eTag == changedOneDriveItem.eTag) {
		saveItem(onedriveJSONItem);
		return;
	}
	
	// The eTag has changed from what we previously cached
	// If we are moving the item, we do not need to download it again
	bool performedLocalMove = false;
	
	if (existingItemPath != changedItemPath) {
		// The item was renamed / moved online - mirror that locally
		log.log("Moving ", existingItemPath, " to ", changedItemPath);
		
		// Is something already sitting at the destination path?
		if (exists(changedItemPath)) {
			Item destinationDbItem;
			// Query DB for whatever occupies the destination and see if it is in-sync
			if (itemDB.selectByPath(changedItemPath, changedOneDriveItem.driveId, destinationDbItem)) {
				string itemSource = "database";
				if (isItemSynced(destinationDbItem, changedItemPath, itemSource)) {
					// The destination item is in-sync
					log.vlog("Destination is in sync and will be overwritten");
				} else {
					// Known item but content differs - preserve it before overwriting
					log.vlog("The destination is occupied with a different item, renaming the conflicting file...");
					// Backup this item, passing in if we are performing a --dry-run or not
					safeBackup(changedItemPath, dryRun);
				}
			} else {
				// Untracked local file at the destination - preserve it to avoid data loss
				log.vlog("The destination is occupied by an existing un-synced file, renaming the conflicting file...");
				// Backup this item, passing in if we are performing a --dry-run or not
				safeBackup(changedItemPath, dryRun);
			}
		}
		
		// Attempt the local rename, catching any filesystem exception generated
		try {
			// Rename this item, passing in if we are performing a --dry-run or not
			safeRename(existingItemPath, changedItemPath, dryRun);
			// For files, re-apply the online modified timestamp so the moved local file
			// does not appear newer than the online item after the filesystem move
			if (changedOneDriveItem.type == ItemType.file) {
				setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime);
			}
			// Flag that the item was moved | renamed
			performedLocalMove = true;
			// Under --dry-run the actual rename did not occur - but we need to track like it did
			if (dryRun) {
				// Track this as a faked id item
				idsFaked ~= [changedOneDriveItem.driveId, changedOneDriveItem.id];
				// We also need to track that we did not rename this path
				pathsRenamed ~= [existingItemPath];
			}
		} catch (FileException e) {
			// display the error message
			displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
		}
	}
	
	// What sort of changed item is this?
	// A file whose content may have changed, and which we did not just move, may need downloading
	if ((changedOneDriveItem.type == ItemType.file) && (!performedLocalMove)) {
		// The eTag is notorious for being 'changed' online by some backend Microsoft process,
		// so compare the content hash we previously recorded against what is reported online
		if (existingDatabaseItem.quickXorHash != changedOneDriveItem.quickXorHash) {
			// Hash mismatch - queue this file for download
			fileJSONItemsToDownload ~= onedriveJSONItem;
		} else {
			// Same content - just save this item in the database
			saveItem(onedriveJSONItem);
		}
	} else {
		// Directory, remote item, or a file we just moved - save this item in the database
		saveItem(onedriveJSONItem);
		// If the 'Add shortcut to My files' link was the item that was actually renamed .. we have to update our DB records
		if (changedOneDriveItem.type == ItemType.remote) {
			// Select remote item data from the database
			Item existingRemoteDbItem;
			itemDB.selectById(changedOneDriveItem.remoteDriveId, changedOneDriveItem.remoteId, existingRemoteDbItem);
			// Update the 'name' in existingRemoteDbItem and save it back to the database
			// This is the local name stored on disk that was just 'moved'
			existingRemoteDbItem.name = changedOneDriveItem.name;
			itemDB.upsert(existingRemoteDbItem);
		}
	}
}
// Download new file items as identified
// Processes the 'fileJSONItemsToDownload' queue in batches so that no more than
// 'concurrentThreads' downloads are in flight at any one time.
void downloadOneDriveItems() {
	
	// Lets deal with the JSON items in a batch process
	ulong batchSize = appConfig.concurrentThreads;
	// NOTE: the original code also computed a 'batchCount' and declared a 'batchesProcessed'
	// counter; neither was ever used, so both dead locals have been removed.
	foreach (chunk; fileJSONItemsToDownload.chunks(batchSize)) {
		// Each chunk is downloaded concurrently, one worker per item
		downloadOneDriveItemsInParallel(chunk);
	}
}
// Download items in parallel
// Each element of 'array' is processed on a worker thread from the shared task pool.
//
// Params:
//   array = batch of OneDrive JSON file items to download concurrently
void downloadOneDriveItemsInParallel(JSONValue[] array) {
	
	// The original foreach declared an index variable 'i' that was never used - removed.
	foreach (onedriveJSONItem; taskPool.parallel(array)) {
		// Take the JSON item and create a consumable object for eventual database insertion
		Item newDatabaseItem = makeItem(onedriveJSONItem);
		downloadFileItem(newDatabaseItem, onedriveJSONItem);
	}
}
// Perform the actual download of an object from OneDrive
void downloadFileItem ( Item newDatabaseItem , JSONValue onedriveJSONItem ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
bool downloadFailed = false ;
string OneDriveFileXORHash ;
string OneDriveFileSHA256Hash ;
ulong jsonFileSize = 0 ;
// Calculate this items path
string newItemPath = computeItemPath ( newDatabaseItem . driveId , newDatabaseItem . parentId ) ~ "/" ~ newDatabaseItem . name ;
log . vdebug ( "New Item calculated full path is: " , newItemPath ) ;
// Is the item reported as Malware ?
if ( isMalware ( onedriveJSONItem ) ) {
// OneDrive reports that this file is malware
log . error ( "ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED: " , newItemPath ) ;
} else {
// Grab this file's filesize
if ( hasFileSize ( onedriveJSONItem ) ) {
// Use the configured filesize as reported by OneDrive
jsonFileSize = onedriveJSONItem [ "size" ] . integer ;
2019-07-05 08:17:06 +02:00
} else {
// filesize missing
2023-08-27 01:35:51 +02:00
log . vdebug ( "WARNING: onedriveJSONItem['size'] is missing" ) ;
2019-07-05 08:17:06 +02:00
}
2023-08-27 01:35:51 +02:00
// Configure the hashes for comparison post download
if ( hasHashes ( onedriveJSONItem ) ) {
2019-07-13 07:42:35 +02:00
// File details returned hash details
// QuickXorHash
2023-08-27 01:35:51 +02:00
if ( hasQuickXorHash ( onedriveJSONItem ) ) {
// Use the provided quickXorHash as reported by OneDrive
if ( onedriveJSONItem [ "file" ] [ "hashes" ] [ "quickXorHash" ] . str ! = "" ) {
OneDriveFileXORHash = onedriveJSONItem [ "file" ] [ "hashes" ] [ "quickXorHash" ] . str ;
2019-07-13 07:42:35 +02:00
}
2023-08-24 10:08:21 +02:00
} else {
2023-08-27 01:35:51 +02:00
// Fallback: Check for SHA256Hash
if ( hasSHA256Hash ( onedriveJSONItem ) ) {
// Use the provided sha256Hash as reported by OneDrive
if ( onedriveJSONItem [ "file" ] [ "hashes" ] [ "sha256Hash" ] . str ! = "" ) {
OneDriveFileSHA256Hash = onedriveJSONItem [ "file" ] [ "hashes" ] [ "sha256Hash" ] . str ;
2023-08-24 10:08:21 +02:00
}
2019-07-13 07:42:35 +02:00
}
}
2019-06-18 02:32:27 +02:00
} else {
2019-07-13 07:42:35 +02:00
// file hash data missing
2023-08-27 01:35:51 +02:00
log . vdebug ( "WARNING: onedriveJSONItem['file']['hashes'] is missing - unable to compare file hash after download" ) ;
2019-06-18 02:32:27 +02:00
}
2023-08-27 01:35:51 +02:00
2022-05-31 21:57:05 +02:00
// Is there enough free space locally to download the file
// - We can use '.' here as we change the current working directory to the configured 'sync_dir'
ulong localActualFreeSpace = to ! ulong ( getAvailableDiskSpace ( "." ) ) ;
// So that we are not responsible in making the disk 100% full if we can download the file, compare the current available space against the reservation set and file size
// The reservation value is user configurable in the config file, 50MB by default
2023-08-27 01:35:51 +02:00
ulong freeSpaceReservation = appConfig . getValueLong ( "space_reservation" ) ;
2022-05-31 21:57:05 +02:00
// debug output
log . vdebug ( "Local Disk Space Actual: " , localActualFreeSpace ) ;
log . vdebug ( "Free Space Reservation: " , freeSpaceReservation ) ;
2023-08-27 01:35:51 +02:00
log . vdebug ( "File Size to Download: " , jsonFileSize ) ;
2022-05-31 21:57:05 +02:00
2023-08-27 01:35:51 +02:00
// Calculate if we can actually download file - is there enough free space?
if ( ( localActualFreeSpace < freeSpaceReservation ) | | ( jsonFileSize > localActualFreeSpace ) ) {
2022-05-31 21:57:05 +02:00
// localActualFreeSpace is less than freeSpaceReservation .. insufficient free space
2023-08-27 01:35:51 +02:00
// jsonFileSize is greater than localActualFreeSpace .. insufficient free space
log . log ( "Downloading file " , newItemPath , " ... failed!" ) ;
2022-05-31 21:57:05 +02:00
log . log ( "Insufficient local disk space to download file" ) ;
downloadFailed = true ;
2023-08-27 01:35:51 +02:00
} else {
// If we are in a --dry-run situation - if not, actually perform the download
if ( ! dryRun ) {
// Attempt to download the file as there is enough free space locally
OneDriveApi downloadFileOneDriveApiInstance ;
downloadFileOneDriveApiInstance = new OneDriveApi ( appConfig ) ;
try {
downloadFileOneDriveApiInstance . initialise ( ) ;
downloadFileOneDriveApiInstance . downloadById ( newDatabaseItem . driveId , newDatabaseItem . id , newItemPath , jsonFileSize ) ;
downloadFileOneDriveApiInstance . shutdown ( ) ;
2023-09-02 04:27:10 +02:00
} catch ( OneDriveException exception ) {
2023-08-27 01:35:51 +02:00
log . vdebug ( "downloadFileOneDriveApiInstance.downloadById(newDatabaseItem.driveId, newDatabaseItem.id, newItemPath, jsonFileSize); generated a OneDriveException" ) ;
2023-09-02 04:27:10 +02:00
string thisFunctionName = getFunctionName ! ( { } ) ;
// HTTP request returned status code 408,429,503,504
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 429 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// Handle the 429
if ( exception . httpStatusCode = = 429 ) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest ( downloadFileOneDriveApiInstance ) ;
log . vdebug ( "Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " , thisFunctionName ) ;
}
// re-try the specific changes queries
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines ( exception . msg ) ;
log . log ( errorArray [ 0 ] , " when attempting to download an item from OneDrive - retrying applicable request in 30 seconds" ) ;
log . vdebug ( thisFunctionName , " previously threw an error - retrying" ) ;
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
log . vdebug ( "Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request" ) ;
Thread . sleep ( dur ! "seconds" ( 30 ) ) ;
}
// re-try original request - retried for 429, 503, 504 - but loop back calling this function
log . vdebug ( "Retrying Function: " , thisFunctionName ) ;
downloadFileItem ( newDatabaseItem , onedriveJSONItem ) ;
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
displayOneDriveErrorMessage ( exception . msg , getFunctionName ! ( { } ) ) ;
}
2023-08-27 01:35:51 +02:00
} catch ( FileException e ) {
// There was a file system error
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
downloadFailed = true ;
} catch ( ErrnoException e ) {
// There was a file system error
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
downloadFailed = true ;
2020-03-19 20:12:47 +01:00
}
2023-08-27 01:35:51 +02:00
// If we get to this point, something was downloaded .. does it match what we expected?
if ( exists ( newItemPath ) ) {
// When downloading some files from SharePoint, the OneDrive API reports one file size,
// but the SharePoint HTTP Server sends a totally different byte count for the same file
// we have implemented --disable-download-validation to disable these checks
if ( ! disableDownloadValidation ) {
// A 'file' was downloaded - does what we downloaded = reported jsonFileSize or if there is some sort of funky local disk compression going on
// Does the file hash OneDrive reports match what we have locally?
string onlineFileHash ;
string downloadedFileHash ;
ulong downloadFileSize = getSize ( newItemPath ) ;
if ( ! OneDriveFileXORHash . empty ) {
onlineFileHash = OneDriveFileXORHash ;
// Calculate the QuickXOHash for this file
downloadedFileHash = computeQuickXorHash ( newItemPath ) ;
} else {
onlineFileHash = OneDriveFileSHA256Hash ;
// Fallback: Calculate the SHA256 Hash for this file
downloadedFileHash = computeSHA256Hash ( newItemPath ) ;
}
if ( ( downloadFileSize = = jsonFileSize ) & & ( downloadedFileHash = = onlineFileHash ) ) {
// Downloaded file matches size and hash
log . vdebug ( "Downloaded file matches reported size and or reported file hash" ) ;
try {
log . vdebug ( "Calling setTimes() for this file: " , newItemPath ) ;
setTimes ( newItemPath , newDatabaseItem . mtime , newDatabaseItem . mtime ) ;
} catch ( FileException e ) {
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
2020-03-19 20:12:47 +01:00
}
} else {
2023-08-27 01:35:51 +02:00
// Downloaded file does not match size or hash .. which is it?
bool downloadValueMismatch = false ;
// Size error?
if ( downloadFileSize ! = jsonFileSize ) {
// downloaded file size does not match
downloadValueMismatch = true ;
log . vdebug ( "Actual file size on disk: " , downloadFileSize ) ;
log . vdebug ( "OneDrive API reported size: " , jsonFileSize ) ;
log . error ( "ERROR: File download size mis-match. Increase logging verbosity to determine why." ) ;
}
// Hash Error
if ( downloadedFileHash ! = onlineFileHash ) {
// downloaded file hash does not match
downloadValueMismatch = true ;
log . vdebug ( "Actual local file hash: " , downloadedFileHash ) ;
log . vdebug ( "OneDrive API reported hash: " , onlineFileHash ) ;
log . error ( "ERROR: File download hash mis-match. Increase logging verbosity to determine why." ) ;
}
// .heic data loss check
// - https://github.com/abraunegg/onedrive/issues/2471
// - https://github.com/OneDrive/onedrive-api-docs/issues/1532
// - https://github.com/OneDrive/onedrive-api-docs/issues/1723
if ( downloadValueMismatch & & ( toLower ( extension ( newItemPath ) ) = = ".heic" ) ) {
// Need to display a message to the user that they have experienced data loss
log . error ( "DATA-LOSS: File downloaded has experienced data loss due to a Microsoft OneDrive API bug. DO NOT DELETE THIS FILE ONLINE." ) ;
log . vlog ( " Please read https://github.com/OneDrive/onedrive-api-docs/issues/1723 for more details." ) ;
}
// Add some workaround messaging for SharePoint
if ( appConfig . accountType = = "documentLibrary" ) {
// It has been seen where SharePoint / OneDrive API reports one size via the JSON
// but the content length and file size written to disk is totally different - example:
// From JSON: "size": 17133
// From HTTPS Server: < Content-Length: 19340
// with no logical reason for the difference, except for a 302 redirect before file download
log . error ( "INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed." ) ;
} else {
// other account types
log . error ( "INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed." ) ;
}
// We do not want this local file to remain on the local file system as it failed the integrity checks
log . log ( "Removing file " , newItemPath , " due to failed integrity checks" ) ;
if ( ! dryRun ) {
safeRemove ( newItemPath ) ;
}
downloadFailed = true ;
2019-03-11 07:57:47 +01:00
}
2021-11-22 21:34:16 +01:00
} else {
2023-08-27 01:35:51 +02:00
// Download validation checks were disabled
log . vdebug ( "Downloaded file validation disabled due to --disable-download-validation " ) ;
} // end of (!disableDownloadValidation)
} else {
log . error ( "ERROR: File failed to download. Increase logging verbosity to determine why." ) ;
2021-11-22 21:34:16 +01:00
downloadFailed = true ;
2019-09-21 22:40:39 +02:00
}
2023-08-27 01:35:51 +02:00
}
}
// File should have been downloaded
if ( ! downloadFailed ) {
// Download did not fail
log . log ( "Downloading file " , newItemPath , " ... done" ) ;
// Save this item into the database
log . vdebug ( "Inserting new item details to local database" ) ;
// What was the item that was saved
log . vdebug ( "item details: " , newDatabaseItem ) ;
itemDB . upsert ( newDatabaseItem ) ;
// If we are in a --dry-run situation - if we are, we need to track that we faked the download
if ( dryRun ) {
// track that we 'faked it'
idsFaked ~ = [ newDatabaseItem . driveId , newDatabaseItem . id ] ;
2019-06-28 05:13:32 +02:00
}
2019-05-13 14:22:15 +02:00
} else {
2023-08-27 01:35:51 +02:00
// Output download failed
log . log ( "Downloading file " , newItemPath , " ... failed!" ) ;
// Add the path to a list of items that failed to download
fileDownloadFailures ~ = newItemPath ;
2019-05-13 14:22:15 +02:00
}
2018-08-27 02:45:26 +02:00
}
2017-12-31 02:30:31 +01:00
}
2023-08-27 01:35:51 +02:00
// Test if the given item is in-sync. Returns true if the given item corresponds to the local one
bool isItemSynced(Item item, string path, string itemSource) {
	// A path that does not exist locally can never be in-sync
	if (!exists(path)) return false;
	
	final switch (item.type) {
	case ItemType.file: {
		if (!isFile(path)) {
			// The local filesystem object is not a file
			log.vlog("The local item is a directory but should be a file");
			break;
		}
		if (!readLocalFile(path)) {
			// The local file cannot be read - the sync state cannot be determined
			log.log("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): ", path);
			return false;
		}
		// Compare the modified timestamps, reduced to whole-second resolution
		SysTime localModifiedTime = timeLastModified(path).toUTC();
		SysTime itemModifiedTime = item.mtime;
		localModifiedTime.fracSecs = Duration.zero;
		itemModifiedTime.fracSecs = Duration.zero;
		if (localModifiedTime == itemModifiedTime) return true;
		
		// Timestamps differ - fall back to comparing file hashes.
		// Generating a hash is computationally expensive, so it is only done when the timestamps disagree.
		log.vlog("Local item time discrepancy detected: ", path);
		log.vlog("This local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime);
		if (!testFileHash(path, item)) {
			// The content differs as well as the timestamp
			log.vlog("The local item has a different hash when compared to ", itemSource, " item hash");
			return false;
		}
		// Same content - fix up whichever timestamp is wrong so we avoid re-hashing this file every sync
		log.vlog("Local item has the same hash value as the item online - correcting timestamp");
		if (localModifiedTime > itemModifiedTime) {
			// The online copy carries the stale timestamp
			log.vlog("The source of the incorrect timestamp was OneDrive online - correcting timestamp online");
			if (!dryRun) {
				// Attempt to update the online date time stamp
				uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag);
			}
		} else {
			// The local copy carries the stale timestamp
			log.vlog("The source of the incorrect timestamp was the local file - correcting timestamp locally");
			if (!dryRun) {
				log.vdebug("Calling setTimes() for this file: ", path);
				setTimes(path, item.mtime, item.mtime);
			}
		}
		return true;
	}
	case ItemType.dir:
	case ItemType.remote:
		// Directories (and remote/shared-folder items) only need the correct object type locally
		if (isDir(path)) return true;
		log.vlog("The local item is a file but should be a directory");
		break;
	case ItemType.unknown:
		// Unknown type - report as in-sync; these items are never actioned or synced
		return true;
	}
	return false;
}
2023-08-27 01:35:51 +02:00
// Get the /delta data using the provided details
//
// Params:
//   selectedDriveId   = the OneDrive driveId to query
//   selectedItemId    = the item id to query /delta against
//   providedDeltaLink = a previously stored deltaLink, or empty/null for a full query
// Returns: the /delta JSON response bundle (guaranteed to be a JSON object; the
//          application exits if the API returned invalid JSON)
JSONValue getDeltaChangesByItemId(string selectedDriveId, string selectedItemId, string providedDeltaLink) {
	// Function variables
	JSONValue deltaChangesBundle;
	
	// Create a new API Instance for this thread and initialise it
	OneDriveApi getDeltaQueryOneDriveApiInstance;
	getDeltaQueryOneDriveApiInstance = new OneDriveApi(appConfig);
	getDeltaQueryOneDriveApiInstance.initialise();
	
	// Get the /delta data for this account | driveId | deltaLink combination
	try {
		deltaChangesBundle = getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink);
	} catch (OneDriveException exception) {
		// caught an exception
		log.vdebug("deltaChangesBundle = getDeltaQueryOneDriveApiInstance.viewChangesByItemId() generated a OneDriveException");
		auto errorArray = splitLines(exception.msg);
		string thisFunctionName = getFunctionName!({});
		// HTTP request returned status code 408,429,503,504
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// Handle the 429
			if (exception.httpStatusCode == 429) {
				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
				handleOneDriveThrottleRequest(getDeltaQueryOneDriveApiInstance);
				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
			}
			// re-try the specific changes queries
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// 408 - Request Time Out
				// 503 - Service Unavailable
				// 504 - Gateway Timeout
				// Transient error - try again in 30 seconds
				log.log(errorArray[0], " when attempting to query OneDrive API for Delta Changes - retrying applicable request in 30 seconds");
				log.vdebug(thisFunctionName, " previously threw an error - retrying");
				// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
				log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
				Thread.sleep(dur!"seconds"(30));
			}
			// Re-try the original request by looping back through this function, so that a second
			// transient failure is handled by this same retry logic rather than being thrown uncaught.
			// Shut this API instance down first - the retry creates its own instance.
			log.vdebug("Retrying Function: ", thisFunctionName);
			getDeltaQueryOneDriveApiInstance.shutdown();
			return getDeltaChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink);
		} else {
			// Default operation if not 408,429,503,504 errors
			if (exception.httpStatusCode == 410) {
				log.log("\nWARNING: The OneDrive API responded with an error that indicates the locally stored deltaLink value is invalid");
				// Essentially the 'providedDeltaLink' that we have stored is no longer available ... re-try without the stored deltaLink
				log.log("WARNING: Retrying OneDrive API call without using the locally stored deltaLink value");
				// Configure an empty deltaLink
				log.vdebug("Delta link expired for 'getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink)', setting 'deltaLink = null'");
				// Loop back through this function with an empty deltaLink so that any further API
				// error on the retry is also covered by this function's error handling
				string emptyDeltaLink;
				getDeltaQueryOneDriveApiInstance.shutdown();
				return getDeltaChangesByItemId(selectedDriveId, selectedItemId, emptyDeltaLink);
			} else {
				// display what the error is
				displayOneDriveErrorMessage(exception.msg, thisFunctionName);
			}
		}
	}
	
	// Check the response JSON
	if (deltaChangesBundle.type() != JSONType.object) {
		// Handle the invalid JSON response - this exits the application
		invalidJSONResponseFromOneDriveAPI();
	}
	
	// Shutdown the API instance
	getDeltaQueryOneDriveApiInstance.shutdown();
	
	return deltaChangesBundle;
}
// Common code to handle a 408 or 429 response from the OneDrive API
void handleOneDriveThrottleRequest(OneDriveApi activeOneDriveApiInstance) {
	// If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait
	log.vdebug("Handling a OneDrive HTTP 429 Response Code (Too Many Requests)");
	// Read in the Retry-After HTTP header as set and delay as per this value before retrying the request
	auto retryAfterValue = activeOneDriveApiInstance.getRetryAfterValue();
	log.vdebug("Using Retry-After Value = ", retryAfterValue);
	// HTTP request returned status code 429 (Too Many Requests)
	// https://github.com/abraunegg/onedrive/issues/133
	// https://github.com/abraunegg/onedrive/issues/815
	// Honour the header value when one was provided; otherwise fall back to a 120 second default,
	// a value derived from log file analysis of real-world 429 response handling
	ulong delayBeforeRetry = retryAfterValue;
	if (delayBeforeRetry == 0) {
		delayBeforeRetry = 120;
		// Update that we are over-riding the provided value with a default
		log.vdebug("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: ", delayBeforeRetry);
	}
	// Sleep thread as per request
	log.log("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled");
	log.log("Sleeping for ", delayBeforeRetry, " seconds");
	Thread.sleep(dur!"seconds"(delayBeforeRetry));
	// Reset retry-after value to zero as we have used this value now and it may be changed in the future to a different value
	activeOneDriveApiInstance.resetRetryAfterValue();
}
// If the JSON response is not correct JSON object, exit
void invalidJSONResponseFromOneDriveAPI() {
	// A malformed or non-object JSON response means nothing further from this query can be trusted
	log.error("ERROR: Query of the OneDrive API returned an invalid JSON response");
	// Must exit to avoid acting on bad data
	exit(-1);
}
// Handle an unhandled API error
void defaultUnhandledHTTPErrorCode(OneDriveException exception) {
	// Surface the API error detail to the user
	displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
	// Must exit here - this HTTP response code was not expected and it is not safe to continue
	exit(-1);
}
// Display the pertinant details of the sync engine
void displaySyncEngineDetails() {
	// Display application version, accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes
	string applicationVersion = "v2.5.0-alpha-1" ~ " GitHub version: " ~ strip(import("version"));
	log.vlog("Application version: ", applicationVersion);
	log.vlog("Account Type: ", appConfig.accountType);
	log.vlog("Default Drive ID: ", appConfig.defaultDriveId);
	log.vlog("Default Root ID: ", appConfig.defaultRootId);
	
	// What do we display here for space remaining
	if (appConfig.remainingFreeSpace > 0) {
		// Display the actual value
		log.vlog("Remaining Free Space: ", (appConfig.remainingFreeSpace / 1024), " KB");
	} else if (appConfig.quotaRestricted) {
		// Quota details are not available to us
		log.vlog("Remaining Free Space: Not Available");
	} else {
		// Zero reported and quota is not restricted
		log.vlog("Remaining Free Space: 0 KB");
	}
}
// Query itemdb.computePath() and catch potential assert when DB consistency issue occurs
string computeItemPath(string thisDriveId, string thisItemId) {
	// static declare this for this function
	static import core.exception;
	string localFilesystemPath;
	log.vdebug("Attempting to calculate local filesystem path for ", thisDriveId, " and ", thisItemId);
	try {
		localFilesystemPath = itemDB.computePath(thisDriveId, thisItemId);
	} catch (core.exception.AssertError) {
		// A broken tree in the database means the path for this item id cannot be computed
		log.error("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database.");
		// Must exit here to preserve data
		exit(-1);
	}
	// Return the calculated path
	return localFilesystemPath;
}
// Try and compute the file hash for the given item
bool testFileHash(string path, Item item) {
	// Prefer QuickXORHash when the database record carries one; otherwise fall back to SHA256
	if (item.quickXorHash) {
		return (computeQuickXorHash(path) == item.quickXorHash);
	}
	if (item.sha256Hash) {
		return (computeSHA256Hash(path) == item.sha256Hash);
	}
	// No hash stored for this item - cannot match
	return false;
}
// Process items that need to be removed
void processDeleteItems() {
	// Walk the delete list in reverse so children are handled before their parents
	foreach_reverse (idPair; idsToDelete) {
		Item item;
		string path;
		// Skip entries that are no longer present in the database
		if (!itemDB.selectById(idPair[0], idPair[1], item)) continue;
		// Compute this item path
		path = computeItemPath(idPair[0], idPair[1]);
		
		// Log the action if the path exists .. it may of already been removed and this is a legacy array item
		if (exists(path)) {
			if (item.type == ItemType.file) {
				log.log("Trying to delete file ", path);
			} else {
				log.log("Trying to delete directory ", path);
			}
		}
		
		// Process the database entry removal. In a --dry-run scenario, this is being done against a DB copy
		itemDB.deleteById(item.driveId, item.id);
		if (item.remoteDriveId != null) {
			// delete the linked remote folder
			itemDB.deleteById(item.remoteDriveId, item.remoteId);
		}
		
		// In a --dry-run we track this object so that it is not attempted to be uploaded again
		if (dryRun) {
			// We need to add './' here so that it can be correctly searched to ensure it is not uploaded
			string pathToAdd = "./" ~ path;
			pathFakeDeletedArray ~= pathToAdd;
		}
		
		// Decide whether the local filesystem object should be removed
		bool needsRemoval = false;
		if (exists(path)) {
			// path exists on the local system - make sure it still refers to the same item
			Item pathItem;
			if (!itemDB.selectByPath(path, item.driveId, pathItem)) {
				// item has disappeared completely from the database
				needsRemoval = true;
			} else if (pathItem.id == item.id) {
				needsRemoval = true;
			} else {
				log.log("Skipped due to id difference!");
			}
		}
		
		if (needsRemoval) {
			// Log the action
			if (item.type == ItemType.file) {
				log.log("Deleting file ", path);
			} else {
				log.log("Deleting directory ", path);
			}
			// Perform the action
			if (!dryRun) {
				if (isFile(path)) {
					remove(path);
				} else {
					try {
						// Remove any children of this path if they still exist
						// Resolves 'Directory not empty' errors when deleting local files
						foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) {
							if (attrIsDir(child.linkAttributes)) {
								rmdir(child.name);
							} else {
								remove(child.name);
							}
						}
						// Remove the path now that it is empty of children
						rmdirRecurse(path);
					} catch (FileException e) {
						// display the error message
						displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
					}
				}
			}
		}
	}
	
	if (!dryRun) {
		// Cleanup array memory
		idsToDelete = [];
	}
}
2017-12-28 19:58:31 +01:00
2023-08-27 01:35:51 +02:00
// Update the timestamp of an object online
//
// Params:
//   driveId = the OneDrive driveId holding the item
//   id      = the item id to update
//   mtime   = the last modified timestamp to set online
//   eTag    = the item's eTag (ignored for personal accounts; pass null to skip eTag matching)
void uploadLastModifiedTime(string driveId, string id, SysTime mtime, string eTag) {
	// Build the JSON payload carrying the new last modified timestamp
	string itemModifiedTime;
	itemModifiedTime = mtime.toISOExtString();
	JSONValue data = [
		"fileSystemInfo": JSONValue([
			"lastModifiedDateTime": itemModifiedTime
		])
	];
	
	// What eTag value do we use? Personal accounts must not send one
	string eTagValue;
	if (appConfig.accountType == "personal") {
		eTagValue = null;
	} else {
		eTagValue = eTag;
	}
	
	JSONValue response;
	// Create a new OneDrive API instance
	OneDriveApi uploadLastModifiedTimeApiInstance;
	uploadLastModifiedTimeApiInstance = new OneDriveApi(appConfig);
	uploadLastModifiedTimeApiInstance.initialise();
	
	// Try and update the online last modified time
	try {
		// Use this instance
		response = uploadLastModifiedTimeApiInstance.updateById(driveId, id, data, eTagValue);
		// Shut the instance down
		uploadLastModifiedTimeApiInstance.shutdown();
	} catch (OneDriveException exception) {
		// NOTE: the instance must also be shut down on every error path below, otherwise each failed
		// attempt leaks an API instance (each retry recursion creates a fresh one)
		string thisFunctionName = getFunctionName!({});
		// HTTP request returned status code 408,429,503,504
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// Handle the 429
			if (exception.httpStatusCode == 429) {
				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
				handleOneDriveThrottleRequest(uploadLastModifiedTimeApiInstance);
				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
			}
			// re-try the specific changes queries
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// 408 - Request Time Out
				// 503 - Service Unavailable
				// 504 - Gateway Timeout
				// Transient error - try again in 30 seconds
				auto errorArray = splitLines(exception.msg);
				log.log(errorArray[0], " when attempting to update the timestamp on an item on OneDrive - retrying applicable request in 30 seconds");
				log.vdebug(thisFunctionName, " previously threw an error - retrying");
				// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
				log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
				Thread.sleep(dur!"seconds"(30));
			}
			// re-try original request - retried for 429, 503, 504 - but loop back calling this function
			log.vdebug("Retrying Function: ", thisFunctionName);
			uploadLastModifiedTimeApiInstance.shutdown();
			uploadLastModifiedTime(driveId, id, mtime, eTag);
			// The recursive call handled the update (including saving the item); do not fall through
			// to saveItem() with an unset response
			return;
		} else {
			// Default operation if not 408,429,503,504 errors
			uploadLastModifiedTimeApiInstance.shutdown();
			if (exception.httpStatusCode == 409) {
				// ETag does not match current item's value - retry without an eTag
				log.vdebug("Retrying Function: ", thisFunctionName);
				uploadLastModifiedTime(driveId, id, mtime, null);
			} else {
				// display what the error is
				displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
			}
			// Either the retry handled the update or the error was displayed; do not fall through
			// to saveItem() with an unset response
			return;
		}
	}
	
	// Is the response a valid JSON object - validation checking done in saveItem
	saveItem(response);
}
2020-06-11 22:46:59 +02:00
2023-08-27 01:35:51 +02:00
// Perform a database integrity check - checking all the items that are in-sync at the moment, validating what we know should be on disk, to what is actually on disk
void performDatabaseConsistencyAndIntegrityCheck() {
	// Log what we are doing
	if (!appConfig.surpressLoggingOutput) {
		log.log("Performing a database consistency and integrity check on locally stored data ... ");
	}
	
	// What driveIDsArray do we use? If we are doing a --single-directory we need to use just the drive id associated with that operation
	string[] consistencyCheckDriveIdsArray;
	if (singleDirectoryScope) {
		consistencyCheckDriveIdsArray ~= singleDirectoryScopeDriveId;
	} else {
		consistencyCheckDriveIdsArray = driveIDsArray;
	}
	
	// Use the array we populate, rather than selecting all distinct driveId's from the database
	foreach (driveId; consistencyCheckDriveIdsArray) {
		// Make the logging more accurate - we cant update driveId as this then breaks the below queries
		log.vlog("Processing DB entries for this Drive ID: ", driveId);
		
		// Database entries to consistency-check for this driveId
		Item[] driveItems;
		
		// Did we self-generate a /delta response rather than use the /delta API?
		// That happens when:
		// - we are running against a National Cloud Deployment which does not support /delta
		//   (https://docs.microsoft.com/en-us/graph/deployments#supported-features)
		// - we are performing a --single-directory sync, which excludes many items online
		// - we are performing a --download-only --cleanup-local-files action
		// In those cases, elements still flagged as out-of-sync must be processed first
		if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) {
			// Any entry in the DB that is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB.
			// Normally this is done at the end of processing all /delta queries, however /delta cannot be used here:
			// National Cloud Deployments dont support /delta, and for --single-directory using /delta would require querying
			// the entire OneDrive JSON data just to filter it - so we query only the objects we are after instead.
			Item[] outOfSyncItems = itemDB.selectOutOfSyncItems(driveId);
			foreach (outOfSyncItem; outOfSyncItems) {
				if (!dryRun) {
					// clean up idsToDelete
					idsToDelete.length = 0;
					assumeSafeAppend(idsToDelete);
					// flag to delete local file as it now is no longer in sync with OneDrive
					log.vdebug("Flagging to delete local item as it now is no longer in sync with OneDrive");
					log.vdebug("outOfSyncItem: ", outOfSyncItem);
					idsToDelete ~= [outOfSyncItem.driveId, outOfSyncItem.id];
					// delete items in idsToDelete
					if (idsToDelete.length > 0) processDeleteItems();
				}
			}
			// Fetch database items associated with this path
			if (singleDirectoryScope) {
				// Use the --single-directory items we previously configured
				// - query database for children objects using those items
				driveItems = getChildren(singleDirectoryScopeDriveId, singleDirectoryScopeItemId);
			} else {
				// Check everything associated with each driveId we know about
				log.vdebug("Selecting DB items via itemDB.selectByDriveId(driveId)");
				// Query database
				driveItems = itemDB.selectByDriveId(driveId);
			}
		} else {
			// Check everything associated with each driveId we know about
			log.vdebug("Selecting DB items via itemDB.selectByDriveId(driveId)");
			// Query database
			driveItems = itemDB.selectByDriveId(driveId);
		}
		
		log.vdebug("Database items to process for this driveId: ", driveItems.count);
		// Process each database item associated with the driveId
		foreach (dbItem; driveItems) {
			// Does it still exist on disk in the location the DB thinks it is
			checkDatabaseItemForConsistency(dbItem);
		}
	}
	
	// Are we doing a --download-only sync?
	if (!appConfig.getValueBool("download_only")) {
		// Do we have any known items, where the content has changed locally, that needs to be uploaded?
		if (!databaseItemsWhereContentHasChanged.empty) {
			// There are changed local files that were in the DB to upload
			log.log("Changed local items to upload to OneDrive: ", databaseItemsWhereContentHasChanged.length);
			processChangedLocalItemsToUpload();
			// Cleanup array memory
			databaseItemsWhereContentHasChanged = [];
		}
	}
}
2023-08-27 01:35:51 +02:00
// Check this Database Item for its consistency on disk
// Dispatches to the file or directory specific consistency check based on the DB item type
void checkDatabaseItemForConsistency(Item dbItem) {

	// Compute this dbItem path early as we use this path often
	string localFilePath = buildNormalizedPath(computeItemPath(dbItem.driveId, dbItem.id));
	
	// To improve logging output for this function, what is the 'logical path'?
	string logOutputPath;
	if (localFilePath == ".") {
		// get the configured sync_dir
		logOutputPath = buildNormalizedPath(appConfig.getValueString("sync_dir"));
	} else {
		// use what was computed
		logOutputPath = localFilePath;
	}
	
	// Log what we are doing
	log.vlog("Processing ", logOutputPath);
	
	// Determine which action to take
	final switch (dbItem.type) {
		case ItemType.file:
			// Item is a file - check it against the local filesystem state
			checkFileDatabaseItemForConsistency(dbItem, localFilePath);
			break;
		case ItemType.dir:
			// Item is a directory - check it against the local filesystem state
			checkDirectoryDatabaseItemForConsistency(dbItem, localFilePath);
			break;
		case ItemType.remote:
			// Remote (shared folder tie) items are not yet checked
			// checkRemoteDirectoryDatabaseItemForConsistency(dbItem, localFilePath);
			break;
		case ItemType.unknown:
			// Unknown type - we dont action these items
			break;
	}
}
2023-08-27 01:35:51 +02:00
// Perform the database consistency check on this file item
// Compares the on-disk state (existence, type, readability, timestamp, hash) against the database record
void checkFileDatabaseItemForConsistency(Item dbItem, string localFilePath) {

	// What is the source of this item data?
	string itemSource = "database";
	
	// Handle a locally-missing file first
	if (!exists(localFilePath)) {
		// File does not exist locally, but is in our database as a dbItem containing all the data was passed into this function
		// If we are in a --dry-run situation - this file may never have existed as we never downloaded it
		if (!dryRun) {
			// Not --dry-run situation
			log.vlog("The file has been deleted locally");
			// Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set
			uploadDeletedItem(dbItem, localFilePath);
			return;
		}
		// We are in a --dry-run situation, file appears to have been deleted locally - this file may never have existed locally as we never downloaded it due to --dry-run
		// Did we 'fake create it' as part of --dry-run ?
		bool wasFakedDownload = false;
		foreach (fakedRecord; idsFaked) {
			if (fakedRecord[1] == dbItem.id) {
				log.vdebug("Matched faked file which is 'supposed' to exist but not created due to --dry-run use");
				log.vlog("The file has not changed");
				wasFakedDownload = true;
			}
		}
		if (!wasFakedDownload) {
			// dbItem.id did not match a 'faked' download new file creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation
			log.vlog("The file has been deleted locally");
			// Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set
			uploadDeletedItem(dbItem, localFilePath);
		}
		return;
	}
	
	// Path exists locally - if it is no longer a file there is nothing further to compare
	if (!isFile(localFilePath)) {
		log.vlog("The item was a file but now is a directory");
		return;
	}
	
	// Can we actually read the local file?
	if (!readLocalFile(localFilePath)) {
		// The file is not readable - skipped
		log.log("Skipping processing this file as it cannot be read (file permissions or file corruption): ", localFilePath);
		return;
	}
	
	// File is readable - compare timestamps, reduced to second resolution before comparing
	SysTime localModifiedTime = timeLastModified(localFilePath).toUTC();
	SysTime itemModifiedTime = dbItem.mtime;
	itemModifiedTime.fracSecs = Duration.zero;
	localModifiedTime.fracSecs = Duration.zero;
	
	if (localModifiedTime == itemModifiedTime) {
		// The file has not changed
		log.vlog("The file has not changed");
		return;
	}
	
	// The modified dates are different
	log.vdebug("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime);
	
	// Test the file hash
	if (testFileHash(localFilePath, dbItem)) {
		// The file contents have not changed, but the modified timestamp has
		log.vlog("The last modified timestamp has changed however the file content has not changed");
		log.vlog("The local item has the same hash value as the item online - correcting timestamp online");
		if (!dryRun) {
			// Attempt to update the online date time stamp
			uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag);
		}
		return;
	}
	
	// Hash differs - is the local file 'newer' or 'older' (ie was an old file 'restored locally' by a different backup / replacement process?)
	bool downloadOnly = appConfig.getValueBool("download_only");
	if (localModifiedTime >= itemModifiedTime) {
		// Local file is newer
		if (!downloadOnly) {
			log.vlog("The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive");
			// Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check
			databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath];
		} else {
			log.vlog("The file content has changed locally and has a newer timestamp. The file will remain different to online file due to --download-only being used");
		}
	} else {
		// Local file is older - data recovery process? something else?
		if (!downloadOnly) {
			log.vlog("The file content has changed locally and file now has a older timestamp. Uploading this file to OneDrive may potentially cause data-loss online");
			// Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check
			databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath];
		} else {
			log.vlog("The file content has changed locally and file now has a older timestamp. The file will remain different to online file due to --download-only being used");
		}
	}
}
2023-08-27 01:35:51 +02:00
// Perform the database consistency check on this directory item
// Compares the on-disk state (existence, type) against the database record and recurses into children where appropriate
void checkDirectoryDatabaseItemForConsistency(Item dbItem, string localFilePath) {

	// Does this item|directory still exist on disk?
	if (exists(localFilePath)) {
		// Fix https://github.com/abraunegg/onedrive/issues/1915
		try {
			if (!isDir(localFilePath)) {
				// The local path changed type: delete the old online directory, then upload the replacement file
				log.vlog("The item was a directory but now it is a file");
				uploadDeletedItem(dbItem, localFilePath);
				uploadNewFile(localFilePath);
			} else {
				// Directory still exists locally
				log.vlog("The directory has not changed");
				// When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed
				// Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal
				if (!singleDirectoryScope) {
					// loop through the children
					foreach (Item child; itemDB.selectChildren(dbItem.driveId, dbItem.id)) {
						checkDatabaseItemForConsistency(child);
					}
				}
			}
		} catch (FileException e) {
			// display the error message
			displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
		}
		return;
	}
	
	// Directory does not exist locally, but it is in our database as a dbItem containing all the data was passed into this function
	// If we are in a --dry-run situation - this directory may never have existed as we never created it
	if (!dryRun) {
		// Not --dry-run situation
		if (!appConfig.getValueBool("monitor")) {
			// Not in --monitor mode
			log.vlog("The directory has been deleted locally");
		} else {
			// Appropriate message as we are in --monitor mode
			log.vlog("The directory appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'");
			log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped");
		}
		// A moved directory will be uploaded as 'new', delete the old directory and database reference
		// Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set
		uploadDeletedItem(dbItem, localFilePath);
		return;
	}
	
	// We are in a --dry-run situation, directory appears to have been deleted locally - this directory may never have existed locally as we never created it due to --dry-run
	// Did we 'fake create it' as part of --dry-run ?
	bool wasFakedCreation = false;
	foreach (fakedRecord; idsFaked) {
		if (fakedRecord[1] == dbItem.id) {
			log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use");
			log.vlog("The directory has not changed");
			wasFakedCreation = true;
		}
	}
	if (!wasFakedCreation) {
		// dbItem.id did not match a 'faked' download new directory creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation
		log.vlog("The directory has been deleted locally");
		// Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set
		uploadDeletedItem(dbItem, localFilePath);
	} else {
		// When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed
		// Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal
		if (!singleDirectoryScope) {
			// loop through the children
			foreach (Item child; itemDB.selectChildren(dbItem.driveId, dbItem.id)) {
				checkDatabaseItemForConsistency(child);
			}
		}
	}
}
2023-08-27 01:35:51 +02:00
/**
// Perform the database consistency check on this remote directory item
void checkRemoteDirectoryDatabaseItemForConsistency(Item dbItem, string localFilePath) {
	writeln("CODING TO DO: checkRemoteDirectoryDatabaseItemForConsistency");
}
**/
// Does this Database Item (directory or file) get excluded from any operation based on any client side filtering rules?
// Returns true when the item is excluded by check_nosync, skip_dir, skip_file, skip_size or sync_list
bool checkDBItemAndPathAgainstClientSideFiltering(Item dbItem, string localFilePath) {

	// Result of the client side filtering evaluation
	bool excluded = false;
	
	// Is this item a directory or 'remote' type? A 'remote' type is a folder DB tie so should be compared as directory for exclusion
	bool treatAsDirectory = (dbItem.type == ItemType.dir) || (dbItem.type == ItemType.remote);
	
	if (treatAsDirectory) {
		// Do we need to check for .nosync? Only if --check-for-nosync was passed in
		if (appConfig.getValueBool("check_nosync")) {
			if (exists(localFilePath ~ "/.nosync")) {
				log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", localFilePath);
				excluded = true;
			}
		}
		// Is this item excluded by user configuration of skip_dir?
		if (!excluded) {
			excluded = selectiveSync.isDirNameExcluded(dbItem.name);
		}
	}
	
	if (dbItem.type == ItemType.file) {
		// Is this item excluded by user configuration of skip_file?
		if (!excluded) {
			excluded = selectiveSync.isFileNameExcluded(dbItem.name);
		}
		// Check if file should be skipped based on user configured size limit 'skip_size'
		if (!excluded && (fileSizeLimit != 0)) {
			// Get the file size
			ulong thisFileSize = getSize(localFilePath);
			if (thisFileSize >= fileSizeLimit) {
				excluded = true;
				log.vlog("Skipping item - excluded by skip_size config: ", localFilePath, " (", thisFileSize / 2^^20, " MB)");
			}
		}
	}
	
	// Is sync_list configured?
	if (!excluded && syncListConfigured) {
		// Is this item excluded by user configuration of sync_list?
		excluded = selectiveSync.isPathExcludedViaSyncList(localFilePath);
	}
	
	// Return bool value
	return excluded;
}
2023-08-27 01:35:51 +02:00
// Does this local path (directory or file) conform with the Microsoft Naming Restrictions?
// Returns true when the path is INVALID (ie the item should be skipped), false when the name is acceptable
bool checkPathAgainstMicrosoftNamingRestrictions(string localFilePath) {

	// Check against Microsoft OneDrive restriction and limitations about Windows naming files
	if (!isValidName(localFilePath)) {
		log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", localFilePath);
		return true;
	}
	
	// Check for bad whitespace items
	// NOTE(review): despite its name, containsBadWhiteSpace() is used here as if it returns
	// false when the name DOES contain the offending whitespace - confirm against util.d
	if (!containsBadWhiteSpace(localFilePath)) {
		log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", localFilePath);
		return true;
	}
	
	// Check for HTML ASCII Codes as part of file name
	// NOTE(review): same inverted-helper convention as containsBadWhiteSpace() above - confirm against util.d
	if (!containsASCIIHTMLCodes(localFilePath)) {
		log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", localFilePath);
		return true;
	}
	
	// Path passed all naming restriction checks
	return false;
}
2019-08-24 07:54:48 +02:00
2023-08-27 01:35:51 +02:00
// Does this local path (directory or file) get excluded from any operation based on any client side filtering rules?
// Unlike checkDBItemAndPathAgainstClientSideFiltering - we need to check the path only
//
// Check the path against client side filtering rules
// - check_nosync
// - skip_dotfiles
// - skip_symlinks
// - skip_file
// - skip_dir
// - sync_list
// - skip_size
// Return a true|false response
bool checkPathAgainstClientSideFiltering(string localFilePath) {

	bool clientSideRuleExcludesPath = false;
	
	// - check_nosync
	if (!clientSideRuleExcludesPath) {
		// Do we need to check for .nosync? Only if --check-for-nosync was passed in
		if (appConfig.getValueBool("check_nosync")) {
			if (exists(localFilePath ~ "/.nosync")) {
				log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", localFilePath);
				clientSideRuleExcludesPath = true;
			}
		}
	}
	
	// - skip_dotfiles
	if (!clientSideRuleExcludesPath) {
		// Do we need to check skip dot files if configured
		if (appConfig.getValueBool("skip_dotfiles")) {
			if (isDotFile(localFilePath)) {
				log.vlog("Skipping item - .file or .folder: ", localFilePath);
				clientSideRuleExcludesPath = true;
			}
		}
	}
	
	// - skip_symlinks
	if (!clientSideRuleExcludesPath) {
		// Is the path a symbolic link
		if (isSymlink(localFilePath)) {
			// if config says so we skip all symlinked items
			if (appConfig.getValueBool("skip_symlinks")) {
				log.vlog("Skipping item - skip symbolic links configured: ", localFilePath);
				clientSideRuleExcludesPath = true;
			}
			// skip unexisting symbolic links
			else if (!exists(readLink(localFilePath))) {
				// reading the symbolic link failed - is the link a relative symbolic link
				//   drwxrwxr-x. 2 alex alex  46 May 30 09:16 .
				//   drwxrwxr-x. 3 alex alex  35 May 30 09:14 ..
				//   lrwxrwxrwx. 1 alex alex  61 May 30 09:16 absolute.txt -> /home/alex/OneDrivePersonal/link_tests/intercambio/prueba.txt
				//   lrwxrwxrwx. 1 alex alex  13 May 30 09:16 relative.txt -> ../prueba.txt
				//
				// absolute links will be able to be read, but 'relative' links will fail, because they cannot be read based on the current working directory 'sync_dir'
				string currentSyncDir = getcwd();
				string fullLinkPath = buildNormalizedPath(absolutePath(localFilePath));
				string fileName = baseName(fullLinkPath);
				string parentLinkPath = dirName(fullLinkPath);
				// test if this is a 'relative' symbolic link by resolving it from its own parent directory
				chdir(parentLinkPath);
				auto relativeLink = readLink(fileName);
				auto relativeLinkTest = exists(readLink(fileName));
				// reset back to our 'sync_dir'
				chdir(currentSyncDir);
				// results
				if (relativeLinkTest) {
					log.vdebug("Not skipping item - symbolic link is a 'relative link' to target ('", relativeLink, "') which can be supported: ", localFilePath);
				} else {
					log.logAndNotify("Skipping item - invalid symbolic link: ", localFilePath);
					clientSideRuleExcludesPath = true;
				}
			}
		}
	}
	
	// Is this item excluded by user configuration of skip_dir or skip_file?
	if (!clientSideRuleExcludesPath) {
		if (localFilePath != ".") {
			// skip_dir handling
			if (isDir(localFilePath)) {
				log.vdebug("Checking local path: ", localFilePath);
				// Only check path if config is != ""
				if (appConfig.getValueString("skip_dir") != "") {
					// The path that needs to be checked needs to include the '/'
					// This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched
					if (selectiveSync.isDirNameExcluded(localFilePath.strip('.'))) {
						log.vlog("Skipping item - excluded by skip_dir config: ", localFilePath);
						clientSideRuleExcludesPath = true;
					}
				}
			}
			// skip_file handling
			if (isFile(localFilePath)) {
				log.vdebug("Checking file: ", localFilePath);
				// The path that needs to be checked needs to include the '/'
				// This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched
				if (selectiveSync.isFileNameExcluded(localFilePath.strip('.'))) {
					log.vlog("Skipping item - excluded by skip_file config: ", localFilePath);
					clientSideRuleExcludesPath = true;
				}
			}
		}
	}
	
	// Is this item excluded by user configuration of sync_list?
	if (!clientSideRuleExcludesPath) {
		if (localFilePath != ".") {
			if (syncListConfigured) {
				// sync_list configured and in use
				if (selectiveSync.isPathExcludedViaSyncList(localFilePath)) {
					if ((isFile(localFilePath)) && (appConfig.getValueBool("sync_root_files")) && (rootName(localFilePath.strip('.').strip('/')) == "")) {
						log.vdebug("Not skipping path due to sync_root_files inclusion: ", localFilePath);
					} else {
						if (exists(appConfig.syncListFilePath)) {
							// skipped most likely due to inclusion in sync_list
							log.vlog("Skipping item - excluded by sync_list config: ", localFilePath);
							clientSideRuleExcludesPath = true;
						} else {
							// skipped for some other reason
							log.vlog("Skipping item - path excluded by user config: ", localFilePath);
							clientSideRuleExcludesPath = true;
						}
					}
				}
			}
		}
	}
	
	// Check if this is excluded by a user set maximum filesize to upload
	if (!clientSideRuleExcludesPath) {
		if (isFile(localFilePath)) {
			if (fileSizeLimit != 0) {
				// Get the file size
				ulong thisFileSize = getSize(localFilePath);
				if (thisFileSize >= fileSizeLimit) {
					log.vlog("Skipping item - excluded by skip_size config: ", localFilePath, " (", thisFileSize / 2^^20, " MB)");
					// BUG FIX: the exclusion flag was previously never set here, so files over
					// 'skip_size' were logged as skipped but still processed (compare the matching
					// branch in checkDBItemAndPathAgainstClientSideFiltering which does set it)
					clientSideRuleExcludesPath = true;
				}
			}
		}
	}
	
	// Return whether any client side filtering rule excludes this path
	return clientSideRuleExcludesPath;
}
// Does this JSON item (as received from OneDrive API) get excluded from any operation based on any client side filtering rules?
// This function is only used when we are fetching objects from the OneDrive API using a /children query to help speed up what object we query
//
// Check the path against client side filtering rules
// - check_nosync (MISSING)
// - skip_dotfiles (MISSING)
// - skip_symlinks (MISSING)
// - skip_file (MISSING)
// - skip_dir
// - sync_list
// - skip_size (MISSING)
// Return a true|false response
bool checkJSONAgainstClientSideFiltering(JSONValue onedriveJSONItem) {

	bool clientSideRuleExcludesPath = false;
	
	// Use the JSON elements rather can computing a DB struct via makeItem()
	string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str;
	string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str;
	string thisItemName = onedriveJSONItem["name"].str;
	
	// Calculate if the Parent Item is in the database so that it can be re-used by both checks below
	bool parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId);
	
	// Check if this is excluded by config option: skip_dir
	if (!clientSideRuleExcludesPath) {
		// Only check path if config is != ""
		if (!appConfig.getValueString("skip_dir").empty) {
			// Is the item a folder?
			if (isItemFolder(onedriveJSONItem)) {
				// work out the 'snippet' path where this folder would be created
				string simplePathToCheck = "";
				string complexPathToCheck = "";
				string matchDisplay = "";
				
				if (hasParentReference(onedriveJSONItem)) {
					// we need to workout the FULL path for this item
					// simple path
					if (("name" in onedriveJSONItem["parentReference"]) != null) {
						simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str;
					} else {
						simplePathToCheck = onedriveJSONItem["name"].str;
					}
					log.vdebug("skip_dir path to check (simple): ", simplePathToCheck);
					// complex path
					if (parentInDatabase) {
						// build up complexPathToCheck from database entries
						complexPathToCheck = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
					} else {
						log.vdebug("Parent details not in database - unable to compute complex path to check");
					}
					if (!complexPathToCheck.empty) {
						log.vdebug("skip_dir path to check (complex): ", complexPathToCheck);
					}
				} else {
					simplePathToCheck = onedriveJSONItem["name"].str;
				}
				
				// If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder
				// then isDirNameExcluded matching will not work
				// Clean up 'root:' if present
				if (startsWith(simplePathToCheck, "root:")) {
					log.vdebug("Updating simplePathToCheck to remove 'root:'");
					simplePathToCheck = strip(simplePathToCheck, "root:");
				}
				if (startsWith(complexPathToCheck, "root:")) {
					log.vdebug("Updating complexPathToCheck to remove 'root:'");
					complexPathToCheck = strip(complexPathToCheck, "root:");
				}
				
				// OK .. what checks are we doing?
				if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) {
					// just a simple check
					log.vdebug("Performing a simple check only");
					clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck);
					matchDisplay = simplePathToCheck;
				} else {
					// simple and complex
					log.vdebug("Performing a simple then complex path match if required");
					// simple first
					log.vdebug("Performing a simple check first");
					clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck);
					matchDisplay = simplePathToCheck;
					if (!clientSideRuleExcludesPath) {
						log.vdebug("Simple match was false, attempting complex match");
						// simple didnt match, perform a complex check
						clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(complexPathToCheck);
						matchDisplay = complexPathToCheck;
					}
				}
				
				// result
				log.vdebug("skip_dir exclude result (directory based): ", clientSideRuleExcludesPath);
				if (clientSideRuleExcludesPath) {
					// This path should be skipped
					log.vlog("Skipping item - excluded by skip_dir config: ", matchDisplay);
				}
			}
		}
	}
	
	// Check if this is included or excluded by use of sync_list
	if (!clientSideRuleExcludesPath) {
		// No need to try and process something against a sync_list if it has not been configured
		if (syncListConfigured) {
			// Compute the item path if empty - as to check sync_list we need an actual path to check
			
			// What is the path of the new item
			string newItemPath;
			// Is the parent in the database? If not, we cannot compute the the full path based on the database entries
			// In a --resync scenario - the database is empty
			if (parentInDatabase) {
				// Calculate this items path based on database entries
				newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
			} else {
				newItemPath = thisItemName;
			}
			
			// What path are we checking?
			log.vdebug("sync_list item to check: ", newItemPath);
			// Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list
			if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) {
				// selective sync advised to skip, however is this a file and are we configured to upload / download files in the root?
				if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "")) {
					// This is a file
					// We are configured to sync all files in the root
					// This is a file in the logical root
					clientSideRuleExcludesPath = false;
				} else {
					// path is unwanted
					clientSideRuleExcludesPath = true;
					log.vlog("Skipping item - excluded by sync_list config: ", newItemPath);
				}
			}
		}
	}
	
	// return if path is excluded
	return clientSideRuleExcludesPath;
}
// Process the list of local changes to upload to OneDrive
// Each element in 'databaseItemsWhereContentHasChanged' is [driveId, id, localFilePath] for an item modified locally
void processChangedLocalItemsToUpload() {

	// Batch size is the number of concurrent threads configured for this application
	ulong batchSize = appConfig.concurrentThreads;
	// For each batch of files to upload, upload the changed data to OneDrive
	// (previously computed 'batchCount' / 'batchesProcessed' locals were never used and have been removed)
	foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) {
		uploadChangedLocalFileToOneDrive(chunk);
	}
}
// Upload changed local files to OneDrive in parallel
// Each element of 'array' is a string triple: [0] = parent drive id, [1] = item id, [2] = local file path
void uploadChangedLocalFileToOneDrive(string[3][] array) {
	foreach (i, localItemDetails; taskPool.parallel(array)) {
		log.vdebug("Thread ", i, " Starting: ", Clock.currTime());
		// These are the details of the item we need to upload
		string changedItemParentId = localItemDetails[0];
		string changedItemId = localItemDetails[1];
		string localFilePath = localItemDetails[2];
		// How much space is remaining on OneDrive
		ulong remainingFreeSpace;
		// Did the upload fail?
		bool uploadFailed = false;
		// Did we skip due to exceeding maximum allowed size?
		bool skippedMaxSize = false;
		// Did we skip due to an exception error?
		bool skippedExceptionError = false;
		// Unfortunately, we can't store an array of Item's ... so we have to re-query the DB again - unavoidable extra processing here
		// This is because the Item[] has no other functions to allow us to parallel process those elements, so we have to use a string array as input to this function
		Item dbItem;
		itemDB.selectById(changedItemParentId, changedItemId, dbItem);
		// Query the available space online
		// This will update appConfig.quotaAvailable & appConfig.quotaRestricted values
		remainingFreeSpace = getRemainingFreeSpace(dbItem.driveId);
		// Get the file size - current local size, and the size recorded in the database
		ulong thisFileSizeLocal = getSize(localFilePath);
		ulong thisFileSizeFromDB = to!ulong(dbItem.size);
		// remainingFreeSpace online includes the current file online
		// we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value
		ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal;
		// Based on what we know, for this thread - can we safely upload this modified local file?
		log.vdebug("This Thread Current Free Space Online: ", remainingFreeSpace);
		log.vdebug("This Thread Calculated Free Space Online Post Upload: ", calculatedSpaceOnlinePostUpload);
		JSONValue uploadResponse;
		bool spaceAvailableOnline = false;
		// If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated
		// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true
		// If 'business' accounts, if driveId == defaultDriveId, then we will have data
		// If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true
		// What was the latest getRemainingFreeSpace() value?
		if (appConfig.quotaAvailable) {
			// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
			if (calculatedSpaceOnlinePostUpload > 0) {
				// Based on this thread action, we believe that there is space available online to upload - proceed
				spaceAvailableOnline = true;
			}
		}
		// Is quota being restricted?
		if (appConfig.quotaRestricted) {
			// Space available online is being restricted - so we have no way to really know if there is space available online
			spaceAvailableOnline = true;
		}
		// Do we have space available or is space available being restricted (so we make the blind assumption that there is space available)
		if (spaceAvailableOnline) {
			// Does this file exceed the maximum file size to upload to OneDrive?
			if (thisFileSizeLocal <= maxUploadFileSize) {
				// Attempt to upload the modified file
				// Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result
				uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
				// Evaluate the returned JSON uploadResponse
				// If there was an error uploading the file, uploadResponse should be empty and invalid
				if (uploadResponse.type() != JSONType.object) {
					uploadFailed = true;
					skippedExceptionError = true;
				}
			} else {
				// Skip file - too large
				uploadFailed = true;
				skippedMaxSize = true;
			}
		} else {
			// Can't upload this file - no space available
			uploadFailed = true;
		}
		// Did the upload fail?
		if (uploadFailed) {
			// Upload failed .. why?
			// No space available online
			if (!spaceAvailableOnline) {
				log.logAndNotify("Skipping uploading modified file ", localFilePath, " due to insufficient free space available on OneDrive");
			}
			// File exceeds max allowed size
			if (skippedMaxSize) {
				log.logAndNotify("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: ", localFilePath);
			}
			// Generic message
			if (skippedExceptionError) {
				// normal failure message if API or exception error generated
				log.logAndNotify("Uploading modified file ", localFilePath, " ... failed!");
			}
		} else {
			// Upload was successful
			log.logAndNotify("Uploading modified file ", localFilePath, " ... done.");
			// Save JSON item in database
			saveItem(uploadResponse);
			// Update the date / time of the file online to match the local item
			if ((appConfig.accountType == "personal") && (!dryRun)) {
				// Get the local file last modified time
				SysTime localModifiedTime = timeLastModified(localFilePath).toUTC();
				localModifiedTime.fracSecs = Duration.zero;
				// Get the latest eTag, and use that
				string etagFromUploadResponse = uploadResponse["eTag"].str;
				// Attempt to update the online date time stamp based on our local data
				uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), etagFromUploadResponse);
			}
		}
		log.vdebug("Thread ", i, " Finished: ", Clock.currTime());
	} // end of 'foreach (i, localItemDetails; taskPool.parallel(array))'
}
// Shared handler for transient OneDrive API errors (HTTP 408, 429, 503, 504) raised while uploading a modified file.
// Performs the applicable throttle handling (429) and/or the 30 second back-off sleep (408/503/504), then returns
// true when the original request should be retried. Any other HTTP status is displayed as an error and false is returned.
private bool handleTransientUploadApiError(OneDriveException exception, OneDriveApi apiInstance, string activityDescription, string thisFunctionName) {
	// HTTP request returned status code 408,429,503,504
	if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
		// Handle the 429
		if (exception.httpStatusCode == 429) {
			// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
			handleOneDriveThrottleRequest(apiInstance);
			log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
		}
		// re-try the specific changes queries
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// 408 - Request Time Out
			// 503 - Service Unavailable
			// 504 - Gateway Timeout
			// Transient error - try again in 30 seconds
			auto errorArray = splitLines(exception.msg);
			log.log(errorArray[0], " when attempting to ", activityDescription, " - retrying applicable request in 30 seconds");
			log.vdebug(thisFunctionName, " previously threw an error - retrying");
			// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
			log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
			Thread.sleep(dur!"seconds"(30));
		}
		// re-try original request - retried for 429, 503, 504
		log.vdebug("Retrying Function: ", thisFunctionName);
		return true;
	}
	// Default operation if not 408,429,503,504 errors - display what the error is
	displayOneDriveErrorMessage(exception.msg, thisFunctionName);
	return false;
}

// Perform the upload of a locally modified file to OneDrive
// Returns the JSON upload response; an empty / non-object JSONValue indicates the upload failed
JSONValue performModifiedFileUpload(Item dbItem, string localFilePath, ulong thisFileSizeLocal) {
	JSONValue uploadResponse;
	OneDriveApi uploadFileOneDriveApiInstance;
	uploadFileOneDriveApiInstance = new OneDriveApi(appConfig);
	uploadFileOneDriveApiInstance.initialise();
	// Is this a dry-run scenario?
	if (!dryRun) {
		// Do we use simpleUpload or create an upload session?
		bool useSimpleUpload = false;
		if ((appConfig.accountType == "personal") && (thisFileSizeLocal <= sessionThresholdFileSize)) {
			useSimpleUpload = true;
		}
		// We can only upload zero size files via simpleFileUpload regardless of account type
		// Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53
		// Additionally, only Personal accounts where file size is < 4MB should be uploaded by simpleUploadReplace - everything else should use a session to upload modified file
		if ((thisFileSizeLocal == 0) || (useSimpleUpload)) {
			// Must use Simple Upload to replace the file online
			try {
				uploadResponse = uploadFileOneDriveApiInstance.simpleUploadReplace(localFilePath, dbItem.driveId, dbItem.id);
			} catch (OneDriveException exception) {
				string thisFunctionName = getFunctionName!({});
				if (handleTransientUploadApiError(exception, uploadFileOneDriveApiInstance, "upload a modified file to OneDrive", thisFunctionName)) {
					// re-try original request - capture the retried response (previously discarded, which lost a successful retry result)
					uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
				}
			} catch (FileException e) {
				// Local filesystem error when reading the file for upload
				displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
			}
		} else {
			// Configure JSONValue variables we use for a session upload
			JSONValue currentOnlineData;
			JSONValue uploadSessionData;
			string currentETag;
			// As this is a unique thread, the sessionFilePath for where we save the data needs to be unique
			// The best way to do this is calculate the CRC32 of the file, and use this as the suffix of the session file we save
			string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ computeCRC32(localFilePath);
			// Get the absolute latest object details from online
			try {
				currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsByDriveId(dbItem.driveId, localFilePath);
			} catch (OneDriveException exception) {
				string thisFunctionName = getFunctionName!({});
				if (handleTransientUploadApiError(exception, uploadFileOneDriveApiInstance, "obtain latest file details from OneDrive", thisFunctionName)) {
					// re-try original request
					// NOTE(review): matches prior behaviour - the recursive call performs a full upload, yet execution continues below; confirm the double-processing is intended
					performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
				}
			}
			// Was a valid JSON response provided?
			if (currentOnlineData.type() == JSONType.object) {
				// Does the response contain an eTag?
				if (hasETag(currentOnlineData)) {
					// Use the value returned from online
					currentETag = currentOnlineData["eTag"].str;
				} else {
					// Use the database value
					currentETag = dbItem.eTag;
				}
			} else {
				// no valid JSON response - fall back to the database value
				currentETag = dbItem.eTag;
			}
			// Create the Upload Session
			try {
				uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, dbItem.driveId, dbItem.parentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath);
			} catch (OneDriveException exception) {
				string thisFunctionName = getFunctionName!({});
				if (handleTransientUploadApiError(exception, uploadFileOneDriveApiInstance, "create an upload session on OneDrive", thisFunctionName)) {
					// re-try original request
					// NOTE(review): matches prior behaviour - the recursive call performs a full upload, yet execution continues below; confirm the double-processing is intended
					performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
				}
			} catch (FileException e) {
				// Local filesystem error when creating the upload session
				displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
			}
			// Perform the Upload using the session
			try {
				uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, uploadSessionData, threadUploadSessionFilePath);
			} catch (OneDriveException exception) {
				string thisFunctionName = getFunctionName!({});
				if (handleTransientUploadApiError(exception, uploadFileOneDriveApiInstance, "upload a file via a session to OneDrive", thisFunctionName)) {
					// re-try original request - capture the retried response (previously discarded, which lost a successful retry result)
					uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
				}
			} catch (FileException e) {
				// Local filesystem error during the session upload
				displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
			}
		}
	} else {
		// We are in a --dry-run scenario
		uploadResponse = createFakeResponse(localFilePath);
	}
	// Log the results
	log.vdebug("Modified File Upload Response: ", uploadResponse);
	// Shutdown the API instance
	uploadFileOneDriveApiInstance.shutdown();
	// Return JSON
	return uploadResponse;
}
// Query the OneDrive API using the provided driveId to get the latest quota details
// Returns the remaining quota in bytes (0 when unknown, zero, or restricted).
// Side effects: may set appConfig.quotaAvailable = false or appConfig.quotaRestricted = true based on the response.
ulong getRemainingFreeSpace(string driveId) {
	// Get the quota details for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage
	// Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders
	JSONValue currentDriveQuota;
	ulong remainingQuota;
	// Create a new OneDrive API instance for this query
	OneDriveApi getCurrentDriveQuotaApiInstance;
	try {
		getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig);
		getCurrentDriveQuotaApiInstance.initialise();
		log.vdebug("Seeking available quota for this drive id: ", driveId);
		currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId);
	} catch (OneDriveException e) {
		log.vdebug("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException");
	}
	// Shut this API instance down if it was created
	// (previously shutdown sat inside the try block and was skipped when getDriveQuota() threw, leaking the instance)
	if (getCurrentDriveQuotaApiInstance !is null) {
		getCurrentDriveQuotaApiInstance.shutdown();
	}
	// validate that currentDriveQuota is a JSON value
	if (currentDriveQuota.type() == JSONType.object) {
		// Response from API contains valid data
		// If 'personal' accounts, if driveId == defaultDriveId, then we will have data
		// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data
		// If 'business' accounts, if driveId == defaultDriveId, then we will have data
		// If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value
		if ("quota" in currentDriveQuota) {
			if (driveId == appConfig.defaultDriveId) {
				// We potentially have updated quota remaining details available
				// However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero
				if ("remaining" in currentDriveQuota["quota"]) {
					// We have valid quota remaining details returned for the provided drive id
					remainingQuota = currentDriveQuota["quota"]["remaining"].integer;
					if (remainingQuota <= 0) {
						if (appConfig.accountType == "personal") {
							// zero space available
							log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space.");
							remainingQuota = 0;
							appConfig.quotaAvailable = false;
						} else {
							// zero space available is being reported, maybe being restricted?
							log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.");
							remainingQuota = 0;
							appConfig.quotaRestricted = true;
						}
					}
				}
			} else {
				// quota details returned, but for a drive id that is not ours
				if ("remaining" in currentDriveQuota["quota"]) {
					// remaining is in the quota JSON response
					if (currentDriveQuota["quota"]["remaining"].integer <= 0) {
						// value returned is 0 or less than 0
						log.vlog("OneDrive quota information is set at zero, as this is not our drive id, ignoring");
						remainingQuota = 0;
						appConfig.quotaRestricted = true;
					}
				}
			}
		} else {
			// No quota details returned
			if (driveId == appConfig.defaultDriveId) {
				// no quota details returned for current drive id
				log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online or purchase additional space.");
				remainingQuota = 0;
				appConfig.quotaRestricted = true;
			} else {
				// quota details not available
				log.vdebug("WARNING: OneDrive quota information is being restricted as this is not our drive id.");
				remainingQuota = 0;
				appConfig.quotaRestricted = true;
			}
		}
	}
	// what was the determined available quota?
	log.vdebug("Available quota: ", remainingQuota);
	return remainingQuota;
}
// Perform a filesystem walk to uncover new data to upload to OneDrive
// 'path' is either "." (scan the whole sync_dir) or a specific sub-path
void scanLocalFilesystemPathForNewData(string path) {
	// To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences?
	string logPath;
	if (path == ".") {
		// get the configured sync_dir
		logPath = buildNormalizedPath(appConfig.getValueString("sync_dir"));
	} else {
		// use what was passed in
		if (!appConfig.getValueBool("monitor")) {
			logPath = buildNormalizedPath(appConfig.getValueString("sync_dir")) ~ "/" ~ path;
		} else {
			logPath = path;
		}
	}
	// Log the action that we are performing, unless output is suppressed
	if (!appConfig.surpressLoggingOutput) {
		if (!cleanupLocalFiles) {
			log.log("Scanning local filesystem '", logPath, "' for new data to upload ...");
		} else {
			log.log("Scanning local filesystem '", logPath, "' for data to cleanup ...");
		}
	}
	auto startTime = Clock.currTime();
	log.vdebug("Starting Filesystem Walk: ", startTime);
	// Perform the filesystem walk of this path, building an array of new items to upload
	scanPathForNewData(path);
	// To finish off the processing items, this is needed to reflect this in the log
	log.vdebug("------------------------------------------------------------------");
	auto finishTime = Clock.currTime();
	log.vdebug("Finished Filesystem Walk: ", finishTime);
	auto elapsedTime = finishTime - startTime;
	log.vdebug("Elapsed Time Filesystem Walk: ", elapsedTime);
	// Upload new data that has been identified
	// Are there any items to upload post scanning the filesystem?
	if (!newLocalFilesToUploadToOneDrive.empty) {
		// There are elements to upload
		log.vlog("New items to upload to OneDrive: ", newLocalFilesToUploadToOneDrive.length);
		// How much data do we need to upload? This is important, as, we need to know how much data to determine if all the files can be uploaded
		foreach (uploadFilePath; newLocalFilesToUploadToOneDrive) {
			// validate that the path actually exists so that it can be counted
			if (exists(uploadFilePath)) {
				totalDataToUpload = totalDataToUpload + getSize(uploadFilePath);
			}
		}
		// How many bytes to upload
		if (totalDataToUpload < 1024) {
			// Display as Bytes to upload
			log.vlog("Total New Data to Upload: ", totalDataToUpload, " Bytes");
		} else {
			// Use >= 1024 so exactly 1 KB is reported as KB rather than falling through to the MB branch
			if ((totalDataToUpload >= 1024) && (totalDataToUpload < 1048576)) {
				// Display as KB to upload
				log.vlog("Total New Data to Upload: ", (totalDataToUpload / 1024), " KB");
			} else {
				// Display as MB to upload
				log.vlog("Total New Data to Upload: ", (totalDataToUpload / 1024 / 1024), " MB");
			}
		}
		// How much space is available (Account Drive ID)
		// The file, could be uploaded to a shared folder, which, we are not tracking how much free space is available there ...
		log.vdebug("Current Available Space Online (Account Drive ID): ", (appConfig.remainingFreeSpace / 1024 / 1024), " MB");
		// Perform the upload
		uploadNewLocalFileItems();
		// Cleanup array memory
		newLocalFilesToUploadToOneDrive = [];
	}
}
// Scan this path for new data
void scanPathForNewData ( string path ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
ulong maxPathLength ;
ulong pathWalkLength ;
2023-09-24 05:17:44 +02:00
// Add this logging break to assist with what was checked for each path
if ( path ! = "." ) {
log . vdebug ( "------------------------------------------------------------------" ) ;
}
2023-08-27 01:35:51 +02:00
// https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders
// If the path is greater than allowed characters, then one drive will return a '400 - Bad Request'
// Need to ensure that the URI is encoded before the check is made:
// - 400 Character Limit for OneDrive Business / Office 365
// - 430 Character Limit for OneDrive Personal
// Configure maxPathLength based on account type
if ( appConfig . accountType = = "personal" ) {
// Personal Account
maxPathLength = 430 ;
2020-08-08 00:56:00 +02:00
} else {
2023-08-27 01:35:51 +02:00
// Business Account / Office365 / SharePoint
maxPathLength = 400 ;
2020-08-08 00:56:00 +02:00
}
2023-08-27 01:35:51 +02:00
// A short lived item that has already disappeared will cause an error - is the path still valid?
if ( ! exists ( path ) ) {
log . log ( "Skipping item - path has disappeared: " , path ) ;
return ;
}
// Calculate the path length by walking the path and catch any UTF-8 sequence errors at the same time
// https://github.com/skilion/onedrive/issues/57
// https://github.com/abraunegg/onedrive/issues/487
// https://github.com/abraunegg/onedrive/issues/1192
try {
pathWalkLength = path . byGrapheme . walkLength ;
} catch ( std . utf . UTFException e ) {
// Path contains characters which generate a UTF exception
log . logAndNotify ( "Skipping item - invalid UTF sequence: " , path ) ;
log . vdebug ( " Error Reason:" , e . msg ) ;
return ;
}
// Is the path length is less than maxPathLength
if ( pathWalkLength < maxPathLength ) {
// Is this path unwanted
bool unwanted = false ;
// First check of this item - if we are in a --dry-run scenario, we may have 'fake deleted' this path
// thus, the entries are not in the dry-run DB copy, thus, at this point the client thinks that this is an item to upload
// Check this 'path' for an entry in pathFakeDeletedArray - if it is there, this is unwanted
if ( dryRun ) {
2023-09-02 23:38:36 +02:00
// Is this path in the array of fake deleted items? If yes, return early, nothing else to do, save processing
if ( canFind ( pathFakeDeletedArray , path ) ) return ;
2023-08-27 01:35:51 +02:00
}
// This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly
// Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252
if ( ! unwanted ) {
if ( ! isValid ( path ) ) {
// Path is not valid according to https://dlang.org/phobos/std_encoding.html
log . logAndNotify ( "Skipping item - invalid character encoding sequence: " , path ) ;
unwanted = true ;
}
}
// Check this path against the Client Side Filtering Rules
// - check_nosync
// - skip_dotfiles
// - skip_symlinks
// - skip_file
// - skip_dir
// - sync_list
// - skip_size
if ( ! unwanted ) {
unwanted = checkPathAgainstClientSideFiltering ( path ) ;
}
// Check this path against the Microsoft Naming Conventions & Restristions
// - Microsoft OneDrive restriction and limitations about Windows naming files
// - Bad whitespace items
// - HTML ASCII Codes as part of file name
if ( ! unwanted ) {
unwanted = checkPathAgainstMicrosoftNamingRestrictions ( path ) ;
}
if ( ! unwanted ) {
// At this point, this path, we want to scan for new data as it is not excluded
if ( isDir ( path ) ) {
// Check if this path in the database
bool directoryFoundInDB = pathFoundInDatabase ( path ) ;
// Was the path found in the database?
if ( ! directoryFoundInDB ) {
// Path not found in database when searching all drive id's
if ( ! cleanupLocalFiles ) {
// --download-only --cleanup-local-files not used
// Create this directory on OneDrive so that we can upload files to it
createDirectoryOnline ( path ) ;
2021-07-20 05:27:35 +02:00
} else {
2023-08-27 01:35:51 +02:00
// we need to clean up this directory
log . log ( "Removing local directory as --download-only & --cleanup-local-files configured" ) ;
// Remove any children of this path if they still exist
// Resolve 'Directory not empty' error when deleting local files
try {
foreach ( DirEntry child ; dirEntries ( path , SpanMode . depth , false ) ) {
// what sort of child is this?
if ( isDir ( child . name ) ) {
log . log ( "Removing local directory: " , child . name ) ;
2018-08-27 02:47:01 +02:00
} else {
2023-08-27 01:35:51 +02:00
log . log ( "Removing local file: " , child . name ) ;
}
2023-09-12 07:31:59 +02:00
2023-08-27 01:35:51 +02:00
// are we in a --dry-run scenario?
if ( ! dryRun ) {
// No --dry-run ... process local delete
2023-09-12 07:31:59 +02:00
if ( exists ( child ) ) {
try {
attrIsDir ( child . linkAttributes ) ? rmdir ( child . name ) : remove ( child . name ) ;
} catch ( FileException e ) {
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
}
2018-09-24 21:25:40 +02:00
}
2018-08-27 02:47:01 +02:00
}
2019-03-11 07:57:47 +01:00
}
2023-08-27 01:35:51 +02:00
// Remove the path now that it is empty of children
log . log ( "Removing local directory: " , path ) ;
// are we in a --dry-run scenario?
if ( ! dryRun ) {
// No --dry-run ... process local delete
try {
rmdirRecurse ( path ) ;
} catch ( FileException e ) {
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
2020-08-08 00:56:00 +02:00
}
2018-12-06 10:28:03 +01:00
}
2023-08-27 01:35:51 +02:00
} catch ( FileException e ) {
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
2018-11-23 20:26:30 +01:00
return ;
2018-08-14 10:30:13 +02:00
}
2018-07-02 23:24:57 +02:00
}
2023-08-27 01:35:51 +02:00
}
2023-09-22 23:55:59 +02:00
// flag for if we are going traverse this path
bool skipFolderTraverse = false ;
// Before we traverse this 'path', we need to make a last check to see if this was just excluded
if ( appConfig . accountType = = "business" ) {
// search businessSharedFoldersOnlineToSkip for this path
if ( canFind ( businessSharedFoldersOnlineToSkip , path ) ) {
// This path was skipped - why?
log . logAndNotify ( "Skipping item '" , path , "' due to this path matching an existing online Business Shared Folder name" ) ;
skipFolderTraverse = true ;
2018-09-24 21:25:40 +02:00
}
2018-05-14 22:59:17 +02:00
}
2018-08-14 10:30:13 +02:00
2023-09-22 23:55:59 +02:00
// Do we traverse this path?
if ( ! skipFolderTraverse ) {
// Try and access this directory and any path below
try {
auto entries = dirEntries ( path , SpanMode . shallow , false ) ;
foreach ( DirEntry entry ; entries ) {
string thisPath = entry . name ;
scanPathForNewData ( thisPath ) ;
}
} catch ( FileException e ) {
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
return ;
}
}
2023-08-27 01:35:51 +02:00
} else {
// https://github.com/abraunegg/onedrive/issues/984
// path is not a directory, is it a valid file?
// pipes - whilst technically valid files, are not valid for this client
// prw-rw-r--. 1 user user 0 Jul 7 05:55 my_pipe
if ( isFile ( path ) ) {
// Path is a valid file, not a pipe
bool fileFoundInDB = pathFoundInDatabase ( path ) ;
// Was the file found in the database?
if ( ! fileFoundInDB ) {
// File not found in database when searching all drive id's
// Do we upload the file or clean up the file?
if ( ! cleanupLocalFiles ) {
// --download-only --cleanup-local-files not used
// Add this path as a file we need to upload
log . vdebug ( "OneDrive Client flagging to upload this file to OneDrive: " , path ) ;
newLocalFilesToUploadToOneDrive ~ = path ;
2019-04-13 00:31:54 +02:00
} else {
2023-08-27 01:35:51 +02:00
// we need to clean up this file
log . log ( "Removing local file as --download-only & --cleanup-local-files configured" ) ;
// are we in a --dry-run scenario?
log . log ( "Removing local file: " , path ) ;
2020-04-01 21:56:50 +02:00
if ( ! dryRun ) {
2023-08-27 01:35:51 +02:00
// No --dry-run ... process local file delete
safeRemove ( path ) ;
2020-04-01 21:56:50 +02:00
}
2018-08-27 02:47:01 +02:00
}
2023-08-27 01:35:51 +02:00
}
2018-07-02 23:24:57 +02:00
} else {
2023-08-27 01:35:51 +02:00
// path is not a valid file
log . logAndNotify ( "Skipping item - item is not a valid file: " , path ) ;
2018-07-02 23:24:57 +02:00
}
2018-03-14 05:43:40 +01:00
}
}
2018-08-27 02:47:01 +02:00
} else {
2023-08-27 01:35:51 +02:00
// This path was skipped - why?
log . logAndNotify ( "Skipping item '" , path , "' due to the full path exceeding " , maxPathLength , " characters (Microsoft OneDrive limitation)" ) ;
}
}
// Query the database to determine if this path is within the existing database
//
// Params:
//   searchPath = the local path to look up
// Returns:
//   true if the path is present in the database for any of the drive id's
//   we are tracking (driveIDsArray), false otherwise
bool pathFoundInDatabase(string searchPath) {
	// Check each drive id we know about for this path
	Item databaseItem;
	foreach (driveId; driveIDsArray) {
		if (itemDB.selectByPath(searchPath, driveId, databaseItem)) {
			// Found on this drive id - no need to query the remaining drive id's
			return true;
		}
	}
	// Path was not found in the database for any tracked drive id
	return false;
}
2023-08-27 01:35:51 +02:00
// Create a new directory online on OneDrive
// - Test if we can get the parent path details from the database, otherwise we need to search online
//   for the path flow and create the folder that way
//
// Params:
//   thisNewPathToCreate = local relative path of the directory that needs to exist online
//
// Flow:
//   1. Resolve the parent item (client defaults for root, else DB first, online query as last resort,
//      recursing to create missing parents).
//   2. Query online for the full path; a 404 means it does not exist and triggers the create request.
//   3. If the path unexpectedly exists online, validate the name (POSIX case check, Business shared
//      folder shortcut check) and save or skip accordingly.
void createDirectoryOnline(string thisNewPathToCreate) {
	log.log("OneDrive Client requested to create this directory online: ", thisNewPathToCreate);
	
	// Parent item the new folder will be created under
	Item parentItem;
	// JSON response from the OneDrive API for the path being tested / created
	JSONValue onlinePathData;
	
	// Create a new API Instance for this thread and initialise it
	// NOTE(review): .shutdown() is only called on the 'directory was created' return path below;
	// the other return paths leave this instance un-shutdown - looks like a resource leak, confirm
	OneDriveApi createDirectoryOnlineOneDriveApiInstance;
	createDirectoryOnlineOneDriveApiInstance = new OneDriveApi(appConfig);
	createDirectoryOnlineOneDriveApiInstance.initialise();
	
	// What parent path to use?
	string parentPath = dirName(thisNewPathToCreate); // will be either . or something else
	
	// Configure the parentItem: if this is the account 'root' use the root details, otherwise search the database for the parent details
	if (parentPath == ".") {
		// Parent path is '.' which is the account root - use client defaults
		parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1
		parentItem.id = appConfig.defaultRootId;       // Should give something like 12345ABCDE1234A1!101
	} else {
		// Query the parent path online
		log.vlog("Attempting to query Local Database for this parent path: ", parentPath);
		// Attempt a 2 step process to work out where to create the directory
		// Step 1: Query the DB first
		// Step 2: Query online as last resort
		// Step 1: Check if this path is in the database
		Item databaseItem;
		bool pathFoundInDB = false;
		foreach (driveId; driveIDsArray) {
			if (itemDB.selectByPath(parentPath, driveId, databaseItem)) {
				pathFoundInDB = true;
				log.vdebug("databaseItem: ", databaseItem);
				log.vdebug("pathFoundInDB: ", pathFoundInDB);
			}
		}
		// Step 2: Query for the path online
		if (!pathFoundInDB) {
			try {
				log.vlog("Attempting to query OneDrive Online for this parent path: ", parentPath);
				onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath);
				// Save the parent details to the database and use them for this create
				saveItem(onlinePathData);
				parentItem = makeItem(onlinePathData);
			} catch (OneDriveException exception) {
				if (exception.httpStatusCode == 404) {
					// Parent does not exist ... need to create parent first (recursive)
					log.vdebug("Parent path does not exist online: ", parentPath);
					createDirectoryOnline(parentPath);
				} else {
					string thisFunctionName = getFunctionName!({});
					// HTTP request returned status code 408,429,503,504
					if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
						// Handle the 429
						if (exception.httpStatusCode == 429) {
							// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
							handleOneDriveThrottleRequest(createDirectoryOnlineOneDriveApiInstance);
							log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
						}
						// re-try the specific changes queries
						if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
							// 408 - Request Time Out
							// 503 - Service Unavailable
							// 504 - Gateway Timeout
							// Transient error - try again in 30 seconds
							auto errorArray = splitLines(exception.msg);
							log.log(errorArray[0], " when attempting to create a remote directory on OneDrive - retrying applicable request in 30 seconds");
							log.vdebug(thisFunctionName, " previously threw an error - retrying");
							// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
							log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
							Thread.sleep(dur!"seconds"(30));
						}
						// re-try original request - retried for 429, 503, 504 - but loop back calling this function
						log.vdebug("Retrying Function: ", thisFunctionName);
						createDirectoryOnline(thisNewPathToCreate);
					} else {
						// Default operation if not 408,429,503,504 errors
						// display what the error is
						displayOneDriveErrorMessage(exception.msg, thisFunctionName);
					}
				}
			}
		} else {
			// parent path found in database ... use those details ...
			parentItem = databaseItem;
		}
	}
	
	// Make sure the full path does not exist online, this should generate a 404 response, to which then the folder will be created online
	try {
		// Try and query the OneDrive API for the path we need to create
		log.vlog("Attempting to query OneDrive for this path: ", thisNewPathToCreate);
		
		if (parentItem.driveId == appConfig.defaultDriveId) {
			// Path being created resides on our own drive - use getPathDetailsByDriveId
			onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate);
		} else {
			// If the parentItem.driveId is not our driveId - the path we are looking for will not be at the logical location that getPathDetailsByDriveId
			// can use - as it will always return a 404 .. even if the path actually exists (which is the whole point of this test)
			// Search the parentItem.driveId for any folder name match that we are going to create, then compare response JSON items with parentItem.id
			// If no match, the folder we want to create does not exist at the location we are seeking to create it at, thus generate a 404
			onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(parentItem.driveId, baseName(thisNewPathToCreate));
			
			// Process the response from searching the drive
			ulong responseCount = count(onlinePathData["value"].array);
			if (responseCount > 0) {
				// Search 'name' matches were found .. need to match these against parentItem.id
				bool foundDirectoryOnline = false;
				JSONValue foundDirectoryJSONItem;
				// Items were returned .. but is one of these what we are looking for?
				foreach (childJSON; onlinePathData["value"].array) {
					// Is this item not a file?
					if (!isFileItem(childJSON)) {
						Item thisChildItem = makeItem(childJSON);
						// Direct Match Check: same parent id and exact name
						if ((parentItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) {
							// High confidence that this child folder is a direct match we are trying to create and it already exists online
							log.vdebug("Path we are searching for exists online: ", baseName(thisNewPathToCreate));
							log.vdebug("childJSON: ", childJSON);
							foundDirectoryOnline = true;
							foundDirectoryJSONItem = childJSON;
							break;
						}
						// Full Lower Case POSIX Match Check
						string childAsLower = toLower(childJSON["name"].str);
						string thisFolderNameAsLower = toLower(baseName(thisNewPathToCreate));
						if (childAsLower == thisFolderNameAsLower) {
							// This is a POSIX 'case in-sensitive match' .....
							// Local item name has a 'case-insensitive match' to an existing item on OneDrive
							foundDirectoryOnline = true;
							foundDirectoryJSONItem = childJSON;
							break;
						}
					}
				}
				if (foundDirectoryOnline) {
					// Directory we are seeking was found online ...
					onlinePathData = foundDirectoryJSONItem;
				} else {
					// No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder
					throw new OneDriveException(404, "Name not found via search");
				}
			} else {
				// No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder
				throw new OneDriveException(404, "Name not found via search");
			}
		}
	} catch (OneDriveException exception) {
		if (exception.httpStatusCode == 404) {
			// This is a good error - it means that the directory to create 100% does not exist online
			// The directory was not found on the drive id we queried
			log.vlog("The requested directory to create was not found on OneDrive - creating remote directory: ", thisNewPathToCreate);
			// Build up the create directory request
			JSONValue createDirectoryOnlineAPIResponse;
			JSONValue newDriveItem = [
				"name": JSONValue(baseName(thisNewPathToCreate)),
				"folder": parseJSON("{}")
			];
			// Submit the creation request
			// Fix for https://github.com/skilion/onedrive/issues/356
			if (!dryRun) {
				try {
					// Attempt to create a new folder on the configured parent driveId & parent id
					createDirectoryOnlineAPIResponse = createDirectoryOnlineOneDriveApiInstance.createById(parentItem.driveId, parentItem.id, newDriveItem);
					// Is the response a valid JSON object - validation checking done in saveItem
					saveItem(createDirectoryOnlineAPIResponse);
					// Log that the directory was created
					log.log("Successfully created the remote directory ", thisNewPathToCreate, " on OneDrive");
				} catch (OneDriveException exception) {
					if (exception.httpStatusCode == 409) {
						// OneDrive API returned a 404 (above) to say the directory did not exist
						// but when we attempted to create it, OneDrive responded that it now already exists
						log.vlog("OneDrive reported that ", thisNewPathToCreate, " already exists .. OneDrive API race condition");
						return;
					} else {
						// some other error from OneDrive was returned - display what it is
						log.error("OneDrive generated an error when creating this path: ", thisNewPathToCreate);
						displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
						return;
					}
				}
			} else {
				// Simulate a successful 'directory create' & save it to the dryRun database copy
				// The simulated response has to pass 'makeItem' as part of saveItem
				auto fakeResponse = createFakeResponse(thisNewPathToCreate);
				saveItem(fakeResponse);
			}
			
			// Shutdown API instance
			createDirectoryOnlineOneDriveApiInstance.shutdown();
			return;
		} else {
			string thisFunctionName = getFunctionName!({});
			// HTTP request returned status code 408,429,503,504
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// Handle the 429
				if (exception.httpStatusCode == 429) {
					// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
					handleOneDriveThrottleRequest(createDirectoryOnlineOneDriveApiInstance);
					log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
				}
				// re-try the specific changes queries
				if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
					// 408 - Request Time Out
					// 503 - Service Unavailable
					// 504 - Gateway Timeout
					// Transient error - try again in 30 seconds
					auto errorArray = splitLines(exception.msg);
					log.log(errorArray[0], " when attempting to create a remote directory on OneDrive - retrying applicable request in 30 seconds");
					log.vdebug(thisFunctionName, " previously threw an error - retrying");
					// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
					log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
					Thread.sleep(dur!"seconds"(30));
				}
				// re-try original request - retried for 429, 503, 504 - but loop back calling this function
				log.vdebug("Retrying Function: ", thisFunctionName);
				createDirectoryOnline(thisNewPathToCreate);
			} else {
				// Re-Try
				// NOTE(review): this branch retries unconditionally for ANY other HTTP error (e.g. 403),
				// with no back-off and no retry limit - if the error persists this recursion never
				// terminates; presumably this should display the error and bail instead - confirm
				createDirectoryOnline(thisNewPathToCreate);
			}
		}
	}
	
	// If we get to this point - the path query generated a 'valid' response (no 404 was raised) ....
	// This means that the folder potentially exists online .. which is odd .. as it should not have existed
	if (onlinePathData.type() == JSONType.object) {
		// A valid object was responded with
		if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) {
			// OneDrive 'name' matches local path name
			if (appConfig.accountType == "business") {
				// We are a business account, this existing online folder, could be a Shared Online Folder and is the 'Add shortcut to My files' item
				log.vdebug("onlinePathData: ", onlinePathData);
				if (isItemRemote(onlinePathData)) {
					// The folder is a remote item ... we do not want to create this ...
					log.vdebug("Remote Existing Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'");
					log.vdebug("We need to skip this path: ", thisNewPathToCreate);
					// Add this path to businessSharedFoldersOnlineToSkip
					businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate];
					// no save to database, no online create
					return;
				}
			}
			log.vlog("The requested directory to create was found on OneDrive - skipping creating the directory: ", thisNewPathToCreate);
			// Is the response a valid JSON object - validation checking done in saveItem
			saveItem(onlinePathData);
			return;
		} else {
			// Normally this would throw an error, however we cant use throw new posixException()
			string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", baseName(thisNewPathToCreate), onlinePathData["name"].str);
			displayPosixErrorMessage(msg);
			log.error("ERROR: Requested directory to create has a 'case-insensitive match' to an existing directory on OneDrive online.");
			log.error("ERROR: To resolve, rename this local directory: ", buildNormalizedPath(absolutePath(thisNewPathToCreate)));
			log.log("Skipping creating this directory online due to 'case-insensitive match': ", thisNewPathToCreate);
			// Add this path to posixViolationPaths so later upload attempts can explain the failure
			posixViolationPaths ~= [thisNewPathToCreate];
			return;
		}
	} else {
		// response is not valid JSON, an error was returned from OneDrive
		log.error("ERROR: There was an error performing this operation on OneDrive");
		log.error("ERROR: Increase logging verbosity to assist determining why.");
		log.log("Skipping: ", buildNormalizedPath(absolutePath(thisNewPathToCreate)));
		return;
	}
}
2023-08-27 01:35:51 +02:00
// Test that the online name actually matches the requested local name
//
// https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file
// Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same,
// even though some file systems (such as a POSIX-compliant file system) may consider them as different.
// Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior.
//
// Throws: posixException when the two names are not byte-for-byte identical,
//         i.e. the local item only has a 'case-insensitive match' to the online item
void performPosixTest(string localNameToCheck, string onlineName) {
	if (localNameToCheck == onlineName) {
		// Exact match - nothing to flag
		return;
	}
	// Names differ - the local item name has a 'case-insensitive match' to an existing item on OneDrive
	throw new posixException(localNameToCheck, onlineName);
}
// Upload new file items as identified
//
// Processes newLocalFilesToUploadToOneDrive in batches, where each batch is at most
// appConfig.concurrentThreads files, so each batch can be uploaded in parallel without
// spawning more upload workers than configured.
//
// Fix: the original computed 'batchCount' and 'batchesProcessed' but never read them;
// the dead locals have been removed.
void uploadNewLocalFileItems() {
	// Batch size mirrors the configured number of concurrent threads
	ulong batchSize = appConfig.concurrentThreads;
	
	// Hand each batch off for parallel upload
	foreach (chunk; newLocalFilesToUploadToOneDrive.chunks(batchSize)) {
		uploadNewLocalFileItemsInParallel(chunk);
	}
}
2018-03-14 05:43:40 +01:00
2023-08-27 01:35:51 +02:00
// Upload the file batches in parallel
//
// Params:
//   array = one batch of local file paths to upload; each element is processed
//           by a task-pool worker via uploadNewFile()
void uploadNewLocalFileItemsInParallel(string[] array) {
	// Distribute this batch across the task pool - one upload per worker
	foreach (threadIndex, localFilePath; taskPool.parallel(array)) {
		log.vdebug("Upload Thread ", threadIndex, " Starting: ", Clock.currTime());
		uploadNewFile(localFilePath);
		log.vdebug("Upload Thread ", threadIndex, " Finished: ", Clock.currTime());
	}
}
2018-12-04 00:59:23 +01:00
2023-08-27 01:35:51 +02:00
// Upload a new file to OneDrive
void uploadNewFile ( string fileToUpload ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
// Debug for the moment
log . vdebug ( "fileToUpload: " , fileToUpload ) ;
2022-12-15 20:08:46 +01:00
2023-08-27 01:35:51 +02:00
// These are the details of the item we need to upload
// How much space is remaining on OneDrive
ulong remainingFreeSpaceOnline ;
// Did the upload fail?
bool uploadFailed = false ;
// Did we skip due to exceeding maximum allowed size?
bool skippedMaxSize = false ;
// Did we skip to an exception error?
bool skippedExceptionError = false ;
// Is the parent path in the item database?
bool parentPathFoundInDB = false ;
// Get this file size
ulong thisFileSize ;
// Is there space available online
bool spaceAvailableOnline = false ;
2019-01-19 03:01:41 +01:00
2023-08-27 01:35:51 +02:00
// Check the database for the parent path of fileToUpload
Item parentItem ;
// What parent path to use?
string parentPath = dirName ( fileToUpload ) ; // will be either . or something else
if ( parentPath = = "." ) {
// Assume this is a new file in the users configured sync_dir root
// Use client defaults
parentItem . id = appConfig . defaultRootId ; // Should give something like 12345ABCDE1234A1!101
parentItem . driveId = appConfig . defaultDriveId ; // Should give something like 12345abcde1234a1
parentPathFoundInDB = true ;
} else {
// Query the database using each of the driveId's we are using
foreach ( driveId ; driveIDsArray ) {
// Query the database for this parent path using each driveId
Item dbResponse ;
if ( itemDB . selectByPath ( parentPath , driveId , dbResponse ) ) {
// parent path was found in the database
parentItem = dbResponse ;
parentPathFoundInDB = true ;
2021-03-12 19:11:44 +01:00
}
2018-12-04 00:59:23 +01:00
}
2023-08-27 01:35:51 +02:00
}
2023-09-12 21:53:53 +02:00
// If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty
if ( ( parentPathFoundInDB ) & & ( parentItem . driveId . empty ) ) {
2023-08-27 01:35:51 +02:00
// switch to using defaultDriveId
2023-09-12 21:53:53 +02:00
log . log ( "parentItem.driveId is empty - using defaultDriveId for upload API calls" ) ;
2023-08-27 01:35:51 +02:00
parentItem . driveId = appConfig . defaultDriveId ;
}
// Can we read the file - as a permissions issue or actual file corruption will cause a failure
// Resolves: https://github.com/abraunegg/onedrive/issues/113
if ( readLocalFile ( fileToUpload ) ) {
2023-09-12 21:53:53 +02:00
if ( parentPathFoundInDB ) {
// The local file can be read - so we can read it to attemtp to upload it in this thread
// Get the file size
thisFileSize = getSize ( fileToUpload ) ;
// Does this file exceed the maximum filesize for OneDrive
// Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329
if ( thisFileSize < = maxUploadFileSize ) {
// Is there enough free space on OneDrive when we started this thread, to upload the file to OneDrive?
remainingFreeSpaceOnline = getRemainingFreeSpace ( parentItem . driveId ) ;
log . vdebug ( "Current Available Space Online (Upload Target Drive ID): " , ( remainingFreeSpaceOnline / 1024 / 1024 ) , " MB" ) ;
// When we compare the space online to the total we are trying to upload - is there space online?
ulong calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize ;
// If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated
// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true
// If 'business' accounts, if driveId == defaultDriveId, then we will have data
// If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true
if ( remainingFreeSpaceOnline > totalDataToUpload ) {
// Space available
spaceAvailableOnline = true ;
} else {
// we need to look more granular
// What was the latest getRemainingFreeSpace() value?
if ( appConfig . quotaAvailable ) {
// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
if ( calculatedSpaceOnlinePostUpload > 0 ) {
// Based on this thread action, we beleive that there is space available online to upload - proceed
spaceAvailableOnline = true ;
}
2019-08-09 10:14:10 +02:00
}
}
2023-09-12 21:53:53 +02:00
// Is quota being restricted?
if ( appConfig . quotaRestricted ) {
// If the upload target drive is not our drive id, then it is a shared folder .. we need to print a space warning message
if ( parentItem . driveId ! = appConfig . defaultDriveId ) {
// Different message depending on account type
if ( appConfig . accountType = = "personal" ) {
log . vlog ( "WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed." ) ;
} else {
log . vlog ( "WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator." ) ;
}
2023-08-27 01:35:51 +02:00
} else {
2023-09-12 21:53:53 +02:00
if ( appConfig . accountType = = "personal" ) {
log . vlog ( "WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed." ) ;
} else {
log . vlog ( "WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator." ) ;
}
2021-05-10 00:04:14 +02:00
}
2023-09-12 21:53:53 +02:00
// Space available online is being restricted - so we have no way to really know if there is space available online
spaceAvailableOnline = true ;
2021-03-12 19:11:44 +01:00
}
2023-08-27 01:35:51 +02:00
2023-09-12 21:53:53 +02:00
// Do we have space available or is space available being restricted (so we make the blind assumption that there is space available)
if ( spaceAvailableOnline ) {
// We need to check that this new local file does not exist on OneDrive
// Create a new API Instance for this thread and initialise it
OneDriveApi checkFileOneDriveApiInstance ;
checkFileOneDriveApiInstance = new OneDriveApi ( appConfig ) ;
checkFileOneDriveApiInstance . initialise ( ) ;
JSONValue fileDetailsFromOneDrive ;
2023-08-27 01:35:51 +02:00
2023-09-12 21:53:53 +02:00
// https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file
// Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same,
// even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different.
// Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this.
// In order to upload this file - this query HAS to respond as a 404 - Not Found
// Does this 'file' already exist on OneDrive?
try {
fileDetailsFromOneDrive = checkFileOneDriveApiInstance . getPathDetailsByDriveId ( parentItem . driveId , fileToUpload ) ;
// Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API
performPosixTest ( baseName ( fileToUpload ) , fileDetailsFromOneDrive [ "name" ] . str ) ;
} catch ( OneDriveException exception ) {
// If we get a 404 .. the file is not online .. this is what we want .. file does not exist online
if ( exception . httpStatusCode = = 404 ) {
// The file has been checked, client side filtering checked, does not exist online - we need to upload it
log . vdebug ( "fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it" ) ;
uploadFailed = performNewFileUpload ( parentItem , fileToUpload , thisFileSize ) ;
2023-09-02 04:27:10 +02:00
} else {
2023-09-12 21:53:53 +02:00
string thisFunctionName = getFunctionName ! ( { } ) ;
// HTTP request returned status code 408,429,503,504
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 429 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// Handle the 429
if ( exception . httpStatusCode = = 429 ) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest ( checkFileOneDriveApiInstance ) ;
log . vdebug ( "Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " , thisFunctionName ) ;
}
// re-try the specific changes queries
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines ( exception . msg ) ;
log . log ( errorArray [ 0 ] , " when attempting to validate file details on OneDrive - retrying applicable request in 30 seconds" ) ;
log . vdebug ( thisFunctionName , " previously threw an error - retrying" ) ;
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
log . vdebug ( "Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request" ) ;
Thread . sleep ( dur ! "seconds" ( 30 ) ) ;
}
// re-try original request - retried for 429, 503, 504 - but loop back calling this function
log . vdebug ( "Retrying Function: " , thisFunctionName ) ;
uploadNewFile ( fileToUpload ) ;
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
displayOneDriveErrorMessage ( exception . msg , thisFunctionName ) ;
}
2023-09-02 04:27:10 +02:00
}
2023-09-12 21:53:53 +02:00
} catch ( posixException e ) {
displayPosixErrorMessage ( e . msg ) ;
uploadFailed = true ;
2023-08-27 01:35:51 +02:00
}
2023-09-12 21:53:53 +02:00
// Operations in this thread are done / complete - either upload was done or it failed
checkFileOneDriveApiInstance . shutdown ( ) ;
} else {
// skip file upload - insufficent space to upload
log . log ( "Skipping uploading this new file as it exceeds the available free space on OneDrive: " , fileToUpload ) ;
uploadFailed = true ;
2023-08-27 01:35:51 +02:00
}
} else {
2023-09-12 21:53:53 +02:00
// Skip file upload - too large
log . log ( "Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: " , fileToUpload ) ;
2023-08-27 01:35:51 +02:00
uploadFailed = true ;
2022-09-26 09:56:42 +02:00
}
2020-09-14 09:49:50 +02:00
} else {
2023-09-12 21:53:53 +02:00
// why was the parent path not in the database?
if ( canFind ( posixViolationPaths , parentPath ) ) {
log . error ( "ERROR: POSIX 'case-insensitive match' for the parent path which violates the Microsoft OneDrive API namespace convention." ) ;
} else {
log . error ( "ERROR: Parent path is not in the database or online." ) ;
}
log . error ( "ERROR: Unable to upload this file: " , fileToUpload ) ;
2023-08-27 01:35:51 +02:00
uploadFailed = true ;
2020-09-14 09:49:50 +02:00
}
} else {
2023-08-27 01:35:51 +02:00
// Unable to read local file
log . log ( "Skipping uploading this file as it cannot be read (file permissions or file corruption): " , fileToUpload ) ;
uploadFailed = true ;
}
// Upload success or failure?
if ( uploadFailed ) {
// Need to add this to fileUploadFailures to capture at the end
fileUploadFailures ~ = fileToUpload ;
}
2020-09-14 09:49:50 +02:00
}
2023-08-27 01:35:51 +02:00
// Perform the actual upload to OneDrive
bool performNewFileUpload ( Item parentItem , string fileToUpload , ulong thisFileSize ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
// Assume that by default the upload fails
bool uploadFailed = true ;
// OneDrive API Upload Response
JSONValue uploadResponse ;
// Create the OneDriveAPI Upload Instance
OneDriveApi uploadFileOneDriveApiInstance ;
uploadFileOneDriveApiInstance = new OneDriveApi ( appConfig ) ;
uploadFileOneDriveApiInstance . initialise ( ) ;
// Calculate upload speed
auto uploadStartTime = Clock . currTime ( ) ;
// Is this a dry-run scenario?
if ( ! dryRun ) {
// Not a dry-run situation
// Do we use simpleUpload or create an upload session?
bool useSimpleUpload = false ;
2023-09-10 08:37:10 +02:00
if ( thisFileSize < = sessionThresholdFileSize ) {
2023-08-27 01:35:51 +02:00
useSimpleUpload = true ;
}
// We can only upload zero size files via simpleFileUpload regardless of account type
// Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53
2023-09-10 08:37:10 +02:00
// Additionally, only where file size is < 4MB should be uploaded by simpleUpload - everything else should use a session to upload
2023-08-27 01:35:51 +02:00
if ( ( thisFileSize = = 0 ) | | ( useSimpleUpload ) ) {
try {
// Attempt to upload the zero byte file using simpleUpload for all account types
uploadResponse = uploadFileOneDriveApiInstance . simpleUpload ( fileToUpload , parentItem . driveId , parentItem . id , baseName ( fileToUpload ) ) ;
uploadFailed = false ;
log . log ( "Uploading new file " , fileToUpload , " ... done." ) ;
2023-09-08 22:34:52 +02:00
// Shutdown the API
uploadFileOneDriveApiInstance . shutdown ( ) ;
2023-09-02 04:27:10 +02:00
} catch ( OneDriveException exception ) {
2023-08-27 01:35:51 +02:00
// An error was responded with - what was it
2023-09-02 04:27:10 +02:00
string thisFunctionName = getFunctionName ! ( { } ) ;
// HTTP request returned status code 408,429,503,504
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 429 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// Handle the 429
if ( exception . httpStatusCode = = 429 ) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest ( uploadFileOneDriveApiInstance ) ;
log . vdebug ( "Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " , thisFunctionName ) ;
}
// re-try the specific changes queries
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines ( exception . msg ) ;
log . log ( errorArray [ 0 ] , " when attempting to upload a new file to OneDrive - retrying applicable request in 30 seconds" ) ;
log . vdebug ( thisFunctionName , " previously threw an error - retrying" ) ;
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
log . vdebug ( "Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request" ) ;
Thread . sleep ( dur ! "seconds" ( 30 ) ) ;
}
// re-try original request - retried for 429, 503, 504 - but loop back calling this function
log . vdebug ( "Retrying Function: " , thisFunctionName ) ;
performNewFileUpload ( parentItem , fileToUpload , thisFileSize ) ;
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
displayOneDriveErrorMessage ( exception . msg , thisFunctionName ) ;
}
2023-08-27 01:35:51 +02:00
} catch ( FileException e ) {
// display the error message
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
}
} else {
// Session Upload for this criteria:
// - Personal Account and file size > 4MB
// - All Business | Office365 | SharePoint files > 0 bytes
JSONValue uploadSessionData ;
// As this is a unique thread, the sessionFilePath for where we save the data needs to be unique
// The best way to do this is calculate the CRC32 of the file, and use this as the suffix of the session file we save
string threadUploadSessionFilePath = appConfig . uploadSessionFilePath ~ "." ~ computeCRC32 ( fileToUpload ) ;
// Attempt to upload the > 4MB file using an upload session for all account types
try {
// Create the Upload Session
uploadSessionData = createSessionFileUpload ( uploadFileOneDriveApiInstance , fileToUpload , parentItem . driveId , parentItem . id , baseName ( fileToUpload ) , null , threadUploadSessionFilePath ) ;
2023-09-02 04:27:10 +02:00
} catch ( OneDriveException exception ) {
2023-08-27 01:35:51 +02:00
// An error was responded with - what was it
2023-09-02 04:27:10 +02:00
string thisFunctionName = getFunctionName ! ( { } ) ;
// HTTP request returned status code 408,429,503,504
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 429 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// Handle the 429
if ( exception . httpStatusCode = = 429 ) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest ( uploadFileOneDriveApiInstance ) ;
log . vdebug ( "Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " , thisFunctionName ) ;
}
// re-try the specific changes queries
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines ( exception . msg ) ;
log . log ( errorArray [ 0 ] , " when attempting to create an upload session on OneDrive - retrying applicable request in 30 seconds" ) ;
log . vdebug ( thisFunctionName , " previously threw an error - retrying" ) ;
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
log . vdebug ( "Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request" ) ;
Thread . sleep ( dur ! "seconds" ( 30 ) ) ;
}
// re-try original request - retried for 429, 503, 504 - but loop back calling this function
log . vdebug ( "Retrying Function: " , thisFunctionName ) ;
performNewFileUpload ( parentItem , fileToUpload , thisFileSize ) ;
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
displayOneDriveErrorMessage ( exception . msg , thisFunctionName ) ;
}
2023-08-27 01:35:51 +02:00
} catch ( FileException e ) {
// display the error message
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
}
// Do we have a valid session URL that we can use ?
if ( uploadSessionData . type ( ) = = JSONType . object ) {
// This is a valid JSON object
bool sessionDataValid = true ;
// Validate that we have the following items which we need
if ( ! hasUploadURL ( uploadSessionData ) ) {
sessionDataValid = false ;
log . vdebug ( "Session data missing 'uploadUrl'" ) ;
2021-01-20 09:46:56 +01:00
}
2022-03-09 21:00:07 +01:00
2023-08-27 01:35:51 +02:00
if ( ! hasNextExpectedRanges ( uploadSessionData ) ) {
sessionDataValid = false ;
log . vdebug ( "Session data missing 'nextExpectedRanges'" ) ;
}
2022-03-09 21:00:07 +01:00
2023-08-27 01:35:51 +02:00
if ( ! hasLocalPath ( uploadSessionData ) ) {
sessionDataValid = false ;
log . vdebug ( "Session data missing 'localPath'" ) ;
}
if ( sessionDataValid ) {
// We have a valid Upload Session Data we can use
try {
// Try and perform the upload session
uploadResponse = performSessionFileUpload ( uploadFileOneDriveApiInstance , thisFileSize , uploadSessionData , threadUploadSessionFilePath ) ;
2023-09-10 01:07:53 +02:00
if ( uploadResponse . type ( ) = = JSONType . object ) {
uploadFailed = false ;
log . log ( "Uploading new file " , fileToUpload , " ... done." ) ;
} else {
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
uploadFailed = true ;
}
2023-09-02 04:27:10 +02:00
} catch ( OneDriveException exception ) {
2023-08-27 01:35:51 +02:00
2023-09-02 04:27:10 +02:00
string thisFunctionName = getFunctionName ! ( { } ) ;
// HTTP request returned status code 408,429,503,504
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 429 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// Handle the 429
if ( exception . httpStatusCode = = 429 ) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest ( uploadFileOneDriveApiInstance ) ;
log . vdebug ( "Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " , thisFunctionName ) ;
}
// re-try the specific changes queries
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines ( exception . msg ) ;
log . log ( errorArray [ 0 ] , " when attempting to upload a new file via a session to OneDrive - retrying applicable request in 30 seconds" ) ;
log . vdebug ( thisFunctionName , " previously threw an error - retrying" ) ;
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
log . vdebug ( "Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request" ) ;
Thread . sleep ( dur ! "seconds" ( 30 ) ) ;
}
// re-try original request - retried for 429, 503, 504 - but loop back calling this function
log . vdebug ( "Retrying Function: " , thisFunctionName ) ;
performNewFileUpload ( parentItem , fileToUpload , thisFileSize ) ;
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
displayOneDriveErrorMessage ( exception . msg , thisFunctionName ) ;
}
2023-08-27 01:35:51 +02:00
2022-03-09 21:00:07 +01:00
}
2023-08-27 01:35:51 +02:00
} else {
// No Upload URL or nextExpectedRanges or localPath .. not a valid JSON we can use
log . vlog ( "Session data is missing required elements to perform a session upload." ) ;
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
2021-01-20 09:46:56 +01:00
}
2023-08-27 01:35:51 +02:00
} else {
// Create session Upload URL failed
log . log ( "Uploading new file " , fileToUpload , " ... failed." ) ;
}
}
} else {
// We are in a --dry-run scenario
uploadResponse = createFakeResponse ( fileToUpload ) ;
uploadFailed = false ;
log . logAndNotify ( "Uploading new file " , fileToUpload , " ... done." ) ;
}
// Upload has finished
auto uploadFinishTime = Clock . currTime ( ) ;
// If no upload failure, calculate metrics
if ( ! uploadFailed ) {
// Upload did not fail ...
auto uploadDuration = uploadFinishTime - uploadStartTime ;
log . vdebug ( "File Size: " , thisFileSize , " Bytes" ) ;
log . vdebug ( "Upload Duration: " , ( uploadDuration . total ! "msecs" / 1e3 ) , " Seconds" ) ;
auto uploadSpeed = ( thisFileSize / ( uploadDuration . total ! "msecs" / 1e3 ) / 1024 / 1024 ) ;
log . vdebug ( "Upload Speed: " , uploadSpeed , " Mbps (approx)" ) ;
// OK as the upload did not fail, we need to save the response from OneDrive, but it has to be a valid JSON response
if ( uploadResponse . type ( ) = = JSONType . object ) {
2023-09-11 06:51:46 +02:00
// Update the item's metadata on OneDrive
string newFileId = uploadResponse [ "id" ] . str ;
string newFileETag = uploadResponse [ "eTag" ] . str ;
// check if the path still exists locally before we try to set the file times online - as short lived files, whilst we uploaded it - it may not exist locally aready
if ( exists ( fileToUpload ) ) {
SysTime mtime = timeLastModified ( fileToUpload ) . toUTC ( ) ;
// update the file modified time on OneDrive and save item details to database
if ( ! dryRun ) {
// We are not in a --dry-run situation, ensure that the uploaded file has the correct timestamp
uploadLastModifiedTime ( parentItem . driveId , newFileId , mtime , newFileETag ) ;
2023-08-27 01:35:51 +02:00
}
} else {
2023-09-11 06:51:46 +02:00
// will be removed in different event!
log . log ( "File disappeared locally after upload: " , fileToUpload ) ;
2019-08-02 10:43:31 +02:00
}
2023-09-11 06:51:46 +02:00
2023-08-27 01:35:51 +02:00
} else {
// Log that an invalid JSON object was returned
log . vdebug ( "uploadFileOneDriveApiInstance.simpleUpload or session.upload call returned an invalid JSON Object from the OneDrive API" ) ;
2021-01-20 09:46:56 +01:00
}
2023-08-27 01:35:51 +02:00
}
// Return upload status
return uploadFailed ;
}
// Create the OneDrive Upload Session
JSONValue createSessionFileUpload ( OneDriveApi activeOneDriveApiInstance , string fileToUpload , string parentDriveId , string parentId , string filename , string eTag , string threadUploadSessionFilePath ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
// Upload file via a OneDrive API session
JSONValue uploadSession ;
// Calculate modification time
SysTime localFileLastModifiedTime = timeLastModified ( fileToUpload ) . toUTC ( ) ;
localFileLastModifiedTime . fracSecs = Duration . zero ;
// Construct the fileSystemInfo JSON component needed to create the Upload Session
JSONValue fileSystemInfo = [
"item" : JSONValue ( [
"@microsoft.graph.conflictBehavior" : JSONValue ( "replace" ) ,
"fileSystemInfo" : JSONValue ( [
"lastModifiedDateTime" : localFileLastModifiedTime . toISOExtString ( )
] )
] )
] ;
// Try to create the upload session for this file
uploadSession = activeOneDriveApiInstance . createUploadSession ( parentDriveId , parentId , filename , eTag , fileSystemInfo ) ;
if ( uploadSession . type ( ) = = JSONType . object ) {
// a valid session object was created
if ( "uploadUrl" in uploadSession ) {
// Add the file path we are uploading to this JSON Session Data
uploadSession [ "localPath" ] = fileToUpload ;
// Save this session
saveSessionFile ( threadUploadSessionFilePath , uploadSession ) ;
2019-08-02 10:43:31 +02:00
}
} else {
2023-08-27 01:35:51 +02:00
// no valid session was created
log . vlog ( "Creation of OneDrive API Upload Session failed." ) ;
// return upload() will return a JSONValue response, create an empty JSONValue response to return
uploadSession = null ;
2019-08-02 10:43:31 +02:00
}
2023-08-27 01:35:51 +02:00
// Return the JSON
return uploadSession ;
2019-08-02 10:43:31 +02:00
}
2023-08-27 01:35:51 +02:00
// Save the session upload data
void saveSessionFile ( string threadUploadSessionFilePath , JSONValue uploadSessionData ) {
2023-09-24 03:07:26 +02:00
2018-12-28 02:26:03 +01:00
try {
2023-08-27 01:35:51 +02:00
std . file . write ( threadUploadSessionFilePath , uploadSessionData . toString ( ) ) ;
} catch ( FileException e ) {
// display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
}
}
// Perform the upload of file via the Upload Session that was created
JSONValue performSessionFileUpload ( OneDriveApi activeOneDriveApiInstance , ulong thisFileSize , JSONValue uploadSessionData , string threadUploadSessionFilePath ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
// Response for upload
JSONValue uploadResponse ;
// Session JSON needs to contain valid elements
// Get the offset details
ulong fragmentSize = 10 * 2 ^ ^ 20 ; // 10 MiB
ulong fragmentCount = 0 ;
ulong fragSize = 0 ;
ulong offset = uploadSessionData [ "nextExpectedRanges" ] [ 0 ] . str . splitter ( '-' ) . front . to ! ulong ;
size_t iteration = ( roundTo ! int ( double ( thisFileSize ) / double ( fragmentSize ) ) ) + 1 ;
Progress p = new Progress ( iteration ) ;
p . title = "Uploading" ;
// Initialise the download bar at 0%
p . next ( ) ;
// Start the session upload using the active API instance for this thread
while ( true ) {
fragmentCount + + ;
log . vdebugNewLine ( "Fragment: " , fragmentCount , " of " , iteration ) ;
p . next ( ) ;
log . vdebugNewLine ( "fragmentSize: " , fragmentSize , "offset: " , offset , " thisFileSize: " , thisFileSize ) ;
fragSize = fragmentSize < thisFileSize - offset ? fragmentSize : thisFileSize - offset ;
log . vdebugNewLine ( "Using fragSize: " , fragSize ) ;
2020-03-19 20:12:47 +01:00
2023-08-27 01:35:51 +02:00
// fragSize must not be a negative value
if ( fragSize < 0 ) {
// Session upload will fail
// not a JSON object - fragment upload failed
log . vlog ( "File upload session failed - invalid calculation of fragment size" ) ;
if ( exists ( threadUploadSessionFilePath ) ) {
remove ( threadUploadSessionFilePath ) ;
}
// set uploadResponse to null as error
uploadResponse = null ;
return uploadResponse ;
2020-03-19 20:12:47 +01:00
}
2023-08-27 01:35:51 +02:00
// If the resume upload fails, we need to check for a return code here
try {
uploadResponse = activeOneDriveApiInstance . uploadFragment (
uploadSessionData [ "uploadUrl" ] . str ,
uploadSessionData [ "localPath" ] . str ,
offset ,
fragSize ,
thisFileSize
) ;
2023-09-02 04:27:10 +02:00
} catch ( OneDriveException exception ) {
2023-08-27 01:35:51 +02:00
// if a 100 uploadResponse is generated, continue
2023-09-02 04:27:10 +02:00
if ( exception . httpStatusCode = = 100 ) {
2023-08-27 01:35:51 +02:00
continue ;
}
2023-09-02 04:27:10 +02:00
// There was an error uploadResponse from OneDrive when uploading the file fragment
// Handle transient errors:
// 408 - Request Time Out
// 429 - Too Many Requests
// 503 - Service Unavailable
// 504 - Gateway Timeout
// HTTP request returned status code 408,429,503,504
if ( ( exception . httpStatusCode = = 408 ) | | ( exception . httpStatusCode = = 429 ) | | ( exception . httpStatusCode = = 503 ) | | ( exception . httpStatusCode = = 504 ) ) {
// Handle 'HTTP request returned status code 429 (Too Many Requests)' first
2023-08-27 01:35:51 +02:00
log . vdebug ( "Fragment upload failed - received throttle request uploadResponse from OneDrive" ) ;
2023-09-02 04:27:10 +02:00
if ( exception . httpStatusCode = = 429 ) {
auto retryAfterValue = activeOneDriveApiInstance . getRetryAfterValue ( ) ;
log . vdebug ( "Using Retry-After Value = " , retryAfterValue ) ;
// Sleep thread as per request
log . log ( "\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled" ) ;
log . log ( "Sleeping for " , retryAfterValue , " seconds" ) ;
Thread . sleep ( dur ! "seconds" ( retryAfterValue ) ) ;
log . log ( "Retrying fragment upload" ) ;
} else {
// Handle 408, 503 and 504
auto errorArray = splitLines ( exception . msg ) ;
auto retryAfterValue = 30 ;
log . log ( "\nThread sleeping due to '" , errorArray [ 0 ] , "' - retrying applicable request in 30 seconds" ) ;
log . log ( "Sleeping for " , retryAfterValue , " seconds" ) ;
Thread . sleep ( dur ! "seconds" ( retryAfterValue ) ) ;
log . log ( "Retrying fragment upload" ) ;
}
2023-08-27 01:35:51 +02:00
} else {
// insert a new line as well, so that the below error is inserted on the console in the right location
log . vlog ( "\nFragment upload failed - received an exception response from OneDrive API" ) ;
// display what the error is
2023-09-02 04:27:10 +02:00
displayOneDriveErrorMessage ( exception . msg , getFunctionName ! ( { } ) ) ;
2023-08-27 01:35:51 +02:00
// retry fragment upload in case error is transient
log . vlog ( "Retrying fragment upload" ) ;
}
try {
uploadResponse = activeOneDriveApiInstance . uploadFragment (
uploadSessionData [ "uploadUrl" ] . str ,
uploadSessionData [ "localPath" ] . str ,
offset ,
fragSize ,
thisFileSize
) ;
} catch ( OneDriveException e ) {
// OneDrive threw another error on retry
log . vlog ( "Retry to upload fragment failed" ) ;
// display what the error is
displayOneDriveErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
// set uploadResponse to null as the fragment upload was in error twice
uploadResponse = null ;
2023-09-10 01:07:53 +02:00
} catch ( std . exception . ErrnoException e ) {
// There was a file system error - display the error message
displayFileSystemErrorMessage ( e . msg , getFunctionName ! ( { } ) ) ;
return uploadResponse ;
2023-08-27 01:35:51 +02:00
}
}
2023-09-10 01:07:53 +02:00
2023-08-27 01:35:51 +02:00
// was the fragment uploaded without issue?
if ( uploadResponse . type ( ) = = JSONType . object ) {
offset + = fragmentSize ;
if ( offset > = thisFileSize ) break ;
// update the uploadSessionData details
uploadSessionData [ "expirationDateTime" ] = uploadResponse [ "expirationDateTime" ] ;
uploadSessionData [ "nextExpectedRanges" ] = uploadResponse [ "nextExpectedRanges" ] ;
saveSessionFile ( threadUploadSessionFilePath , uploadSessionData ) ;
2018-12-28 02:26:03 +01:00
} else {
2023-08-27 01:35:51 +02:00
// not a JSON object - fragment upload failed
log . vlog ( "File upload session failed - invalid response from OneDrive API" ) ;
if ( exists ( threadUploadSessionFilePath ) ) {
remove ( threadUploadSessionFilePath ) ;
}
// set uploadResponse to null as error
uploadResponse = null ;
return uploadResponse ;
2018-12-28 02:26:03 +01:00
}
}
2023-08-27 01:35:51 +02:00
// upload complete
p . next ( ) ;
writeln ( ) ;
if ( exists ( threadUploadSessionFilePath ) ) {
remove ( threadUploadSessionFilePath ) ;
2018-12-28 02:26:03 +01:00
}
2023-08-27 01:35:51 +02:00
// Return the session upload response
return uploadResponse ;
}
// Delete an item on OneDrive
void uploadDeletedItem ( Item itemToDelete , string path ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
// Are we in a situation where we HAVE to keep the data online - do not delete the remote object
if ( noRemoteDelete ) {
2023-09-10 02:45:47 +02:00
if ( ( itemToDelete . type = = ItemType . dir ) ) {
2023-08-27 01:35:51 +02:00
// Do not process remote directory delete
log . vlog ( "Skipping remote directory delete as --upload-only & --no-remote-delete configured" ) ;
2020-03-19 20:12:47 +01:00
} else {
2023-08-27 01:35:51 +02:00
// Do not process remote file delete
log . vlog ( "Skipping remote file delete as --upload-only & --no-remote-delete configured" ) ;
2020-03-19 20:12:47 +01:00
}
2023-08-27 01:35:51 +02:00
} else {
// Process the delete - delete the object online
log . log ( "Deleting item from OneDrive: " , path ) ;
bool flagAsBigDelete = false ;
2023-09-10 02:45:47 +02:00
Item [ ] children ;
2023-09-21 21:34:42 +02:00
ulong itemsToDelete ;
2023-09-10 02:45:47 +02:00
if ( ( itemToDelete . type = = ItemType . dir ) ) {
// Query the database - how many objects will this remove?
children = getChildren ( itemToDelete . driveId , itemToDelete . id ) ;
// Count the returned items + the original item (1)
2023-09-21 21:34:42 +02:00
itemsToDelete = count ( children ) + 1 ;
2023-09-10 02:45:47 +02:00
log . vdebug ( "Number of items online to delete: " , itemsToDelete ) ;
2023-09-21 21:34:42 +02:00
} else {
itemsToDelete = 1 ;
}
// A local delete of a file|folder when using --monitor will issue a inotify event, which will trigger the local & remote data immediately be deleted
// The user may also be --sync process, so we are checking if something was deleted between application use
if ( itemsToDelete > = appConfig . getValueLong ( "classify_as_big_delete" ) ) {
// A big delete has been detected
flagAsBigDelete = true ;
if ( ! appConfig . getValueBool ( "force" ) ) {
log . error ( "ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve data on OneDrive" ) ;
log . error ( "ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value" ) ;
// Must exit here to preserve data on online
exit ( - 1 ) ;
2023-08-27 01:35:51 +02:00
}
}
// Are we in a --dry-run scenario?
if ( ! dryRun ) {
// We are not in a dry run scenario
log . vdebug ( "itemToDelete: " , itemToDelete ) ;
2023-08-29 01:56:55 +02:00
2023-09-11 06:31:10 +02:00
// Create new OneDrive API Instance
OneDriveApi uploadDeletedItemOneDriveApiInstance ;
uploadDeletedItemOneDriveApiInstance = new OneDriveApi ( appConfig ) ;
uploadDeletedItemOneDriveApiInstance . initialise ( ) ;
// what item are we trying to delete?
2023-09-11 06:32:22 +02:00
log . vdebug ( "Attempting to delete this single item id: " , itemToDelete . id , " from drive: " , itemToDelete . driveId ) ;
2023-09-11 06:31:10 +02:00
try {
// perform the delete via the default OneDrive API instance
uploadDeletedItemOneDriveApiInstance . deleteById ( itemToDelete . driveId , itemToDelete . id ) ;
// Shutdown API
uploadDeletedItemOneDriveApiInstance . shutdown ( ) ;
} catch ( OneDriveException e ) {
if ( e . httpStatusCode = = 404 ) {
// item.id, item.eTag could not be found on the specified driveId
log . vlog ( "OneDrive reported: The resource could not be found to be deleted." ) ;
2023-09-10 02:45:47 +02:00
}
2023-09-11 06:31:10 +02:00
}
2023-08-29 01:56:55 +02:00
2023-09-11 06:31:10 +02:00
// Delete the reference in the local database
itemDB . deleteById ( itemToDelete . driveId , itemToDelete . id ) ;
if ( itemToDelete . remoteId ! = null ) {
// If the item is a remote item, delete the reference in the local database
itemDB . deleteById ( itemToDelete . remoteDriveId , itemToDelete . remoteId ) ;
2023-09-10 01:07:53 +02:00
}
2023-09-11 06:31:10 +02:00
2023-09-10 02:45:47 +02:00
} else {
// log that this is a dry-run activity
log . log ( "dry run - no delete activity" ) ;
2018-12-28 02:26:03 +01:00
}
}
}
2019-03-11 07:57:47 +01:00
2023-08-27 01:35:51 +02:00
// Get the children of an item id from the database
Item [ ] getChildren ( string driveId , string id ) {
2023-09-24 03:07:26 +02:00
2023-08-27 01:35:51 +02:00
Item [ ] children ;
children ~ = itemDB . selectChildren ( driveId , id ) ;
foreach ( Item child ; children ) {
if ( child . type ! = ItemType . file ) {
// recursively get the children of this child
children ~ = getChildren ( child . driveId , child . id ) ;
}
}
return children ;
}
// Perform a 'reverse' delete of all child objects on OneDrive
void performReverseDeletionOfOneDriveItems ( Item [ ] children , Item itemToDelete ) {
2023-09-24 03:07:26 +02:00
// Log what is happening
2023-08-27 01:35:51 +02:00
log . vdebug ( "Attempting a reverse delete of all child objects from OneDrive" ) ;
2023-09-08 22:34:52 +02:00
// Create a new API Instance for this thread and initialise it
OneDriveApi performReverseDeletionOneDriveApiInstance ;
performReverseDeletionOneDriveApiInstance = new OneDriveApi ( appConfig ) ;
performReverseDeletionOneDriveApiInstance . initialise ( ) ;
2023-08-27 01:35:51 +02:00
foreach_reverse ( Item child ; children ) {
// Log the action
log . vdebug ( "Attempting to delete this child item id: " , child . id , " from drive: " , child . driveId ) ;
// perform the delete via the default OneDrive API instance
2023-09-08 22:34:52 +02:00
performReverseDeletionOneDriveApiInstance . deleteById ( child . driveId , child . id , child . eTag ) ;
2023-08-27 01:35:51 +02:00
// delete the child reference in the local database
itemDB . deleteById ( child . driveId , child . id ) ;
}
// Log the action
log . vdebug ( "Attempting to delete this parent item id: " , itemToDelete . id , " from drive: " , itemToDelete . driveId ) ;
// Perform the delete via the default OneDrive API instance
2023-09-08 22:34:52 +02:00
performReverseDeletionOneDriveApiInstance . deleteById ( itemToDelete . driveId , itemToDelete . id , itemToDelete . eTag ) ;
// Shutdown API instance
performReverseDeletionOneDriveApiInstance . shutdown ( ) ;
2023-08-27 01:35:51 +02:00
}
2019-03-11 07:57:47 +01:00
// Create a fake OneDrive response suitable for use with saveItem
// Used in --dry-run style scenarios so a local change can be recorded in the
// database without performing the real OneDrive API request.
//
// Params:
//   path = local filesystem path of the file or directory to fake a response for
// Returns:
//   A JSONValue shaped like a OneDrive driveItem (id, cTag, eTag, fileSystemInfo,
//   name, parentReference, and either 'folder' or 'file' + quickXorHash)
JSONValue createFakeResponse(const(string) path) {
	import std.digest.sha;
	// Generate a simulated JSON response which can be used
	// At a minimum we need:
	// 1. eTag
	// 2. cTag
	// 3. fileSystemInfo
	// 4. file or folder. if file, hash of file
	// 5. id
	// 6. name
	// 7. parent reference
	
	string fakeDriveId = appConfig.defaultDriveId;
	string fakeRootId = appConfig.defaultRootId;
	SysTime mtime = timeLastModified(path).toUTC();
	
	// Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database
	// Otherwise some calls to validate objects will fail as the actual driveId being used is invalid
	string parentPath = dirName(path);
	Item databaseItem;
	
	if (parentPath != ".") {
		// Not a 'root' parent
		// For each driveid in the existing driveIDsArray
		foreach (searchDriveId; driveIDsArray) {
			log.vdebug("FakeResponse: searching database for: ", searchDriveId, " ", parentPath);
			if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) {
				log.vdebug("FakeResponse: Found Database Item: ", databaseItem);
				fakeDriveId = databaseItem.driveId;
				fakeRootId = databaseItem.id;
			}
		}
	}
	
	// real id / eTag / cTag are different format for personal / business account
	// a SHA1 of the path is 'good enough' as a deterministic fake identifier
	auto sha1 = new SHA1Digest();
	ubyte[] fakedOneDriveItemValues = sha1.digest(path);
	string fakeIdentifier = toHexString(fakedOneDriveItemValues);
	
	// Build the JSON elements that are common to both files and directories once,
	// rather than duplicating the entire construction in each branch
	JSONValue fakeResponse = [
		"id": JSONValue(fakeIdentifier),
		"cTag": JSONValue(fakeIdentifier),
		"eTag": JSONValue(fakeIdentifier),
		"fileSystemInfo": JSONValue([
			"createdDateTime": mtime.toISOExtString(),
			"lastModifiedDateTime": mtime.toISOExtString()
		]),
		"name": JSONValue(baseName(path)),
		"parentReference": JSONValue([
			"driveId": JSONValue(fakeDriveId),
			"driveType": JSONValue(appConfig.accountType),
			"id": JSONValue(fakeRootId)
		])
	];
	
	if (isDir(path)) {
		// path is a directory - add the 'folder' facet
		fakeResponse["folder"] = JSONValue("");
	} else {
		// path is a file - add the 'file' facet
		// compute file hash - both business and personal responses use quickXorHash
		string quickXorHash = computeQuickXorHash(path);
		fakeResponse["file"] = JSONValue([
			"hashes": JSONValue([
				"quickXorHash": JSONValue(quickXorHash)
			])
		]);
	}
	
	log.vdebug("Generated Fake OneDrive Response: ", fakeResponse);
	return fakeResponse;
}
// Save JSON item details into the item database
//
// Params:
//   jsonItem = a OneDrive API driveItem JSON response to persist locally
// Side effects:
//   Upserts a row into the item database and may extend driveIDsArray;
//   logs and returns without writing when the JSON is unusable or the item
//   is deliberately excluded from the database.
void saveItem(JSONValue jsonItem) {
	// Guard: a valid JSON object is required before any database work can occur
	if (jsonItem.type() != JSONType.object) {
		// log error
		log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object");
		log.error("ERROR: Increase logging verbosity to assist determining why.");
		return;
	}
	
	// Guard: the response JSON must have an 'id', otherwise makeItem() fails with 'Key not found: id'
	if (!hasId(jsonItem)) {
		// log error
		log.error("ERROR: OneDrive response missing required 'id' element");
		log.error("ERROR: ", jsonItem);
		return;
	}
	
	// Guard: in a --upload-only & --remove-source-files scenario we do not want to
	// add a file to the database, as there is no local reference to the file post
	// file deletion. Directories are still added; files in them are then skipped too.
	if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) {
		// Log that we skipping adding item to the local DB and the reason why
		log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured");
		return;
	}
	
	// What is the JSON item we are trying to create a DB record with?
	log.vdebug("saveItem - creating DB item from this JSON: ", jsonItem);
	// Takes a JSON input and formats to an item which can be used by the database
	Item item = makeItem(jsonItem);
	
	// Is this JSON item a 'root' item?
	if ((isItemRoot(jsonItem)) && (item.name == "root")) {
		log.vdebug("Updating DB Item object with correct values as this is a 'root' object");
		// ensure that this database entry has no parent
		item.parentId = null;
		// Check for parentReference and take the driveId from it when present
		if (hasParentReference(jsonItem)) {
			log.vdebug("ROOT JSON Item HAS parentReference .... setting item.driveId = jsonItem['parentReference']['driveId'].str");
			item.driveId = jsonItem["parentReference"]["driveId"].str;
		}
		// Only our own account 'root' belongs in the database, not shared folder 'root' items
		if (item.driveId != appConfig.defaultDriveId) {
			// Shared Folder drive 'root' object .. we dont want this item
			log.vdebug("NOT adding 'remote root' object to database: ", item);
			return;
		}
	}
	
	// Add to the local database
	log.vdebug("Adding to database: ", item);
	itemDB.upsert(item);
	
	// If we have a remote drive ID, record it in our list of known drive id's,
	// keeping driveIDsArray populated with unique entries only
	if (!item.remoteDriveId.empty) {
		if (!canFind(driveIDsArray, item.remoteDriveId)) {
			driveIDsArray ~= item.remoteDriveId;
		}
	}
}
2020-03-19 20:12:47 +01:00
2023-08-27 01:35:51 +02:00
// Wrapper function for makeDatabaseItem so we can check to ensure that the item has the required hashes
//
// Params:
//   onedriveJSONItem = a OneDrive API driveItem JSON response
// Returns:
//   The database Item built by makeDatabaseItem(); additionally warns when a
//   non-empty, non-deleted file unexpectedly carries no hash from the API.
Item makeItem(JSONValue onedriveJSONItem) {
	// Make the DB Item from the JSON data provided
	Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem);
	
	// Hash validation only applies to 'file' items that have not been deleted
	// (deleted items have no hash) and that report a file size
	bool undeletedFile = (newDatabaseItem.type == ItemType.file) && (!isItemDeleted(onedriveJSONItem));
	if (undeletedFile && hasFileSize(onedriveJSONItem)) {
		if (onedriveJSONItem["size"].integer > 0) {
			// Non-zero size: the API should have provided at least one hash.
			// Only investigate when both hashes are missing and a 'file' JSON element exists.
			if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty) && ("file" in onedriveJSONItem)) {
				// Microsoft OneDrive OneNote objects will report as files but have 'application/msonenote' and 'application/octet-stream' as mime types
				if ((isMicrosoftOneNoteMimeType1(onedriveJSONItem)) || (isMicrosoftOneNoteMimeType2(onedriveJSONItem))) {
					// Debug log output that this is a potential OneNote object
					log.vdebug("This item is potentially an associated Microsoft OneNote Object Item");
				} else {
					// Not a Microsoft OneNote Mime Type Object .. genuine API inconsistency
					string apiWarningMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: ";
					// This is computationally expensive .. but we are only doing this if there are no hashses provided
					bool parentInDatabase = itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId);
					if (parentInDatabase) {
						// This is again computationally expensive .. calculate this item path to advise the user the actual path of this item that has no hash
						string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name;
						log.log(apiWarningMessage, newItemPath);
					} else if (skippedItems.find(newDatabaseItem.parentId).length != 0) {
						// Parent is not in the database because the parent item was skipped
						log.vdebug(apiWarningMessage, "newDatabaseItem.parentId listed within skippedItems");
					} else {
						// Use the item ID .. there is no other reference available, parent is not being skipped, so we should have been able to calculate this - but we could not
						log.log(apiWarningMessage, newDatabaseItem.id);
					}
				}
			}
		} else {
			// zero file size
			log.vdebug("This item file is zero size - potentially no hash provided by the OneDrive API");
		}
	}
	
	// Return the new database item
	return newDatabaseItem;
}
// Print the fileDownloadFailures and fileUploadFailures arrays if they are not empty
// Both halves of the original implementation were copy-paste duplicates (the upload
// half even carried a 'download failures' comment); the shared logic now lives in
// reportSyncFailures(), with the exact original message strings passed in so the
// user-visible output is unchanged.
void displaySyncFailures() {
	// Were there any file download failures?
	reportSyncFailures(fileDownloadFailures,
		"\nFailed items to download from OneDrive: ",
		"Failed to download: ",
		"ERROR: Failed Download Path found in database, must delete this item from the database .. it should not be in there if it failed to download");
	// Were there any file upload failures?
	reportSyncFailures(fileUploadFailures,
		"\nFailed items to upload to OneDrive: ",
		"Failed to upload: ",
		"ERROR: Failed Upload Path found in database, must delete this item from the database .. it should not be in there if it failed to upload");
}

// Shared reporting for a failure array: logs each failed path, removes any stale
// database row for it, and sets syncFailures when the array is not empty.
//
// Params:
//   failedItems          = the array of failed item paths (download or upload)
//   summaryMessage       = header logged with the failure count
//   perItemMessage       = prefix logged (and notified) per failed path
//   databaseErrorMessage = error logged when a failed path is unexpectedly in the DB
private void reportSyncFailures(FailureArray)(FailureArray failedItems, string summaryMessage, string perItemMessage, string databaseErrorMessage) {
	if (failedItems.empty) return;
	
	log.log(summaryMessage, failedItems.length);
	foreach (failedItemPath; failedItems) {
		// List the detail of the item that failed
		log.logAndNotify(perItemMessage, failedItemPath);
		// Is this failed item in the DB? It should not be ..
		Item failedDBItem;
		// Need to check all driveid's we know about, not just the defaultDriveId
		foreach (searchDriveId; driveIDsArray) {
			if (itemDB.selectByPath(failedItemPath, searchDriveId, failedDBItem)) {
				// item was found in the DB .. it should not be in there if it failed
				log.error(databaseErrorMessage);
				// Process the database entry removal. In a --dry-run scenario, this is being done against a DB copy
				itemDB.deleteById(failedDBItem.driveId, failedDBItem.id);
				if (failedDBItem.remoteDriveId != null) {
					// delete the linked remote folder
					itemDB.deleteById(failedDBItem.remoteDriveId, failedDBItem.remoteId);
				}
			}
		}
	}
	// Set the flag
	syncFailures = true;
}
2020-06-16 23:57:14 +02:00
2023-08-27 01:35:51 +02:00
// Generate a /delta compatible response - for use when we cant actually use /delta
// This is required when the application is configured to use National Azure AD deployments as these do not support /delta queries
// The same technique can also be used when we are using --single-directory. The parent objects up to the single directory target can be added,
// then once the target of the --single-directory request is hit, all of the children of that path can be queried, giving a much more focused
// JSON response which can then be processed, negating the need to continuously traverse the tree and 'exclude' items
//
// Params:
//   pathToQuery = OneDrive path to build the response for; null/empty means the account root
// Returns:
//   A JSON object shaped like a Graph /delta response ("@odata.context" + "value" array)
//
// Fixes applied vs previous revision:
// - the transient-error retry paths called generateDeltaResponse() recursively but
//   DISCARDED the result and fell through with unset driveData/topLevelChildren;
//   they now shut down the API instance and return the retried response
// - the pagination debug log printed the OLD nextLink value; it now logs the new one
JSONValue generateDeltaResponse(string pathToQuery = null) {
	// JSON value which will be responded with
	JSONValue selfGeneratedDeltaResponse;
	// Function variables
	Item searchItem;
	JSONValue rootData;
	JSONValue driveData;
	JSONValue pathData;
	JSONValue topLevelChildren;
	JSONValue[] childrenData;
	string nextLink;
	
	// Was a path to query passed in?
	if (pathToQuery.empty) {
		// Will query for the 'root'
		pathToQuery = ".";
	}
	
	// Create new OneDrive API Instance
	OneDriveApi generateDeltaResponseOneDriveApiInstance;
	generateDeltaResponseOneDriveApiInstance = new OneDriveApi(appConfig);
	generateDeltaResponseOneDriveApiInstance.initialise();
	
	if (!singleDirectoryScope) {
		// In a --resync scenario, there is no DB data to query, so we have to query the OneDrive API here to get relevant details
		try {
			// Query the OneDrive API
			pathData = generateDeltaResponseOneDriveApiInstance.getPathDetails(pathToQuery);
			// Is the path on OneDrive local or remote to our account drive id?
			if (isItemRemote(pathData)) {
				// The path we are seeking is remote to our account drive id
				searchItem.driveId = pathData["remoteItem"]["parentReference"]["driveId"].str;
				searchItem.id = pathData["remoteItem"]["id"].str;
			} else {
				// The path we are seeking is local to our account drive id
				searchItem.driveId = pathData["parentReference"]["driveId"].str;
				searchItem.id = pathData["id"].str;
			}
		} catch (OneDriveException e) {
			// Display error message
			displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
			// Must exit here
			generateDeltaResponseOneDriveApiInstance.shutdown();
			exit(-1);
		}
	} else {
		// When setSingleDirectoryScope() was called, the following were set to the correct items, even if the path was remote:
		// - singleDirectoryScopeDriveId
		// - singleDirectoryScopeItemId
		// Reuse these prior set values
		searchItem.driveId = singleDirectoryScopeDriveId;
		searchItem.id = singleDirectoryScopeItemId;
	}
	
	// Before we get any data from the OneDrive API, flag any child object in the database as out-of-sync for this driveId & and object id
	// Downgrade ONLY files associated with this driveId and idToQuery
	log.vdebug("Downgrading all children for this searchItem.driveId (" ~ searchItem.driveId ~ ") and searchItem.id (" ~ searchItem.id ~ ") to an out-of-sync state");
	auto drivePathChildren = getChildren(searchItem.driveId, searchItem.id);
	if (count(drivePathChildren) > 0) {
		// Children to process and flag as out-of-sync
		foreach (drivePathChild; drivePathChildren) {
			// Flag any object in the database as out-of-sync for this driveId & and object id
			log.vdebug("Downgrading item as out-of-sync: ", drivePathChild.id);
			itemDB.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id);
		}
	}
	
	// Get drive details for the provided driveId
	try {
		driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id);
	} catch (OneDriveException exception) {
		log.vdebug("driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id) generated a OneDriveException");
		string thisFunctionName = getFunctionName!({});
		// HTTP request returned status code 408,429,503,504
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// Handle the 429
			if (exception.httpStatusCode == 429) {
				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
				handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance);
				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
			}
			// re-try the specific changes queries
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// 408 - Request Time Out
				// 503 - Service Unavailable
				// 504 - Gateway Timeout
				// Transient error - try again in 30 seconds
				auto errorArray = splitLines(exception.msg);
				log.log(errorArray[0], " when attempting to query path details on OneDrive - retrying applicable request in 30 seconds");
				log.vdebug(thisFunctionName, " previously threw an error - retrying");
				// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
				log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
				Thread.sleep(dur!"seconds"(30));
			}
			// re-try original request - retried for 429, 503, 504
			// BUG FIX: return the retried response; previously the result was discarded
			// and execution fell through with an unset driveData
			log.vdebug("Retrying Function: ", thisFunctionName);
			generateDeltaResponseOneDriveApiInstance.shutdown();
			return generateDeltaResponse(pathToQuery);
		} else {
			// Default operation if not 408,429,503,504 errors
			// display what the error is
			displayOneDriveErrorMessage(exception.msg, thisFunctionName);
		}
	}
	
	// Was a valid JSON response for 'driveData' provided?
	if (driveData.type() == JSONType.object) {
		// Process this initial JSON response
		if (!isItemRoot(driveData)) {
			// Get root details for the provided driveId
			try {
				rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId);
			} catch (OneDriveException exception) {
				log.vdebug("rootData = onedrive.getDriveIdRoot(searchItem.driveId) generated a OneDriveException");
				string thisFunctionName = getFunctionName!({});
				// HTTP request returned status code 408,429,503,504
				if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
					// Handle the 429
					if (exception.httpStatusCode == 429) {
						// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
						handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance);
						log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
					}
					// re-try the specific changes queries
					if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
						// 408 - Request Time Out
						// 503 - Service Unavailable
						// 504 - Gateway Timeout
						// Transient error - try again in 30 seconds
						auto errorArray = splitLines(exception.msg);
						log.log(errorArray[0], " when attempting to query drive root details on OneDrive - retrying applicable request in 30 seconds");
						log.vdebug(thisFunctionName, " previously threw an error - retrying");
						// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
						log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
						Thread.sleep(dur!"seconds"(30));
					}
					// re-try original request - retried for 429, 503, 504
					log.log("Retrying Query: rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId)");
					rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId);
				} else {
					// Default operation if not 408,429,503,504 errors
					// display what the error is
					displayOneDriveErrorMessage(exception.msg, thisFunctionName);
				}
			}
			// Add root JSON data to array
			log.vlog("Adding OneDrive root details for processing");
			childrenData ~= rootData;
		}
		
		// Add driveData JSON data to array
		log.vlog("Adding OneDrive folder details for processing");
		childrenData ~= driveData;
	} else {
		// driveData is an invalid JSON object
		writeln("CODING TO DO: The query of OneDrive API to getPathDetailsById generated an invalid JSON response - thus we cant build our own /delta simulated response ... how to handle?");
		// Must exit here
		generateDeltaResponseOneDriveApiInstance.shutdown();
		exit(-1);
	}
	
	// For each child object, query the OneDrive API
	for (;;) {
		// query top level children
		try {
			topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink);
		} catch (OneDriveException exception) {
			// OneDrive threw an error
			log.vdebug("------------------------------------------------------------------");
			log.vdebug("Query Error: topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)");
			log.vdebug("driveId: ", searchItem.driveId);
			log.vdebug("idToQuery: ", searchItem.id);
			log.vdebug("nextLink: ", nextLink);
			
			string thisFunctionName = getFunctionName!({});
			// HTTP request returned status code 408,429,503,504
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// Handle the 429
				if (exception.httpStatusCode == 429) {
					// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
					handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance);
					log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)");
				}
				// re-try the specific changes queries
				if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
					// 408 - Request Time Out
					// 503 - Service Unavailable
					// 504 - Gateway Timeout
					// Transient error - try again in 30 seconds
					auto errorArray = splitLines(exception.msg);
					log.log(errorArray[0], " when attempting to query OneDrive top level drive children on OneDrive - retrying applicable request in 30 seconds");
					log.vdebug("generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink) previously threw an error - retrying");
					// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
					log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
					Thread.sleep(dur!"seconds"(30));
				}
				// re-try original request - retried for 429, 503, 504
				// BUG FIX: return the retried response; previously the result was discarded
				// and execution fell through with an unset topLevelChildren
				log.vdebug("Retrying Function: ", thisFunctionName);
				generateDeltaResponseOneDriveApiInstance.shutdown();
				return generateDeltaResponse(pathToQuery);
			} else {
				// Default operation if not 408,429,503,504 errors
				// display what the error is
				displayOneDriveErrorMessage(exception.msg, thisFunctionName);
			}
		}
		
		// process top level children
		log.vlog("Adding ", count(topLevelChildren["value"].array), " OneDrive items for processing from the OneDrive 'root' folder");
		foreach (child; topLevelChildren["value"].array) {
			// Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway.
			// This avoids needless calls to the OneDrive API, and potentially speeds up this process.
			if (!checkJSONAgainstClientSideFiltering(child)) {
				// add this child to the array of objects
				childrenData ~= child;
				// is this child a folder?
				if (isItemFolder(child)) {
					// We have to query this folders children if childCount > 0
					if (child["folder"]["childCount"].integer > 0) {
						// This child folder has children
						string childIdToQuery = child["id"].str;
						string childDriveToQuery = child["parentReference"]["driveId"].str;
						auto childParentPath = child["parentReference"]["path"].str.split(":");
						string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str;
						string pathForLogging;
						// Are we in a --single-directory situation? If we are, the path we are using for logging needs to use the input path as a base
						if (singleDirectoryScope) {
							pathForLogging = appConfig.getValueString("single_directory") ~ "/" ~ child["name"].str;
						} else {
							pathForLogging = child["name"].str;
						}
						// Query the children of this item
						JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging);
						foreach (grandChild; grandChildrenData.array) {
							// add the grandchild to the array
							childrenData ~= grandChild;
						}
					}
				}
			}
		}
		// If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
		// to indicate more items are available and provide the request URL for the next page of items.
		if ("@odata.nextLink" in topLevelChildren) {
			// Update nextLink to next changeSet bundle
			// BUG FIX: assign before logging so the NEW value is what gets logged
			nextLink = topLevelChildren["@odata.nextLink"].str;
			log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink);
		} else break;
	}
	
	// Craft response from all returned JSON elements
	selfGeneratedDeltaResponse = [
		"@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"),
		"value": JSONValue(childrenData.array)
	];
	
	// Shutdown API
	generateDeltaResponseOneDriveApiInstance.shutdown();
	// Return the generated JSON response
	return selfGeneratedDeltaResponse;
}
2023-08-27 01:35:51 +02:00
// Query the OneDrive API for the specified child id for any children objects
JSONValue [ ] queryForChildren ( string driveId , string idToQuery , string childParentPath , string pathForLogging ) {
2023-09-24 03:07:26 +02:00
2020-06-16 23:57:14 +02:00
// function variables
JSONValue thisLevelChildren ;
JSONValue [ ] thisLevelChildrenData ;
string nextLink ;
for ( ; ; ) {
2023-09-03 07:05:01 +02:00
// query this level children
2023-09-02 23:38:36 +02:00
try {
thisLevelChildren = queryThisLevelChildren ( driveId , idToQuery , nextLink ) ;
} catch ( OneDriveException exception ) {
2023-09-03 07:05:01 +02:00
writeln ( "CODING TO DO: EXCEPTION HANDLING NEEDED: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink)" ) ;
2023-09-02 23:38:36 +02:00
2020-06-16 23:57:14 +02:00
}
2023-09-02 23:38:36 +02:00
// Was a valid JSON response for 'thisLevelChildren' provided?
if ( thisLevelChildren . type ( ) = = JSONType . object ) {
// process this level children
if ( ! childParentPath . empty ) {
// We dont use childParentPath to log, as this poses an information leak risk.
// The full parent path of the child, as per the JSON might be:
// /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder
// But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged.
// Plus, the application output now shows accuratly what is being shared - so that is a good thing.
log . vlog ( "Adding " , count ( thisLevelChildren [ "value" ] . array ) , " OneDrive items for processing from " , pathForLogging ) ;
}
foreach ( child ; thisLevelChildren [ "value" ] . array ) {
// Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway.
// This avoids needless calls to the OneDrive API, and potentially speeds up this process.
if ( ! checkJSONAgainstClientSideFiltering ( child ) ) {
// add this child to the array of objects
thisLevelChildrenData ~ = child ;
// is this child a folder?
if ( isItemFolder ( child ) ) {
// We have to query this folders children if childCount > 0
if ( child [ "folder" ] [ "childCount" ] . integer > 0 ) {
// This child folder has children
string childIdToQuery = child [ "id" ] . str ;
string childDriveToQuery = child [ "parentReference" ] [ "driveId" ] . str ;
auto grandchildParentPath = child [ "parentReference" ] [ "path" ] . str . split ( ":" ) ;
string folderPathToScan = grandchildParentPath [ 1 ] ~ "/" ~ child [ "name" ] . str ;
string newLoggingPath = pathForLogging ~ "/" ~ child [ "name" ] . str ;
JSONValue [ ] grandChildrenData = queryForChildren ( childDriveToQuery , childIdToQuery , folderPathToScan , newLoggingPath ) ;
foreach ( grandChild ; grandChildrenData . array ) {
// add the grandchild to the array
thisLevelChildrenData ~ = grandChild ;
}
2023-08-27 01:35:51 +02:00
}
2020-06-16 23:57:14 +02:00
}
}
}
2023-09-02 23:38:36 +02:00
// If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
// to indicate more items are available and provide the request URL for the next page of items.
if ( "@odata.nextLink" in thisLevelChildren ) {
// Update nextLink to next changeSet bundle
nextLink = thisLevelChildren [ "@odata.nextLink" ] . str ;
log . vdebug ( "Setting nextLink to (@odata.nextLink): " , nextLink ) ;
} else break ;
} else {
2023-09-03 07:05:01 +02:00
// Invalid JSON response when querying this level children
log . vdebug ( "INVALID JSON response when attempting a retry of parent function - queryForChildren(driveId, idToQuery, childParentPath, pathForLogging)" ) ;
// retry thisLevelChildren = queryThisLevelChildren
log . vdebug ( "Thread sleeping for an additional 30 seconds" ) ;
Thread . sleep ( dur ! "seconds" ( 30 ) ) ;
log . vdebug ( "Retry this call thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink)" ) ;
thisLevelChildren = queryThisLevelChildren ( driveId , idToQuery , nextLink ) ;
}
}
// return response
return thisLevelChildrenData ;
}
// Query the OneDrive API for the immediate child objects of the given driveId/itemId,
// honouring an optional '@odata.nextLink' continuation token supplied by the caller.
//
// Params:
//   driveId   = the drive containing the item whose children are listed
//   idToQuery = the item id whose children are listed
//   nextLink  = an '@odata.nextLink' URL for continued paging, or empty for the first page
//
// Returns: the raw JSON response from the OneDrive API (one page of children), or an
//          invalid JSONValue if the query failed with a non-retryable error.
JSONValue queryThisLevelChildren(string driveId, string idToQuery, string nextLink) {

	// function variables
	JSONValue thisLevelChildren;

	// Create new OneDrive API Instance
	OneDriveApi queryChildrenOneDriveApiInstance;
	queryChildrenOneDriveApiInstance = new OneDriveApi(appConfig);
	queryChildrenOneDriveApiInstance.initialise();
	// Guarantee the API instance is shut down on every exit path.
	// The original code only called shutdown() on the success path, leaking the instance whenever an exception was thrown.
	scope(exit) queryChildrenOneDriveApiInstance.shutdown();

	// query children
	try {
		// attempt API call
		log.vdebug("Attempting Query: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)");
		thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink);
		log.vdebug("Query 'thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)' performed successfully");
	} catch (OneDriveException exception) {
		// OneDrive threw an error
		log.vdebug("------------------------------------------------------------------");
		log.vdebug("Query Error: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)");
		log.vdebug("driveId: ", driveId);
		log.vdebug("idToQuery: ", idToQuery);
		log.vdebug("nextLink: ", nextLink);

		string thisFunctionName = getFunctionName!({});
		// HTTP request returned status code 408,429,503,504
		if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
			// Handle the 429
			if (exception.httpStatusCode == 429) {
				// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
				handleOneDriveThrottleRequest(queryChildrenOneDriveApiInstance);
				log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
			}
			// re-try the specific changes queries
			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
				// 408 - Request Time Out
				// 503 - Service Unavailable
				// 504 - Gateway Timeout
				// Transient error - try again in 30 seconds
				auto errorArray = splitLines(exception.msg);
				log.log(errorArray[0], " when attempting to query OneDrive drive item children - retrying applicable request in 30 seconds");
				log.vdebug("thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying");
				// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
				log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
				Thread.sleep(dur!"seconds"(30));
			}
			// Re-try the original request for 408, 429, 503, 504 by calling this function again.
			// BUGFIX: the original code discarded the return value of this recursive call, so a
			// successfully retried query still returned an empty/invalid JSONValue to the caller.
			log.vdebug("Retrying Function: ", thisFunctionName);
			thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink);
		} else {
			// Default operation if not 408,429,503,504 errors
			// display what the error is
			displayOneDriveErrorMessage(exception.msg, thisFunctionName);
		}
	}

	// return response
	return thisLevelChildren;
}
// Traverses the provided path online, via the OneDrive API, following correct parent driveId and itemId elements across the account
// to find if this full path exists. If this path exists online, the last item in the object path will be returned as a full JSON item.
//
// If the createPathIfMissing = false + no path exists online, a null invalid JSON item will be returned.
// If the createPathIfMissing = true + no path exists online, the requested path will be created in the correct location online. The resulting
// response to the directory creation will then be returned.
//
// This function also ensures that each path in the requested path actually matches the requested element to ensure that the OneDrive API response
// is not falsely matching a 'case insensitive' match to the actual request which is a POSIX compliance issue.
//
// Throws: posixException when an online item differs from the requested path element only by case.
JSONValue queryOneDriveForSpecificPathAndCreateIfMissing(string thisNewPathToSearch, bool createPathIfMissing) {

	// function variables
	JSONValue getPathDetailsAPIResponse;
	string currentPathTree;
	Item parentDetails;
	JSONValue topLevelChildren;
	string nextLink;
	bool directoryFoundOnline = false;
	bool posixIssue = false;

	// Create a new API Instance for this thread and initialise it
	OneDriveApi queryOneDriveForSpecificPath;
	queryOneDriveForSpecificPath = new OneDriveApi(appConfig);
	queryOneDriveForSpecificPath.initialise();
	// Guarantee the API instance is shut down on every exit path.
	// The original only shut it down at the end of the happy path, leaking the instance when
	// a posixException was thrown or when a transient error triggered the recursive retry.
	scope(exit) queryOneDriveForSpecificPath.shutdown();

	foreach (thisFolderName; pathSplitter(thisNewPathToSearch)) {
		log.vdebug("Testing for the existence online of this folder path: ", thisFolderName);
		directoryFoundOnline = false;

		// If this is '.' this is the account root
		if (thisFolderName == ".") {
			currentPathTree = thisFolderName;
		} else {
			currentPathTree = currentPathTree ~ "/" ~ thisFolderName;
		}
		log.vdebug("Attempting to query OneDrive for this path: ", currentPathTree);

		// What query do we use?
		if (thisFolderName == ".") {
			// Query the root, set the right details
			try {
				getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree);
				parentDetails = makeItem(getPathDetailsAPIResponse);
				// Is the response a valid JSON object - validation checking done in saveItem
				saveItem(getPathDetailsAPIResponse);
				directoryFoundOnline = true;
			} catch (OneDriveException exception) {
				string thisFunctionName = getFunctionName!({});
				// HTTP request returned status code 408,429,503,504
				if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
					// Handle the 429
					if (exception.httpStatusCode == 429) {
						// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
						handleOneDriveThrottleRequest(queryOneDriveForSpecificPath);
						log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
					}
					// re-try the specific changes queries
					if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
						// 408 - Request Time Out
						// 503 - Service Unavailable
						// 504 - Gateway Timeout
						// Transient error - try again in 30 seconds
						auto errorArray = splitLines(exception.msg);
						log.log(errorArray[0], " when attempting to query path on OneDrive - retrying applicable request in 30 seconds");
						log.vdebug(thisFunctionName, " previously threw an error - retrying");
						// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
						log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
						Thread.sleep(dur!"seconds"(30));
					}
					// Re-try the whole request and return its result.
					// BUGFIX: the original discarded the recursive call's return value and fell
					// through, continuing this invocation with stale/invalid state.
					log.vdebug("Retrying Function: ", thisFunctionName);
					return queryOneDriveForSpecificPathAndCreateIfMissing(thisNewPathToSearch, createPathIfMissing);
				} else {
					// Default operation if not 408,429,503,504 errors
					// display what the error is
					displayOneDriveErrorMessage(exception.msg, thisFunctionName);
				}
			}
		} else {
			// Ensure we have a valid driveId to search here
			if (parentDetails.driveId.empty) {
				parentDetails.driveId = appConfig.defaultDriveId;
			}
			// If the prior JSON 'getPathDetailsAPIResponse' is on this account driveId .. then continue to use getPathDetails
			if (parentDetails.driveId == appConfig.defaultDriveId) {
				try {
					// Query OneDrive API for this path
					getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree);
					// Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API
					performPosixTest(thisFolderName, getPathDetailsAPIResponse["name"].str);
					// No POSIX issue with requested path element
					parentDetails = makeItem(getPathDetailsAPIResponse);
					saveItem(getPathDetailsAPIResponse);
					directoryFoundOnline = true;

					// Is this JSON a remote object
					if (isItemRemote(getPathDetailsAPIResponse)) {
						// Remote Directory .. need a DB Tie Item
						log.vdebug("Creating a DB TIE for this Shared Folder");
						// New DB Tie Item to bind the 'remote' path to our parent path
						Item tieDBItem;
						// Set the name
						tieDBItem.name = parentDetails.name;
						// Set the correct item type
						tieDBItem.type = ItemType.dir;
						// Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie
						tieDBItem.driveId = parentDetails.remoteDriveId;
						tieDBItem.id = parentDetails.remoteId;
						// Set the correct mtime
						tieDBItem.mtime = parentDetails.mtime;
						// Add tie DB record to the local database
						log.vdebug("Adding tie DB record to database: ", tieDBItem);
						itemDB.upsert(tieDBItem);
						// Update parentDetails to use the DB Tie record
						parentDetails = tieDBItem;
					}
				} catch (OneDriveException exception) {
					if (exception.httpStatusCode == 404) {
						// Path element does not exist online - fall through to the creation logic below
						directoryFoundOnline = false;
					} else {
						string thisFunctionName = getFunctionName!({});
						// HTTP request returned status code 408,429,503,504
						if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
							// Handle the 429
							if (exception.httpStatusCode == 429) {
								// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
								handleOneDriveThrottleRequest(queryOneDriveForSpecificPath);
								log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry ", thisFunctionName);
							}
							// re-try the specific changes queries
							if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
								// 408 - Request Time Out
								// 503 - Service Unavailable
								// 504 - Gateway Timeout
								// Transient error - try again in 30 seconds
								auto errorArray = splitLines(exception.msg);
								log.log(errorArray[0], " when attempting to query path on OneDrive - retrying applicable request in 30 seconds");
								log.vdebug(thisFunctionName, " previously threw an error - retrying");
								// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
								log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request");
								Thread.sleep(dur!"seconds"(30));
							}
							// Re-try the whole request and return its result.
							// BUGFIX: the original discarded the recursive call's return value (see comment above).
							log.vdebug("Retrying Function: ", thisFunctionName);
							return queryOneDriveForSpecificPathAndCreateIfMissing(thisNewPathToSearch, createPathIfMissing);
						} else {
							// Default operation if not 408,429,503,504 errors
							// display what the error is
							displayOneDriveErrorMessage(exception.msg, thisFunctionName);
						}
					}
				}
			} else {
				// parentDetails.driveId is not the account drive id - thus will be a remote shared item
				log.vdebug("This parent directory is a remote object this next path will be on a remote drive");
				// BUGFIX: reset the continuation token before paging this folder's children; the
				// original reused whatever nextLink was left over from a previous paging loop,
				// which could start the listing mid-way through the wrong result set.
				nextLink = null;
				// For this parentDetails.driveId, parentDetails.id object, query the OneDrive API for it's children
				for (;;) {
					// Query this remote object for its children
					topLevelChildren = queryOneDriveForSpecificPath.listChildren(parentDetails.driveId, parentDetails.id, nextLink);
					// Process each child
					foreach (child; topLevelChildren["value"].array) {
						// Is this child a folder?
						if (isItemFolder(child)) {
							// Is this the child folder we are looking for, and is a POSIX match?
							if (child["name"].str == thisFolderName) {
								// EXACT MATCH including case sensitivity: Flag that we found the folder online
								directoryFoundOnline = true;
								// Use these details for the next entry path
								getPathDetailsAPIResponse = child;
								parentDetails = makeItem(getPathDetailsAPIResponse);
								saveItem(getPathDetailsAPIResponse);
								// No need to continue searching
								break;
							} else {
								string childAsLower = toLower(child["name"].str);
								string thisFolderNameAsLower = toLower(thisFolderName);
								if (childAsLower == thisFolderNameAsLower) {
									// This is a POSIX 'case in-sensitive match' .....
									// Local item name has a 'case-insensitive match' to an existing item on OneDrive
									posixIssue = true;
									throw new posixException(thisFolderName, child["name"].str);
								}
							}
						}
					}

					if (directoryFoundOnline) {
						// We found the folder, no need to continue searching nextLink data
						break;
					}
					// If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
					// to indicate more items are available and provide the request URL for the next page of items.
					if ("@odata.nextLink" in topLevelChildren) {
						// Update nextLink to next changeSet bundle
						// BUGFIX: assign before logging; the original logged the stale previous value
						nextLink = topLevelChildren["@odata.nextLink"].str;
						log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink);
					} else break;
				}
			}
		}
		// If we did not find the folder, we need to create this folder
		if (!directoryFoundOnline) {
			// Folder not found online
			// Set any response to be an invalid JSON item
			getPathDetailsAPIResponse = null;
			// Was there a POSIX issue?
			if (!posixIssue) {
				// No POSIX issue
				if (createPathIfMissing) {
					// Create this path as it is missing on OneDrive online and there is no POSIX issue with a 'case-insensitive match'
					log.vdebug("FOLDER NOT FOUND ONLINE AND WE ARE REQUESTED TO CREATE IT");
					log.vdebug("Create folder on this drive: ", parentDetails.driveId);
					log.vdebug("Create folder as a child on this object: ", parentDetails.id);
					log.vdebug("Create this folder name: ", thisFolderName);
					JSONValue newDriveItem = [
						"name": JSONValue(thisFolderName),
						"folder": parseJSON("{}")
					];
					JSONValue createByIdAPIResponse;
					// Submit the creation request
					// Fix for https://github.com/skilion/onedrive/issues/356
					if (!dryRun) {
						try {
							// Attempt to create a new folder on the configured parent driveId & parent id
							createByIdAPIResponse = queryOneDriveForSpecificPath.createById(parentDetails.driveId, parentDetails.id, newDriveItem);
							// Is the response a valid JSON object - validation checking done in saveItem
							saveItem(createByIdAPIResponse);
							// Set getPathDetailsAPIResponse to createByIdAPIResponse
							getPathDetailsAPIResponse = createByIdAPIResponse;
							// BUGFIX: make the folder we just created the parent for the next path
							// element; the original left parentDetails pointing at the grandparent,
							// so nested missing paths were all created under the wrong parent.
							parentDetails = makeItem(createByIdAPIResponse);
						} catch (OneDriveException e) {
							// 409 - API Race Condition
							if (e.httpStatusCode == 409) {
								// When we attempted to create it, OneDrive responded that it now already exists
								log.vlog("OneDrive reported that ", thisFolderName, " already exists .. OneDrive API race condition");
							} else {
								// some other error from OneDrive was returned - display what it is
								log.error("OneDrive generated an error when creating this path: ", thisFolderName);
								displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
							}
						}
					} else {
						// Simulate a successful 'directory create' & save it to the dryRun database copy
						// The simulated response has to pass 'makeItem' as part of saveItem
						auto fakeResponse = createFakeResponse(thisNewPathToSearch);
						saveItem(fakeResponse);
					}
				}
			}
		}
	}

	// Output our search results
	log.vdebug("queryOneDriveForSpecificPathAndCreateIfMissing.getPathDetailsAPIResponse = ", getPathDetailsAPIResponse);
	return getPathDetailsAPIResponse;
}
// Delete an item by it's path
// This function is only used in --monitor mode
void deleteByPath(const(string) path) {

	// The record we are trying to remove online
	Item itemToDelete;

	// The item may live on any drive we know about, so check every known driveId rather than only the default one
	bool foundInDatabase = false;
	foreach (searchDriveId; driveIDsArray) {
		if (itemDB.selectByPath(path, searchDriveId, itemToDelete)) {
			// item was found in the DB
			foundInDatabase = true;
			break;
		}
	}

	// Without a database record there is nothing we can delete online
	if (!foundInDatabase) {
		throw new SyncException("The item to delete is not in the local database");
	}

	if (itemToDelete.parentId == null) {
		// the item is a remote folder, need to do the operation on the parent
		enforce(itemDB.selectByPathWithoutRemote(path, appConfig.defaultDriveId, itemToDelete));
	}

	try {
		if (noRemoteDelete) {
			// do not process remote delete
			log.vlog("Skipping remote delete as --upload-only & --no-remote-delete configured");
		} else {
			uploadDeletedItem(itemToDelete, path);
		}
	} catch (OneDriveException e) {
		// A 404 means the item is already gone online - just log the message; surface any other API error
		if (e.httpStatusCode == 404) {
			log.log(e.msg);
		} else {
			// display what the error is
			displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
		}
	}
}
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_move
// This function is only called in monitor mode when an move event is coming from
// inotify and we try to move the item.
void uploadMoveItem(string oldPath, string newPath) {
	// Log that we are doing a move
	log.log("Moving ", oldPath, " to ", newPath);
	// Is this move unwanted?
	bool unwanted = false;
	// Item variables
	Item oldItem, newItem, parentItem;

	// This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly
	// Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252
	if (!unwanted) {
		if (!isValid(newPath)) {
			// Path is not valid according to https://dlang.org/phobos/std_encoding.html
			log.logAndNotify("Skipping item - invalid character encoding sequence: ", newPath);
			unwanted = true;
		}
	}
	// Check this path against the Client Side Filtering Rules
	// - check_nosync
	// - skip_dotfiles
	// - skip_symlinks
	// - skip_file
	// - skip_dir
	// - sync_list
	// - skip_size
	if (!unwanted) {
		unwanted = checkPathAgainstClientSideFiltering(newPath);
	}
	// Check against Microsoft OneDrive restriction and limitations about Windows naming files
	if (!unwanted) {
		unwanted = checkPathAgainstMicrosoftNamingRestrictions(newPath);
	}
	// 'newPath' has passed client side filtering validation
	if (!unwanted) {
		if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) {
			// The old path|item is not synced with the database, upload as a new file
			// BUGFIX: corrected "databse" typo in the log message
			log.log("Moved local item was not in-sync with local database - uploading as new item");
			uploadNewFile(newPath);
			return;
		}
		if (oldItem.parentId == null) {
			// the item is a remote folder, need to do the operation on the parent
			enforce(itemDB.selectByPathWithoutRemote(oldPath, appConfig.defaultDriveId, oldItem));
		}
		if (itemDB.selectByPath(newPath, appConfig.defaultDriveId, newItem)) {
			// the destination has been overwritten
			log.log("Moved local item overwrote an existing item - deleting old online item");
			uploadDeletedItem(newItem, newPath);
		}
		if (!itemDB.selectByPath(dirName(newPath), appConfig.defaultDriveId, parentItem)) {
			// the parent item is not in the database
			throw new SyncException("Can't move an item to an unsynced directory");
		}
		if (oldItem.driveId != parentItem.driveId) {
			// items cannot be moved between drives
			uploadDeletedItem(oldItem, oldPath);
			// what sort of move is this?
			if (isFile(newPath)) {
				// newPath is a file
				uploadNewFile(newPath);
			} else {
				// newPath is a directory
				scanLocalFilesystemPathForNewData(newPath);
			}
		} else {
			if (!exists(newPath)) {
				log.vlog("uploadMoveItem target has disappeared: ", newPath);
				return;
			}
			// Configure the modification JSON item
			SysTime mtime = timeLastModified(newPath).toUTC();
			JSONValue data = [
				"name": JSONValue(baseName(newPath)),
				"parentReference": JSONValue([
					"id": parentItem.id
				]),
				"fileSystemInfo": JSONValue([
					"lastModifiedDateTime": mtime.toISOExtString()
				])
			];
			// Perform the move operation on OneDrive
			JSONValue response;

			// Create a new API Instance for this thread and initialise it
			OneDriveApi movePathOnlineApiInstance;
			movePathOnlineApiInstance = new OneDriveApi(appConfig);
			movePathOnlineApiInstance.initialise();
			// Guarantee the API instance is shut down even when the move throws.
			// The original skipped shutdown() if the eTag-less retry threw, leaking the instance.
			scope(exit) movePathOnlineApiInstance.shutdown();

			try {
				response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, oldItem.eTag);
			} catch (OneDriveException e) {
				if (e.httpStatusCode == 412) {
					// OneDrive threw a 412 error, most likely: ETag does not match current item's value
					// Retry without eTag
					log.vdebug("File Move Failed - OneDrive eTag / cTag match issue");
					log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error");
					string nullTag = null;
					// move the file but without the eTag
					response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, nullTag);
				} else {
					// BUGFIX: the original silently swallowed every non-412 API error here and then
					// passed an empty response to saveItem - surface the error instead
					displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
				}
			}

			// save the move response from OneDrive in the database
			// Is the response a valid JSON object - validation checking done in saveItem
			saveItem(response);
		}
	} else {
		// Moved item is unwanted
		log.log("Item has been moved to a location that is excluded from sync operations. Removing item from OneDrive");
		// BUGFIX: the original passed 'oldItem' here without ever populating it - the database
		// lookup only happened on the wanted path above, so the online delete targeted an empty
		// Item record. Look the item up first; if it was never synced there is nothing to delete.
		// NOTE(review): this mirrors the wanted-path lookup and only checks the default driveId - confirm
		// whether moved items on shared drives need the multi-drive search used by deleteByPath.
		if (itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) {
			uploadDeletedItem(oldItem, oldPath);
		}
	}
}
}