Merge branch 'onedrive-v2.5.0-alpha-5' into shutdown_monitor

JC-comp 2024-02-13 11:00:01 +08:00 committed by GitHub
commit 84966fc2d6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 750 additions and 426 deletions


@ -37,7 +37,9 @@ Additionally there are specific version release tags for each release. Refer to
## Configuration Steps
### 1. Install 'docker' on your platform
Install 'docker' as per your distribution platform's instructions if not already installed.
If 'docker' is not already installed, install it for your distribution platform as per the instructions on https://docs.docker.com/engine/install/
**Note:** If you are using Ubuntu, do not install Docker from your distribution platform's repositories. You must install Docker from the Docker-provided packages.
### 2. Configure 'docker' to allow non-privileged users to run Docker commands
Read https://docs.docker.com/engine/install/linux-postinstall/ to configure the 'docker' user group with your user account, allowing your non-'root' user to run 'docker' commands.


@ -7,6 +7,7 @@ import std.file;
import std.datetime;
import std.concurrency;
import std.typecons;
import core.sync.condition;
import core.sync.mutex;
import core.thread;
import std.format;
@ -26,6 +27,7 @@ class LogBuffer {
private:
string[3][] buffer;
Mutex bufferLock;
Condition condReady;
string logFilePath;
bool writeToFile;
bool verboseLogging;
@ -38,6 +40,7 @@ class LogBuffer {
this(bool verboseLogging, bool debugLogging) {
// Initialise the mutex
bufferLock = new Mutex();
condReady = new Condition(bufferLock);
// Initialise other items
this.logFilePath = logFilePath;
this.writeToFile = writeToFile;
@ -50,8 +53,11 @@ class LogBuffer {
flushThread.start();
}
~this() {
void shutdown() {
synchronized(bufferLock) {
isRunning = false;
condReady.notify();
}
flushThread.join();
flush();
}
@ -86,6 +92,7 @@ class LogBuffer {
}
}
}
(cast()condReady).notify();
}
}
@ -99,14 +106,17 @@ class LogBuffer {
private void flushBuffer() {
while (isRunning) {
Thread.sleep(dur!("msecs")(200));
flush();
}
stdout.flush();
}
private void flush() {
string[3][] messages;
synchronized(bufferLock) {
while (buffer.empty && isRunning) {
condReady.wait();
}
messages = buffer;
buffer.length = 0;
}
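The log.d changes above replace the fixed 200ms polling sleep with a condition variable: callers notify `condReady` after appending to the buffer, the flush thread blocks in `condReady.wait()` while the buffer is empty, and the new `shutdown()` sets `isRunning = false` under the lock, notifies, joins the flush thread, and performs a final flush. The following is a minimal, self-contained sketch of that notify/wait/join shutdown pattern, not the project's actual LogBuffer; the class name, the plain `string` payload, and the `main()` driver are simplified stand-ins for illustration.

```d
import core.sync.condition;
import core.sync.mutex;
import core.thread;
import std.stdio;

class BufferedWriter {
    private string[] buffer;
    private Mutex bufferLock;
    private Condition condReady;
    private bool isRunning = true;
    private Thread flushThread;

    this() {
        bufferLock = new Mutex();
        // Bind the condition to the same mutex that guards the buffer
        condReady = new Condition(bufferLock);
        flushThread = new Thread(&flushLoop);
        flushThread.start();
    }

    void log(string message) {
        synchronized (bufferLock) {
            buffer ~= message;
        }
        condReady.notify(); // wake the flush thread
    }

    void shutdown() {
        synchronized (bufferLock) {
            isRunning = false;
            condReady.notify(); // unblock a wait() that has no pending messages
        }
        flushThread.join(); // let the flush thread drain and exit
        flush();            // catch anything appended after its last pass
    }

    private void flushLoop() {
        while (isRunning) {
            flush();
        }
        stdout.flush();
    }

    private void flush() {
        string[] messages;
        synchronized (bufferLock) {
            // Sleep until there is work to do or we are shutting down
            while (buffer.length == 0 && isRunning) {
                condReady.wait(); // releases bufferLock while blocked
            }
            messages = buffer;
            buffer.length = 0;
        }
        foreach (message; messages) writeln(message);
    }
}

void main() {
    auto writer = new BufferedWriter();
    writer.log("hello");
    writer.log("world");
    writer.shutdown();
}
```

Compared with the removed 500ms sleep in performStandardExitProcess(), an explicit shutdown() guarantees the buffer is drained before the process exits rather than hoping a fixed sleep was long enough.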


@ -629,6 +629,7 @@ int main(string[] cliArgs) {
string localPath = ".";
string remotePath = "/";
if (!appConfig.getValueBool("resync")) {
// Check if there are interrupted upload session(s)
if (syncEngineInstance.checkForInterruptedSessionUploads) {
// Need to re-process the session upload files to resume the failed session uploads
@ -636,6 +637,10 @@ int main(string[] cliArgs) {
// Process the session upload files
syncEngineInstance.processForInterruptedSessionUploads();
}
} else {
// Clean up any upload session files due to --resync being used
syncEngineInstance.clearInterruptedSessionUploads();
}
// Are we doing a single directory operation (--single-directory) ?
if (!appConfig.getValueString("single_directory").empty) {
@ -742,16 +747,11 @@ int main(string[] cliArgs) {
};
// Delegated function for when inotify detects a local file has been changed
filesystemMonitor.onFileChanged = delegate(string path) {
filesystemMonitor.onFileChanged = delegate(string[] changedLocalFilesToUploadToOneDrive) {
// Handle a potentially locally changed file
// Logging for this event moved to handleLocalFileTrigger() due to threading and false triggers from scanLocalFilesystemPathForNewData() above
try {
syncEngineInstance.handleLocalFileTrigger(path);
} catch (CurlException e) {
addLogEntry("Offline, cannot upload changed item: " ~ path, ["verbose"]);
} catch(Exception e) {
addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]);
}
addLogEntry("[M] Total number of local file changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length));
syncEngineInstance.handleLocalFileTrigger(changedLocalFilesToUploadToOneDrive);
};
// Delegated function for when inotify detects a delete event
@ -1143,9 +1143,8 @@ void performStandardExitProcess(string scopeCaller = null) {
thread_joinAll();
addLogEntry("Application exit");
addLogEntry("#######################################################################################################################################", ["logFileOnly"]);
// Sleep to allow any final logging output to be printed - this is needed as we are using buffered logging output
Thread.sleep(dur!("msecs")(500));
// Destroy the shared logging buffer
(cast() logBuffer).shutdown();
object.destroy(logBuffer);
}
}
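The main.d hunk above changes the `onFileChanged` delegate from one call per path (`delegate(string path)`) to one call per batch (`delegate(string[] ...)`), with the per-file try/catch moved into `handleLocalFileTrigger()`. The sketch below shows the shape of that wiring; `FakeMonitor` and `FakeSyncEngine` are stand-ins invented for illustration, not types from the project.

```d
import std.conv : to;
import std.stdio : writeln;

// Stand-ins for the real Monitor and SyncEngine classes (illustration only)
class FakeMonitor {
    void delegate(string[] paths) onFileChanged;

    // The monitor now coalesces change events and fires one callback per batch
    void deliver(string[] batchedChanges) {
        if (onFileChanged !is null && batchedChanges.length > 0)
            onFileChanged(batchedChanges);
    }
}

class FakeSyncEngine {
    void handleLocalFileTrigger(string[] changedLocalFilesToUploadToOneDrive) {
        // The per-file try/catch now lives here rather than in the delegate body
        foreach (path; changedLocalFilesToUploadToOneDrive) {
            try {
                writeln("would upload: " ~ path);
            } catch (Exception e) {
                writeln("Cannot upload file changes/creation: " ~ e.msg);
            }
        }
    }
}

void main() {
    auto filesystemMonitor = new FakeMonitor();
    auto syncEngineInstance = new FakeSyncEngine();

    // Same shape as the new delegate in main.d: one call per batch, not per file
    filesystemMonitor.onFileChanged = delegate(string[] changedLocalFilesToUploadToOneDrive) {
        writeln("[M] Total number of local files changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length));
        syncEngineInstance.handleLocalFileTrigger(changedLocalFilesToUploadToOneDrive);
    };

    filesystemMonitor.deliver(["./docs/a.txt", "./docs/b.txt"]);
}
```

Batching means a failure on one file is reported inside handleLocalFileTrigger() and the loop moves on to the next file, which is why the per-path CurlException handling disappears from the delegate body in main.d.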


@ -8,6 +8,7 @@ import core.sys.linux.sys.inotify;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import core.sys.posix.sys.select;
import core.thread;
import core.time;
import std.algorithm;
import std.concurrency;
@ -139,7 +140,6 @@ class MonitorBackgroundWorker {
}
}
void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
{
try {
@ -150,6 +150,96 @@ void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
}
}
enum ActionType {
moved,
deleted,
changed,
createDir
}
struct Action {
ActionType type;
bool skipped;
string src;
string dst;
}
struct ActionHolder {
Action[] actions;
ulong[string] srcMap;
void append(ActionType type, string src, string dst=null) {
ulong[] pendingTargets;
switch (type) {
case ActionType.changed:
if (src in srcMap && actions[srcMap[src]].type == ActionType.changed) {
// skip duplicate operations
return;
}
break;
case ActionType.createDir:
break;
case ActionType.deleted:
if (src in srcMap) {
ulong pendingTarget = srcMap[src];
// Skip pending operations that require reading a local file that is now gone
switch (actions[pendingTarget].type) {
case ActionType.changed:
case ActionType.createDir:
actions[srcMap[src]].skipped = true;
srcMap.remove(src);
break;
default:
break;
}
}
break;
case ActionType.moved:
for(int i = 0; i < actions.length; i++) {
// Only match for latest operation
if (actions[i].src in srcMap) {
switch (actions[i].type) {
case ActionType.changed:
case ActionType.createDir:
// check if the source is the prefix of the target
string prefix = src ~ "/";
string target = actions[i].src;
if (prefix[0] != '.')
prefix = "./" ~ prefix;
if (target[0] != '.')
target = "./" ~ target;
string comm = commonPrefix(prefix, target);
if (src == actions[i].src || comm.length == prefix.length) {
// Re-queue operations that need to read the moved local file so they run after the move is processed online
pendingTargets ~= i;
actions[i].skipped = true;
srcMap.remove(actions[i].src);
if (comm.length == target.length)
actions[i].src = dst;
else
actions[i].src = dst ~ target[comm.length - 1 .. target.length];
}
break;
default:
break;
}
}
}
break;
default:
break;
}
actions ~= Action(type, false, src, dst);
srcMap[src] = actions.length - 1;
foreach (pendingTarget; pendingTargets) {
actions ~= actions[pendingTarget];
actions[$-1].skipped = false;
srcMap[actions[$-1].src] = actions.length - 1;
}
}
}
final class Monitor {
// Class variables
ApplicationConfig appConfig;
@ -177,13 +267,15 @@ final class Monitor {
// Configure function delegates
void delegate(string path) onDirCreated;
void delegate(string path) onFileChanged;
void delegate(string[] path) onFileChanged;
void delegate(string path) onDelete;
void delegate(string from, string to) onMove;
// List of paths that were moved, not deleted
bool[string] movedNotDeleted;
ActionHolder actionHolder;
// Configure the class variable to consume the application configuration, including selective sync
this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
this.appConfig = appConfig;
@ -394,11 +486,13 @@ final class Monitor {
events: POLLIN
};
while (true) {
bool hasNotification = false;
while (true) {
int ret = poll(&fds, 1, 0);
if (ret == -1) throw new MonitorException("poll failed");
else if (ret == 0) break; // no events available
hasNotification = true;
size_t length = read(worker.fd, buffer.ptr, buffer.length);
if (length == -1) throw new MonitorException("read failed");
@ -493,32 +587,32 @@ final class Monitor {
auto from = event.cookie in cookieToPath;
if (from) {
cookieToPath.remove(event.cookie);
if (useCallbacks) onMove(*from, path);
if (useCallbacks) actionHolder.append(ActionType.moved, *from, path);
movedNotDeleted.remove(*from); // Clear moved status
} else {
// Handle file moved in from outside
if (event.mask & IN_ISDIR) {
if (useCallbacks) onDirCreated(path);
if (useCallbacks) actionHolder.append(ActionType.createDir, path);
} else {
if (useCallbacks) onFileChanged(path);
if (useCallbacks) actionHolder.append(ActionType.changed, path);
}
}
} else if (event.mask & IN_CREATE) {
addLogEntry("event IN_CREATE: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) {
addRecursive(path);
if (useCallbacks) onDirCreated(path);
if (useCallbacks) actionHolder.append(ActionType.createDir, path);
}
} else if (event.mask & IN_DELETE) {
if (path in movedNotDeleted) {
movedNotDeleted.remove(path); // Ignore delete for moved files
} else {
addLogEntry("event IN_DELETE: " ~ path, ["debug"]);
if (useCallbacks) onDelete(path);
if (useCallbacks) actionHolder.append(ActionType.deleted, path);
}
} else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) {
addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]);
if (useCallbacks) onFileChanged(path);
if (useCallbacks) actionHolder.append(ActionType.changed, path);
} else {
addLogEntry("event unhandled: " ~ path, ["debug"]);
assert(0);
@ -527,6 +621,11 @@ final class Monitor {
skip:
i += inotify_event.sizeof + event.len;
}
Thread.sleep(dur!"seconds"(1));
}
if (!hasNotification) break;
processChanges();
// Assume that the items moved outside the watched directory have been deleted
foreach (cookie, path; cookieToPath) {
addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]);
@ -538,4 +637,33 @@ final class Monitor {
addLogEntry("inotify events flushed", ["debug"]);
}
}
private void processChanges() {
string[] changes;
foreach(action; actionHolder.actions) {
if (action.skipped)
continue;
switch (action.type) {
case ActionType.changed:
changes ~= action.src;
break;
case ActionType.deleted:
onDelete(action.src);
break;
case ActionType.createDir:
onDirCreated(action.src);
break;
case ActionType.moved:
onMove(action.src, action.dst);
break;
default:
break;
}
}
if (!changes.empty)
onFileChanged(changes);
object.destroy(actionHolder);
}
}
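The ActionHolder introduced above buffers raw inotify events and coalesces them before any callback fires: duplicate 'changed' events are dropped, a 'deleted' cancels a pending 'createDir' or 'changed' on the same path, and a 'moved' re-queues pending operations under their post-move path, after the move itself. processChanges() then dispatches the surviving actions, batching all 'changed' paths into a single onFileChanged(string[]) call. The snippet below is an illustrative unittest-style walkthrough that assumes the ActionType, Action and ActionHolder definitions from this file are in scope; it is not part of the commit.

```d
unittest {
    import std.algorithm : count;

    ActionHolder holder;

    // A file under ./docs is written twice, then ./docs itself is renamed
    holder.append(ActionType.changed, "./docs/readme.md");
    holder.append(ActionType.changed, "./docs/readme.md"); // duplicate 'changed' is dropped
    holder.append(ActionType.moved, "./docs", "./documents");

    // Two live actions remain: the directory move, and the pending 'changed'
    // re-queued after it under the post-move path ./documents/readme.md
    assert(holder.actions.count!(a => !a.skipped) == 2);
    assert(holder.actions[$ - 1].type == ActionType.changed);
    assert(holder.actions[$ - 1].src == "./documents/readme.md");

    // processChanges() would therefore call onMove("./docs", "./documents")
    // first, then onFileChanged(["./documents/readme.md"]) as a single batch.
}
```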


@ -1114,9 +1114,21 @@ class SyncEngine {
// Change is to delete an item
addLogEntry("Handing a OneDrive Deleted Item", ["debug"]);
if (existingDBEntry) {
// Is the item to delete locally actually in sync with OneDrive currently?
// What is the source of this item data?
string itemSource = "online";
// Compute this deleted item's path based on the database entries
string localPathToDelete = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name;
if (isItemSynced(existingDatabaseItem, localPathToDelete, itemSource)) {
// Flag to delete
addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]);
idsToDelete ~= [thisItemDriveId, thisItemId];
} else {
// Local data protection is configured; safeBackup the local file, passing in whether we are performing a --dry-run or not
safeBackup(localPathToDelete, dryRun);
}
} else {
// Flag to ignore
addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]);
@ -1197,22 +1209,8 @@ class SyncEngine {
if (hasSharedElement(onedriveJSONItem)) {
// Has the Shared JSON structure
addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]);
// Create a DB Tie Record for this parent object
addLogEntry("Creating a DB Tie for this Personal Shared Folder", ["debug"]);
// DB Tie
Item parentItem;
parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
parentItem.id = onedriveJSONItem["parentReference"]["id"].str;
parentItem.name = "root";
parentItem.type = ItemType.dir;
parentItem.mtime = remoteItem.mtime;
parentItem.parentId = null;
// Add this DB Tie parent record to the local database
addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]);
itemDB.upsert(parentItem);
// Create a 'root' DB Tie Record for this JSON object
createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem);
}
// Ensure that this item has no parent
@ -1226,21 +1224,8 @@ class SyncEngine {
addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]);
if (appConfig.accountType == "business") {
// Create a DB Tie Record for this parent object
addLogEntry("Creating a DB Tie for this Business Shared Folder", ["debug"]);
// DB Tie
Item parentItem;
parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
parentItem.id = onedriveJSONItem["parentReference"]["id"].str;
parentItem.name = "root";
parentItem.type = ItemType.dir;
parentItem.mtime = remoteItem.mtime;
parentItem.parentId = null;
// Add this DB Tie parent record to the local database
addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]);
itemDB.upsert(parentItem);
// Create a 'root' DB Tie Record for this JSON object
createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem);
// Ensure that this item has no parent
addLogEntry("Setting remoteItem.parentId to be null", ["debug"]);
@ -1278,8 +1263,7 @@ class SyncEngine {
itemDB.upsert(remoteItem);
} else {
// Sharepoint account type
addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED ........ ", ["debug"]);
addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED YET ........ ", ["info"]);
}
}
}
@ -2568,7 +2552,7 @@ class SyncEngine {
}
// Add to pathFakeDeletedArray
// We dont want to try and upload this item again, so we need to track this object
// We don't want to try and upload this item again, so we need to track this object's removal
if (dryRun) {
// We need to add './' here so that it can be correctly searched to ensure it is not uploaded
string pathToAdd = "./" ~ path;
@ -3502,22 +3486,29 @@ class SyncEngine {
// For each batch of files to upload, upload the changed data to OneDrive
foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) {
uploadChangedLocalFileToOneDrive(chunk);
processChangedLocalItemsToUploadInParallel(chunk);
}
}
// Upload the changed file batches in parallel
void processChangedLocalItemsToUploadInParallel(string[3][] array) {
foreach (i, localItemDetails; taskPool.parallel(array)) {
addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);
uploadChangedLocalFileToOneDrive(localItemDetails);
addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);
}
}
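The refactor above splits the old combined routine into processChangedLocalItemsToUploadInParallel(), which slices the changed-item list into batches and fans each batch out with taskPool.parallel, and a per-item uploadChangedLocalFileToOneDrive(string[3]). Below is a minimal sketch of that chunk-then-parallel shape using std.range.chunks and std.parallelism; uploadOne and the sample data are stand-ins for illustration, not the project's upload code.

```d
import core.thread : Thread;
import core.time : dur;
import std.parallelism : taskPool;
import std.range : chunks;
import std.stdio : writefln;

// Stand-in for the real per-item upload function (illustration only)
void uploadOne(string[3] item) {
    // item[0] = driveId, item[1] = itemId, item[2] = local file path
    Thread.sleep(dur!"msecs"(10)); // simulate network work
    writefln("uploaded %s", item[2]);
}

void main() {
    string[3][] changedItems = [
        ["driveA", "id1", "./a.txt"],
        ["driveA", "id2", "./b.txt"],
        ["driveA", "id3", "./c.txt"],
        ["driveA", "id4", "./d.txt"],
    ];
    enum batchSize = 2;

    // Same shape as the sync.d change: slice the work into fixed-size batches,
    // then let taskPool.parallel spread each batch across worker threads.
    foreach (chunk; changedItems.chunks(batchSize)) {
        foreach (i, localItemDetails; taskPool.parallel(chunk)) {
            writefln("upload thread %s starting", i);
            uploadOne(localItemDetails);
        }
    }
}
```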
// Upload changed local files to OneDrive in parallel
void uploadChangedLocalFileToOneDrive(string[3][] array) {
foreach (i, localItemDetails; taskPool.parallel(array)) {
addLogEntry("Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);
void uploadChangedLocalFileToOneDrive(string[3] localItemDetails) {
// These are the details of the item we need to upload
string changedItemParentId = localItemDetails[0];
string changedItemId = localItemDetails[1];
string localFilePath = localItemDetails[2];
addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]);
// How much space is remaining on OneDrive
ulong remainingFreeSpace;
// Did the upload fail?
@ -3646,10 +3637,6 @@ class SyncEngine {
uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse);
}
}
addLogEntry("Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);
} // end of 'foreach (i, localItemDetails; array.enumerate)'
}
// Perform the upload of a locally modified file to OneDrive
@ -3959,10 +3946,17 @@ class SyncEngine {
// Perform a filesystem walk to uncover new data to upload to OneDrive
void scanLocalFilesystemPathForNewData(string path) {
// Cleanup array memory before we start adding files
newLocalFilesToUploadToOneDrive = [];
// Perform a filesystem walk to uncover new data
scanLocalFilesystemPathForNewDataToUpload(path);
// Upload new data that has been identified
processNewLocalItemsToUpload();
}
void scanLocalFilesystemPathForNewDataToUpload(string path) {
// To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences?
string logPath;
if (path == ".") {
@ -3993,10 +3987,12 @@ class SyncEngine {
// Perform the filesystem walk of this path, building an array of new items to upload
scanPathForNewData(path);
if (isDir(path)) {
if (!appConfig.surpressLoggingOutput) {
if (appConfig.verbosityCount == 0)
addLogEntry("\n", ["consoleOnlyNoNewLine"]);
}
}
// To finish off the processing items, this is needed to reflect this in the log
addLogEntry("------------------------------------------------------------------", ["debug"]);
@ -4006,7 +4002,10 @@ class SyncEngine {
auto elapsedTime = finishTime - startTime;
addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]);
}
// Process the new local items that have been identified for upload to OneDrive
void processNewLocalItemsToUpload() {
// Upload new data that has been identified
// Are there any new local items to upload to OneDrive?
if (!newLocalFilesToUploadToOneDrive.empty) {
@ -4052,23 +4051,17 @@ class SyncEngine {
// Cleanup array memory after uploading all files
newLocalFilesToUploadToOneDrive = [];
}
if (!databaseItemsWhereContentHasChanged.empty) {
// There are changed local files that were in the DB to upload
addLogEntry("Changed local items to upload to OneDrive: " ~ to!string(databaseItemsWhereContentHasChanged.length));
processChangedLocalItemsToUpload();
// Cleanup array memory
databaseItemsWhereContentHasChanged = [];
}
}
// Scan this path for new data
void scanPathForNewData(string path) {
// Add a processing '.'
if (isDir(path)) {
if (!appConfig.surpressLoggingOutput) {
if (appConfig.verbosityCount == 0)
addProcessingDotEntry();
}
}
ulong maxPathLength;
ulong pathWalkLength;
@ -4228,6 +4221,7 @@ class SyncEngine {
if (canFind(businessSharedFoldersOnlineToSkip, path)) {
// This path was skipped - why?
addLogEntry("Skipping item '" ~ path ~ "' due to this path matching an existing online Business Shared Folder name", ["info", "notify"]);
addLogEntry("To sync this Business Shared Folder, consider enabling 'sync_business_shared_folders' within your application configuration.", ["info"]);
skipFolderTraverse = true;
}
}
@ -4286,12 +4280,13 @@ class SyncEngine {
}
// Handle a single file inotify trigger when using --monitor
void handleLocalFileTrigger(string localFilePath) {
void handleLocalFileTrigger(string[] changedLocalFilesToUploadToOneDrive) {
// Is this path a new file or an existing one?
// Normally we would use pathFoundInDatabase() to determine this, but we also need 'databaseItem' if the item is in the database
foreach (localFilePath; changedLocalFilesToUploadToOneDrive) {
try {
Item databaseItem;
bool fileFoundInDB = false;
string[3][] modifiedItemToUpload;
foreach (driveId; onlineDriveDetails.keys) {
if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) {
@ -4305,7 +4300,7 @@ class SyncEngine {
// This is a new file as it is not in the database
// Log that the file has been added locally
addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]);
scanLocalFilesystemPathForNewData(localFilePath);
scanLocalFilesystemPathForNewDataToUpload(localFilePath);
} else {
// This is a potentially modified file, needs to be handled as such. Is the item truly modified?
if (!testFileHash(localFilePath, databaseItem)) {
@ -4313,10 +4308,14 @@ class SyncEngine {
// Log that the file has changed locally
addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]);
// Add the modified item to the array to upload
modifiedItemToUpload ~= [databaseItem.driveId, databaseItem.id, localFilePath];
uploadChangedLocalFileToOneDrive(modifiedItemToUpload);
uploadChangedLocalFileToOneDrive([databaseItem.driveId, databaseItem.id, localFilePath]);
}
}
} catch(Exception e) {
addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]);
}
}
processNewLocalItemsToUpload();
}
// Query the database to determine if this path is within the existing database
@ -4449,33 +4448,17 @@ class SyncEngine {
addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]);
// Depending on the data within parentItem, will depend on what method we are using to search
// In a --local-first scenario, a Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details
// A Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details
Item queryItem;
if ((appConfig.getValueBool("local_first")) && (parentItem.type == ItemType.remote)) {
// We are --local-first scenario and this folder is a potential shared object
addLogEntry("--localfirst & parentItem is a remote item object", ["debug"]);
if (parentItem.type == ItemType.remote) {
// This folder is a potential shared object
addLogEntry("ParentItem is a remote item object", ["debug"]);
// Need to create the DB Tie for this shared object to ensure this exists in the database
createDatabaseTieRecordForOnlineSharedFolder(parentItem);
// Update the queryItem values
queryItem.driveId = parentItem.remoteDriveId;
queryItem.id = parentItem.remoteId;
// Need to create the DB Tie for this object
addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]);
// New DB Tie Item to bind the 'remote' path to our parent path
Item tieDBItem;
// Set the name
tieDBItem.name = parentItem.name;
// Set the correct item type
tieDBItem.type = ItemType.dir;
// Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie
tieDBItem.driveId = parentItem.remoteDriveId;
tieDBItem.id = parentItem.remoteId;
// Set the correct mtime
tieDBItem.mtime = parentItem.mtime;
// Add tie DB record to the local database
addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
} else {
// Use parent item for the query item
addLogEntry("Standard Query, use parentItem", ["debug"]);
@ -4567,14 +4550,14 @@ class SyncEngine {
string requiredDriveId;
string requiredParentItemId;
// Is this a Personal Account and is the item a Remote Object (Shared Folder) ?
if ((appConfig.accountType == "personal") && (parentItem.type == ItemType.remote)) {
// Is the item a Remote Object (Shared Folder) ?
if (parentItem.type == ItemType.remote) {
// Yes .. Shared Folder
addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]);
requiredDriveId = parentItem.remoteDriveId;
requiredParentItemId = parentItem.remoteId;
} else {
// Not a personal account + Shared Folder
// Not a Shared Folder
requiredDriveId = parentItem.driveId;
requiredParentItemId = parentItem.id;
}
@ -4675,14 +4658,18 @@ class SyncEngine {
if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) {
// OneDrive 'name' matches local path name
if (appConfig.accountType == "business") {
// We are a business account, this existing online folder, could be a Shared Online Folder and is the 'Add shortcut to My files' item
// We are a business account; this existing online folder could be a Shared Online Folder added via 'Add shortcut to My files'
addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]);
// Is this a remote folder
if (isItemRemote(onlinePathData)) {
// The folder is a remote item ... we do not want to create this ...
addLogEntry("Remote Existing Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]);
addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]);
addLogEntry("Existing Remote Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]);
// Is Shared Business Folder Syncing enabled ?
if (!appConfig.getValueBool("sync_business_shared_items")) {
// Shared Business Folder Syncing is NOT enabled
addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]);
// Add this path to businessSharedFoldersOnlineToSkip
businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate];
// no save to database, no online create
@ -4691,6 +4678,17 @@ class SyncEngine {
// Free object and memory
object.destroy(createDirectoryOnlineOneDriveApiInstance);
return;
} else {
// As the 'onlinePathData' is potentially missing the actual correct parent folder id in the 'remoteItem' JSON response, we have to perform a further query to get the correct answer
// Failure to do this means the 'root' DB Tie Record would carry a parent reference id different from this folder's actual parent reference id
JSONValue sharedFolderParentPathData;
string remoteDriveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str;
string remoteItemId = onlinePathData["remoteItem"]["id"].str;
sharedFolderParentPathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsById(remoteDriveId, remoteItemId);
// A 'root' DB Tie Record needed for this folder using the correct parent data
createDatabaseRootTieRecordForOnlineSharedFolder(sharedFolderParentPathData);
}
}
}
@ -4824,7 +4822,7 @@ class SyncEngine {
// If the parent path was found in the DB, to ensure we are uploading to the right location, 'parentItem.driveId' must not be empty
if ((parentPathFoundInDB) && (parentItem.driveId.empty)) {
// switch to using defaultDriveId
addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls");
addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls", ["debug"]);
parentItem.driveId = appConfig.defaultDriveId;
}
@ -4929,11 +4927,24 @@ class SyncEngine {
// even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different.
// Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this.
// In order to upload this file - this query HAS to respond as a 404 - Not Found
// In order to upload this file - this query HAS to respond with a '404 - Not Found' so that the upload is triggered
// Does this 'file' already exist on OneDrive?
try {
if (parentItem.driveId == appConfig.defaultDriveId) {
// getPathDetailsByDriveId is only reliable when the driveId is our driveId
fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload);
} else {
// We need to curate a response by listing the children of this parentItem.driveId and parentItem.id, without traversing directories
// So that IF the file is on a Shared Folder, it can be found, and, if it exists, checked correctly
fileDetailsFromOneDrive = searchDriveItemForFile(parentItem.driveId, parentItem.id, fileToUpload);
// Was the file found?
if (fileDetailsFromOneDrive.type() != JSONType.object) {
// No ....
throw new OneDriveException(404, "Name not found via searchDriveItemForFile");
}
}
// Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API
if (hasName(fileDetailsFromOneDrive)) {
performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str);
@ -4962,10 +4973,10 @@ class SyncEngine {
string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str;
string changedItemId = fileDetailsFromOneDrive["id"].str;
addLogEntry("Skipping uploading this file as moving it to upload as a modified file (online item already exists): " ~ fileToUpload);
databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload];
// In order to process the local item as a 'changed' item, unfortunately we need to save the online data to the local DB
saveItem(fileDetailsFromOneDrive);
uploadChangedLocalFileToOneDrive([changedItemParentId, changedItemId, fileToUpload]);
}
} catch (OneDriveException exception) {
// If we get a 404 .. the file is not online .. this is what we want .. file does not exist online
@ -6537,24 +6548,23 @@ class SyncEngine {
// Is this JSON a remote object
addLogEntry("Testing if this is a remote Shared Folder", ["debug"]);
if (isItemRemote(getPathDetailsAPIResponse)) {
// Remote Directory .. need a DB Tie Item
addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]);
// New DB Tie Item to bind the 'remote' path to our parent path
Item tieDBItem;
// Remote Directory .. need a DB Tie Record
createDatabaseTieRecordForOnlineSharedFolder(parentDetails);
// Temp DB Item to bind the 'remote' path to our parent path
Item tempDBItem;
// Set the name
tieDBItem.name = parentDetails.name;
tempDBItem.name = parentDetails.name;
// Set the correct item type
tieDBItem.type = ItemType.dir;
tempDBItem.type = ItemType.dir;
// Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie
tieDBItem.driveId = parentDetails.remoteDriveId;
tieDBItem.id = parentDetails.remoteId;
tempDBItem.driveId = parentDetails.remoteDriveId;
tempDBItem.id = parentDetails.remoteId;
// Set the correct mtime
tieDBItem.mtime = parentDetails.mtime;
// Add tie DB record to the local database
addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
// Update parentDetails to use the DB Tie record
parentDetails = tieDBItem;
tempDBItem.mtime = parentDetails.mtime;
// Update parentDetails to use this temp record
parentDetails = tempDBItem;
}
} catch (OneDriveException exception) {
if (exception.httpStatusCode == 404) {
@ -6815,7 +6825,7 @@ class SyncEngine {
if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) {
// The old path|item is not synced with the database, upload as a new file
addLogEntry("Moved local item was not in-sync with local databse - uploading as new item");
uploadNewFile(newPath);
scanLocalFilesystemPathForNewData(newPath);
return;
}
@ -6961,8 +6971,9 @@ class SyncEngine {
// What account type is this?
if (appConfig.accountType != "personal") {
// Not a personal account, thus the integrity failure is most likely due to SharePoint
addLogEntry("CAUTION: Microsoft OneDrive when using SharePoint as a backend enhances files after you upload them, which means this file may now have technical differences from your local copy, resulting in a data integrity issue.", ["verbose"]);
addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details", ["verbose"]);
addLogEntry("CAUTION: When you upload files to Microsoft OneDrive that uses SharePoint as its backend, Microsoft OneDrive will alter your files post upload.", ["verbose"]);
addLogEntry("CAUTION: This will lead to technical differences between the version stored online and your local original file, potentially causing issues with the accuracy or consistency of your data.", ["verbose"]);
addLogEntry("CAUTION: Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details.", ["verbose"]);
}
// How can this be disabled?
addLogEntry("To disable the integrity checking of uploaded files use --disable-upload-validation");
@ -7600,6 +7611,22 @@ class SyncEngine {
return interruptedUploads;
}
// Clear any session_upload.* files
void clearInterruptedSessionUploads() {
// Scan the configuration directory for the session_upload.* files we are interested in
foreach (sessionFile; dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)) {
// calculate the full path
string tempPath = buildNormalizedPath(buildPath(appConfig.configDirName, sessionFile));
JSONValue sessionFileData = readText(tempPath).parseJSON();
addLogEntry("Removing interrupted session upload file due to --resync for: " ~ sessionFileData["localPath"].str, ["info"]);
// Process removal
if (!dryRun) {
safeRemove(tempPath);
}
}
}
// Process interrupted 'session_upload' files
void processForInterruptedSessionUploads() {
// For each upload_session file that has been found, process the data to ensure it is still valid
@ -7847,6 +7874,96 @@ class SyncEngine {
}
}
// Search a given Drive ID, Item ID and filename to see if this exists in the location specified
JSONValue searchDriveItemForFile(string parentItemDriveId, string parentItemId, string fileToUpload) {
JSONValue onedriveJSONItem;
string searchName = baseName(fileToUpload);
JSONValue thisLevelChildren;
string nextLink;
// Create a new API Instance for this thread and initialise it
OneDriveApi checkFileOneDriveApiInstance;
checkFileOneDriveApiInstance = new OneDriveApi(appConfig);
checkFileOneDriveApiInstance.initialise();
for (;;) {
// query top level children
try {
thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink);
} catch (OneDriveException exception) {
// OneDrive threw an error
addLogEntry("------------------------------------------------------------------", ["debug"]);
addLogEntry("Query Error: thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]);
addLogEntry("driveId: " ~ parentItemDriveId, ["debug"]);
addLogEntry("idToQuery: " ~ parentItemId, ["debug"]);
addLogEntry("nextLink: " ~ nextLink, ["debug"]);
string thisFunctionName = getFunctionName!({});
// HTTP request returned status code 408,429,503,504
if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
// Handle the 429
if (exception.httpStatusCode == 429) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest(checkFileOneDriveApiInstance);
addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]);
}
// re-try the specific changes queries
if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines(exception.msg);
addLogEntry(to!string(errorArray[0]) ~ " when attempting to query OneDrive top level drive children on OneDrive - retrying applicable request in 30 seconds");
addLogEntry("checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink) previously threw an error - retrying", ["debug"]);
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]);
Thread.sleep(dur!"seconds"(30));
}
// re-try original request - retried for 429, 503, 504 - but loop back calling this function
addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]);
// Propagate the result of the retried call
return searchDriveItemForFile(parentItemDriveId, parentItemId, fileToUpload);
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
displayOneDriveErrorMessage(exception.msg, thisFunctionName);
}
}
// process thisLevelChildren response
foreach (child; thisLevelChildren["value"].array) {
// Only looking at files
if ((child["name"].str == searchName) && (("file" in child) != null)) {
// Found the matching file, return its JSON representation
// Operations in this thread are done / complete
checkFileOneDriveApiInstance.shutdown();
// Free object and memory
object.destroy(checkFileOneDriveApiInstance);
// Return child
return child;
}
}
// If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
// to indicate more items are available and provide the request URL for the next page of items.
if ("@odata.nextLink" in thisLevelChildren) {
// Update nextLink to next changeSet bundle
addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);
nextLink = thisLevelChildren["@odata.nextLink"].str;
} else break;
}
// Operations in this thread are done / complete
checkFileOneDriveApiInstance.shutdown();
// Free object and memory
object.destroy(checkFileOneDriveApiInstance);
// return an empty JSON item
return onedriveJSONItem;
}
// Update 'onlineDriveDetails' with the latest data about this drive
void updateDriveDetailsCache(string driveId, bool quotaRestricted, bool quotaAvailable, ulong localFileSize) {
@ -7886,4 +8003,72 @@ class SyncEngine {
addOrUpdateOneDriveOnlineDetails(driveId);
}
}
// Create a 'root' DB Tie Record for a Shared Folder from the JSON data
void createDatabaseRootTieRecordForOnlineSharedFolder(JSONValue onedriveJSONItem) {
// Creating|Updating a DB Tie
addLogEntry("Creating|Updating a 'root' DB Tie Record for this Shared Folder: " ~ onedriveJSONItem["name"].str, ["debug"]);
addLogEntry("Raw JSON for 'root' DB Tie Record: " ~ to!string(onedriveJSONItem), ["debug"]);
// New DB Tie Item to detail the 'root' of the Shared Folder
Item tieDBItem;
tieDBItem.name = "root";
// Get the right parentReference details
if (isItemRemote(onedriveJSONItem)) {
tieDBItem.driveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str;
tieDBItem.id = onedriveJSONItem["remoteItem"]["id"].str;
} else {
if (onedriveJSONItem["name"].str != "root") {
tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
tieDBItem.id = onedriveJSONItem["parentReference"]["id"].str;
} else {
tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
tieDBItem.id = onedriveJSONItem["id"].str;
}
}
tieDBItem.type = ItemType.dir;
tieDBItem.mtime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str);
tieDBItem.parentId = null;
// Add this DB Tie parent record to the local database
addLogEntry("Creating|Updating into local database a 'root' DB Tie record: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
}
// Create a DB Tie Record for a Shared Folder
void createDatabaseTieRecordForOnlineSharedFolder(Item parentItem) {
// Creating|Updating a DB Tie
addLogEntry("Creating|Updating a DB Tie Record for this Shared Folder: " ~ parentItem.name, ["debug"]);
addLogEntry("Parent Item Record: " ~ to!string(parentItem), ["debug"]);
// New DB Tie Item to bind the 'remote' path to our parent path
Item tieDBItem;
tieDBItem.name = parentItem.name;
tieDBItem.driveId = parentItem.remoteDriveId;
tieDBItem.id = parentItem.remoteId;
tieDBItem.type = ItemType.dir;
tieDBItem.mtime = parentItem.mtime;
// What account type is this? This determines what 'tieDBItem.parentId' should be set to
// There is a difference in the JSON responses between 'personal' and 'business' account types for Shared Folders
// Essentially an API inconsistency
if (appConfig.accountType == "personal") {
// Set tieDBItem.parentId to null
tieDBItem.parentId = null;
} else {
// The tieDBItem.parentId needs to be the correct driveId id reference
// Query the DB
Item[] rootDriveItems;
Item dbRecord;
rootDriveItems = itemDB.selectByDriveId(parentItem.remoteDriveId);
dbRecord = rootDriveItems[0];
tieDBItem.parentId = dbRecord.id;
}
// Add tie DB record to the local database
addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
}
}