Merge branch 'onedrive-v2.5.0-alpha-5' into shutdown_monitor

JC-comp 2024-02-13 11:00:01 +08:00 committed by GitHub
commit 84966fc2d6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 750 additions and 426 deletions

@ -37,7 +37,9 @@ Additionally there are specific version release tags for each release. Refer to
## Configuration Steps
### 1. Install 'docker' on your platform
Install 'docker' as per your distribution platform's instructions if not already installed.
Install 'docker' for your distribution platform, if not already installed, as per the instructions at https://docs.docker.com/engine/install/
**Note:** If you are using Ubuntu, do not install Docker from your distribution platform's repositories. You must install Docker from the Docker-provided packages.
### 2. Configure 'docker' to allow non-privileged users to run Docker commands
Read https://docs.docker.com/engine/install/linux-postinstall/ to add your user account to the 'docker' user group so that your non-'root' user can run 'docker' commands.

@ -7,6 +7,7 @@ import std.file;
import std.datetime;
import std.concurrency;
import std.typecons;
import core.sync.condition;
import core.sync.mutex;
import core.thread;
import std.format;
@ -24,20 +25,22 @@ shared MonoTime lastInsertedTime;
class LogBuffer {
private:
string[3][] buffer;
Mutex bufferLock;
string logFilePath;
bool writeToFile;
bool verboseLogging;
bool debugLogging;
Thread flushThread;
bool isRunning;
string[3][] buffer;
Mutex bufferLock;
Condition condReady;
string logFilePath;
bool writeToFile;
bool verboseLogging;
bool debugLogging;
Thread flushThread;
bool isRunning;
bool sendGUINotification;
public:
this(bool verboseLogging, bool debugLogging) {
// Initialise the mutex and the condition variable
bufferLock = new Mutex();
condReady = new Condition(bufferLock);
// Initialise other items
this.logFilePath = logFilePath;
this.writeToFile = writeToFile;
@ -50,11 +53,14 @@ class LogBuffer {
flushThread.start();
}
~this() {
isRunning = false;
void shutdown() {
synchronized(bufferLock) {
isRunning = false;
condReady.notify();
}
flushThread.join();
flush();
}
}
shared void logThisMessage(string message, string[] levels = ["info"]) {
// Generate the timestamp for this log entry
@ -86,6 +92,7 @@ class LogBuffer {
}
}
}
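// logThisMessage() is declared shared, so cast() strips the shared qualifier from condReady in order to call notify() and wake the flush thread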
(cast()condReady).notify();
}
}
@ -99,14 +106,17 @@ class LogBuffer {
private void flushBuffer() {
while (isRunning) {
Thread.sleep(dur!("msecs")(200));
flush();
}
stdout.flush();
}
private void flush() {
string[3][] messages;
synchronized(bufferLock) {
while (buffer.empty && isRunning) {
condReady.wait();
}
messages = buffer;
buffer.length = 0;
}
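The hunk above replaces the 200 ms polling loop and the destructor with a condition variable and an explicit shutdown(): the flush thread now sleeps until it is notified, and shutdown() wakes it, joins it and performs a final flush. Below is a minimal standalone sketch of that Mutex + Condition producer/consumer pattern; the class, names and output are illustrative only and are not the project's LogBuffer.

```d
import core.sync.condition;
import core.sync.mutex;
import core.thread;
import std.stdio;

class MessageQueue {
    private string[] pending;
    private Mutex lockObj;
    private Condition ready;
    private bool running = true;
    private Thread consumer;

    this() {
        lockObj = new Mutex();
        ready = new Condition(lockObj);
        consumer = new Thread(&consume);
        consumer.start();
    }

    void push(string item) {
        synchronized (lockObj) {
            pending ~= item;
            ready.notify();        // wake the consumer immediately
        }
    }

    // Same idea as shutdown() above: flip the flag under the lock,
    // wake the consumer, then wait for it to drain and exit.
    void shutdown() {
        synchronized (lockObj) {
            running = false;
            ready.notify();
        }
        consumer.join();
    }

    private void consume() {
        while (true) {
            string[] batch;
            synchronized (lockObj) {
                while (pending.length == 0 && running)
                    ready.wait();  // releases the lock while blocked
                if (pending.length == 0 && !running)
                    return;        // shutdown requested, nothing left to drain
                batch = pending;
                pending.length = 0;
            }
            foreach (item; batch)
                writeln(item);
        }
    }
}

void main() {
    auto queue = new MessageQueue();
    queue.push("first message");
    queue.push("second message");
    queue.shutdown();              // both messages are flushed before exit
}
```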

@ -629,12 +629,17 @@ int main(string[] cliArgs) {
string localPath = ".";
string remotePath = "/";
// Check if there are interrupted upload session(s)
if (syncEngineInstance.checkForInterruptedSessionUploads) {
// Need to re-process the session upload files to resume the failed session uploads
addLogEntry("There are interrupted session uploads that need to be resumed ...");
// Process the session upload files
syncEngineInstance.processForInterruptedSessionUploads();
if (!appConfig.getValueBool("resync")) {
// Check if there are interrupted upload session(s)
if (syncEngineInstance.checkForInterruptedSessionUploads) {
// Need to re-process the session upload files to resume the failed session uploads
addLogEntry("There are interrupted session uploads that need to be resumed ...");
// Process the session upload files
syncEngineInstance.processForInterruptedSessionUploads();
}
} else {
// Clean up any upload session files due to --resync being used
syncEngineInstance.clearInterruptedSessionUploads();
}
// Are we doing a single directory operation (--single-directory) ?
@ -742,16 +747,11 @@ int main(string[] cliArgs) {
};
// Delegated function for when inotify detects a local file has been changed
filesystemMonitor.onFileChanged = delegate(string path) {
filesystemMonitor.onFileChanged = delegate(string[] changedLocalFilesToUploadToOneDrive) {
// Handle a potentially locally changed file
// Logging for this event moved to handleLocalFileTrigger() due to threading and false triggers from scanLocalFilesystemPathForNewData() above
try {
syncEngineInstance.handleLocalFileTrigger(path);
} catch (CurlException e) {
addLogEntry("Offline, cannot upload changed item: " ~ path, ["verbose"]);
} catch(Exception e) {
addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]);
}
addLogEntry("[M] Total number of local file changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length));
syncEngineInstance.handleLocalFileTrigger(changedLocalFilesToUploadToOneDrive);
};
// Delegated function for when inotify detects a delete event
@ -1143,9 +1143,8 @@ void performStandardExitProcess(string scopeCaller = null) {
thread_joinAll();
addLogEntry("Application exit");
addLogEntry("#######################################################################################################################################", ["logFileOnly"]);
// Sleep to allow any final logging output to be printed - this is needed as we are using buffered logging output
Thread.sleep(dur!("msecs")(500));
// Destroy the shared logging buffer
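// logBuffer is stored as a shared reference; cast() strips shared so shutdown() can wake the flush thread, join it and drain any remaining messages before the buffer is destroyed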
(cast() logBuffer).shutdown();
object.destroy(logBuffer);
}
}

@ -8,6 +8,7 @@ import core.sys.linux.sys.inotify;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import core.sys.posix.sys.select;
import core.thread;
import core.time;
import std.algorithm;
import std.concurrency;
@ -139,7 +140,6 @@ class MonitorBackgroundWorker {
}
}
void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
{
try {
@ -150,6 +150,96 @@ void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
}
}
enum ActionType {
moved,
deleted,
changed,
createDir
}
struct Action {
ActionType type;
bool skipped;
string src;
string dst;
}
struct ActionHolder {
Action[] actions;
ulong[string] srcMap;
void append(ActionType type, string src, string dst=null) {
ulong[] pendingTargets;
switch (type) {
case ActionType.changed:
if (src in srcMap && actions[srcMap[src]].type == ActionType.changed) {
// skip duplicate operations
return;
}
break;
case ActionType.createDir:
break;
case ActionType.deleted:
if (src in srcMap) {
ulong pendingTarget = srcMap[src];
// Skip operations that require reading a local file that is gone
switch (actions[pendingTarget].type) {
case ActionType.changed:
case ActionType.createDir:
actions[srcMap[src]].skipped = true;
srcMap.remove(src);
break;
default:
break;
}
}
break;
case ActionType.moved:
for(int i = 0; i < actions.length; i++) {
// Only match the latest pending operation for each path
if (actions[i].src in srcMap) {
switch (actions[i].type) {
case ActionType.changed:
case ActionType.createDir:
// Check whether the moved source path is a prefix of this pending action's path
string prefix = src ~ "/";
string target = actions[i].src;
if (prefix[0] != '.')
prefix = "./" ~ prefix;
if (target[0] != '.')
target = "./" ~ target;
string comm = commonPrefix(prefix, target);
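// If 'comm' covers all of 'prefix', the pending path sits underneath the moved directory; if it also covers all of 'target', the pending path is the moved path itself and simply takes the new destination, otherwise it is rebased onto 'dst' below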
if (src == actions[i].src || comm.length == prefix.length) {
// Hold operations that need to read a local file that has moved; re-queue them after the move has been processed online
pendingTargets ~= i;
actions[i].skipped = true;
srcMap.remove(actions[i].src);
if (comm.length == target.length)
actions[i].src = dst;
else
actions[i].src = dst ~ target[comm.length - 1 .. target.length];
}
break;
default:
break;
}
}
}
break;
default:
break;
}
actions ~= Action(type, false, src, dst);
srcMap[src] = actions.length - 1;
foreach (pendingTarget; pendingTargets) {
actions ~= actions[pendingTarget];
actions[$-1].skipped = false;
srcMap[actions[$-1].src] = actions.length - 1;
}
}
}
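In effect, ActionHolder coalesces the raw inotify stream before dispatch: duplicate 'changed' events collapse, a delete cancels a pending change on the same path, and pending actions on a moved path are re-queued under their new location so they run after the move. Below is an illustrative usage sketch, assuming it sits in the same module as the struct above; the helper function and paths are hypothetical.

```d
// Hypothetical helper and paths, for illustration only.
void demonstrateActionCoalescing() {
    ActionHolder holder;

    // A file is written twice and then deleted: the second 'changed' is
    // dropped as a duplicate, and the delete marks the surviving 'changed'
    // as skipped because there is no longer a local file to read.
    holder.append(ActionType.changed, "./docs/report.txt");
    holder.append(ActionType.changed, "./docs/report.txt"); // duplicate, ignored
    holder.append(ActionType.deleted, "./docs/report.txt"); // cancels the pending change

    // A directory is created and then renamed: the pending createDir is
    // re-queued under the new name so it is dispatched after the move.
    holder.append(ActionType.createDir, "./photos/new");
    holder.append(ActionType.moved, "./photos/new", "./photos/2024");
}
```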
final class Monitor {
// Class variables
ApplicationConfig appConfig;
@ -177,12 +267,14 @@ final class Monitor {
// Configure function delegates
void delegate(string path) onDirCreated;
void delegate(string path) onFileChanged;
void delegate(string[] path) onFileChanged;
void delegate(string path) onDelete;
void delegate(string from, string to) onMove;
// List of paths that were moved, not deleted
bool[string] movedNotDeleted;
ActionHolder actionHolder;
// Configure the class variables to consume the application configuration, including selective sync
this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
@ -395,138 +487,145 @@ final class Monitor {
};
while (true) {
int ret = poll(&fds, 1, 0);
if (ret == -1) throw new MonitorException("poll failed");
else if (ret == 0) break; // no events available
bool hasNotification = false;
while (true) {
int ret = poll(&fds, 1, 0);
if (ret == -1) throw new MonitorException("poll failed");
else if (ret == 0) break; // no events available
hasNotification = true;
size_t length = read(worker.fd, buffer.ptr, buffer.length);
if (length == -1) throw new MonitorException("read failed");
size_t length = read(worker.fd, buffer.ptr, buffer.length);
if (length == -1) throw new MonitorException("read failed");
int i = 0;
while (i < length) {
inotify_event *event = cast(inotify_event*) &buffer[i];
string path;
string evalPath;
// inotify event debug
addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]);
addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]);
addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]);
addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]);
addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]);
// inotify event handling
if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]);
if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]);
if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]);
if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]);
if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]);
if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]);
if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]);
if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]);
if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]);
if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]);
if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]);
if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]);
if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]);
if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]);
if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]);
if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]);
if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]);
if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]);
if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]);
if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]);
if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]);
if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]);
if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]);
// skip events that need to be ignored
if (event.mask & IN_IGNORED) {
// forget the directory associated to the watch descriptor
wdToDirName.remove(event.wd);
goto skip;
} else if (event.mask & IN_Q_OVERFLOW) {
throw new MonitorException("inotify overflow, inotify events will be missing");
}
// if the event is not to be ignored, obtain path
path = getPath(event);
// configure the skip_dir & skip_file comparison item
evalPath = path.strip('.');
// Skip events that should be excluded based on application configuration
// We can't use isDir or isFile as this information is missing from the inotify event itself
// Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995
// Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions
// Directory events should only be compared against skip_dir and file events should only be compared against skip_file
if (event.mask & IN_ISDIR) {
// The event in question contains IN_ISDIR event mask, thus highly likely this is an event on a directory
// This is because if the user has specified an exclusive path in skip_dir, e.g. '/path', that is what must be matched
if (selectiveSync.isDirNameExcluded(evalPath)) {
// The path to evaluate matches a path that the user has configured to skip
int i = 0;
while (i < length) {
inotify_event *event = cast(inotify_event*) &buffer[i];
string path;
string evalPath;
// inotify event debug
addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]);
addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]);
addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]);
addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]);
addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]);
// inotify event handling
if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]);
if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]);
if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]);
if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]);
if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]);
if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]);
if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]);
if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]);
if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]);
if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]);
if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]);
if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]);
if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]);
if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]);
if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]);
if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]);
if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]);
if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]);
if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]);
if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]);
if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]);
if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]);
if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]);
// skip events that need to be ignored
if (event.mask & IN_IGNORED) {
// forget the directory associated to the watch descriptor
wdToDirName.remove(event.wd);
goto skip;
} else if (event.mask & IN_Q_OVERFLOW) {
throw new MonitorException("inotify overflow, inotify events will be missing");
}
} else {
// The event in question is missing the IN_ISDIR event mask, thus highly likely this is an event on a file
// This is because if the user has specified an exclusive path in skip_file, e.g. '/path/file', that is what must be matched
if (selectiveSync.isFileNameExcluded(evalPath)) {
// The path to evaluate matches a file that the user has configured to skip
goto skip;
}
}
// Is the path excluded via sync_list?
if (selectiveSync.isPathExcludedViaSyncList(path)) {
// The path to evaluate matches a directory or file that the user has configured not to include in the sync
goto skip;
}
// handle the inotify events
if (event.mask & IN_MOVED_FROM) {
addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]);
cookieToPath[event.cookie] = path;
movedNotDeleted[path] = true; // Mark as moved, not deleted
} else if (event.mask & IN_MOVED_TO) {
addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) addRecursive(path);
auto from = event.cookie in cookieToPath;
if (from) {
cookieToPath.remove(event.cookie);
if (useCallbacks) onMove(*from, path);
movedNotDeleted.remove(*from); // Clear moved status
// if the event is not to be ignored, obtain path
path = getPath(event);
// configure the skip_dir & skip_file comparison item
evalPath = path.strip('.');
// Skip events that should be excluded based on application configuration
// We can't use isDir or isFile as this information is missing from the inotify event itself
// Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995
// Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions
// Directory events should only be compared against skip_dir and file events should only be compared against skip_file
if (event.mask & IN_ISDIR) {
// The event in question contains IN_ISDIR event mask, thus highly likely this is an event on a directory
// This is because if the user has specified an exclusive path in skip_dir, e.g. '/path', that is what must be matched
if (selectiveSync.isDirNameExcluded(evalPath)) {
// The path to evaluate matches a path that the user has configured to skip
goto skip;
}
} else {
// Handle file moved in from outside
if (event.mask & IN_ISDIR) {
if (useCallbacks) onDirCreated(path);
} else {
if (useCallbacks) onFileChanged(path);
// The event in question is missing the IN_ISDIR event mask, thus highly likely this is an event on a file
// This is because if the user has specified an exclusive path in skip_file, e.g. '/path/file', that is what must be matched
if (selectiveSync.isFileNameExcluded(evalPath)) {
// The path to evaluate matches a file that the user has configured to skip
goto skip;
}
}
} else if (event.mask & IN_CREATE) {
addLogEntry("event IN_CREATE: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) {
addRecursive(path);
if (useCallbacks) onDirCreated(path);
// Is the path excluded via sync_list?
if (selectiveSync.isPathExcludedViaSyncList(path)) {
// The path to evaluate matches a directory or file that the user has configured not to include in the sync
goto skip;
}
} else if (event.mask & IN_DELETE) {
if (path in movedNotDeleted) {
movedNotDeleted.remove(path); // Ignore delete for moved files
// handle the inotify events
if (event.mask & IN_MOVED_FROM) {
addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]);
cookieToPath[event.cookie] = path;
movedNotDeleted[path] = true; // Mark as moved, not deleted
} else if (event.mask & IN_MOVED_TO) {
addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) addRecursive(path);
auto from = event.cookie in cookieToPath;
if (from) {
cookieToPath.remove(event.cookie);
if (useCallbacks) actionHolder.append(ActionType.moved, *from, path);
movedNotDeleted.remove(*from); // Clear moved status
} else {
// Handle file moved in from outside
if (event.mask & IN_ISDIR) {
if (useCallbacks) actionHolder.append(ActionType.createDir, path);
} else {
if (useCallbacks) actionHolder.append(ActionType.changed, path);
}
}
} else if (event.mask & IN_CREATE) {
addLogEntry("event IN_CREATE: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) {
addRecursive(path);
if (useCallbacks) actionHolder.append(ActionType.createDir, path);
}
} else if (event.mask & IN_DELETE) {
if (path in movedNotDeleted) {
movedNotDeleted.remove(path); // Ignore delete for moved files
} else {
addLogEntry("event IN_DELETE: " ~ path, ["debug"]);
if (useCallbacks) actionHolder.append(ActionType.deleted, path);
}
} else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) {
addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]);
if (useCallbacks) actionHolder.append(ActionType.changed, path);
} else {
addLogEntry("event IN_DELETE: " ~ path, ["debug"]);
if (useCallbacks) onDelete(path);
addLogEntry("event unhandled: " ~ path, ["debug"]);
assert(0);
}
} else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) {
addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]);
if (useCallbacks) onFileChanged(path);
} else {
addLogEntry("event unhandled: " ~ path, ["debug"]);
assert(0);
}
skip:
i += inotify_event.sizeof + event.len;
skip:
i += inotify_event.sizeof + event.len;
}
Thread.sleep(dur!"seconds"(1));
}
if (!hasNotification) break;
processChanges();
// Assume that the items moved outside the watched directory have been deleted
foreach (cookie, path; cookieToPath) {
addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]);
@ -538,4 +637,33 @@ final class Monitor {
addLogEntry("inotify events flushed", ["debug"]);
}
}
private void processChanges() {
string[] changes;
foreach(action; actionHolder.actions) {
if (action.skipped)
continue;
switch (action.type) {
case ActionType.changed:
changes ~= action.src;
break;
case ActionType.deleted:
onDelete(action.src);
break;
case ActionType.createDir:
onDirCreated(action.src);
break;
case ActionType.moved:
onMove(action.src, action.dst);
break;
default:
break;
}
}
if (!changes.empty)
onFileChanged(changes);
object.destroy(actionHolder);
}
}

@ -576,7 +576,7 @@ class SyncEngine {
}
} else {
// Is this a Business Account with Sync Business Shared Items enabled?
if ((appConfig.accountType == "business") && ( appConfig.getValueBool("sync_business_shared_items"))) {
if ((appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items"))) {
// Business Account Shared Items Handling
// - OneDrive Business Shared Folder
@ -1114,9 +1114,21 @@ class SyncEngine {
// Change is to delete an item
addLogEntry("Handing a OneDrive Deleted Item", ["debug"]);
if (existingDBEntry) {
// Flag to delete
addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]);
idsToDelete ~= [thisItemDriveId, thisItemId];
// Is the item to delete locally actually in sync with OneDrive currently?
// What is the source of this item data?
string itemSource = "online";
// Compute this deleted item's path based on the database entries
string localPathToDelete = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name;
if (isItemSynced(existingDatabaseItem, localPathToDelete, itemSource)) {
// Flag to delete
addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]);
idsToDelete ~= [thisItemDriveId, thisItemId];
} else {
// local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not
safeBackup(localPathToDelete, dryRun);
}
} else {
// Flag to ignore
addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]);
@ -1197,22 +1209,8 @@ class SyncEngine {
if (hasSharedElement(onedriveJSONItem)) {
// Has the Shared JSON structure
addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]);
// Create a DB Tie Record for this parent object
addLogEntry("Creating a DB Tie for this Personal Shared Folder", ["debug"]);
// DB Tie
Item parentItem;
parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
parentItem.id = onedriveJSONItem["parentReference"]["id"].str;
parentItem.name = "root";
parentItem.type = ItemType.dir;
parentItem.mtime = remoteItem.mtime;
parentItem.parentId = null;
// Add this DB Tie parent record to the local database
addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]);
itemDB.upsert(parentItem);
// Create a 'root' DB Tie Record for this JSON object
createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem);
}
// Ensure that this item has no parent
@ -1226,21 +1224,8 @@ class SyncEngine {
addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]);
if (appConfig.accountType == "business") {
// Create a DB Tie Record for this parent object
addLogEntry("Creating a DB Tie for this Business Shared Folder", ["debug"]);
// DB Tie
Item parentItem;
parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
parentItem.id = onedriveJSONItem["parentReference"]["id"].str;
parentItem.name = "root";
parentItem.type = ItemType.dir;
parentItem.mtime = remoteItem.mtime;
parentItem.parentId = null;
// Add this DB Tie parent record to the local database
addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]);
itemDB.upsert(parentItem);
// Create a 'root' DB Tie Record for this JSON object
createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem);
// Ensure that this item has no parent
addLogEntry("Setting remoteItem.parentId to be null", ["debug"]);
@ -1278,8 +1263,7 @@ class SyncEngine {
itemDB.upsert(remoteItem);
} else {
// Sharepoint account type
addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED ........ ", ["debug"]);
addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED YET ........ ", ["info"]);
}
}
}
@ -2568,7 +2552,7 @@ class SyncEngine {
}
// Add to pathFakeDeletedArray
// We don't want to try and upload this item again, so we need to track this object
// We don't want to try and upload this item again, so we need to track this object's removal
if (dryRun) {
// We need to add './' here so that it can be correctly searched to ensure it is not uploaded
string pathToAdd = "./" ~ path;
@ -3502,154 +3486,157 @@ class SyncEngine {
// For each batch of files to upload, upload the changed data to OneDrive
foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) {
uploadChangedLocalFileToOneDrive(chunk);
processChangedLocalItemsToUploadInParallel(chunk);
}
}
// Upload the changed file batches in parallel
void processChangedLocalItemsToUploadInParallel(string[3][] array) {
foreach (i, localItemDetails; taskPool.parallel(array)) {
addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);
uploadChangedLocalFileToOneDrive(localItemDetails);
addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);
}
}
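The batching above splits the work: the outer loop chunks the changed items, and processChangedLocalItemsToUploadInParallel() fans each chunk out with taskPool.parallel to per-item uploads. Below is a minimal standalone sketch of that chunk-then-parallel pattern; the item names and the helper are illustrative only, not project code.

```d
import std.parallelism : taskPool;
import std.range : chunks;
import std.stdio : writefln;

// Stand-in for uploading a single changed item.
void processOne(string item) {
    writefln("processing %s", item);
}

void main() {
    auto changedItems = ["a.txt", "b.txt", "c.txt", "d.txt", "e.txt"];
    enum batchSize = 2;

    // Outer loop: hand the work out in fixed-size batches.
    foreach (batch; changedItems.chunks(batchSize)) {
        // Inner loop: process one batch in parallel; 'i' is the
        // element's index within the batch, as in the diff above.
        foreach (i, item; taskPool.parallel(batch)) {
            processOne(item);
        }
    }
}
```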
// Upload changed local files to OneDrive in parallel
void uploadChangedLocalFileToOneDrive(string[3][] array) {
foreach (i, localItemDetails; taskPool.parallel(array)) {
void uploadChangedLocalFileToOneDrive(string[3] localItemDetails) {
addLogEntry("Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);
// These are the details of the item we need to upload
string changedItemParentId = localItemDetails[0];
string changedItemId = localItemDetails[1];
string localFilePath = localItemDetails[2];
// These are the details of the item we need to upload
string changedItemParentId = localItemDetails[0];
string changedItemId = localItemDetails[1];
string localFilePath = localItemDetails[2];
// How much space is remaining on OneDrive
ulong remainingFreeSpace;
// Did the upload fail?
bool uploadFailed = false;
// Did we skip due to exceeding maximum allowed size?
bool skippedMaxSize = false;
// Did we skip to an exception error?
bool skippedExceptionError = false;
// Unfortunately, we can't store an array of Items ... so we have to re-query the DB again - unavoidable extra processing here
// This is because Item[] has no other functions to allow us to parallel process those elements, so we have to use a string array as input to this function
Item dbItem;
itemDB.selectById(changedItemParentId, changedItemId, dbItem);
addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]);
// Fetch the details from cachedOnlineDriveData
// - cachedOnlineDriveData.quotaRestricted;
// - cachedOnlineDriveData.quotaAvailable;
// - cachedOnlineDriveData.quotaRemaining;
driveDetailsCache cachedOnlineDriveData;
cachedOnlineDriveData = getDriveDetails(dbItem.driveId);
remainingFreeSpace = cachedOnlineDriveData.quotaRemaining;
// Get the file size from the actual file
ulong thisFileSizeLocal = getSize(localFilePath);
// Get the file size from the DB data
ulong thisFileSizeFromDB;
if (!dbItem.size.empty) {
thisFileSizeFromDB = to!ulong(dbItem.size);
} else {
thisFileSizeFromDB = 0;
}
// 'remainingFreeSpace' online includes the current file online
// We need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value
ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal;
// Based on what we know, for this thread - can we safely upload this modified local file?
addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]);
addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]);
JSONValue uploadResponse;
bool spaceAvailableOnline = false;
// If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused
// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true
// If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused
// If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true
// Is there quota available for the given drive where we are uploading to?
if (cachedOnlineDriveData.quotaAvailable) {
// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
if (calculatedSpaceOnlinePostUpload > 0) {
// Based on this thread action, we believe that there is space available online to upload - proceed
spaceAvailableOnline = true;
}
}
// Is quota being restricted?
if (cachedOnlineDriveData.quotaRestricted) {
// Space available online is being restricted - so we have no way to really know if there is space available online
// How much space is remaining on OneDrive
ulong remainingFreeSpace;
// Did the upload fail?
bool uploadFailed = false;
// Did we skip due to exceeding maximum allowed size?
bool skippedMaxSize = false;
// Did we skip to an exception error?
bool skippedExceptionError = false;
// Unfortunately, we can't store an array of Items ... so we have to re-query the DB again - unavoidable extra processing here
// This is because Item[] has no other functions to allow us to parallel process those elements, so we have to use a string array as input to this function
Item dbItem;
itemDB.selectById(changedItemParentId, changedItemId, dbItem);
// Fetch the details from cachedOnlineDriveData
// - cachedOnlineDriveData.quotaRestricted;
// - cachedOnlineDriveData.quotaAvailable;
// - cachedOnlineDriveData.quotaRemaining;
driveDetailsCache cachedOnlineDriveData;
cachedOnlineDriveData = getDriveDetails(dbItem.driveId);
remainingFreeSpace = cachedOnlineDriveData.quotaRemaining;
// Get the file size from the actual file
ulong thisFileSizeLocal = getSize(localFilePath);
// Get the file size from the DB data
ulong thisFileSizeFromDB;
if (!dbItem.size.empty) {
thisFileSizeFromDB = to!ulong(dbItem.size);
} else {
thisFileSizeFromDB = 0;
}
// 'remainingFreeSpace' online includes the current file online
// We need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value
ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal;
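// Worked example with hypothetical numbers: if OneDrive reports 10 GiB free, the copy of this file already online is 1 GiB and the local replacement is 3 GiB, the estimate is (10 GiB + 1 GiB) - 3 GiB = 8 GiB free once this upload completes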
// Based on what we know, for this thread - can we safely upload this modified local file?
addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]);
addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]);
JSONValue uploadResponse;
bool spaceAvailableOnline = false;
// If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused
// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true
// If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused
// If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true
// Is there quota available for the given drive where we are uploading to?
if (cachedOnlineDriveData.quotaAvailable) {
// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
if (calculatedSpaceOnlinePostUpload > 0) {
// Based on this thread action, we believe that there is space available online to upload - proceed
spaceAvailableOnline = true;
}
// Do we have space available or is space available being restricted (so we make the blind assumption that there is space available)
if (spaceAvailableOnline) {
// Does this file exceed the maximum file size to upload to OneDrive?
if (thisFileSizeLocal <= maxUploadFileSize) {
// Attempt to upload the modified file
// Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result
uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
// Evaluate the returned JSON uploadResponse
// If there was an error uploading the file, uploadResponse should be empty and invalid
if (uploadResponse.type() != JSONType.object) {
uploadFailed = true;
skippedExceptionError = true;
}
} else {
// Skip file - too large
uploadFailed = true;
skippedMaxSize = true;
}
} else {
// Cant upload this file - no space available
uploadFailed = true;
}
}
// Is quota being restricted?
if (cachedOnlineDriveData.quotaRestricted) {
// Space available online is being restricted - so we have no way to really know if there is space available online
spaceAvailableOnline = true;
}
// Did the upload fail?
if (uploadFailed) {
// Upload failed .. why?
// No space available online
if (!spaceAvailableOnline) {
addLogEntry("Skipping uploading modified file " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]);
}
// File exceeds max allowed size
if (skippedMaxSize) {
addLogEntry("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: " ~ localFilePath, ["info", "notify"]);
}
// Generic message
if (skippedExceptionError) {
// normal failure message if API or exception error generated
addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]);
// Do we have space available or is space available being restricted (so we make the blind assumption that there is space available)
if (spaceAvailableOnline) {
// Does this file exceed the maximum file size to upload to OneDrive?
if (thisFileSizeLocal <= maxUploadFileSize) {
// Attempt to upload the modified file
// Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result
uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
// Evaluate the returned JSON uploadResponse
// If there was an error uploading the file, uploadResponse should be empty and invalid
if (uploadResponse.type() != JSONType.object) {
uploadFailed = true;
skippedExceptionError = true;
}
} else {
// Upload was successful
addLogEntry("Uploading modified file " ~ localFilePath ~ " ... done.", ["info", "notify"]);
// Save JSON item in database
saveItem(uploadResponse);
// Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accurately as possible for other threads
updateDriveDetailsCache(dbItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal);
// Check the integrity of the uploaded modified file if not in a --dry-run scenario
if (!dryRun) {
// Perform the integrity of the uploaded modified file
performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal);
// Update the date / time of the file online to match the local item
// Get the local file last modified time
SysTime localModifiedTime = timeLastModified(localFilePath).toUTC();
localModifiedTime.fracSecs = Duration.zero;
// Get the latest eTag, and use that
string etagFromUploadResponse = uploadResponse["eTag"].str;
// Attempt to update the online date time stamp based on our local data
uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse);
}
// Skip file - too large
uploadFailed = true;
skippedMaxSize = true;
}
addLogEntry("Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);
} // end of 'foreach (i, localItemDetails; array.enumerate)'
} else {
// Cant upload this file - no space available
uploadFailed = true;
}
// Did the upload fail?
if (uploadFailed) {
// Upload failed .. why?
// No space available online
if (!spaceAvailableOnline) {
addLogEntry("Skipping uploading modified file " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]);
}
// File exceeds max allowed size
if (skippedMaxSize) {
addLogEntry("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: " ~ localFilePath, ["info", "notify"]);
}
// Generic message
if (skippedExceptionError) {
// normal failure message if API or exception error generated
addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]);
}
} else {
// Upload was successful
addLogEntry("Uploading modified file " ~ localFilePath ~ " ... done.", ["info", "notify"]);
// Save JSON item in database
saveItem(uploadResponse);
// Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accurately as possible for other threads
updateDriveDetailsCache(dbItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal);
// Check the integrity of the uploaded modified file if not in a --dry-run scenario
if (!dryRun) {
// Perform the integrity of the uploaded modified file
performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal);
// Update the date / time of the file online to match the local item
// Get the local file last modified time
SysTime localModifiedTime = timeLastModified(localFilePath).toUTC();
localModifiedTime.fracSecs = Duration.zero;
// Get the latest eTag, and use that
string etagFromUploadResponse = uploadResponse["eTag"].str;
// Attempt to update the online date time stamp based on our local data
uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse);
}
}
}
// Perform the upload of a locally modified file to OneDrive
@ -3959,10 +3946,17 @@ class SyncEngine {
// Perform a filesystem walk to uncover new data to upload to OneDrive
void scanLocalFilesystemPathForNewData(string path) {
// Cleanup array memory before we start adding files
newLocalFilesToUploadToOneDrive = [];
// Perform a filesystem walk to uncover new data
scanLocalFilesystemPathForNewDataToUpload(path);
// Upload new data that has been identified
processNewLocalItemsToUpload();
}
void scanLocalFilesystemPathForNewDataToUpload(string path) {
// To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences?
string logPath;
if (path == ".") {
@ -3993,9 +3987,11 @@ class SyncEngine {
// Perform the filesystem walk of this path, building an array of new items to upload
scanPathForNewData(path);
if (!appConfig.surpressLoggingOutput) {
if (appConfig.verbosityCount == 0)
addLogEntry("\n", ["consoleOnlyNoNewLine"]);
if (isDir(path)) {
if (!appConfig.surpressLoggingOutput) {
if (appConfig.verbosityCount == 0)
addLogEntry("\n", ["consoleOnlyNoNewLine"]);
}
}
// To finish off the processing items, this is needed to reflect this in the log
@ -4006,7 +4002,10 @@ class SyncEngine {
auto elapsedTime = finishTime - startTime;
addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]);
}
// Process and upload the new local items identified by the filesystem walk
void processNewLocalItemsToUpload() {
// Upload new data that has been identified
// Are there any new local items to upload?
if (!newLocalFilesToUploadToOneDrive.empty) {
@ -4052,22 +4051,16 @@ class SyncEngine {
// Cleanup array memory after uploading all files
newLocalFilesToUploadToOneDrive = [];
}
if (!databaseItemsWhereContentHasChanged.empty) {
// There are changed local files that were in the DB to upload
addLogEntry("Changed local items to upload to OneDrive: " ~ to!string(databaseItemsWhereContentHasChanged.length));
processChangedLocalItemsToUpload();
// Cleanup array memory
databaseItemsWhereContentHasChanged = [];
}
}
// Scan this path for new data
void scanPathForNewData(string path) {
// Add a processing '.'
if (!appConfig.surpressLoggingOutput) {
if (appConfig.verbosityCount == 0)
addProcessingDotEntry();
if (isDir(path)) {
if (!appConfig.surpressLoggingOutput) {
if (appConfig.verbosityCount == 0)
addProcessingDotEntry();
}
}
ulong maxPathLength;
@ -4228,6 +4221,7 @@ class SyncEngine {
if (canFind(businessSharedFoldersOnlineToSkip, path)) {
// This path was skipped - why?
addLogEntry("Skipping item '" ~ path ~ "' due to this path matching an existing online Business Shared Folder name", ["info", "notify"]);
addLogEntry("To sync this Business Shared Folder, consider enabling 'sync_business_shared_folders' within your application configuration.", ["info"]);
skipFolderTraverse = true;
}
}
@ -4286,37 +4280,42 @@ class SyncEngine {
}
// Handle a single file inotify trigger when using --monitor
void handleLocalFileTrigger(string localFilePath) {
void handleLocalFileTrigger(string[] changedLocalFilesToUploadToOneDrive) {
// Is this path a new file or an existing one?
// Normally we would use pathFoundInDatabase() to calculate, but we need 'databaseItem' as well if the item is in the database
Item databaseItem;
bool fileFoundInDB = false;
string[3][] modifiedItemToUpload;
foreach (driveId; onlineDriveDetails.keys) {
if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) {
fileFoundInDB = true;
break;
}
}
// Was the file found in the database?
if (!fileFoundInDB) {
// This is a new file as it is not in the database
// Log that the file has been added locally
addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]);
scanLocalFilesystemPathForNewData(localFilePath);
} else {
// This is a potentially modified file, needs to be handled as such. Is the item truly modified?
if (!testFileHash(localFilePath, databaseItem)) {
// The local file failed the hash comparison test - there is a data difference
// Log that the file has changed locally
addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]);
// Add the modified item to the array to upload
modifiedItemToUpload ~= [databaseItem.driveId, databaseItem.id, localFilePath];
uploadChangedLocalFileToOneDrive(modifiedItemToUpload);
foreach (localFilePath; changedLocalFilesToUploadToOneDrive) {
try {
Item databaseItem;
bool fileFoundInDB = false;
foreach (driveId; onlineDriveDetails.keys) {
if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) {
fileFoundInDB = true;
break;
}
}
// Was the file found in the database?
if (!fileFoundInDB) {
// This is a new file as it is not in the database
// Log that the file has been added locally
addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]);
scanLocalFilesystemPathForNewDataToUpload(localFilePath);
} else {
// This is a potentially modified file, needs to be handled as such. Is the item truly modified?
if (!testFileHash(localFilePath, databaseItem)) {
// The local file failed the hash comparison test - there is a data difference
// Log that the file has changed locally
addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]);
// Add the modified item to the array to upload
uploadChangedLocalFileToOneDrive([databaseItem.driveId, databaseItem.id, localFilePath]);
}
}
} catch(Exception e) {
addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]);
}
}
processNewLocalItemsToUpload();
}
// Query the database to determine if this path is within the existing database
@ -4449,33 +4448,17 @@ class SyncEngine {
addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]);
// Depending on the data within parentItem, will depend on what method we are using to search
// In a --local-first scenario, a Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details
// A Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details
Item queryItem;
if ((appConfig.getValueBool("local_first")) && (parentItem.type == ItemType.remote)) {
// We are --local-first scenario and this folder is a potential shared object
addLogEntry("--localfirst & parentItem is a remote item object", ["debug"]);
if (parentItem.type == ItemType.remote) {
// This folder is a potential shared object
addLogEntry("ParentItem is a remote item object", ["debug"]);
// Need to create the DB Tie for this shared object to ensure this exists in the database
createDatabaseTieRecordForOnlineSharedFolder(parentItem);
// Update the queryItem values
queryItem.driveId = parentItem.remoteDriveId;
queryItem.id = parentItem.remoteId;
// Need to create the DB Tie for this object
addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]);
// New DB Tie Item to bind the 'remote' path to our parent path
Item tieDBItem;
// Set the name
tieDBItem.name = parentItem.name;
// Set the correct item type
tieDBItem.type = ItemType.dir;
// Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie
tieDBItem.driveId = parentItem.remoteDriveId;
tieDBItem.id = parentItem.remoteId;
// Set the correct mtime
tieDBItem.mtime = parentItem.mtime;
// Add tie DB record to the local database
addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
} else {
// Use parent item for the query item
addLogEntry("Standard Query, use parentItem", ["debug"]);
@ -4567,14 +4550,14 @@ class SyncEngine {
string requiredDriveId;
string requiredParentItemId;
// Is this a Personal Account and is the item a Remote Object (Shared Folder) ?
if ((appConfig.accountType == "personal") && (parentItem.type == ItemType.remote)) {
// Is the item a Remote Object (Shared Folder) ?
if (parentItem.type == ItemType.remote) {
// Yes .. Shared Folder
addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]);
requiredDriveId = parentItem.remoteDriveId;
requiredParentItemId = parentItem.remoteId;
} else {
// Not a personal account + Shared Folder
// Not a Shared Folder
requiredDriveId = parentItem.driveId;
requiredParentItemId = parentItem.id;
}
@ -4675,22 +4658,37 @@ class SyncEngine {
if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) {
// OneDrive 'name' matches local path name
if (appConfig.accountType == "business") {
// We are a business account, this existing online folder, could be a Shared Online Folder and is the 'Add shortcut to My files' item
// We are a business account; this existing online folder could be a Shared Online Folder added via an 'Add shortcut to My files' link
addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]);
// Is this a remote folder
if (isItemRemote(onlinePathData)) {
// The folder is a remote item ... we do not want to create this ...
addLogEntry("Remote Existing Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]);
addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]);
addLogEntry("Existing Remote Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]);
// Add this path to businessSharedFoldersOnlineToSkip
businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate];
// no save to database, no online create
// Shutdown API instance
createDirectoryOnlineOneDriveApiInstance.shutdown();
// Free object and memory
object.destroy(createDirectoryOnlineOneDriveApiInstance);
return;
// Is Shared Business Folder Syncing enabled ?
if (!appConfig.getValueBool("sync_business_shared_items")) {
// Shared Business Folder Syncing is NOT enabled
addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]);
// Add this path to businessSharedFoldersOnlineToSkip
businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate];
// no save to database, no online create
// Shutdown API instance
createDirectoryOnlineOneDriveApiInstance.shutdown();
// Free object and memory
object.destroy(createDirectoryOnlineOneDriveApiInstance);
return;
} else {
// As the 'onlinePathData' is potentially missing the actual correct parent folder id in the 'remoteItem' JSON response, we have to perform a further query to get the correct answer
// Failure to do this means the 'root' DB Tie Record would have a parent reference id different from this folder's actual parent reference id
JSONValue sharedFolderParentPathData;
string remoteDriveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str;
string remoteItemId = onlinePathData["remoteItem"]["id"].str;
sharedFolderParentPathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsById(remoteDriveId, remoteItemId);
// A 'root' DB Tie Record needed for this folder using the correct parent data
createDatabaseRootTieRecordForOnlineSharedFolder(sharedFolderParentPathData);
}
}
}
@ -4824,7 +4822,7 @@ class SyncEngine {
// If the parent path was found in the DB, to ensure we are uploading to the right location 'parentItem.driveId' must not be empty
if ((parentPathFoundInDB) && (parentItem.driveId.empty)) {
// switch to using defaultDriveId
addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls");
addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls", ["debug"]);
parentItem.driveId = appConfig.defaultDriveId;
}
@ -4929,11 +4927,24 @@ class SyncEngine {
// even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different.
// Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this.
// In order to upload this file - this query HAS to respond as a 404 - Not Found
// In order to upload this file - this query HAS to respond with a '404 - Not Found' so that the upload is triggered
// Does this 'file' already exist on OneDrive?
try {
fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload);
if (parentItem.driveId == appConfig.defaultDriveId) {
// getPathDetailsByDriveId is only reliable when the driveId is our driveId
fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload);
} else {
// We need to curate a response by listing the children of this parentItem.driveId and parentItem.id, without traversing directories
// So that IF the file is on a Shared Folder, it can be found, and, if it exists, checked correctly
fileDetailsFromOneDrive = searchDriveItemForFile(parentItem.driveId, parentItem.id, fileToUpload);
// Was the file found?
if (fileDetailsFromOneDrive.type() != JSONType.object) {
// No ....
throw new OneDriveException(404, "Name not found via searchDriveItemForFile");
}
}
// Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API
if (hasName(fileDetailsFromOneDrive)) {
performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str);
@ -4962,10 +4973,10 @@ class SyncEngine {
string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str;
string changedItemId = fileDetailsFromOneDrive["id"].str;
addLogEntry("Skipping uploading this file as moving it to upload as a modified file (online item already exists): " ~ fileToUpload);
databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload];
// In order to process the local item as a 'changed' item, unfortunately we need to save the online data to the local DB
saveItem(fileDetailsFromOneDrive);
uploadChangedLocalFileToOneDrive([changedItemParentId, changedItemId, fileToUpload]);
}
} catch (OneDriveException exception) {
// If we get a 404 .. the file is not online .. this is what we want .. file does not exist online
@ -6537,24 +6548,23 @@ class SyncEngine {
// Is this JSON a remote object
addLogEntry("Testing if this is a remote Shared Folder", ["debug"]);
if (isItemRemote(getPathDetailsAPIResponse)) {
// Remote Directory .. need a DB Tie Item
addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]);
// New DB Tie Item to bind the 'remote' path to our parent path
Item tieDBItem;
// Remote Directory .. need a DB Tie Record
createDatabaseTieRecordForOnlineSharedFolder(parentDetails);
// Temp DB Item to bind the 'remote' path to our parent path
Item tempDBItem;
// Set the name
tieDBItem.name = parentDetails.name;
tempDBItem.name = parentDetails.name;
// Set the correct item type
tieDBItem.type = ItemType.dir;
tempDBItem.type = ItemType.dir;
// Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie
tieDBItem.driveId = parentDetails.remoteDriveId;
tieDBItem.id = parentDetails.remoteId;
tempDBItem.driveId = parentDetails.remoteDriveId;
tempDBItem.id = parentDetails.remoteId;
// Set the correct mtime
tieDBItem.mtime = parentDetails.mtime;
// Add tie DB record to the local database
addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
// Update parentDetails to use the DB Tie record
parentDetails = tieDBItem;
tempDBItem.mtime = parentDetails.mtime;
// Update parentDetails to use this temp record
parentDetails = tempDBItem;
}
} catch (OneDriveException exception) {
if (exception.httpStatusCode == 404) {
@ -6815,7 +6825,7 @@ class SyncEngine {
if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) {
// The old path|item is not synced with the database, upload as a new file
addLogEntry("Moved local item was not in-sync with local databse - uploading as new item");
uploadNewFile(newPath);
scanLocalFilesystemPathForNewData(newPath);
return;
}
@ -6961,8 +6971,9 @@ class SyncEngine {
// What account type is this?
if (appConfig.accountType != "personal") {
// Not a personal account, thus the integrity failure is most likely due to SharePoint
addLogEntry("CAUTION: Microsoft OneDrive when using SharePoint as a backend enhances files after you upload them, which means this file may now have technical differences from your local copy, resulting in a data integrity issue.", ["verbose"]);
addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details", ["verbose"]);
addLogEntry("CAUTION: When you upload files to Microsoft OneDrive that uses SharePoint as its backend, Microsoft OneDrive will alter your files post upload.", ["verbose"]);
addLogEntry("CAUTION: This will lead to technical differences between the version stored online and your local original file, potentially causing issues with the accuracy or consistency of your data.", ["verbose"]);
addLogEntry("CAUTION: Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details.", ["verbose"]);
}
// How can this be disabled?
addLogEntry("To disable the integrity checking of uploaded files use --disable-upload-validation");
@ -7600,6 +7611,22 @@ class SyncEngine {
return interruptedUploads;
}
// Clear any session_upload.* files
void clearInterruptedSessionUploads() {
// Scan the configuration directory for any 'session_upload.*' files and remove them
foreach (sessionFile; dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)) {
// calculate the full path
string tempPath = buildNormalizedPath(buildPath(appConfig.configDirName, sessionFile));
JSONValue sessionFileData = readText(tempPath).parseJSON();
addLogEntry("Removing interrupted session upload file due to --resync for: " ~ sessionFileData["localPath"].str, ["info"]);
// Process removal
if (!dryRun) {
safeRemove(tempPath);
}
}
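// Editor's note: in --dry-run mode the removal above is logged but not performed; also note that a
// corrupted 'session_upload.*' file would cause parseJSON() to throw inside the loop above, as the
// read is not wrapped in a try/catch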
}
// Process interrupted 'session_upload' files
void processForInterruptedSessionUploads() {
// For each upload_session file that has been found, process the data to ensure it is still valid
@ -7847,6 +7874,96 @@ class SyncEngine {
}
}
// Search a given Drive ID, Item ID and filename to see if this exists in the location specified
JSONValue searchDriveItemForFile(string parentItemDriveId, string parentItemId, string fileToUpload) {
JSONValue onedriveJSONItem;
string searchName = baseName(fileToUpload);
JSONValue thisLevelChildren;
string nextLink;
// Create a new API Instance for this thread and initialise it
OneDriveApi checkFileOneDriveApiInstance;
checkFileOneDriveApiInstance = new OneDriveApi(appConfig);
checkFileOneDriveApiInstance.initialise();
for (;;) {
// query top level children
try {
thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink);
} catch (OneDriveException exception) {
// OneDrive threw an error
addLogEntry("------------------------------------------------------------------", ["debug"]);
addLogEntry("Query Error: thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]);
addLogEntry("driveId: " ~ parentItemDriveId, ["debug"]);
addLogEntry("idToQuery: " ~ parentItemId, ["debug"]);
addLogEntry("nextLink: " ~ nextLink, ["debug"]);
string thisFunctionName = getFunctionName!({});
// HTTP request returned status code 408,429,503,504
if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
// Handle the 429
if (exception.httpStatusCode == 429) {
// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
handleOneDriveThrottleRequest(checkFileOneDriveApiInstance);
addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]);
}
// re-try the specific changes queries
if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
// 408 - Request Time Out
// 503 - Service Unavailable
// 504 - Gateway Timeout
// Transient error - try again in 30 seconds
auto errorArray = splitLines(exception.msg);
addLogEntry(to!string(errorArray[0]) ~ " when attempting to query top level drive children on OneDrive - retrying applicable request in 30 seconds");
addLogEntry("checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink) previously threw an error - retrying", ["debug"]);
// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]);
Thread.sleep(dur!"seconds"(30));
}
// re-try original request - retried for 429, 503, 504 - loop back by calling this function again and return its result
addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]);
// Release this API instance before retrying so the retry does not leak the handle
checkFileOneDriveApiInstance.shutdown();
object.destroy(checkFileOneDriveApiInstance);
return searchDriveItemForFile(parentItemDriveId, parentItemId, fileToUpload);
} else {
// Default operation if not 408,429,503,504 errors
// display what the error is
displayOneDriveErrorMessage(exception.msg, thisFunctionName);
}
}
// process thisLevelChildren response
foreach (child; thisLevelChildren["value"].array) {
// Only looking at files
if ((child["name"].str == searchName) && (("file" in child) != null)) {
// Found the matching file, return its JSON representation
// Operations in this thread are done / complete
checkFileOneDriveApiInstance.shutdown();
// Free object and memory
object.destroy(checkFileOneDriveApiInstance);
// Return child
return child;
}
}
// If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
// to indicate more items are available and provide the request URL for the next page of items.
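// Note: the '@odata.nextLink' value is an opaque URL (it typically embeds a skip token) and should be
// passed back to the API unchanged, which is what the loop does below via 'nextLink'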
if ("@odata.nextLink" in thisLevelChildren) {
// Update nextLink to next changeSet bundle
addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);
nextLink = thisLevelChildren["@odata.nextLink"].str;
} else break;
}
// Operations in this thread are done / complete
checkFileOneDriveApiInstance.shutdown();
// Free object and memory
object.destroy(checkFileOneDriveApiInstance);
// return an empty JSON item
return onedriveJSONItem;
}
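// Editor's note - illustrative sketch only, not part of the client: a caller would typically test
// whether a match came back before using it, since an empty JSONValue signals the file was not
// found online. 'parentDriveId', 'parentId' and 'localFilePath' are hypothetical placeholders.
/+
JSONValue remoteFile = searchDriveItemForFile(parentDriveId, parentId, localFilePath);
if ((remoteFile.type == JSONType.object) && ("id" in remoteFile)) {
addLogEntry("Matching file exists online with id: " ~ remoteFile["id"].str, ["debug"]);
} else {
addLogEntry("No matching file found online", ["debug"]);
}
+/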
// Update 'onlineDriveDetails' with the latest data about this drive
void updateDriveDetailsCache(string driveId, bool quotaRestricted, bool quotaAvailable, ulong localFileSize) {
@ -7886,4 +8003,72 @@ class SyncEngine {
addOrUpdateOneDriveOnlineDetails(driveId);
}
}
// Create a 'root' DB Tie Record for a Shared Folder from the JSON data
void createDatabaseRootTieRecordForOnlineSharedFolder(JSONValue onedriveJSONItem) {
// Creating|Updating a DB Tie
addLogEntry("Creating|Updating a 'root' DB Tie Record for this Shared Folder: " ~ onedriveJSONItem["name"].str, ["debug"]);
addLogEntry("Raw JSON for 'root' DB Tie Record: " ~ to!string(onedriveJSONItem), ["debug"]);
// New DB Tie Item to detail the 'root' of the Shared Folder
Item tieDBItem;
tieDBItem.name = "root";
// Get the right parentReference details
if (isItemRemote(onedriveJSONItem)) {
tieDBItem.driveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str;
tieDBItem.id = onedriveJSONItem["remoteItem"]["id"].str;
} else {
if (onedriveJSONItem["name"].str != "root") {
tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
tieDBItem.id = onedriveJSONItem["parentReference"]["id"].str;
} else {
tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str;
tieDBItem.id = onedriveJSONItem["id"].str;
}
}
tieDBItem.type = ItemType.dir;
tieDBItem.mtime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str);
tieDBItem.parentId = null;
// Add this DB Tie parent record to the local database
addLogEntry("Creating|Updating into local database a 'root' DB Tie record: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
}
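// Editor's note - illustrative sketch only, not part of the client: the minimum JSON shape the
// 'remote' branch above reads. All values are hypothetical placeholders; real Graph API responses
// carry many more fields.
/+
JSONValue exampleSharedFolder = parseJSON(`{
"name": "Example Shared Folder",
"remoteItem": {
"id": "REMOTE-ITEM-ID",
"parentReference": { "driveId": "REMOTE-DRIVE-ID" }
},
"fileSystemInfo": { "lastModifiedDateTime": "2024-02-13T11:00:01Z" }
}`);
createDatabaseRootTieRecordForOnlineSharedFolder(exampleSharedFolder);
+/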
// Create a DB Tie Record for a Shared Folder
void createDatabaseTieRecordForOnlineSharedFolder(Item parentItem) {
// Creating|Updating a DB Tie
addLogEntry("Creating|Updating a DB Tie Record for this Shared Folder: " ~ parentItem.name, ["debug"]);
addLogEntry("Parent Item Record: " ~ to!string(parentItem), ["debug"]);
// New DB Tie Item to bind the 'remote' path to our parent path
Item tieDBItem;
tieDBItem.name = parentItem.name;
tieDBItem.driveId = parentItem.remoteDriveId;
tieDBItem.id = parentItem.remoteId;
tieDBItem.type = ItemType.dir;
tieDBItem.mtime = parentItem.mtime;
// What account type is this? This determines what 'tieDBItem.parentId' should be set to
// There is a difference in the JSON responses between 'personal' and 'business' account types for Shared Folders
// Essentially an API inconsistency
if (appConfig.accountType == "personal") {
// Set tieDBItem.parentId to null
tieDBItem.parentId = null;
} else {
// The tieDBItem.parentId needs to be the id of an existing DB record for this remote drive
// Query the DB for the records already stored for this remote drive
Item[] rootDriveItems;
Item dbRecord;
rootDriveItems = itemDB.selectByDriveId(parentItem.remoteDriveId);
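// Editor's note (assumption): at least one DB record is expected to already exist for this remote
// drive - typically the 'root' tie record created by createDatabaseRootTieRecordForOnlineSharedFolder() -
// otherwise the index below would be out of range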
dbRecord = rootDriveItems[0];
tieDBItem.parentId = dbRecord.id;
}
// Add tie DB record to the local database
addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]);
itemDB.upsert(tieDBItem);
}
}