Implement warning on big deletes to safeguard data on OneDrive (#621)

* When testing changes to the onedrive client configuration, the new configuration might be invalid (see #458 for example) and you might accidentally remove all of your data on OneDrive. This new feature attempts to protect your data on OneDrive when performing large deletes, so that a large delete is detected and confirmation is requested before the request is actually processed. This feature does not impact `--monitor` mode of operation, only standalone mode of operation.
This commit is contained in:
abraunegg 2020-01-03 07:46:58 +11:00 committed by GitHub
parent 2217fc2b97
commit 5c2ad041bd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 79 additions and 22 deletions

1
config
View file

@ -30,4 +30,5 @@
# monitor_log_frequency = "5"
# monitor_fullscan_frequency = "10"
# sync_root_files = "false"
# classify_as_big_delete = "1000"
# user_agent = ""

View file

@ -62,6 +62,8 @@ final class Config
// Number of n sync runs before performing a full local scan of sync_dir
// By default 10 which means every ~7.5 minutes a full disk scan of sync_dir will occur
longValues["monitor_fullscan_frequency"] = 10;
// Number of children in a path that is locally removed which will be classified as a 'big data delete'
longValues["classify_as_big_delete"] = 1000;
// Determine the users home directory.
// Need to avoid using ~ here as expandTilde() below does not interpret correctly when running under init.d or systemd scripts
@ -163,6 +165,7 @@ final class Config
boolValues["logout"] = false;
boolValues["monitor"] = false;
boolValues["synchronize"] = false;
boolValues["force"] = false;
// Application Startup option validation
try {
@ -182,6 +185,9 @@ final class Config
"check-for-nosync",
"Check for the presence of .nosync in each directory. If found, skip directory from sync.",
&boolValues["check_nosync"],
"classify-as-big-delete",
"Number of children in a path that is locally removed which will be classified as a 'big data delete'",
&longValues["classify_as_big_delete"],
"create-directory",
"Create a directory on OneDrive - no sync will be performed.",
&stringValues["create_directory"],
@ -218,6 +224,9 @@ final class Config
"force-http-2",
"Force the use of HTTP/2 for all operations where applicable",
&boolValues["force_http_2"],
"force",
"Force the deletion of data when a 'big delete' is detected",
&boolValues["force"],
"get-file-link",
"Display the file link of a synced file",
&stringValues["get_file_link"],

View file

@ -355,37 +355,43 @@ int main(string[] args)
string userConfigFilePath = cfg.configDirName ~ "/config";
string userSyncList = cfg.configDirName ~ "/sync_list";
// Display application version
std.stdio.write("onedrive version = ", import("version"));
writeln("onedrive version = ", strip(import("version")));
// Display all of the pertinent configuration options
writeln("Config path = ", cfg.configDirName);
writeln("Config path = ", cfg.configDirName);
// Does a config file exist or are we using application defaults
if (exists(userConfigFilePath)){
writeln("Config file found in config path = true");
writeln("Config file found in config path = true");
} else {
writeln("Config file found in config path = false");
writeln("Config file found in config path = false");
}
// Config Options
writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync"));
writeln("Config option 'sync_dir' = ", syncDir);
writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir"));
writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file"));
writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles"));
writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks"));
writeln("Config option 'monitor_interval' = ", cfg.getValueLong("monitor_interval"));
writeln("Config option 'min_notify_changes' = ", cfg.getValueLong("min_notify_changes"));
writeln("Config option 'log_dir' = ", cfg.getValueString("log_dir"));
writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync"));
writeln("Config option 'sync_dir' = ", syncDir);
writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir"));
writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file"));
writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles"));
writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks"));
writeln("Config option 'monitor_interval' = ", cfg.getValueLong("monitor_interval"));
writeln("Config option 'min_notify_changes' = ", cfg.getValueLong("min_notify_changes"));
writeln("Config option 'log_dir' = ", cfg.getValueString("log_dir"));
writeln("Config option 'classify_as_big_delete' = ", cfg.getValueLong("classify_as_big_delete"));
// Is config option drive_id configured?
if (cfg.getValueString("drive_id") != ""){
writeln("Config option 'drive_id' = ", cfg.getValueString("drive_id"));
writeln("Config option 'drive_id' = ", cfg.getValueString("drive_id"));
}
// Is sync_list configured?
if (exists(userSyncList)){
writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files"));
writeln("Selective sync configured = true");
writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files"));
writeln("Selective sync configured = true");
writeln("sync_list contents:");
// Output the sync_list contents
auto syncListFile = File(userSyncList);
@ -395,8 +401,8 @@ int main(string[] args)
writeln(line);
}
} else {
writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files"));
writeln("Selective sync configured = false");
writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files"));
writeln("Selective sync configured = false");
}
// exit

View file

@ -3093,6 +3093,34 @@ final class SyncEngine
private void uploadDeleteItem(Item item, string path)
{
log.log("Deleting item from OneDrive: ", path);
bool flagAsBigDelete = false;
// query the database - how many objects will this remove?
long itemsToDelete = 0;
auto children = itemdb.selectChildren(item.driveId, item.id);
itemsToDelete = count(children);
foreach (Item child; children) {
if (child.type != ItemType.file) {
// recursively count the children of this child
itemsToDelete = itemsToDelete + countChildren(child.driveId, child.id);
}
}
// Are we running in monitor mode? A local delete of a file will issue an inotify event, which will trigger the local & remote delete to be processed immediately
if (!cfg.getValueBool("monitor")) {
// not running in monitor mode
if (itemsToDelete > cfg.getValueLong("classify_as_big_delete")) {
// A big delete detected
flagAsBigDelete = true;
if (!cfg.getValueBool("force")) {
log.error("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve data on OneDrive");
log.error("ERROR: To delete a large volume of data, use --force or increase the config value 'classify_as_big_delete' to a larger value");
// Must exit here to preserve data on OneDrive
exit(-1);
}
}
}
if (!dryRun) {
// we are not in a --dry-run situation, process deletion to OneDrive
if ((item.driveId == "") && (item.id == "") && (item.eTag == "")){
@ -3105,16 +3133,15 @@ final class SyncEngine
item.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101
item.eTag = onedrivePathDetails["eTag"].str; // Should be something like aNjM2NjJFRUVGQjY2NjJFMSE5MzUuMA
}
// do the delete
try {
onedrive.deleteById(item.driveId, item.id, item.eTag);
} catch (OneDriveException e) {
if (e.httpStatusCode == 404) {
// item.id, item.eTag could not be found on driveId
log.vlog("OneDrive reported: The resource could not be found.");
}
else {
} else {
// Not a 404 response .. is this a 403 response due to OneDrive Business Retention Policy being enabled?
if ((e.httpStatusCode == 403) && (accountType != "personal")) {
auto errorArray = splitLines(e.msg);
@ -3144,6 +3171,20 @@ final class SyncEngine
}
}
}
private long countChildren(string driveId, string id){
	// Recursively tally how many database items live beneath the given item.
	// Direct children are counted first; each non-file child then contributes
	// the size of its own subtree.
	auto directChildren = itemdb.selectChildren(driveId, id);
	long total = count(directChildren);
	foreach (Item entry; directChildren) {
		if (entry.type != ItemType.file) {
			// descend into this directory-like child
			total += countChildren(entry.driveId, entry.id);
		}
	}
	return total;
}
// update the item's last modified time
private void uploadLastModifiedTime(const(char)[] driveId, const(char)[] id, const(char)[] eTag, SysTime mtime)