diff --git a/.travis-ci.sh b/.travis-ci.sh index 0409ceb5..95a79e96 100644 --- a/.travis-ci.sh +++ b/.travis-ci.sh @@ -16,7 +16,12 @@ DEBIAN_MIRROR="http://ftp.us.debian.org/debian" HOST_DEPENDENCIES="qemu-user-static binfmt-support debootstrap sbuild wget" # Debian package dependencies for the chrooted environment -GUEST_DEPENDENCIES="build-essential libcurl4-openssl-dev libsqlite3-dev libgnutls-openssl27 git" +GUEST_DEPENDENCIES="build-essential libcurl4-openssl-dev libsqlite3-dev libgnutls-openssl27 git libxml2" + +# LDC Version +# Different versions due to https://github.com/ldc-developers/ldc/issues/3027 +LDC_VERSION_ARMHF=1.13.0 +LDC_VERSION_ARM64=1.14.0 function setup_arm32_chroot { # Update apt repository details @@ -27,10 +32,10 @@ function setup_arm32_chroot { # Host dependencies sudo apt-get install -qq -y ${HOST_DEPENDENCIES} # Download LDC compiler - wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-armhf.tar.xz - tar -xf ldc2-1.11.0-linux-armhf.tar.xz - mv ldc2-1.11.0-linux-armhf dlang-${ARCH} - rm -rf ldc2-1.11.0-linux-armhf.tar.xz + wget https://github.com/ldc-developers/ldc/releases/download/v${LDC_VERSION_ARMHF}/ldc2-${LDC_VERSION_ARMHF}-linux-armhf.tar.xz + tar -xf ldc2-${LDC_VERSION_ARMHF}-linux-armhf.tar.xz + mv ldc2-${LDC_VERSION_ARMHF}-linux-armhf dlang-${ARCH} + rm -rf ldc2-${LDC_VERSION_ARMHF}-linux-armhf.tar.xz # Create chrooted environment sudo mkdir ${CHROOT_DIR} sudo debootstrap --foreign --no-check-gpg --variant=buildd --arch=${CHROOT_ARCH} ${VERSION} ${CHROOT_DIR} ${DEBIAN_MIRROR} @@ -49,10 +54,10 @@ function setup_arm64_chroot { # Host dependencies sudo apt-get install -qq -y ${HOST_DEPENDENCIES} # Download LDC compiler - wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-aarch64.tar.xz - tar -xf ldc2-1.11.0-linux-aarch64.tar.xz - mv ldc2-1.11.0-linux-aarch64 dlang-${ARCH} - rm -rf ldc2-1.11.0-linux-aarch64.tar.xz + wget 
https://github.com/ldc-developers/ldc/releases/download/v${LDC_VERSION_ARM64}/ldc2-${LDC_VERSION_ARM64}-linux-aarch64.tar.xz + tar -xf ldc2-${LDC_VERSION_ARM64}-linux-aarch64.tar.xz + mv ldc2-${LDC_VERSION_ARM64}-linux-aarch64 dlang-${ARCH} + rm -rf ldc2-${LDC_VERSION_ARM64}-linux-aarch64.tar.xz # ARM64 qemu-debootstrap needs to be 1.0.78, Trusty is 1.0.59 #sudo echo "deb http://archive.ubuntu.com/ubuntu xenial main restricted universe multiverse" >> /etc/apt/sources.list diff --git a/CHANGELOG.md b/CHANGELOG.md index 67b6a04e..b316a5fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,37 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## 2.2.6 - 2019-03-12 +### Fixed +* Resolve application crash when unable to delete remote folders when business retention policies are enabled +* Resolve deprecation warning: loop index implicitly converted from size_t to int +* Resolve warnings regarding 'bashisms' +* Resolve handling of notification failure if dbus server has not started or available +* Resolve handling of response JSON to ensure that 'id' key element is always checked for +* Resolve excessive & needless logging in monitor mode +* Resolve compiling with LDC on Alpine as musl lacks some standard interfaces +* Resolve notification issues when offline and cannot act on changes +* Resolve Docker entrypoint.sh to accept command line arguments +* Resolve to create a new upload session on reinit +* Resolve where on OneDrive query failure, default root and drive id is used if a response is not returned +* Resolve Key not found: nextExpectedRanges when attempting session uploads and incorrect response is returned +* Resolve application crash when re-using an authentication URI twice after previous --logout +* Resolve creating a folder on a shared personal folder appears successful but returns a JSON error +* Resolve to treat mv of new file as 
upload of mv target +* Update Debian i386 build dependencies +* Update handling of --get-O365-drive-id to print out all 'site names' that match the explicit search entry rather than just the last match +* Update Docker readme & documentation +* Update handling of validating local file permissions for new file uploads +### Added +* Add support for install & uninstall on RHEL / CentOS 6.x +* Add support for when notifications are enabled, display the number of OneDrive changes to process if any are found +* Add 'config' option 'min_notif_changes' for minimum number of changes to notify on, default = 5 +* Add additional Docker container builds utilising a smaller OS footprint +* Add configurable interval of logging in monitor mode +* Implement new CLI option --skip-dot-files to skip .files and .folders if option is used +* Implement new CLI option --check-for-nosync to ignore folder when special file (.nosync) present +* Implement new CLI option --dry-run + ## 2.2.5 - 2019-01-16 ### Fixed * Update handling of HTTP 412 - Precondition Failed errors diff --git a/README.md b/README.md index e92eb82d..c81bc806 100644 --- a/README.md +++ b/README.md @@ -122,8 +122,9 @@ sudo pacman -S libnotify ```text sudo apt-get install libcurl4-openssl-dev sudo apt-get install libsqlite3-dev -wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-armhf.tar.xz -tar -xvf ldc2-1.11.0-linux-armhf.tar.xz +sudo apt-get install libxml2 +wget https://github.com/ldc-developers/ldc/releases/download/v1.13.0/ldc2-1.13.0-linux-armhf.tar.xz +tar -xvf ldc2-1.13.0-linux-armhf.tar.xz ``` For notifications the following is necessary: ```text @@ -134,8 +135,9 @@ sudo apt install libnotify-dev ```text sudo apt-get install libcurl4-openssl-dev sudo apt-get install libsqlite3-dev -wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-aarch64.tar.xz -tar -xvf ldc2-1.11.0-linux-aarch64.tar.xz +sudo apt-get install libxml2 +wget 
https://github.com/ldc-developers/ldc/releases/download/v1.14.0/ldc2-1.14.0-linux-aarch64.tar.xz +tar -xvf ldc2-1.14.0-linux-aarch64.tar.xz ``` For notifications the following is necessary: ```text @@ -206,7 +208,7 @@ sudo make install ```text git clone https://github.com/abraunegg/onedrive.git cd onedrive -make DC=~/ldc2-1.11.0-linux-armhf/bin/ldmd2 +make DC=~/ldc2-1.13.0-linux-armhf/bin/ldmd2 sudo make install ``` @@ -214,7 +216,7 @@ sudo make install ```text git clone https://github.com/abraunegg/onedrive.git cd onedrive -make DC=~/ldc2-1.11.0-linux-aarch64/bin/ldmd2 +make DC=~/ldc2-1.14.0-linux-aarch64/bin/ldmd2 sudo make install ``` @@ -253,6 +255,49 @@ If your system utilises curl >= 7.62.0 you may need to use `--force-http-1.1` in After installing the application you must run it at least once from the terminal to authorize it. You will be asked to open a specific link using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving the permission, you will be redirected to a blank page. Copy the URI of the blank page into the application. +```text +[user@hostname ~]$ onedrive + +Authorize this app visiting: + +https://..... + +Enter the response uri: + +``` + +### Testing your configuration +You are able to test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded or removed, however the application will display what 'would' have occurred. For example: +```text +onedrive --synchronize --verbose --dry-run +DRY-RUN Configured. Output below shows what 'would' have occurred. +Loading config ... +Using Config Dir: /home/user/.config/onedrive +Initializing the OneDrive API ... +Opening the item database ... +All operations will be performed in: /home/user/OneDrive +Initializing the Synchronization Engine ... 
+Account Type: personal +Default Drive ID: +Default Root ID: +Remaining Free Space: 5368709120 +Fetching details for OneDrive Root +OneDrive Root exists in the database +Syncing changes from OneDrive ... +Applying changes of Path ID: +Uploading differences of . +Processing root +The directory has not changed +Uploading new items of . +OneDrive Client requested to create remote path: ./newdir +The requested directory to create was not found on OneDrive - creating remote directory: ./newdir +Successfully created the remote directory ./newdir on OneDrive +Uploading new file ./newdir/newfile.txt ... done. +Remaining free space: 5368709076 +Applying changes of Path ID: +``` + +**Note:** `--dry-run` can only be used with `--synchronize`. It cannot be used with `--monitor` and will be ignored. ### Show your configuration To validate your configuration the application will use, utilise the following: @@ -264,6 +309,7 @@ This will display all the pertinent runtime interpretation of the options and co Config path = /home/alex/.config/onedrive Config file found in config path = false Config option 'sync_dir' = /home/alex/OneDrive +Config option 'skip_dir' = Config option 'skip_file' = ~* Config option 'skip_dotfiles' = false Config option 'skip_symlinks' = false @@ -383,6 +429,18 @@ If you want to just delete the application key, but keep the items database: rm -f ~/.config/onedrive/refresh_token ``` +### Handling a OneDrive account password change +If you change your OneDrive account password, the client will no longer be authorised to sync, and will generate the following error: +```text +ERROR: OneDrive returned a 'HTTP 401 Unauthorized' - Cannot Initialize Sync Engine +``` +To re-authorise the client, follow the steps below: +1. If running the client as a service (init.d or systemd), stop the service +2. Run the command `onedrive --logout`. This will clean up the previous authorisation, and will prompt you to re-authorise as per initial configuration. +3. 
Restart the client if running as a service or perform a manual sync + +The application will now sync with OneDrive with the new credentials. + ## Additional Configuration Additional configuration is optional. If you want to change the defaults, you can copy and edit the included config file into your `~/.config/onedrive` directory: @@ -409,11 +467,23 @@ Proceed with caution here when changing the default sync dir from ~/OneDrive to The issue here is around how the client stores the sync_dir path in the database. If the config file is missing, or you don't use the `--syncdir` parameter - what will happen is the client will default back to `~/OneDrive` and 'think' that either all your data has been deleted - thus delete the content on OneDrive, or will start downloading all data from OneDrive into the default location. -### skip_file -Example: `skip_file = "~*|Desktop|Documents/OneNote*|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/config.xlaunch|Documents/WindowsPowerShell"` +### skip_dir +Example: `skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell"` Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. +**Note:** after changing `skip_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` + +### skip_file +Example: `skip_file = "~*|Documents/OneNote*|Documents/config.xlaunch|myfile.ext"` + +Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. 
+ +Files can be skipped in the following fashion: +* Specify a wildcard, eg: '*.txt' (skip all txt files) +* Explicitly specify the filename and it's full path relative to your sync_dir, eg: 'path/to/file/filename.ext' +* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext' + **Note:** after changing `skip_file`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` **Note:** Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process. @@ -456,6 +526,17 @@ Year 2 ``` **Note:** after changing the sync_list, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` +### Skipping directories from syncing +There are several mechanisms available to 'skip' a directory from scanning: +* Utilise 'skip_dir' +* Utilise 'sync_list' + +One further method is to add a '.nosync' empty file to any folder. When this file is present, adding `--check-for-nosync` to your command line will now make the sync process skip any folder where the '.nosync' file is present. + +To make this a permanent change to always skip folders when a '.nosync' empty file is present, add the following to your config file: + +Example: `check_nosync = "true"` + ### Shared folders Folders shared with you can be synced by adding them to your OneDrive. To do that open your Onedrive, go to the Shared files list, right click on the folder you want to sync and then click on "Add to my OneDrive". @@ -553,6 +634,30 @@ systemctl --user start onedrive-work ``` Repeat these steps for each OneDrive account that you wish to use. +### Access OneDrive service through a proxy +If you have a requirement to run the client through a proxy, there are a couple of ways to achieve this: +1. 
Set proxy configuration in `~/.bashrc` to allow the authorization process and when utilizing `--synchronize` +2. If running as a systemd service, edit the applicable systemd service file to include the proxy configuration information: +```text +[Unit] +Description=OneDrive Free Client +Documentation=https://github.com/abraunegg/onedrive +After=network-online.target +Wants=network-online.target + +[Service] +Environment="HTTP_PROXY=http://ip.address:port" +Environment="HTTPS_PROXY=http://ip.address:port" +ExecStart=/usr/local/bin/onedrive --monitor +Restart=on-failure +RestartSec=3 + +[Install] +WantedBy=default.target +``` + +**Note:** After modifying the service files, you will need to run `sudo systemctl daemon-reload` to ensure the service file changes are picked up. A restart of the OneDrive service will also be required to pick up the change to send the traffic via the proxy server + ## Extra ### Reporting issues @@ -586,6 +691,8 @@ Options: --check-for-nomount Check for the presence of .nosync in the syncdir root. If found, do not perform sync. + --check-for-nosync + Check for the presence of .nosync in each directory. If found, skip directory from sync. 
--confdir ARG Set the directory used to store the configuration files --create-directory ARG @@ -604,6 +711,8 @@ Options: Only download remote changes --disable-upload-validation Disable upload validation when uploading to OneDrive + --dry-run + Perform a trial sync with no changes made --enable-logging Enable client activity to a separate log file --force-http-1.1 diff --git a/contrib/gentoo/onedrive-2.2.5.ebuild b/contrib/gentoo/onedrive-2.2.6.ebuild similarity index 100% rename from contrib/gentoo/onedrive-2.2.5.ebuild rename to contrib/gentoo/onedrive-2.2.6.ebuild diff --git a/onedrive.1.in b/onedrive.1.in index c4c19da7..d3fa2d74 100644 --- a/onedrive.1.in +++ b/onedrive.1.in @@ -1,4 +1,4 @@ -.TH ONEDRIVE "1" "January 2019" "2.2.5" "User Commands" +.TH ONEDRIVE "1" "March 2019" "2.2.6" "User Commands" .SH NAME onedrive \- folder synchronization with OneDrive .SH SYNOPSIS @@ -21,6 +21,9 @@ Without any option given, no sync is done and the program exits. \fB\-\-check\-for\-nomount\fP Check for the presence of .nosync in the syncdir root. If found, do not perform sync. .TP +\fB\-\-check\-for\-nosync\fP +Check for the presence of .nosync in each directory. If found, skip directory from sync. +.TP \fB\-\-confdir\fP ARG Set the directory used to store the configuration files .TP @@ -48,6 +51,9 @@ Display the sync status of the client \- no sync will be performed. \fB\-d \-\-download\-only\fP Only download remote changes .TP +\fB\-\-dry\-run\fP +Perform a trial sync with no changes made. Can ONLY be used with --synchronize. Will be ignored for --monitor +.TP \fB\-\-enable\-logging\fP Enable client activity to a separate log file .TP diff --git a/pacman/PKGBUILD b/pacman/PKGBUILD index c86b3bf3..a6ca008e 100644 --- a/pacman/PKGBUILD +++ b/pacman/PKGBUILD @@ -1,5 +1,5 @@ pkgname=onedrive -pkgver=2.2.5 +pkgver=2.2.6 pkgrel=1 #patch-level (Increment this when patch is applied) pkgdesc="A free OneDrive Client for Linux. 
This is a fork of the https://github.com/skilion/onedrive repository" license=("unknown") diff --git a/spec/onedrive.spec b/spec/onedrive.spec index 0b1835e7..1ab4b3fc 100644 --- a/spec/onedrive.spec +++ b/spec/onedrive.spec @@ -6,7 +6,7 @@ %endif Name: onedrive -Version: 2.2.5 +Version: 2.2.6 Release: 1%{?dist} Summary: Microsoft OneDrive Client Group: System Environment/Network diff --git a/src/config.d b/src/config.d index 32acba34..9bef00b8 100644 --- a/src/config.d +++ b/src/config.d @@ -9,6 +9,7 @@ final class Config public string refreshTokenFilePath; public string deltaLinkFilePath; public string databaseFilePath; + public string databaseFilePathDryRun; public string uploadStateFilePath; public string syncListFilePath; public string homePath; @@ -28,10 +29,12 @@ final class Config stringValues["single_directory"] = ""; stringValues["sync_dir"] = "~/OneDrive"; stringValues["skip_file"] = "~*"; + stringValues["skip_dir"] = ""; stringValues["log_dir"] = "/var/log/onedrive/"; stringValues["drive_id"] = ""; boolValues["upload_only"] = false; - boolValues["check_for_nomount"] = false; + boolValues["check_nomount"] = false; + boolValues["check_nosync"] = false; boolValues["download_only"] = false; boolValues["disable_notifications"] = false; boolValues["disable_upload_validation"] = false; @@ -41,7 +44,8 @@ final class Config boolValues["no_remote_delete"] = false; boolValues["skip_symlinks"] = false; boolValues["debug_https"] = false; - boolValues["skip_dotfiles"] = false; + boolValues["skip_dotfiles"] = false; + boolValues["dry_run"] = false; longValues["verbose"] = 0; longValues["monitor_interval"] = 45, longValues["min_notif_changes"] = 5; @@ -109,6 +113,7 @@ final class Config refreshTokenFilePath = configDirName ~ "/refresh_token"; deltaLinkFilePath = configDirName ~ "/delta_link"; databaseFilePath = configDirName ~ "/items.sqlite3"; + databaseFilePathDryRun = configDirName ~ "/items-dryrun.sqlite3"; uploadStateFilePath = configDirName ~ "/resume_upload"; 
userConfigFilePath = configDirName ~ "/config"; syncListFilePath = configDirName ~ "/sync_list"; @@ -157,9 +162,12 @@ final class Config args, std.getopt.config.bundling, std.getopt.config.caseSensitive, - "check-for-nomount", + "check-for-nomount", "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", - &boolValues["check_for_nomount"], + &boolValues["check_nomount"], + "check-for-nosync", + "Check for the presence of .nosync in each directory. If found, skip directory from sync.", + &boolValues["check_nosync"], "create-directory", "Create a directory on OneDrive - no sync will be performed.", &stringValues["create_directory"], @@ -184,6 +192,9 @@ final class Config "download-only|d", "Only download remote changes", &boolValues["download_only"], + "dry-run", + "Perform a trial sync with no changes made", + &boolValues["dry_run"], "enable-logging", "Enable client activity to a separate log file", &boolValues["enable_logging"], diff --git a/src/main.d b/src/main.d index 60323669..dfe72087 100644 --- a/src/main.d +++ b/src/main.d @@ -64,6 +64,16 @@ int main(string[] args) // Are we able to reach the OneDrive Service bool online = false; + + // dry-run database setup + if (cfg.getValueBool("dry_run")) { + // Make a copy of the original items.sqlite3 for use as the dry run copy if it exists + if (exists(cfg.databaseFilePath)) { + // copy the file + log.vdebug("Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); + copy(cfg.databaseFilePath,cfg.databaseFilePathDryRun); + } + } // sync_dir environment handling to handle ~ expansion properly string syncDir; @@ -107,18 +117,24 @@ int main(string[] args) // upgrades if (exists(configDirName ~ "/items.db")) { - remove(configDirName ~ "/items.db"); + if (!cfg.getValueBool("dry_run")) { + safeRemove(configDirName ~ "/items.db"); + } log.logAndNotify("Database schema changed, resync needed"); cfg.setValueBool("resync", true); } if (cfg.getValueBool("resync") || 
cfg.getValueBool("logout")) { log.vlog("Deleting the saved status ..."); - safeRemove(cfg.databaseFilePath); - safeRemove(cfg.deltaLinkFilePath); - safeRemove(cfg.uploadStateFilePath); + if (!cfg.getValueBool("dry_run")) { + safeRemove(cfg.databaseFilePath); + safeRemove(cfg.deltaLinkFilePath); + safeRemove(cfg.uploadStateFilePath); + } if (cfg.getValueBool("logout")) { - safeRemove(cfg.refreshTokenFilePath); + if (!cfg.getValueBool("dry_run")) { + safeRemove(cfg.refreshTokenFilePath); + } } } @@ -139,7 +155,9 @@ int main(string[] args) } // Config Options + writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync")); writeln("Config option 'sync_dir' = ", syncDir); + writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir")); writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file")); writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles")); writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks")); @@ -211,9 +229,17 @@ int main(string[] args) return EXIT_FAILURE; } - // initialize system + // Initialize the item database log.vlog("Opening the item database ..."); - itemDb = new ItemDatabase(cfg.databaseFilePath); + if (!cfg.getValueBool("dry_run")) { + // Load the items.sqlite3 file as the database + log.vdebug("Using database file: ", cfg.databaseFilePath); + itemDb = new ItemDatabase(cfg.databaseFilePath); + } else { + // Load the items-dryrun.sqlite3 file as the database + log.vdebug("Using database file: ", cfg.databaseFilePathDryRun); + itemDb = new ItemDatabase(cfg.databaseFilePathDryRun); + } log.vlog("All operations will be performed in: ", syncDir); if (!exists(syncDir)) { @@ -235,9 +261,16 @@ int main(string[] args) } } selectiveSync.load(cfg.syncListFilePath); - selectiveSync.setMask(cfg.getValueString("skip_file")); - // Initialise the sync engine + // Configure skip_dir & skip_file from config entries + log.vdebug("Configuring skip_dir ..."); + log.vdebug("skip_dir: 
", cfg.getValueString("skip_dir")); + selectiveSync.setDirMask(cfg.getValueString("skip_dir")); + log.vdebug("Configuring skip_file ..."); + log.vdebug("skip_file: ", cfg.getValueString("skip_file")); + selectiveSync.setFileMask(cfg.getValueString("skip_file")); + + // Initialize the sync engine log.logAndNotify("Initializing the Synchronization Engine ..."); auto sync = new SyncEngine(cfg, oneDrive, itemDb, selectiveSync); @@ -280,7 +313,7 @@ int main(string[] args) if (cfg.getValueString("remove_directory") != "") { // remove a directory on OneDrive - sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory")); + sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory")); } } @@ -384,7 +417,7 @@ int main(string[] args) signal(SIGTERM, &exitHandler); // initialise the monitor class - if (!cfg.getValueBool("download_only")) m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks")); + if (!cfg.getValueBool("download_only")) m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks"), cfg.getValueBool("check_nosync")); // monitor loop immutable auto checkInterval = dur!"seconds"(cfg.getValueLong("monitor_interval")); immutable auto logInterval = cfg.getValueLong("monitor_log_frequency"); @@ -431,8 +464,21 @@ int main(string[] args) } } - // workaround for segfault in std.net.curl.Curl.shutdown() on exit + // Workaround for segfault in std.net.curl.Curl.shutdown() on exit oneDrive.http.shutdown(); + + // Make sure the .wal file is incorporated into the main db before we exit + destroy(itemDb); + + // --dry-run temp database cleanup + if (cfg.getValueBool("dry_run")) { + if (exists(cfg.databaseFilePathDryRun)) { + // remove the file + log.vdebug("Removing items-dryrun.sqlite3 as dry run operations complete"); + safeRemove(cfg.databaseFilePathDryRun); + } + } + return EXIT_SUCCESS; } diff --git a/src/monitor.d b/src/monitor.d index 135ceec7..ed108031 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -32,6 +32,8 @@ 
final class Monitor private void[] buffer; // skip symbolic links bool skip_symlinks; + // check for .nosync if enabled + bool check_nosync; private SelectiveSync selectiveSync; @@ -46,10 +48,11 @@ final class Monitor this.selectiveSync = selectiveSync; } - void init(Config cfg, bool verbose, bool skip_symlinks) + void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync) { this.verbose = verbose; this.skip_symlinks = skip_symlinks; + this.check_nosync = check_nosync; assert(onDirCreated && onFileChanged && onDelete && onMove); fd = inotify_init(); @@ -74,7 +77,10 @@ final class Monitor // skip filtered items if (dirname != ".") { - if (selectiveSync.isNameExcluded(baseName(dirname))) { + if (selectiveSync.isDirNameExcluded(strip(dirname,"./"))) { + return; + } + if (selectiveSync.isFileNameExcluded(baseName(dirname))) { return; } if (selectiveSync.isPathExcluded(buildNormalizedPath(dirname))) { @@ -91,6 +97,14 @@ final class Monitor } } + // Do we need to check for .nosync? 
Only if check_nosync is true + if (check_nosync) { + if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) { + log.vlog("Skipping watching path - .nosync found & --check-for-nosync enabled: ", buildNormalizedPath(dirname)); + return; + } + } + add(dirname); foreach(DirEntry entry; dirEntries(dirname, SpanMode.shallow, false)) { if (entry.isDir) { @@ -178,7 +192,10 @@ final class Monitor // skip filtered items path = getPath(event); - if (selectiveSync.isNameExcluded(baseName(path))) { + if (selectiveSync.isDirNameExcluded(strip(path,"./"))) { + goto skip; + } + if (selectiveSync.isFileNameExcluded(strip(path,"./"))) { goto skip; } if (selectiveSync.isPathExcluded(path)) { diff --git a/src/onedrive.d b/src/onedrive.d index 64bac09f..b39bab86 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -8,6 +8,8 @@ import progress; import config; static import log; shared bool debugResponse = false; +private bool dryRun = false; +private bool simulateNoRefreshTokenFile = false; private immutable { // Client Identifier @@ -104,6 +106,14 @@ final class OneDriveApi // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 http.handle.set(CurlOption.http_version,2); } + + // Do we set the dryRun handlers? 
+ if (cfg.getValueBool("dry_run")) { + .dryRun = true; + if (cfg.getValueBool("logout")) { + .simulateNoRefreshTokenFile = true; + } + } } bool init() @@ -112,17 +122,33 @@ final class OneDriveApi driveId = cfg.getValueString("drive_id"); if (driveId.length) { driveUrl = driveByIdUrl ~ driveId; - itemByIdUrl = driveUrl ~ "/items"; - itemByPathUrl = driveUrl ~ "/root:/"; + itemByIdUrl = driveUrl ~ "/items"; + itemByPathUrl = driveUrl ~ "/root:/"; } } catch (Exception e) {} - try { - refreshToken = readText(cfg.refreshTokenFilePath); - } catch (FileException e) { - return authorize(); + if (!.dryRun) { + // original code + try { + refreshToken = readText(cfg.refreshTokenFilePath); + } catch (FileException e) { + return authorize(); + } + return true; + } else { + // --dry-run + if (!.simulateNoRefreshTokenFile) { + try { + refreshToken = readText(cfg.refreshTokenFilePath); + } catch (FileException e) { + return authorize(); + } + return true; + } else { + // --dry-run & --logout + return authorize(); + } } - return true; } bool authorize() @@ -358,11 +384,19 @@ final class OneDriveApi private void acquireToken(const(char)[] postData) { JSONValue response = post(tokenUrl, postData); - accessToken = "bearer " ~ response["access_token"].str(); - refreshToken = response["refresh_token"].str(); - accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); - std.file.write(cfg.refreshTokenFilePath, refreshToken); - if (printAccessToken) writeln("New access token: ", accessToken); + if ("access_token" in response){ + accessToken = "bearer " ~ response["access_token"].str(); + refreshToken = response["refresh_token"].str(); + accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); + if (!.dryRun) { + std.file.write(cfg.refreshTokenFilePath, refreshToken); + } + if (printAccessToken) writeln("New access token: ", accessToken); + } else { + log.error("\nInvalid authentication response from OneDrive. 
Please check the response uri\n"); + // re-authorize + authorize(); + } } private void checkAccessTokenExpired() @@ -692,6 +726,12 @@ final class OneDriveApi { switch(http.statusLine.code) { + // 400 - Bad Request + case 400: + // Bad Request .. how should we act? + log.vlog("OneDrive returned a 'HTTP 400 - Bad Request' - gracefully handling error"); + break; + // 412 - Precondition Failed case 412: log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error"); diff --git a/src/selective.d b/src/selective.d index 829c3267..08661983 100644 --- a/src/selective.d +++ b/src/selective.d @@ -10,6 +10,7 @@ final class SelectiveSync { private string[] paths; private Regex!char mask; + private Regex!char dirmask; void load(string filepath) { @@ -22,20 +23,46 @@ final class SelectiveSync } } - void setMask(const(char)[] mask) + void setFileMask(const(char)[] mask) { this.mask = wild2regex(mask); } - // config file skip_file parameter - bool isNameExcluded(string name) + void setDirMask(const(char)[] dirmask) { - // Does the file match skip_file config entry? - // Returns true if the file matches a skip_file config entry - // Returns false if no match - return !name.matchFirst(mask).empty; + this.dirmask = wild2regex(dirmask); } - + + // config file skip_dir parameter + bool isDirNameExcluded(string name) + { + // Does the directory name match skip_dir config entry? + // Returns true if the name matches a skip_dir config entry + // Returns false if no match + return !name.matchFirst(dirmask).empty; + } + + // config file skip_file parameter + bool isFileNameExcluded(string name) + { + // Does the file name match skip_file config entry? 
+ // Returns true if the name matches a skip_file config entry + // Returns false if no match + + // Try full path match first + if (!name.matchFirst(mask).empty) { + return true; + } else { + // check just the file name + string filename = baseName(name); + if(!filename.matchFirst(mask).empty) { + return true; + } + } + // no match + return false; + } + // config sync_list file handling bool isPathExcluded(string path) { diff --git a/src/sync.d b/src/sync.d index 4b0937e1..a2204dcb 100644 --- a/src/sync.d +++ b/src/sync.d @@ -186,6 +186,8 @@ final class SyncEngine private string[] skippedItems; // list of items to delete after the changes has been downloaded private string[2][] idsToDelete; + // list of items we fake created when running --dry-run + private string[2][] idsFaked; // default drive id private string defaultDriveId; // default root id @@ -200,6 +202,8 @@ final class SyncEngine private bool downloadFailed = false; // initialization has been done private bool initDone = false; + // sync engine dryRun flag + private bool dryRun = false; this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, SelectiveSync selectiveSync) { @@ -209,6 +213,7 @@ final class SyncEngine this.itemdb = itemdb; this.selectiveSync = selectiveSync; // session = UploadSession(onedrive, cfg.uploadStateFilePath); + this.dryRun = cfg.getValueBool("dry_run"); } void reset() @@ -221,7 +226,6 @@ final class SyncEngine // Set accountType, defaultDriveId, defaultRootId & remainingFreeSpace once and reuse where possible JSONValue oneDriveDetails; - if (initDone) { return; } @@ -750,7 +754,7 @@ final class SyncEngine bool unwanted; unwanted |= skippedItems.find(item.parentId).length != 0; if (unwanted) log.vdebug("Flagging as unwanted: find(item.parentId).length != 0"); - unwanted |= selectiveSync.isNameExcluded(item.name); + unwanted |= selectiveSync.isFileNameExcluded(item.name); if (unwanted) log.vdebug("Flagging as unwanted: item name is excluded: ", item.name); // check the item type 
@@ -825,8 +829,17 @@ final class SyncEngine if (itemdb.idInLocalDatabase(item.driveId, item.id)){ oldPath = itemdb.computePath(item.driveId, item.id); if (!isItemSynced(oldItem, oldPath)) { - log.vlog("The local item is unsynced, renaming"); - if (exists(oldPath)) safeRename(oldPath); + if (exists(oldPath)) { + auto ext = extension(oldPath); + auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; + log.vlog("The local item is unsynced, renaming: ", oldPath, " -> ", newPath); + if (!dryRun) { + safeRename(oldPath); + } else { + log.vdebug("DRY-RUN: Skipping local file rename"); + // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist + } + } cached = false; } } @@ -866,8 +879,14 @@ final class SyncEngine return; } else { // TODO: force remote sync by deleting local item - log.vlog("The local item is out of sync, renaming..."); - safeRename(path); + auto ext = extension(path); + auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; + log.vlog("The local item is out of sync, renaming: ", path, " -> ", newPath); + if (!dryRun) { + safeRename(path); + } else { + log.vdebug("DRY-RUN: Skipping local file rename"); + } } } final switch (item.type) { @@ -877,7 +896,12 @@ final class SyncEngine case ItemType.dir: case ItemType.remote: log.log("Creating directory: ", path); - mkdirRecurse(path); + if (!dryRun) { + mkdirRecurse(path); + } else { + // we dont create the directory, but we need to track that we 'faked it' + idsFaked ~= [item.driveId, item.id]; + } break; } } @@ -945,39 +969,43 @@ final class SyncEngine } auto fileSize = fileDetails["size"].integer; - try { - onedrive.downloadById(item.driveId, item.id, path, fileSize); - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests) - // https://github.com/abraunegg/onedrive/issues/133 - // Back off & retry with incremental delay - int 
retryCount = 10; - int retryAttempts = 1; - int backoffInterval = 2; - while (retryAttempts < retryCount){ - Thread.sleep(dur!"seconds"(retryAttempts*backoffInterval)); - try { - onedrive.downloadById(item.driveId, item.id, path, fileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // Increment & loop around - retryAttempts++; + + if (!dryRun) { + try { + onedrive.downloadById(item.driveId, item.id, path, fileSize); + } catch (OneDriveException e) { + if (e.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests) + // https://github.com/abraunegg/onedrive/issues/133 + // Back off & retry with incremental delay + int retryCount = 10; + int retryAttempts = 1; + int backoffInterval = 2; + while (retryAttempts < retryCount){ + Thread.sleep(dur!"seconds"(retryAttempts*backoffInterval)); + try { + onedrive.downloadById(item.driveId, item.id, path, fileSize); + // successful download + retryAttempts = retryCount; + } catch (OneDriveException e) { + if (e.httpStatusCode == 429) { + // Increment & loop around + retryAttempts++; + } } } } + } catch (std.exception.ErrnoException e) { + // There was a file system error + log.error("ERROR: ", e.msg); + downloadFailed = true; + return; } - } catch (std.exception.ErrnoException e) { - // There was a file system error - log.error("ERROR: ", e.msg); - downloadFailed = true; - return; + setTimes(path, item.mtime, item.mtime); } + writeln("done."); log.fileOnly("Downloading file ", path, " ... 
done."); - setTimes(path, item.mtime, item.mtime); } // returns true if the given item corresponds to the local one @@ -1024,10 +1052,13 @@ final class SyncEngine if (!itemdb.selectById(i[0], i[1], item)) continue; // check if the item is in the db string path = itemdb.computePath(i[0], i[1]); log.log("Trying to delete item ", path); - itemdb.deleteById(item.driveId, item.id); - if (item.remoteDriveId != null) { - // delete the linked remote folder - itemdb.deleteById(item.remoteDriveId, item.remoteId); + if (!dryRun) { + // Actually process the database entry removal + itemdb.deleteById(item.driveId, item.id); + if (item.remoteDriveId != null) { + // delete the linked remote folder + itemdb.deleteById(item.remoteDriveId, item.remoteId); + } } bool needsRemoval = false; if (exists(path)) { @@ -1047,25 +1078,31 @@ final class SyncEngine } if (needsRemoval) { log.log("Deleting item ", path); - if (isFile(path)) { - remove(path); - } else { - try { - // Remove any children of this path if they still exist - // Resolve 'Directory not empty' error when deleting local files - foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { - attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); + if (!dryRun) { + if (isFile(path)) { + remove(path); + } else { + try { + // Remove any children of this path if they still exist + // Resolve 'Directory not empty' error when deleting local files + foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { + attrIsDir(child.linkAttributes) ? 
rmdir(child.name) : remove(child.name); + } + // Remove the path now that it is empty of children + rmdirRecurse(path); + } catch (FileException e) { + log.log(e.msg); } - // Remove the path now that it is empty of children - rmdirRecurse(path); - } catch (FileException e) { - log.log(e.msg); } } } } - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); + + if (!dryRun) { + // clean up idsToDelete + idsToDelete.length = 0; + assumeSafeAppend(idsToDelete); + } } // scan the given directory for differences and new items @@ -1079,14 +1116,33 @@ final class SyncEngine } log.vlog("Uploading new items of ", path); uploadNewItems(path); + + // clean up idsToDelete only if --dry-run is set + if (dryRun) { + idsToDelete.length = 0; + assumeSafeAppend(idsToDelete); + } } private void uploadDifferences(Item item) { + // see if this item.id we were supposed to have deleted + // match early and return + if (dryRun) { + foreach (i; idsToDelete) { + if (i[1] == item.id) { + return; + } + } + } + log.vlog("Processing ", item.name); - + bool unwanted = false; string path; - bool unwanted = selectiveSync.isNameExcluded(item.name); + + // Is item.name or the path excluded + unwanted = selectiveSync.isFileNameExcluded(item.name); + if (!unwanted) { path = itemdb.computePath(item.driveId, item.id); unwanted = selectiveSync.isPathExcluded(path); @@ -1145,12 +1201,41 @@ final class SyncEngine } } } else { - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); + // Directory does not exist locally + // If we are in a --dry-run situation - this directory may never have existed as we never downloaded it + if (!dryRun) { + log.vlog("The directory has been deleted locally"); + if (noRemoteDelete) { + // do not process remote directory delete + log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete 
configured"); + } else { + uploadDeleteItem(item, path); + } } else { - uploadDeleteItem(item, path); + // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. + // Check if path does not exist in database + if (!itemdb.selectByPath(path, defaultDriveId, item)) { + // Path not found in database + log.vlog("The directory has been deleted locally"); + if (noRemoteDelete) { + // do not process remote directory delete + log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); + } else { + uploadDeleteItem(item, path); + } + } else { + // Path was found in the database + // Did we 'fake create it' as part of --dry-run ? + foreach (i; idsFaked) { + if (i[1] == item.id) { + log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); + return; + } + } + // item.id did not match a 'faked' download new directory creation + log.vlog("The directory has been deleted locally"); + uploadDeleteItem(item, path); + } } } } @@ -1195,101 +1280,113 @@ final class SyncEngine string eTag = item.eTag; if (!testFileHash(path, item)) { log.vlog("The file content has changed"); - write("Uploading file ", path, " ... "); + write("Uploading modified file ", path, " ... "); JSONValue response; - // Are we using OneDrive Personal or OneDrive Business? 
- // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (getSize(path) <= thresholdFileSize) { - try { - response = onedrive.simpleUploadReplace(path, item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - // HTTP request returned status code 404 - the eTag provided does not exist - // Delete record from the local database - file will be uploaded as a new file - log.vlog("OneDrive returned a 'HTTP 404 - eTag Issue' - gracefully handling error"); - itemdb.deleteById(item.driveId, item.id); - return; + if (!dryRun) { + // Are we using OneDrive Personal or OneDrive Business? + // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) + // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here + if (accountType == "personal"){ + // Original file upload logic + if (getSize(path) <= thresholdFileSize) { + try { + response = onedrive.simpleUploadReplace(path, item.driveId, item.id, item.eTag); + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + // HTTP request returned status code 404 - the eTag provided does not exist + // Delete record from the local database - file will be uploaded as a new file + log.vlog("OneDrive returned a 'HTTP 404 - eTag Issue' - gracefully handling error"); + itemdb.deleteById(item.driveId, item.id); + return; + } + + // Resolve https://github.com/abraunegg/onedrive/issues/36 + if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { + // The file is currently checked out or locked for editing by another user + // We cant upload this file at this time + writeln(" skipped."); + 
log.fileOnly("Uploading modified file ", path, " ... skipped."); + write("", path, " is currently checked out or locked for editing by another user."); + log.fileOnly(path, " is currently checked out or locked for editing by another user."); + return; + } + + if (e.httpStatusCode == 412) { + // HTTP request returned status code 412 - ETag does not match current item's value + // Delete record from the local database - file will be uploaded as a new file + log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue"); + log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); + itemdb.deleteById(item.driveId, item.id); + return; + } + + if (e.httpStatusCode == 504) { + // HTTP request returned status code 504 (Gateway Timeout) + // Try upload as a session + response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); + } + else throw e; } + writeln("done."); + } else { + writeln(""); + try { + response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); + } catch (OneDriveException e) { + if (e.httpStatusCode == 412) { + // HTTP request returned status code 412 - ETag does not match current item's value + // Delete record from the local database - file will be uploaded as a new file + log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue"); + log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. 
Will upload as new file."); + itemdb.deleteById(item.driveId, item.id); + return; + } + } + writeln("done."); + } + } else { + // OneDrive Business Account - always use a session to upload + writeln(""); + + try { + response = session.upload(path, item.driveId, item.parentId, baseName(path)); + } catch (OneDriveException e) { // Resolve https://github.com/abraunegg/onedrive/issues/36 if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { // The file is currently checked out or locked for editing by another user // We cant upload this file at this time writeln(" skipped."); - log.fileOnly("Uploading file ", path, " ... skipped."); - write("", path, " is currently checked out or locked for editing by another user."); + log.fileOnly("Uploading modified file ", path, " ... skipped."); + writeln("", path, " is currently checked out or locked for editing by another user."); log.fileOnly(path, " is currently checked out or locked for editing by another user."); return; } - - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. 
Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - // Try upload as a session - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } - else throw e; } + writeln("done."); - } else { - writeln(""); - try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - return; - } - } - writeln("done."); - } - } else { - // OneDrive Business Account - always use a session to upload - writeln(""); - - try { - response = session.upload(path, item.driveId, item.parentId, baseName(path)); - } catch (OneDriveException e) { - - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln(" skipped."); - log.fileOnly("Uploading file ", path, " ... skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - return; - } + // As the session.upload includes the last modified time, save the response + saveItem(response); } - + log.fileOnly("Uploading modified file ", path, " ... 
done."); + // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded via simple upload + eTag = response["cTag"].str; + } else { + // we are --dry-run - simulate the file upload writeln("done."); - // As the session.upload includes the last modified time, save the response + response = createFakeResponse(path); + // Log action to log file + log.fileOnly("Uploading modified file ", path, " ... done."); saveItem(response); + return; } - log.fileOnly("Uploading file ", path, " ... done."); - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded via simple upload - eTag = response["cTag"].str; } if (accountType == "personal"){ // If Personal, call to update the modified time as stored on OneDrive - uploadLastModifiedTime(item.driveId, item.id, eTag, localModifiedTime.toUTC()); + if (!dryRun) { + uploadLastModifiedTime(item.driveId, item.id, eTag, localModifiedTime.toUTC()); + } } } else { log.vlog("The file has not changed"); @@ -1345,6 +1442,14 @@ final class SyncEngine } } + // Do we need to check for .nosync? 
Only if --check-for-nosync was passed in + if (cfg.getValueBool("check_nosync")) { + if (exists(path ~ "/.nosync")) { + log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); + return; + } + } + if (isSymlink(path)) { // if config says so we skip all symlinked items if (cfg.getValueBool("skip_symlinks")) { @@ -1379,12 +1484,20 @@ final class SyncEngine // filter out user configured items to skip if (path != ".") { - if (selectiveSync.isNameExcluded(baseName(path))) { - log.vlog("Skipping item - excluded by skip_file config: ", path); - return; + if (isDir(path)) { + if (selectiveSync.isDirNameExcluded(strip(path,"./"))) { + log.vlog("Skipping item - excluded by skip_dir config: ", path); + return; + } + } + if (isFile(path)) { + if (selectiveSync.isFileNameExcluded(strip(path,"./"))) { + log.vlog("Skipping item - excluded by skip_file config: ", path); + return; + } } if (selectiveSync.isPathExcluded(path)) { - log.vlog("Skipping item - path excluded: ", path); + log.vlog("Skipping item - path excluded by sync_list: ", path); return; } } @@ -1459,7 +1572,7 @@ final class SyncEngine return; } } - + // configure the parent item data if (hasId(onedrivePathDetails) && hasParentReference(onedrivePathDetails)){ log.vdebug("Parent path found, configuring parent item"); @@ -1467,11 +1580,10 @@ final class SyncEngine parent.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 } else { // OneDrive API query failed - log.error("\nERROR: Unable to query the following path due to OneDrive API regression: ", path); - log.error("ERROR: Refer to https://github.com/OneDrive/onedrive-api-docs/issues/976 for further details"); - log.error("WORKAROUND: Manually create the path above on OneDrive to workaround API issue\n"); - // return - return; + // Assume client defaults + log.vdebug("Parent path could not be queried, using OneDrive account defaults"); + parent.id = defaultRootId; // Should give 
something like 12345ABCDE1234A1!101 + parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 } } @@ -1485,27 +1597,35 @@ final class SyncEngine // The directory was not found log.vlog("The requested directory to create was not found on OneDrive - creating remote directory: ", path); - // Perform the database lookup - enforce(itemdb.selectByPath(dirName(path), parent.driveId, parent), "The parent item id is not in the database"); - JSONValue driveItem = [ - "name": JSONValue(baseName(path)), - "folder": parseJSON("{}") - ]; - - // Submit the creation request - // Fix for https://github.com/skilion/onedrive/issues/356 - try { - response = onedrive.createById(parent.driveId, parent.id, driveItem); - } catch (OneDriveException e) { - if (e.httpStatusCode == 409) { - // OneDrive API returned a 404 (above) to say the directory did not exist - // but when we attempted to create it, OneDrive responded that it now already exists - log.vlog("OneDrive reported that ", path, " already exists .. OneDrive API race condition"); - return; + if (!dryRun) { + // Perform the database lookup + enforce(itemdb.selectByPath(dirName(path), parent.driveId, parent), "The parent item id is not in the database"); + JSONValue driveItem = [ + "name": JSONValue(baseName(path)), + "folder": parseJSON("{}") + ]; + + // Submit the creation request + // Fix for https://github.com/skilion/onedrive/issues/356 + try { + response = onedrive.createById(parent.driveId, parent.id, driveItem); + } catch (OneDriveException e) { + if (e.httpStatusCode == 409) { + // OneDrive API returned a 404 (above) to say the directory did not exist + // but when we attempted to create it, OneDrive responded that it now already exists + log.vlog("OneDrive reported that ", path, " already exists .. 
OneDrive API race condition"); + return; + } } + // save the created directory + saveItem(response); + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(path); + saveItem(fakeResponse); } - - saveItem(response); + log.vlog("Successfully created the remote directory ", path, " on OneDrive"); return; } @@ -1538,7 +1658,7 @@ final class SyncEngine } } else { // They are the "same" name wise but different in case sensitivity - log.error("ERROR: A local directory has the same name as another local directory."); + log.error("ERROR: Current directory has a 'case-insensitive match' to an existing directory on OneDrive"); log.error("ERROR: To resolve, rename this local directory: ", absolutePath(path)); log.log("Skipping: ", absolutePath(path)); return; @@ -1551,7 +1671,7 @@ final class SyncEngine Item parent; // Check the database for the parent //enforce(itemdb.selectByPath(dirName(path), defaultDriveId, parent), "The parent item is not in the local database"); - if (itemdb.selectByPath(dirName(path), defaultDriveId, parent)) { + if ((dryRun) || (itemdb.selectByPath(dirName(path), defaultDriveId, parent))) { // Maximum file size upload // https://support.microsoft.com/en-au/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders // 1. 
OneDrive Business say's 15GB @@ -1580,115 +1700,125 @@ final class SyncEngine } catch (OneDriveException e) { if (e.httpStatusCode == 404) { // The file was not found on OneDrive, need to upload it - write("Uploading file ", path, " ..."); + write("Uploading new file ", path, " ..."); JSONValue response; - // Resolve https://github.com/abraunegg/onedrive/issues/37 - if (thisFileSize == 0){ - // We can only upload zero size files via simpleFileUpload regardless of account type - // https://github.com/OneDrive/onedrive-api-docs/issues/53 - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - writeln(" done."); - } catch (OneDriveException e) { - // error uploading file - return; - } - - } else { - // File is not a zero byte file - // Are we using OneDrive Personal or OneDrive Business? - // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - return; - } - } - else throw e; - } + if (!dryRun) { + // Resolve https://github.com/abraunegg/onedrive/issues/37 + if (thisFileSize == 0){ + // We can only upload zero size files via simpleFileUpload regardless of account type + // https://github.com/OneDrive/onedrive-api-docs/issues/53 + try { + response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); writeln(" done."); + } catch 
(OneDriveException e) { + // error uploading file + return; + } + + } else { + // File is not a zero byte file + // Are we using OneDrive Personal or OneDrive Business? + // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) + // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here + if (accountType == "personal"){ + // Original file upload logic + if (thisFileSize <= thresholdFileSize) { + try { + response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); + } catch (OneDriveException e) { + if (e.httpStatusCode == 504) { + // HTTP request returned status code 504 (Gateway Timeout) + // Try upload as a session + try { + response = session.upload(path, parent.driveId, parent.id, baseName(path)); + } catch (OneDriveException e) { + // error uploading file + return; + } + } + else throw e; + } + writeln(" done."); + } else { + // File larger than threshold - use a session to upload + writeln(""); + try { + response = session.upload(path, parent.driveId, parent.id, baseName(path)); + writeln(" done."); + } catch (OneDriveException e) { + // error uploading file + log.vlog("Upload failed with OneDriveException: ", e.msg); + return; + } catch (FileException e) { + log.vlog("Upload failed with File Exception: ", e.msg); + return; + } + } } else { - // File larger than threshold - use a session to upload + // OneDrive Business Account - always use a session to upload writeln(""); try { response = session.upload(path, parent.driveId, parent.id, baseName(path)); writeln(" done."); } catch (OneDriveException e) { // error uploading file - log.vlog("Upload failed with OneDriveException: ", e.msg); - return; - } catch (FileException e) { - log.vlog("Upload failed with File Exception: ", e.msg); return; } } - } else { - // OneDrive Business Account - always use a session to upload - writeln(""); - try { - response = 
session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln(" done."); - } catch (OneDriveException e) { - // error uploading file + } + + // Log action to log file + log.fileOnly("Uploading new file ", path, " ... done."); + + // The file was uploaded + ulong uploadFileSize = response["size"].integer; + + // In some cases the file that was uploaded was not complete, but 'completed' without errors on OneDrive + // This has been seen with PNG / JPG files mainly, which then contributes to generating a 412 error when we attempt to update the metadata + // Validate here that the file uploaded, at least in size, matches in the response to what the size is on disk + if (thisFileSize != uploadFileSize){ + if(disableUploadValidation){ + // Print a warning message + log.log("WARNING: Uploaded file size does not match local file - skipping upload validation"); + } else { + // OK .. the uploaded file does not match and we did not disable this validation + log.log("Uploaded file size does not match local file - upload failure - retrying"); + // Delete uploaded bad file + onedrive.deleteById(response["parentReference"]["driveId"].str, response["id"].str, response["eTag"].str); + // Re-upload + uploadNewFile(path); return; } - } - } - - // Log action to log file - log.fileOnly("Uploading file ", path, " ... 
done."); - - // The file was uploaded - ulong uploadFileSize = response["size"].integer; - - // In some cases the file that was uploaded was not complete, but 'completed' without errors on OneDrive - // This has been seen with PNG / JPG files mainly, which then contributes to generating a 412 error when we attempt to update the metadata - // Validate here that the file uploaded, at least in size, matches in the response to what the size is on disk - if (thisFileSize != uploadFileSize){ - if(disableUploadValidation){ - // Print a warning message - log.log("WARNING: Uploaded file size does not match local file - skipping upload validation"); + } + + // File validation is OK + if ((accountType == "personal") || (thisFileSize == 0)){ + // Update the item's metadata on OneDrive + string id = response["id"].str; + string cTag = response["cTag"].str; + if (exists(path)) { + SysTime mtime = timeLastModified(path).toUTC(); + // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded + uploadLastModifiedTime(parent.driveId, id, cTag, mtime); + } else { + // will be removed in different event! + log.log("File disappeared after upload: ", path); + } + return; } else { - // OK .. 
the uploaded file does not match and we did not disable this validation - log.log("Uploaded file size does not match local file - upload failure - retrying"); - // Delete uploaded bad file - onedrive.deleteById(response["parentReference"]["driveId"].str, response["id"].str, response["eTag"].str); - // Re-upload - uploadNewFile(path); + // OneDrive Business Account - always use a session to upload + // The session includes a Request Body element containing lastModifiedDateTime + // which negates the need for a modify event against OneDrive + saveItem(response); return; } - } - - // File validation is OK - if ((accountType == "personal") || (thisFileSize == 0)){ - // Update the item's metadata on OneDrive - string id = response["id"].str; - string cTag = response["cTag"].str; - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! - log.log("File disappeared after upload: ", path); - } - return; } else { - // OneDrive Business Account - always use a session to upload - // The session includes a Request Body element containing lastModifiedDateTime - // which negates the need for a modify event against OneDrive + // we are --dry-run - simulate the file upload + writeln(" done."); + response = createFakeResponse(path); + // Log action to log file + log.fileOnly("Uploading new file ", path, " ... 
done."); saveItem(response); return; } @@ -1718,37 +1848,48 @@ final class SyncEngine if (localFileModifiedTime > remoteFileModifiedTime){ // local file is newer log.vlog("Requested file to upload is newer than existing file on OneDrive"); - write("Uploading file ", path, " ..."); + write("Uploading modified file ", path, " ..."); JSONValue response; - if (accountType == "personal"){ - // OneDrive Personal account upload handling - if (getSize(path) <= thresholdFileSize) { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - writeln(" done."); + if (!dryRun) { + if (accountType == "personal"){ + // OneDrive Personal account upload handling + if (getSize(path) <= thresholdFileSize) { + response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); + writeln(" done."); + } else { + writeln(""); + response = session.upload(path, parent.driveId, parent.id, baseName(path)); + writeln(" done."); + } + string id = response["id"].str; + string cTag = response["cTag"].str; + SysTime mtime = timeLastModified(path).toUTC(); + // use the cTag instead of the eTag because Onedrive may update the metadata of files AFTER they have been uploaded + uploadLastModifiedTime(parent.driveId, id, cTag, mtime); } else { + // OneDrive Business account upload handling writeln(""); response = session.upload(path, parent.driveId, parent.id, baseName(path)); writeln(" done."); + saveItem(response); } - string id = response["id"].str; - string cTag = response["cTag"].str; - SysTime mtime = timeLastModified(path).toUTC(); - // use the cTag instead of the eTag because Onedrive may update the metadata of files AFTER they have been uploaded - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); } else { - // OneDrive Business account upload handling - writeln(""); - response = session.upload(path, parent.driveId, parent.id, baseName(path)); + // we are --dry-run - simulate the file upload writeln(" done."); + response = 
createFakeResponse(path); + // Log action to log file + log.fileOnly("Uploading modified file ", path, " ... done."); saveItem(response); + return; } // Log action to log file - log.fileOnly("Uploading file ", path, " ... done."); + log.fileOnly("Uploading modified file ", path, " ... done."); } else { // Save the details of the file that we got from OneDrive + // --dry-run safe log.vlog("Updating the local database with details for this file: ", path); saveItem(fileDetailsFromOneDrive); } @@ -1772,57 +1913,59 @@ final class SyncEngine private void uploadDeleteItem(Item item, string path) { log.log("Deleting item from OneDrive: ", path); - - if ((item.driveId == "") && (item.id == "") && (item.eTag == "")){ - // These are empty ... we cannot delete if this is empty .... - log.vdebug("item.driveId, item.id & item.eTag are empty ... need to query OneDrive for values"); - log.vdebug("Checking OneDrive for path: ", path); - JSONValue onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - log.vdebug("OneDrive path details: ", onedrivePathDetails); - item.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - item.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101 - item.eTag = onedrivePathDetails["eTag"].str; // Should be something like aNjM2NjJFRUVGQjY2NjJFMSE5MzUuMA - } - - try { - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - // item.id, item.eTag could not be found on driveId - log.vlog("OneDrive reported: The resource could not be found."); + if (!dryRun) { + // we are not in a --dry-run situation, process deletion to OneDrive + if ((item.driveId == "") && (item.id == "") && (item.eTag == "")){ + // These are empty ... we cannot delete if this is empty .... + log.vdebug("item.driveId, item.id & item.eTag are empty ... 
need to query OneDrive for values"); + log.vdebug("Checking OneDrive for path: ", path); + JSONValue onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path + log.vdebug("OneDrive path details: ", onedrivePathDetails); + item.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 + item.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101 + item.eTag = onedrivePathDetails["eTag"].str; // Should be something like aNjM2NjJFRUVGQjY2NjJFMSE5MzUuMA } - - else { - // Not a 404 response .. is this a 403 response due to OneDrive Business Retention Policy being enabled? - if ((e.httpStatusCode == 403) && (accountType != "personal")) { - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Request was cancelled by event received. If attempting to delete a non-empty folder, it's possible that it's on hold") { - // Issue #338 - Unable to delete OneDrive content when OneDrive Business Retention Policy is enabled - // TODO: We have to recursively delete all files & folders from this path to delete - // WARN: - log.error("\nERROR: Unable to delete the requested remote path from OneDrive: ", path); - log.error("ERROR: This error is due to OneDrive Business Retention Policy being applied"); - log.error("WORKAROUND: Manually delete all files and folders from the above path as per Business Retention Policy\n"); + + try { + onedrive.deleteById(item.driveId, item.id, item.eTag); + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + // item.id, item.eTag could not be found on driveId + log.vlog("OneDrive reported: The resource could not be found."); + } + + else { + // Not a 404 response .. is this a 403 response due to OneDrive Business Retention Policy being enabled? 
+ if ((e.httpStatusCode == 403) && (accountType != "personal")) { + auto errorArray = splitLines(e.msg); + JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); + if (errorMessage["error"]["message"].str == "Request was cancelled by event received. If attempting to delete a non-empty folder, it's possible that it's on hold") { + // Issue #338 - Unable to delete OneDrive content when OneDrive Business Retention Policy is enabled + // TODO: We have to recursively delete all files & folders from this path to delete + // WARN: + log.error("\nERROR: Unable to delete the requested remote path from OneDrive: ", path); + log.error("ERROR: This error is due to OneDrive Business Retention Policy being applied"); + log.error("WORKAROUND: Manually delete all files and folders from the above path as per Business Retention Policy\n"); + } + } else { + // Not a 403 response & OneDrive Business Account / O365 Shared Folder / Library + log.log("\n\nOneDrive returned an error with the following message:\n"); + auto errorArray = splitLines(e.msg); + log.log("Error Message: ", errorArray[0]); + // extract 'message' as the reason + JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); + log.log("Error Reason: ", errorMessage["error"]["message"].str); + return; } - } else { - // Not a 403 response & OneDrive Business Account / O365 Shared Folder / Library - log.log("\n\nOneDrive returned an error with the following message:\n"); - auto errorArray = splitLines(e.msg); - log.log("Error Message: ", errorArray[0]); - // extract 'message' as the reason - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - log.log("Error Reason: ", errorMessage["error"]["message"].str); - return; } } - } - - // delete the reference in the local database - itemdb.deleteById(item.driveId, item.id); - if (item.remoteId != null) { - // If the item is a remote item, delete the reference in the local database - itemdb.deleteById(item.remoteDriveId, 
item.remoteId); + + // delete the reference in the local database + itemdb.deleteById(item.driveId, item.id); + if (item.remoteId != null) { + // If the item is a remote item, delete the reference in the local database + itemdb.deleteById(item.remoteDriveId, item.remoteId); + } } } @@ -2129,4 +2272,75 @@ final class SyncEngine writeln("No pending remote changes - in sync"); } } + + // Create a fake OneDrive response suitable for use with saveItem + JSONValue createFakeResponse(string path) { + import std.digest.sha; + // Generate a simulated JSON response which can be used + // At a minimum we need: + // 1. eTag + // 2. cTag + // 3. fileSystemInfo + // 4. file or folder. if file, hash of file + // 5. id + // 6. name + // 7. parent reference + + SysTime mtime = timeLastModified(path).toUTC(); + + // real id / eTag / cTag are different format for personal / business account + auto sha1 = new SHA1Digest(); + ubyte[] hash1 = sha1.digest(path); + + JSONValue fakeResponse; + + if (isDir(path)) { + // path is a directory + fakeResponse = [ + "id": JSONValue(toHexString(hash1)), + "cTag": JSONValue(toHexString(hash1)), + "eTag": JSONValue(toHexString(hash1)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(defaultDriveId), + "driveType": JSONValue(accountType), + "id": JSONValue(defaultRootId) + ]), + "folder": JSONValue("") + ]; + } else { + // path is a file + // compute file hash - both business and personal responses use quickXorHash + string quickXorHash = computeQuickXorHash(path); + + fakeResponse = [ + "id": JSONValue(toHexString(hash1)), + "cTag": JSONValue(toHexString(hash1)), + "eTag": JSONValue(toHexString(hash1)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + 
"parentReference": JSONValue([ + "driveId": JSONValue(defaultDriveId), + "driveType": JSONValue(accountType), + "id": JSONValue(defaultRootId) + ]), + "file": JSONValue([ + "hashes":JSONValue([ + "quickXorHash": JSONValue(quickXorHash) + ]) + + ]) + ]; + } + + log.vdebug("Generated Fake OneDrive Response: ", fakeResponse); + return fakeResponse; + } } diff --git a/src/upload.d b/src/upload.d index cd5d63b6..eca8e4c3 100644 --- a/src/upload.d +++ b/src/upload.d @@ -111,8 +111,16 @@ struct UploadSession JSONValue upload() { - long offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long; - long fileSize = getSize(session["localPath"].str); + long offset; + long fileSize; + + if ("nextExpectedRanges" in session){ + offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long; + } + + if ("localPath" in session){ + fileSize = getSize(session["localPath"].str); + } // Upload Progress Bar size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1; diff --git a/src/util.d b/src/util.d index 8e4dc545..0387efb1 100644 --- a/src/util.d +++ b/src/util.d @@ -14,7 +14,7 @@ import std.uri; import qxor; static import log; -private string deviceName; +shared string deviceName; static this() { @@ -136,8 +136,9 @@ bool testNetwork() bool readLocalFile(string path) { try { - // attempt to read the first 10MB of the file - read(path,10000000); + // attempt to read up to the first 1 byte of the file + // validates we can 'read' the file based on file permissions + read(path,1); } catch (std.file.FileException e) { // unable to read the new local file log.log("Skipping uploading this file as it cannot be read (file permissions or file corruption): ", path);