diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml
index 32f90b6d..fd5c8625 100644
--- a/.github/workflows/lock.yml
+++ b/.github/workflows/lock.yml
@@ -2,7 +2,7 @@ name: 'Lock Threads'
 
 on:
   schedule:
-    - cron: '0 * * * *'
+    - cron: '19 0 * * *'
 
 jobs:
   lock:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 57c18086..fec08307 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,22 @@
 The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
+## 2.4.13 - 2021-7-14
+### Fixed
+* Support DMD 2.097.0 as compiler
+* Fix to handle OneDrive API Bad Request response when querying if file exists
+* Fix application crash and incorrect handling of --single-directory when syncing a OneDrive Business Shared Folder due to using 'Add Shortcut to My Files'
+* Fix application crash due to invalid UTF-8 sequence in the pathname for the application configuration
+* Fix error message when deleting a large number of files
+* Fix Docker build process to source GOSU keys from updated GPG key location
+* Fix application crash due to a conversion overflow when calculating file offset for session uploads
+* Fix Docker Alpine build failing due to a filesystem permissions issue caused by an incompatibility between the Docker build system and Alpine Linux 3.14
+* Fix that Business Shared Folders with parentheses are ignored
+
+### Updated
+* Updated Lock Bot to run daily
+* Updated documentation (various)
+
 ## 2.4.12 - 2021-5-28
 ### Fixed
 * Fix an unhandled Error 412 when uploading modified files to OneDrive Business Accounts
diff --git a/configure b/configure
index 88a1e9f2..1d06f284 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for onedrive v2.4.13-dev.
+# Generated by GNU Autoconf 2.69 for onedrive v2.4.13.
 #
 # Report bugs to .
 #
@@ -579,8 +579,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='onedrive'
 PACKAGE_TARNAME='onedrive'
-PACKAGE_VERSION='v2.4.13-dev'
-PACKAGE_STRING='onedrive v2.4.13-dev'
+PACKAGE_VERSION='v2.4.13'
+PACKAGE_STRING='onedrive v2.4.13'
 PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
 PACKAGE_URL=''
 
@@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures onedrive v2.4.13-dev to adapt to many kinds of systems.
+\`configure' configures onedrive v2.4.13 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1280,7 +1280,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of onedrive v2.4.13-dev:";;
+     short | recursive ) echo "Configuration of onedrive v2.4.13:";;
   esac
   cat <<\_ACEOF
 
@@ -1393,7 +1393,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-onedrive configure v2.4.13-dev
+onedrive configure v2.4.13
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by onedrive $as_me v2.4.13-dev, which was
+It was created by onedrive $as_me v2.4.13, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
 
@@ -2162,7 +2162,7 @@ fi
 
 
 
-PACKAGE_DATE="May 2021"
+PACKAGE_DATE="July 2021"
 
 
 
@@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by onedrive $as_me v2.4.13-dev, which was
+This file was extended by onedrive $as_me v2.4.13, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES = $CONFIG_FILES
@@ -3212,7 +3212,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-onedrive config.status v2.4.13-dev
+onedrive config.status v2.4.13
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
diff --git a/configure.ac b/configure.ac
index d02092c0..01370a9a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
 dnl - tag the release
 
 AC_PREREQ([2.69])
-AC_INIT([onedrive],[v2.4.13-dev], [https://github.com/abraunegg/onedrive], [onedrive])
+AC_INIT([onedrive],[v2.4.13], [https://github.com/abraunegg/onedrive], [onedrive])
 AC_CONFIG_SRCDIR([src/main.d])
 
diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile
index 521adaf7..019ae9d6 100644
--- a/contrib/docker/Dockerfile
+++ b/contrib/docker/Dockerfile
@@ -5,7 +5,7 @@ RUN yum install -y make git gcc libcurl-devel sqlite-devel pkg-config && \
     yum install -y http://downloads.dlang.org/releases/2.x/2.092.1/dmd-2.092.1-0.fedora.x86_64.rpm && \
     rm -rf /var/cache/yum/ && \
     # gosu installation
-    gpg --keyserver ha.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \
+    gpg --keyserver hkps://keys.openpgp.org --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 \
     && curl -o /usr/local/bin/gosu -SL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-amd64" \
     && curl -o /usr/local/bin/gosu.asc -SL "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-amd64.asc" \
    && gpg --verify /usr/local/bin/gosu.asc \
diff --git a/contrib/docker/Dockerfile-alpine b/contrib/docker/Dockerfile-alpine
index e9356984..5ff4c4b6 100644
--- a/contrib/docker/Dockerfile-alpine
+++ b/contrib/docker/Dockerfile-alpine
@@ -1,5 +1,5 @@
 # -*-Dockerfile-*-
-FROM alpine
+FROM alpine:3.13
 RUN apk add \
     alpine-sdk gnupg xz curl-dev sqlite-dev binutils-gold \
     autoconf automake ldc go
@@ -12,7 +12,7 @@ RUN cd /usr/src/onedrive/ && \
     make && \
     make install
 
-FROM alpine
+FROM alpine:3.13
 ENTRYPOINT ["/entrypoint.sh"]
 RUN apk add --no-cache \
     bash libcurl libgcc shadow sqlite-libs ldc-runtime && \
diff --git a/contrib/gentoo/onedrive-2.4.12.ebuild b/contrib/gentoo/onedrive-2.4.13.ebuild
similarity index 100%
rename from contrib/gentoo/onedrive-2.4.12.ebuild
rename to contrib/gentoo/onedrive-2.4.13.ebuild
diff --git a/contrib/spec/onedrive.spec.in b/contrib/spec/onedrive.spec.in
index 9d0a8b2c..70d1df39 100644
--- a/contrib/spec/onedrive.spec.in
+++ b/contrib/spec/onedrive.spec.in
@@ -6,7 +6,7 @@
 %endif
 
 Name:       onedrive
-Version:    2.4.12
+Version:    2.4.13
 Release:    1%{?dist}
 Summary:    Microsoft OneDrive Client
 Group:      System Environment/Network
diff --git a/docs/Docker.md b/docs/Docker.md
index 3c686a9d..c3f00766 100644
--- a/docs/Docker.md
+++ b/docs/Docker.md
@@ -205,7 +205,7 @@ docker container run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf
 
 **Perform a --logout and re-authenticate:**
 ```bash
-docker container run -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:latest
+docker container run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:latest
 ```
 
 ## Build instructions
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
index e4f57a21..d1d814a1 100644
--- a/docs/INSTALL.md
+++ b/docs/INSTALL.md
@@ -135,7 +135,7 @@ sudo apt install libnotify-dev
 ```
 
 ### Dependencies: CentOS 6.x / RHEL 6.x
-CentOS 6.x and RHEL 6.x reached End of Linfe status on November 30th 2020 and is no longer supported.
+CentOS 6.x and RHEL 6.x reached End of Life status on November 30th 2020 and are no longer supported.
 
 ### Dependencies: Fedora < Version 18 / CentOS 7.x / RHEL 7.x
 ```text
@@ -259,7 +259,7 @@ sudo zypper install libnotify-devel
 
 ## Compilation & Installation
 ### High Level Steps
-1. Install the platfrom dependancies for your Linux OS
+1. Install the platform dependencies for your Linux OS
 2. Activate your DMD or LDC compiler
 3. Clone the GitHub repository, run configure and make, then install
 4. Deactivate your DMD or LDC compiler
diff --git a/docs/known-issues.md b/docs/known-issues.md
index 6f245f3e..e40a9532 100644
--- a/docs/known-issues.md
+++ b/docs/known-issues.md
@@ -17,11 +17,13 @@ Technically, the client is 'working' correctly, as, when moving files, you are '
 If the tracking of moving data to new local directories is requried, it is better to run the client in service mode (`--monitor`) rather than in standalone mode, as the 'move' of files can then be handled at the point when it occurs, so that the data is moved to the new location on OneDrive without the need to be deleted and re-uploaded.
 
 ## Application 'stops' running without any visible reason
-**Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884)
+**Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526)
 
 **Description:**
-When running the client and performing an upload or download operation, the application just stops working without any reason or explanation.
+When running the client and performing an upload or download operation, the application just stops working without any reason or explanation. If `echo $?` is used after the application has exited without visible reason, an exit code of 141 may be returned.
+
+Additionally, this issue has mainly been seen when the client is operating against Microsoft's European Data Centres.
 
 **Explanation:**
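For reference, an exit status of 141 is how the shell reports termination by a signal (128 + signal number), and signal 13 is SIGPIPE on Linux. A minimal, illustrative D sketch of that decoding, not part of the client:

```d
// Illustrative only: decode an exit status such as 141 reported by `echo $?`.
// Statuses above 128 mean the process was killed by a signal: 141 - 128 = 13 (SIGPIPE).
import core.sys.posix.signal : SIGPIPE;
import std.stdio : writeln;

void main()
{
    enum int observedExitStatus = 141;                 // value reported by the shell
    immutable int terminatingSignal = observedExitStatus - 128;
    writeln("terminating signal: ", terminatingSignal,
            terminatingSignal == SIGPIPE ? " (SIGPIPE)" : "");
}
```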
diff --git a/src/log.d b/src/log.d
index 02281bd2..638c719f 100644
--- a/src/log.d
+++ b/src/log.d
@@ -154,6 +154,7 @@ void notify(T...)(T args)
 
 private void logfileWriteLine(T...)(T args)
 {
+    static import std.exception;
     // Write to log file
     string logFileName = .logFilePath ~ .username ~ ".onedrive.log";
     auto currentTime = Clock.currTime();
diff --git a/src/notifications/dnotify.d b/src/notifications/dnotify.d
index 81ebd229..1cc09356 100644
--- a/src/notifications/dnotify.d
+++ b/src/notifications/dnotify.d
@@ -163,7 +163,7 @@ class Notification {
     this(in char[] summary, in char[] body_, in char[] icon="")
     in { assert(is_initted(), "call dnotify.init() before using Notification"); }
-    body {
+    do {
         this.summary = summary;
         this.body_ = body_;
         this.icon = icon;
diff --git a/src/onedrive.d b/src/onedrive.d
index 62a1d5ff..3c459334 100644
--- a/src/onedrive.d
+++ b/src/onedrive.d
@@ -391,6 +391,7 @@ final class OneDriveApi
 
     bool init()
    {
+        static import std.utf;
        // detail what we are using for applicaion identification
        log.vdebug("clientId = ", clientId);
        log.vdebug("companyName = ", companyName);
@@ -416,6 +417,11 @@ final class OneDriveApi
                log.error("Cannot authorize with Microsoft OneDrive Service");
                return false;
            }
+        } catch (std.utf.UTFException e) {
+            // path contains characters which generate a UTF exception
+            log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath);
+            log.error(" Error Reason:", e.msg);
+            return false;
        }
        return true;
    } else {
@@ -425,6 +431,11 @@ final class OneDriveApi
            refreshToken = readText(cfg.refreshTokenFilePath);
        } catch (FileException e) {
            return authorize();
+        } catch (std.utf.UTFException e) {
+            // path contains characters which generate a UTF exception
+            log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath);
+            log.error(" Error Reason:", e.msg);
+            return false;
        }
        return true;
    } else {
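The two `catch (std.utf.UTFException e)` blocks added above address the crash caused by an invalid UTF-8 sequence in the configuration path: `readText()` validates UTF-8 and throws `UTFException` rather than `FileException`. A minimal illustrative sketch of that behaviour, using a hypothetical file name rather than the client's configuration:

```d
// Sketch: readText() validates UTF-8, so a bad byte sequence surfaces as a
// UTFException; catching it alongside FileException avoids an unhandled crash.
import std.file : readText, write, FileException;
import std.utf : UTFException;
import std.stdio : writeln;

void main()
{
    enum tokenFile = "refresh_token.demo";       // hypothetical file name
    immutable ubyte[] badBytes = [0xC3, 0x28];   // an invalid UTF-8 sequence
    write(tokenFile, badBytes);
    try
    {
        auto refreshToken = readText(tokenFile);
        writeln("token: ", refreshToken);
    }
    catch (FileException e)
    {
        writeln("file error: ", e.msg);
    }
    catch (UTFException e)
    {
        writeln("invalid UTF-8 in token file: ", e.msg);
    }
}
```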
@@ -754,7 +765,8 @@ final class OneDriveApi
        return get(url);
    }
 
-    // Return the requested details of the specified path on the specified drive id
+    // Return the requested details of the specified path on the specified drive id and path
+    // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online
    JSONValue getPathDetailsByDriveId(const(char)[] driveId, const(string) path)
    {
        checkAccessTokenExpired();
@@ -765,6 +777,19 @@ final class OneDriveApi
        url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size";
        return get(url);
    }
+
+    // Return the requested details of the specified path on the specified drive id and item id
+    // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online
+    JSONValue getPathDetailsByDriveIdAndItemId(const(char)[] driveId, const(char)[] itemId)
+    {
+        checkAccessTokenExpired();
+        const(char)[] url;
+        // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/";
+        // Required format: /drives/{drive-id}/items/{item-id}
+        url = driveByIdUrl ~ driveId ~ "/items/" ~ itemId;
+        url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size";
+        return get(url);
+    }
 
    // Return the requested details of the specified item id
    // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get
@@ -816,6 +841,7 @@
        auto file = File(filepath, "rb");
        file.seek(offset);
        string contentRange = "bytes " ~ to!string(offset) ~ "-" ~ to!string(offset + offsetSize - 1) ~ "/" ~ to!string(fileSize);
+        log.vdebugNewLine("contentRange: ", contentRange);
        // function scopes
        scope(exit) {
@@ -836,7 +862,8 @@
        http.url = uploadUrl;
        http.addRequestHeader("Content-Range", contentRange);
        http.onSend = data => file.rawRead(data).length;
-        http.contentLength = offsetSize;
+        // convert offsetSize to ulong
+        http.contentLength = to!ulong(offsetSize);
        auto response = perform();
        // TODO: retry on 5xx errors
        checkHttpCode(response);
diff --git a/src/qxor.d b/src/qxor.d
index 0e521a49..2eedac02 100644
--- a/src/qxor.d
+++ b/src/qxor.d
@@ -5,11 +5,11 @@ import std.digest;
 
 // https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md
 struct QuickXor
 {
-    private immutable int widthInBits = 160;
-    private immutable size_t lengthInBytes = (widthInBits - 1) / 8 + 1;
-    private immutable size_t lengthInQWords = (widthInBits - 1) / 64 + 1;
-    private immutable int bitsInLastCell = widthInBits % 64; // 32
-    private immutable int shift = 11;
+    private enum int widthInBits = 160;
+    private enum size_t lengthInBytes = (widthInBits - 1) / 8 + 1;
+    private enum size_t lengthInQWords = (widthInBits - 1) / 64 + 1;
+    private enum int bitsInLastCell = widthInBits % 64; // 32
+    private enum int shift = 11;
     private ulong[lengthInQWords] _data;
     private ulong _lengthSoFar;
diff --git a/src/selective.d b/src/selective.d
index 8dc444a3..6b589d4d 100644
--- a/src/selective.d
+++ b/src/selective.d
@@ -179,6 +179,14 @@ final class SelectiveSync
            if (!name.matchFirst(businessSharedFoldersList).empty) {
                return true;
            } else {
+                // try a direct comparison just in case
+                foreach (userFolder; businessSharedFoldersList) {
+                    if (userFolder == name) {
+                        // direct match
+                        log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name);
+                        return true;
+                    }
+                }
                return false;
            }
        }
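The direct-comparison fallback added to selective.d above covers shared folder names containing regex metacharacters. A configured name with parentheses, used verbatim as a pattern, is not matched by `matchFirst()` even though plain string equality succeeds; an illustrative sketch with a hypothetical folder name:

```d
// Sketch: a folder name containing '(' and ')' used verbatim as a regex pattern
// fails to match itself, which is why the direct comparison fallback is needed.
import std.regex : regex, matchFirst;
import std.stdio : writeln;

void main()
{
    string name = "Reports (2021)";              // hypothetical shared folder name
    auto pattern = regex("Reports (2021)");      // '(' and ')' become a capture group
    writeln(matchFirst(name, pattern).empty);    // true  - the pattern does not match the literal name
    writeln(name == "Reports (2021)");           // true  - a direct comparison still matches
}
```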
diff --git a/src/sync.d b/src/sync.d
index fe6dba2e..8323cd1c 100644
--- a/src/sync.d
+++ b/src/sync.d
@@ -131,7 +131,16 @@ private Item makeItem(const ref JSONValue driveItem)
    // https://github.com/abraunegg/onedrive/issues/11
    if (isItemRemote(driveItem)) {
        // remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default
-        item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
+        // Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI
+        // to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to an application crash
+        // See: https://github.com/abraunegg/onedrive/issues/1533
+        if ("fileSystemInfo" in driveItem["remoteItem"]) {
+            // 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases
+            item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
+        } else {
+            // is a remote item, but 'fileSystemInfo' is missing from 'remoteItem'
+            item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
+        }
    } else {
        // item exists on account default drive id
        item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
@@ -664,6 +673,8 @@ final class SyncEngine
                if (isItemFolder(searchResult)) {
                    // item returned is a shared folder, not a shared file
                    sharedFolderName = searchResult["name"].str;
+                    // Output Shared Folder Name early
+                    log.vdebug("Shared Folder Name: ", sharedFolderName);
                    // Compare this to values in business_shared_folders
                    if(selectiveSync.isSharedFolderMatched(sharedFolderName)){
                        // Folder name matches what we are looking for
@@ -676,7 +687,7 @@ final class SyncEngine
                        // "what if" there are 2 or more folders shared with me have the "same" name?
                        // The folder name will be the same, but driveId will be different
                        // This will then cause these 'shared folders' to cross populate data, which may not be desirable
-                        log.vdebug("Shared Folder Name: ", sharedFolderName);
+                        log.vdebug("Shared Folder Name: MATCHED to any entry in 'business_shared_folders'");
                        log.vdebug("Parent Drive Id: ", searchResult["remoteItem"]["parentReference"]["driveId"].str);
                        log.vdebug("Shared Item Id: ", searchResult["remoteItem"]["id"].str);
@@ -765,8 +776,10 @@ final class SyncEngine
                                log.vlog("WARNING: Conflict Shared By: ", sharedByName);
                            }
                        }
-                    }
-                }
+                    }
+                } else {
+                    log.vdebug("Shared Folder Name: NO MATCH to any entry in 'business_shared_folders'");
+                }
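The makeItem() change at the top of this file's diff only dereferences `fileSystemInfo` inside `remoteItem` when that key is actually present; in std.json the `in` operator yields a pointer that is null for a missing key. A small illustrative sketch with hypothetical JSON:

```d
// Sketch: "key" in JSONValue yields a pointer (null when absent), so the
// presence of remoteItem.fileSystemInfo can be tested before it is used.
import std.json : parseJSON;
import std.stdio : writeln;

void main()
{
    auto driveItem = parseJSON(`{
        "remoteItem": { "id": "ABC123" },
        "fileSystemInfo": { "lastModifiedDateTime": "2021-07-14T00:00:00Z" }
    }`);
    if ("fileSystemInfo" in driveItem["remoteItem"]) {
        writeln("use remoteItem.fileSystemInfo");
    } else {
        writeln("fall back to: ", driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
    }
}
```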
            } else {
                // not a folder, is this a file?
                if (isItemFile(searchResult)) {
@@ -816,6 +829,7 @@ final class SyncEngine
        string driveId = defaultDriveId;
        string rootId = defaultRootId;
        string folderId;
+        string itemId;
        JSONValue onedrivePathDetails;
 
        // Check OneDrive Business Shared Folders, if configured to do so
@@ -823,29 +837,61 @@ final class SyncEngine
            log.vlog("Attempting to sync OneDrive Business Shared Folders");
            // query OneDrive Business Shared Folders shared with me
            JSONValue graphQuery = onedrive.getSharedWithMe();
-
            if (graphQuery.type() == JSONType.object) {
                // valid response from OneDrive
+                string sharedFolderName;
                foreach (searchResult; graphQuery["value"].array) {
-                    string sharedFolderName = searchResult["name"].str;
+                    // set sharedFolderName
+                    sharedFolderName = searchResult["name"].str;
+                    // Configure additional logging items for this array element
+                    string sharedByName;
+                    string sharedByEmail;
+
+                    // Extra details for verbose logging
+                    if ("sharedBy" in searchResult["remoteItem"]["shared"]) {
+                        if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
+                            sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str;
+                        }
+                        if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) {
+                            sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str;
+                        }
+                    }
+
                    // Compare this to values in business_shared_folders
                    if(selectiveSync.isSharedFolderMatched(sharedFolderName)){
-                        // Folder matches a user configured sync entry
-                        string[] allowedPath;
-                        allowedPath ~= sharedFolderName;
+                        // Matched sharedFolderName to item in business_shared_folders
+                        log.vdebug("Matched sharedFolderName in business_shared_folders: ", sharedFolderName);
                        // But is this shared folder what we are looking for as part of --single-directory?
-                        if (selectiveSync.isPathIncluded(path,allowedPath)) {
+                        // User could be using 'directory' or 'directory/directory1/directory2/directory3/'
+                        // Can we find 'sharedFolderName' in the given 'path'
+                        if (canFind(path, sharedFolderName)) {
+                            // Found 'sharedFolderName' in the given 'path'
+                            log.vdebug("Matched 'sharedFolderName' in the given 'path'");
+                            // What was the matched folder JSON
+                            log.vdebug("Matched sharedFolderName in business_shared_folders JSON: ", searchResult);
                            // Path we want to sync is on a OneDrive Business Shared Folder
                            // Set the correct driveId
                            driveId = searchResult["remoteItem"]["parentReference"]["driveId"].str;
+                            // Set this items id
+                            itemId = searchResult["remoteItem"]["id"].str;
                            log.vdebug("Updated the driveId to a new value: ", driveId);
+                            log.vdebug("Updated the itemId to a new value: ", itemId);
                            // Keep the driveIDsArray with unique entries only
                            if (!canFind(driveIDsArray, driveId)) {
                                // Add this drive id to the array to search with
                                driveIDsArray ~= driveId;
                            }
-                        }
-                    }
+
+                            // Log who shared this to assist with sync data correlation
+                            if ((sharedByName != "") && (sharedByEmail != "")) {
+                                log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")");
+                            } else {
+                                if (sharedByName != "") {
+                                    log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName);
+                                }
+                            }
+                        }
+                    }
                }
            }
        } else {
            // Log that an invalid JSON object was returned
@@ -856,13 +902,54 @@ final class SyncEngine
        // Test if the path we are going to sync from actually exists on OneDrive
        log.vlog("Getting path details from OneDrive ...");
        try {
-            onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path);
+            // Need to use different calls here - one call for majority, another if this is a OneDrive Business Shared Folder
+            if (!syncBusinessFolders){
+                // Not a OneDrive Business Shared Folder
+                log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) with: ", driveId, ", ", path);
+                onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path);
+            } else {
+                // OneDrive Business Shared Folder - Use another API call using the folders correct driveId and itemId
+                log.vdebug("Calling onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId) with: ", driveId, ", ", itemId);
+                onedrivePathDetails = onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId);
+            }
        } catch (OneDriveException e) {
            log.vdebug("onedrivePathDetails = onedrive.getPathDetails(path) generated a OneDriveException");
            if (e.httpStatusCode == 404) {
-                // The directory was not found
-                log.error("ERROR: The requested single directory to sync was not found on OneDrive");
-                return;
+                // The directory was not found
+                if (syncBusinessFolders){
+                    // 404 was returned when trying to use a specific driveId and itemId .. which 'should' work .... but didn't
+                    // Try the query with the path as a backup failsafe
+                    log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) as backup with: ", driveId, ", ", path);
+                    try {
+                        // try calling using the path
+                        onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path);
+                    } catch (OneDriveException e) {
+
+                        if (e.httpStatusCode == 404) {
+                            log.error("ERROR: The requested single directory to sync was not found on OneDrive - Check folder permissions and sharing status with folder owner");
+                            return;
+                        }
+
+                        if (e.httpStatusCode == 429) {
+                            // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
+                            handleOneDriveThrottleRequest();
+                            // Retry original request by calling function again to avoid replicating any further error handling
+                            log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);");
+                            applyDifferencesSingleDirectory(path);
+                            // return back to original call
+                            return;
+                        }
+
+                        if (e.httpStatusCode >= 500) {
+                            // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
+                            return;
+                        }
+                    }
+                } else {
+                    // Not a OneDrive Business Shared folder operation
+                    log.error("ERROR: The requested single directory to sync was not found on OneDrive");
+                    return;
+                }
            }
 
            if (e.httpStatusCode == 429) {
@@ -2613,6 +2700,7 @@ final class SyncEngine
    // downloads a File resource
    private void downloadFileItem(const ref Item item, const(string) path)
    {
+        static import std.exception;
        assert(item.type == ItemType.file);
        write("Downloading file ", path, " ... ");
        JSONValue fileDetails;
@@ -2986,14 +3074,26 @@ final class SyncEngine
            log.vdebug("Processing DB entries for this driveId: ", driveId);
            // Database scan of every item in DB for the given driveId based on the root parent for that drive
            if ((syncBusinessFolders) && (driveId != defaultDriveId)) {
-                // There could be multiple shared folders all from this same driveId
-                foreach(dbItem; itemdb.selectByDriveId(driveId)) {
-                    // Does it still exist on disk in the location the DB thinks it is
-                    uploadDifferences(dbItem);
+                // There could be multiple shared folders all from this same driveId - are we doing a single directory sync?
+                if (cfg.getValueString("single_directory") != ""){
+                    // Limit the local filesystem check to just the requested directory
+                    if (itemdb.selectByPath(path, driveId, item)) {
+                        // Does it still exist on disk in the location the DB thinks it is
+                        log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
+                        uploadDifferences(item);
+                    }
+                } else {
+                    // check everything associated with each driveId we know about
+                    foreach(dbItem; itemdb.selectByDriveId(driveId)) {
+                        // Does it still exist on disk in the location the DB thinks it is
+                        log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
+                        uploadDifferences(dbItem);
+                    }
                }
            } else {
                if (itemdb.selectByPath(path, driveId, item)) {
                    // Does it still exist on disk in the location the DB thinks it is
+                    log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
                    uploadDifferences(item);
                }
            }
@@ -3074,16 +3174,26 @@ final class SyncEngine
            log.vdebug("Processing DB entries for this driveId: ", driveId);
            // Database scan of every item in DB for the given driveId based on the root parent for that drive
            if ((syncBusinessFolders) && (driveId != defaultDriveId)) {
-                // There could be multiple shared folders all from this same driveId
-                foreach(dbItem; itemdb.selectByDriveId(driveId)) {
-                    // Does it still exist on disk in the location the DB thinks it is
-                    log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
-                    uploadDifferences(dbItem);
+                // There could be multiple shared folders all from this same driveId - are we doing a single directory sync?
+                if (cfg.getValueString("single_directory") != ""){
+                    // Limit the local filesystem check to just the requested directory
+                    if (itemdb.selectByPath(path, driveId, item)) {
+                        // Does it still exist on disk in the location the DB thinks it is
+                        log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
+                        uploadDifferences(item);
+                    }
+                } else {
+                    // check everything associated with each driveId we know about
+                    foreach(dbItem; itemdb.selectByDriveId(driveId)) {
+                        // Does it still exist on disk in the location the DB thinks it is
+                        log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
+                        uploadDifferences(dbItem);
+                    }
                }
            } else {
                if (itemdb.selectByPath(path, driveId, item)) {
                    // Does it still exist on disk in the location the DB thinks it is
-                    log.vdebug("Calling uploadDifferences(item) as item is present in local cache DB");
+                    log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB");
                    uploadDifferences(item);
                }
            }
@@ -3853,6 +3963,7 @@ final class SyncEngine
    // upload new items to OneDrive
    private void uploadNewItems(const(string) path)
    {
+        static import std.utf;
        import std.range : walkLength;
        import std.uni : byGrapheme;
        // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders
@@ -4420,14 +4531,24 @@ final class SyncEngine
            // test if the local path exists on OneDrive
            fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path);
        } catch (OneDriveException e) {
-            // A 404 is the expected response if the file was not present
+            // log that we generated an exception
            log.vdebug("fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); generated a OneDriveException");
-            if (e.httpStatusCode == 401) {
-                // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error'
-                log.vlog("Skipping item - OneDrive returned a 'HTTP 401 - Unauthorized' when attempting to query if file exists");
+            // OneDrive returned a 'HTTP/1.1 400 Bad Request'
+            // If the 'path', when encoded, cannot be interpreted by the OneDrive API, the API will generate a 400 error
+            if (e.httpStatusCode == 400) {
+                log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path)));
+                log.vlog("Skipping item - OneDrive returned a 'HTTP 400 - Bad Request' when attempting to query if file exists");
+                log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path)));
+                uploadFailed = true;
                return;
            }
-
+            // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error'
+            if (e.httpStatusCode == 401) {
+                log.vlog("Skipping item - OneDrive returned a 'HTTP 401 - Unauthorized' when attempting to query if file exists");
+                uploadFailed = true;
+                return;
+            }
+            // A 404 is the expected response if the file was not present
            if (e.httpStatusCode == 404) {
                // The file was not found on OneDrive, need to upload it
                // Check if file should be skipped based on skip_size config
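In the hunk above, the HTTP status returned by the pre-upload "does this file exist" query drives three outcomes: 400 and 401 skip the item and mark the upload as failed, while 404 means the file is absent and the upload proceeds. A condensed, illustrative sketch of that dispatch using simplified types, not the client's code:

```d
// Sketch: simplified dispatch on the HTTP status of the pre-upload existence check.
import std.stdio : writeln;

enum UploadAction { skipAsFailed, uploadNewFile }

UploadAction classify(int httpStatusCode)
{
    switch (httpStatusCode)
    {
        case 400, 401:          // Bad Request / Unauthorized: skip and flag the upload as failed
            return UploadAction.skipAsFailed;
        case 404:               // not on OneDrive yet: proceed with the new file upload
            return UploadAction.uploadNewFile;
        default:                // 429, 5xx and friends are handled by their own paths
            return UploadAction.skipAsFailed;
    }
}

void main()
{
    writeln(classify(404));    // uploadNewFile
    writeln(classify(400));    // skipAsFailed
}
```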
@@ -4753,7 +4874,7 @@ final class SyncEngine
                    return;
                }
            }
-
+            // OneDrive returned a '429 - Too Many Requests'
            if (e.httpStatusCode == 429) {
                // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed.
                handleOneDriveThrottleRequest();
@@ -4763,9 +4884,8 @@ final class SyncEngine
                // return back to original call
                return;
            }
-
+            // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
            if (e.httpStatusCode >= 500) {
-                // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged
                uploadFailed = true;
                return;
            }
@@ -5222,7 +5342,7 @@ final class SyncEngine
                flagAsBigDelete = true;
                if (!cfg.getValueBool("force")) {
                    log.error("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve data on OneDrive");
-                    log.error("ERROR: To delete delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value");
+                    log.error("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value");
                    // Must exit here to preserve data on OneDrive
                    exit(-1);
                }
@@ -5384,6 +5504,8 @@ final class SyncEngine
                // Log that we skipping adding item to the local DB and the reason why
                log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured");
            } else {
+                // What is the JSON item we are trying to create a DB record with?
+                log.vdebug("Creating DB item from this JSON: ", jsonItem);
                // Takes a JSON input and formats to an item which can be used by the database
                Item item = makeItem(jsonItem);
                // Add to the local database
@@ -6576,6 +6698,7 @@ final class SyncEngine
    // Query itemdb.computePath() and catch potential assert when DB consistency issue occurs
    string computeItemPath(string thisDriveId, string thisItemId)
    {
+        static import core.exception;
        string calculatedPath;
        log.vdebug("Attempting to calculate local filesystem path for ", thisDriveId, " and ", thisItemId);
        try {
diff --git a/src/upload.d b/src/upload.d
index f49d0cc1..012598a0 100644
--- a/src/upload.d
+++ b/src/upload.d
@@ -173,6 +173,7 @@ struct UploadSession
        Progress p = new Progress(iteration);
        p.title = "Uploading";
        long fragmentCount = 0;
+        long fragSize = 0;
 
        // Initialise the download bar at 0%
        p.next();
@@ -181,7 +182,23 @@ struct UploadSession
            fragmentCount++;
            log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration);
            p.next();
-            long fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
+            log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize );
+            fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
+            log.vdebugNewLine("Using fragSize: ", fragSize);
+
+            // fragSize must not be a negative value
+            if (fragSize < 0) {
+                // Session upload will fail
+                // not a JSON object - fragment upload failed
+                log.vlog("File upload session failed - invalid calculation of fragment size");
+                if (exists(sessionFilePath)) {
+                    remove(sessionFilePath);
+                }
+                // set response to null as error
+                response = null;
+                return response;
+            }
+
            // If the resume upload fails, we need to check for a return code here
            try {
                response = onedrive.uploadFragment(