Initial commit of code re-write for v2.5.0

* Initial commit of v2.5.0-alpha-0 code changes, supporting the fix for #232
abraunegg 2023-08-27 09:35:51 +10:00
parent 43b0bed4cb
commit eb9d637eba
20 changed files with 8126 additions and 11330 deletions

CHANGELOG.md

@ -2,43 +2,6 @@
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## 2.4.25 - 2023-06-21
### Fixed
* Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue)
* Fixed that the running version obsolete flag (due to the above issue) was causing the version to be falsely flagged as obsolete
* Fixed that zero-byte files do not have a hash as reported by the OneDrive API and thus should not generate an error message
### Updated
* Update to Debian Docker file to resolve Docker image Operating System reported vulnerabilities
* Update to Alpine Docker file to resolve Docker image Operating System reported vulnerabilities
* Update to Fedora Docker file to resolve Docker image Operating System reported vulnerabilities
* Updated documentation (various)
## 2.4.24 - 2023-06-20
### Fixed
* Fix for extra encoded quotation marks surrounding Docker environment variables
* Fix webhook subscription creation for SharePoint Libraries
* Fix that an HTTP 504 - Gateway Timeout causes local files to be deleted when using --download-only & --cleanup-local-files mode
* Fix that folders are renamed despite using --dry-run
* Fix deprecation warnings with dmd 2.103.0
* Fix error that the application is unable to perform a database vacuum: out of memory when exiting
### Removed
* Remove sha1 from being used by the client as this is being deprecated by Microsoft in July 2023
* Complete the removal of crc32 elements
### Added
* Added ONEDRIVE_SINGLE_DIRECTORY configuration capability to Docker
* Added --get-file-link shell completion
* Added configuration to allow HTTP session timeout(s) tuning via config (taken from v2.5.x)
### Updated
* Update to Debian Docker file to resolve Docker image Operating System reported vulnerabilities
* Update to Alpine Docker file to resolve Docker image Operating System reported vulnerabilities
* Update to Fedora Docker file to resolve Docker image Operating System reported vulnerabilities
* Updated cgi.d to commit 680003a - the last upstream change before the `core.d` dependency requirement
* Updated documentation (various)
## 2.4.23 - 2023-01-06
### Fixed
* Fixed RHEL7, RHEL8 and RHEL9 Makefile and SPEC file compatibility

Makefile

@ -34,7 +34,7 @@ DEBUG = @DEBUG@
DC = @DC@
DC_TYPE = @DC_TYPE@
DCFLAGS = @DCFLAGS@
DCFLAGS += -w -g -O -J.
DCFLAGS += -wi -g -O -J.
ifeq ($(DEBUG),yes)
ifeq ($(DC_TYPE),dmd)
DCFLAGS += -debug -gs
@ -66,20 +66,19 @@ RHEL_VERSION = 0
endif
SOURCES = \
src/config.d \
src/itemdb.d \
src/log.d \
src/main.d \
src/monitor.d \
src/onedrive.d \
src/qxor.d \
src/selective.d \
src/sqlite.d \
src/sync.d \
src/upload.d \
src/config.d \
src/log.d \
src/util.d \
src/qxor.d \
src/curlEngine.d \
src/onedrive.d \
src/sync.d \
src/itemdb.d \
src/sqlite.d \
src/clientSideFiltering.d \
src/progress.d \
src/arsd/cgi.d
src/monitor.d
ifeq ($(NOTIFICATIONS),yes)
SOURCES += src/notifications/notify.d src/notifications/dnotify.d

config

@ -44,6 +44,7 @@
# sync_dir_permissions = "700"
# sync_file_permissions = "600"
# rate_limit = "131072"
# operation_timeout = "3600"
# webhook_enabled = "false"
# webhook_public_url = ""
# webhook_listening_host = ""
@ -54,8 +55,3 @@
# display_running_config = "false"
# read_only_auth_scope = "false"
# cleanup_local_files = "false"
# operation_timeout = "3600"
# dns_timeout = "60"
# connect_timeout = "10"
# data_timeout = "600"
# ip_protocol_version = "0"
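Note: this pair of hunks moves operation_timeout up into the body of the template, while the finer-grained dns_timeout, connect_timeout, data_timeout and ip_protocol_version entries are dropped from the shipped example (the new curl engine below still accepts all of these values as initialise() parameters). A hedged sketch of a user 'config' exercising the two knobs that remain in the template:

# hypothetical user 'config' - values are the documented defaults shown above
rate_limit = "131072"
operation_timeout = "3600"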

configure (vendored)

@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for onedrive v2.4.25.
# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-alpha-0.
#
# Report bugs to <https://github.com/abraunegg/onedrive>.
#
@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='onedrive'
PACKAGE_TARNAME='onedrive'
PACKAGE_VERSION='v2.4.25'
PACKAGE_STRING='onedrive v2.4.25'
PACKAGE_VERSION='v2.5.0-alpha-0'
PACKAGE_STRING='onedrive v2.5.0-alpha-0'
PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
PACKAGE_URL=''
@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems.
\`configure' configures onedrive v2.5.0-alpha-0 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1280,7 +1280,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of onedrive v2.4.25:";;
short | recursive ) echo "Configuration of onedrive v2.5.0-alpha-0:";;
esac
cat <<\_ACEOF
@ -1393,7 +1393,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
onedrive configure v2.4.25
onedrive configure v2.5.0-alpha-0
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by onedrive $as_me v2.4.25, which was
It was created by onedrive $as_me v2.5.0-alpha-0, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@ -2162,7 +2162,7 @@ fi
PACKAGE_DATE="June 2023"
PACKAGE_DATE="August 2023"
@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by onedrive $as_me v2.4.25, which was
This file was extended by onedrive $as_me v2.5.0-alpha-0, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@ -3212,7 +3212,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
onedrive config.status v2.4.25
onedrive config.status v2.5.0-alpha-0
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

configure.ac

@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
dnl - tag the release
AC_PREREQ([2.69])
AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive])
AC_INIT([onedrive],[v2.5.0-alpha-0], [https://github.com/abraunegg/onedrive], [onedrive])
AC_CONFIG_SRCDIR([src/main.d])

src/clientSideFiltering.d (new file)

@ -0,0 +1,388 @@
// What is this module called?
module clientSideFiltering;
// What does this module require to function?
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
class ClientSideFiltering {
// Class variables
ApplicationConfig appConfig;
string[] paths;
string[] businessSharedItemsList;
Regex!char fileMask;
Regex!char directoryMask;
bool skipDirStrictMatch = false;
bool skipDotfiles = false;
this(ApplicationConfig appConfig) {
// Configure the class variable to consume the application configuration
this.appConfig = appConfig;
}
// Initialise the required items
bool initialise() {
log.vdebug("Configuring Client Side Filtering (Selective Sync)");
// Load the sync_list file if it exists
if (exists(appConfig.syncListFilePath)){
loadSyncList(appConfig.syncListFilePath);
}
// Load the Business Shared Items file if it exists
if (exists(appConfig.businessSharedItemsFilePath)){
loadBusinessSharedItems(appConfig.businessSharedItemsFilePath);
}
// Configure skip_dir, skip_file, skip_dir_strict_match & skip_dotfiles from config entries
// Handle skip_dir configuration in config file
log.vdebug("Configuring skip_dir ...");
log.vdebug("skip_dir: ", appConfig.getValueString("skip_dir"));
setDirMask(appConfig.getValueString("skip_dir"));
// Was --skip-dir-strict-match configured?
log.vdebug("Configuring skip_dir_strict_match ...");
log.vdebug("skip_dir_strict_match: ", appConfig.getValueBool("skip_dir_strict_match"));
if (appConfig.getValueBool("skip_dir_strict_match")) {
setSkipDirStrictMatch();
}
// Was --skip-dot-files configured?
log.vdebug("Configuring skip_dotfiles ...");
log.vdebug("skip_dotfiles: ", appConfig.getValueBool("skip_dotfiles"));
if (appConfig.getValueBool("skip_dotfiles")) {
setSkipDotfiles();
}
// Handle skip_file configuration in config file
log.vdebug("Configuring skip_file ...");
// Validate skip_file to ensure that this does not contain an invalid configuration
// Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process.
foreach(entry; appConfig.getValueString("skip_file").split("|")){
if (entry == ".*") {
// invalid entry element detected
log.logAndNotify("ERROR: Invalid skip_file entry '.*' detected");
return false;
}
}
// All skip_file entries are valid
log.vdebug("skip_file: ", appConfig.getValueString("skip_file"));
setFileMask(appConfig.getValueString("skip_file"));
// All configured OK
return true;
}
// Load sync_list file if it exists
void loadSyncList(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
// load business_shared_folders file
void loadBusinessSharedItems(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
businessSharedItemsList ~= buildNormalizedPath(line);
}
file.close();
}
// Configure the regex that will be used for 'skip_file'
void setFileMask(const(char)[] mask) {
fileMask = wild2regex(mask);
log.vdebug("Selective Sync File Mask: ", fileMask);
}
// Configure the regex that will be used for 'skip_dir'
void setDirMask(const(char)[] dirmask) {
directoryMask = wild2regex(dirmask);
log.vdebug("Selective Sync Directory Mask: ", directoryMask);
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch() {
skipDirStrictMatch = true;
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles() {
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles() {
return skipDotfiles;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path) {
// Delegate the 'sync_list' inclusion / exclusion test to isPathExcluded()
return isPathExcluded(path, paths);
}
// config file skip_dir parameter
bool isDirNameExcluded(string name) {
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
log.vdebug("skip_dir evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(directoryMask).empty) {
log.vdebug("'!name.matchFirst(directoryMask).empty' returned true = matched");
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
log.vdebug("No Strict Matching Enforced");
// Test the entire path working backwards from child
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(directoryMask).empty) {
log.vdebug("'!checkPath.matchFirst(directoryMask).empty' returned true = matched");
return true;
}
}
}
} else {
log.vdebug("Strict Matching Enforced - No Match");
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name) {
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
log.vdebug("skip_file evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(fileMask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(fileMask).empty) {
return true;
}
}
// no match
return false;
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
private bool isPathExcluded(string path, string[] allowedPaths) {
// function variables
bool exclude = false;
bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
log.vdebug("Evaluation against 'sync_list' for this path: ", path);
log.vdebug("[S]exclude = ", exclude);
log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[S]excludeMatched = ", excludeMatched);
// unless path is an exact match, entire sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is the path an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
log.vdebug("exact path match");
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded");
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag exludeDirectMatch so that a 'wildcard match' will not override this exclude
exludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
log.vdebug("something 'common' matches the input path");
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded");
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem/sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: parental path match");
finalResult = false;
// parental path matches, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' for '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !exludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match");
finalResult = false;
} else {
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
log.vdebug("[F]exclude = ", exclude);
log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[F]excludeMatched = ", excludeMatched);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (exludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED");
} else {
log.vdebug("Evaluation against 'sync_list' final result: included for sync");
}
return finalResult;
}
}
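To illustrate how the prefix handling in isPathExcluded() above plays out, a hedged sketch follows: a hypothetical two-line sync_list and a minimal driver. Note that the exclusion must be listed before the inclusion that would otherwise match it, because an inclusion parental-path match breaks out of the evaluation loop. 'cfg' is assumed to be an already-populated ApplicationConfig whose syncListFilePath points at this file.

# hypothetical sync_list: exclude Documents/Temp, include everything else under Documents
!/Documents/Temp
/Documents

// Minimal driver sketch (assumes 'cfg' as described above)
auto filter = new ClientSideFiltering(cfg);
if (filter.initialise()) {
    // excluded: '!/Documents/Temp' flags excludeMatched before '/Documents' can include it
    assert(filter.isPathExcludedViaSyncList("Documents/Temp/scratch.txt"));
    // included: parental path match against '/Documents'
    assert(!filter.isPathExcludedViaSyncList("Documents/report.docx"));
}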

File diff suppressed because it is too large

src/curlEngine.d (new file)

@ -0,0 +1,98 @@
// What is this module called?
module curlEngine;
// What does this module require to function?
import std.net.curl;
import etc.c.curl: CurlOption;
import std.datetime;
// What other modules that we have created do we need to import?
import log;
class CurlEngine {
HTTP http;
this() {
http = HTTP();
}
void initialise(long dnsTimeout, long connectTimeout, long dataTimeout, long operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, long userRateLimit, long protocolVersion) {
// Curl Timeout Handling
// libcurl dns_cache_timeout
http.dnsTimeout = (dur!"seconds"(dnsTimeout));
// Timeout for HTTPS connections
http.connectTimeout = (dur!"seconds"(connectTimeout));
// Data Timeout for HTTPS connections
http.dataTimeout = (dur!"seconds"(dataTimeout));
// maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
http.operationTimeout = (dur!"seconds"(operationTimeout));
// Specify how many redirects should be allowed
http.maxRedirects(maxRedirects);
// Debug HTTPS
http.verbose = httpsDebug;
// Use the configured 'user_agent' value
http.setUserAgent = userAgent;
// What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6
http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// What version of HTTP protocol do we use?
// Curl >= 7.62.0 defaults to http2 for a significant number of operations
if (httpProtocol) {
// Downgrade curl to use HTTP 1.1 for all operations
log.vlog("Downgrading all HTTP operations to HTTP/1.1 due to user configuration");
// Downgrade to HTTP 1.1 - an http_version value of 2 corresponds to CURL_HTTP_VERSION_1_1
http.handle.set(CurlOption.http_version,2);
} else {
// Use curl defaults
log.vdebug("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)");
}
// Configure upload / download rate limits if configured
// 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts
// A 0 value means rate is unlimited, and is the curl default
if (userRateLimit > 0) {
// User configured rate limit
log.log("User Configured Rate Limit: ", userRateLimit);
// If user provided rate limit is < 131072, flag that this is too low, setting to the minimum of 131072
if (userRateLimit < 131072) {
// user provided limit too low
log.log("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. Overriding to default minimum of 131072 (128KB/s)");
userRateLimit = 131072;
}
// set rate limit
http.handle.set(CurlOption.max_send_speed_large,userRateLimit);
http.handle.set(CurlOption.max_recv_speed_large,userRateLimit);
}
// Explicitly set these libcurl options
// https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html
// Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals
http.handle.set(CurlOption.nosignal,0);
// https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html
// Set TCP_NODELAY to 0 so that Nagle's algorithm remains enabled
http.handle.set(CurlOption.tcp_nodelay,0);
// https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html
// Setting forbid_reuse to 0 ensures that connections ARE reused
http.handle.set(CurlOption.forbid_reuse,0);
if (httpsDebug) {
// Output what options we are using so that in the debug log this can be tracked
log.vdebug("http.dnsTimeout = ", dnsTimeout);
log.vdebug("http.connectTimeout = ", connectTimeout);
log.vdebug("http.dataTimeout = ", dataTimeout);
log.vdebug("http.operationTimeout = ", operationTimeout);
log.vdebug("http.maxRedirects = ", maxRedirects);
}
}
void setMethodPost(){
http.method = HTTP.Method.post;
}
void setMethodPatch(){
http.method = HTTP.Method.patch;
}
}
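As a usage sketch of the engine above - the timeout and rate-limit figures mirror the defaults that were visible in the config template earlier in this commit, while maxRedirects and the user-agent string are invented placeholders:

// Minimal sketch: construct and initialise a CurlEngine
auto curl = new CurlEngine();
curl.initialise(
    60,    // dnsTimeout - the config template's 'dns_timeout' default
    10,    // connectTimeout - 'connect_timeout' default
    600,   // dataTimeout - 'data_timeout' default
    3600,  // operationTimeout - 'operation_timeout' default
    5,     // maxRedirects - hypothetical value, not taken from this diff
    false, // httpsDebug
    "onedrive-client-placeholder", // userAgent - invented placeholder
    false, // httpProtocol: true forces the HTTP/1.1 downgrade described above
    0,     // userRateLimit: 0 = unlimited; non-zero values below 131072 are raised to it
    0);    // protocolVersion: 0 = IPv4 + IPv6
curl.setMethodPost(); // a subsequent request on 'http' will use the POST method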

src/itemdb.d

@ -1,3 +1,7 @@
// What is this module called?
module itemdb;
// What does this module require to function?
import std.datetime;
import std.exception;
import std.path;
@ -5,13 +9,19 @@ import std.string;
import std.stdio;
import std.algorithm.searching;
import core.stdc.stdlib;
import std.json;
import std.conv;
// What other modules that we have created do we need to import?
import sqlite;
static import log;
import util;
import log;
enum ItemType {
file,
dir,
remote
remote,
unknown
}
struct Item {
@ -28,12 +38,127 @@ struct Item {
string remoteDriveId;
string remoteId;
string syncStatus;
string size;
}
final class ItemDatabase
{
// Construct an Item struct from a JSON driveItem
Item makeDatabaseItem(JSONValue driveItem) {
Item item = {
id: driveItem["id"].str,
name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Biz
eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Biz
cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Biz)
};
// OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834
// OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive
if(isItemDeleted(driveItem)) {
// Set mtime to SysTime(0)
item.mtime = SysTime(0);
} else {
// Item is not in a deleted state
// Resolve 'Key not found: fileSystemInfo' when the item is a remote item
// https://github.com/abraunegg/onedrive/issues/11
if (isItemRemote(driveItem)) {
// remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default
// Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI
// to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to an application crash
// See: https://github.com/abraunegg/onedrive/issues/1533
if ("fileSystemInfo" in driveItem["remoteItem"]) {
// 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases
item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
} else {
// is a remote item, but 'fileSystemInfo' is missing from 'remoteItem'
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
} else {
// Does fileSystemInfo exist at all ?
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
}
// Set this item object type
bool typeSet = false;
if (isItemFile(driveItem)) {
// 'file' object exists in the JSON
log.vdebug("Flagging object as a file");
typeSet = true;
item.type = ItemType.file;
}
if (isItemFolder(driveItem)) {
// 'folder' object exists in the JSON
log.vdebug("Flagging object as a directory");
typeSet = true;
item.type = ItemType.dir;
}
if (isItemRemote(driveItem)) {
// 'remote' object exists in the JSON
log.vdebug("Flagging object as a remote");
typeSet = true;
item.type = ItemType.remote;
}
// root and remote items do not have parentReference
if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) {
item.driveId = driveItem["parentReference"]["driveId"].str;
if (hasParentReferenceId(driveItem)) {
item.parentId = driveItem["parentReference"]["id"].str;
}
}
// extract the file hash and file size
if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) {
// Get file size
if (hasFileSize(driveItem)) {
item.size = to!string(driveItem["size"].integer);
// Get quickXorHash as default
if ("quickXorHash" in driveItem["file"]["hashes"]) {
item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str;
} else {
log.vdebug("quickXorHash is missing from ", driveItem["id"].str);
}
// If quickXorHash is empty ..
if (item.quickXorHash.empty) {
// Is there a sha256Hash?
if ("sha256Hash" in driveItem["file"]["hashes"]) {
item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str;
} else {
log.vdebug("sha256Hash is missing from ", driveItem["id"].str);
}
}
} else {
// So that we have at least a zero value here as the API provided no 'size' data for this file item
item.size = "0";
}
}
// Is the object a remote drive item - living on another driveId ?
if (isItemRemote(driveItem)) {
item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str;
item.remoteId = driveItem["remoteItem"]["id"].str;
}
// National Cloud Deployments do not support /delta as a query
// Thus we need to track in the database that this item is in sync
// As we are making an item, set the syncStatus to Y
// When using a National Cloud Deployment, all existing DB entries are first set to N,
// so that when processing /children the 'deleted' difference can be identified
item.syncStatus = "Y";
// Return the created item
return item;
}
final class ItemDatabase {
// increment this for every change in the db schema
immutable int itemDatabaseVersion = 11;
immutable int itemDatabaseVersion = 12;
Database db;
string insertItemStmt;
@ -43,8 +168,7 @@ final class ItemDatabase
string deleteItemByIdStmt;
bool databaseInitialised = false;
this(const(char)[] filename)
{
this(const(char)[] filename) {
db = Database(filename);
int dbVersion;
try {
@ -99,12 +223,12 @@ final class ItemDatabase
db.exec("PRAGMA locking_mode = EXCLUSIVE");
insertItemStmt = "
INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)
INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus, size)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
";
updateItemStmt = "
UPDATE item
SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13
SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13, size = ?14
WHERE driveId = ?1 AND id = ?2
";
selectItemByIdStmt = "
@ -119,13 +243,11 @@ final class ItemDatabase
databaseInitialised = true;
}
bool isDatabaseInitialised()
{
bool isDatabaseInitialised() {
return databaseInitialised;
}
void createTable()
{
void createTable() {
db.exec("CREATE TABLE item (
driveId TEXT NOT NULL,
id TEXT NOT NULL,
@ -141,6 +263,7 @@ final class ItemDatabase
remoteId TEXT,
deltaLink TEXT,
syncStatus TEXT,
size TEXT,
PRIMARY KEY (driveId, id),
FOREIGN KEY (driveId, parentId)
REFERENCES item (driveId, id)
@ -154,32 +277,27 @@ final class ItemDatabase
db.setVersion(itemDatabaseVersion);
}
void insert(const ref Item item)
{
void insert(const ref Item item) {
auto p = db.prepare(insertItemStmt);
bindItem(item, p);
p.exec();
}
void update(const ref Item item)
{
void update(const ref Item item) {
auto p = db.prepare(updateItemStmt);
bindItem(item, p);
p.exec();
}
void dump_open_statements()
{
void dump_open_statements() {
db.dump_open_statements();
}
int db_checkpoint()
{
int db_checkpoint() {
return db.db_checkpoint();
}
void upsert(const ref Item item)
{
void upsert(const ref Item item) {
auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?");
s.bind(1, item.driveId);
s.bind(2, item.id);
@ -191,8 +309,7 @@ final class ItemDatabase
stmt.exec();
}
Item[] selectChildren(const(char)[] driveId, const(char)[] id)
{
Item[] selectChildren(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(selectItemByParentIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -205,8 +322,7 @@ final class ItemDatabase
return items;
}
bool selectById(const(char)[] driveId, const(char)[] id, out Item item)
{
bool selectById(const(char)[] driveId, const(char)[] id, out Item item) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -219,8 +335,7 @@ final class ItemDatabase
}
// returns true if an item id is in the database
bool idInLocalDatabase(const(string) driveId, const(string)id)
{
bool idInLocalDatabase(const(string) driveId, const(string)id) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -233,8 +348,7 @@ final class ItemDatabase
// returns the item with the given path
// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
bool selectByPath(const(char)[] path, string rootDriveId, out Item item)
{
bool selectByPath(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
@ -254,6 +368,7 @@ final class ItemDatabase
auto r = s.exec();
if (r.empty) return false;
currItem = buildItem(r);
// if the item is of type remote substitute it with the child
if (currItem.type == ItemType.remote) {
Item child;
@ -268,8 +383,7 @@ final class ItemDatabase
}
// same as selectByPath() but it does not traverse remote folders
bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item)
{
bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
@ -294,16 +408,14 @@ final class ItemDatabase
return true;
}
void deleteById(const(char)[] driveId, const(char)[] id)
{
void deleteById(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(deleteItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
p.exec();
}
private void bindItem(const ref Item item, ref Statement stmt)
{
private void bindItem(const ref Item item, ref Statement stmt) {
with (stmt) with (item) {
bind(1, driveId);
bind(2, id);
@ -313,6 +425,7 @@ final class ItemDatabase
case file: typeStr = "file"; break;
case dir: typeStr = "dir"; break;
case remote: typeStr = "remote"; break;
case unknown: typeStr = "unknown"; break;
}
bind(4, typeStr);
bind(5, eTag);
@ -324,17 +437,18 @@ final class ItemDatabase
bind(11, remoteDriveId);
bind(12, remoteId);
bind(13, syncStatus);
bind(14, size);
}
}
private Item buildItem(Statement.Result result)
{
private Item buildItem(Statement.Result result) {
assert(!result.empty, "The result must not be empty");
assert(result.front.length == 14, "The result must have 14 columns");
assert(result.front.length == 15, "The result must have 15 columns");
Item item = {
driveId: result.front[0].dup,
id: result.front[1].dup,
name: result.front[2].dup,
// Column 3 is type - not set here
eTag: result.front[4].dup,
cTag: result.front[5].dup,
mtime: SysTime.fromISOExtString(result.front[6]),
@ -343,7 +457,9 @@ final class ItemDatabase
sha256Hash: result.front[9].dup,
remoteDriveId: result.front[10].dup,
remoteId: result.front[11].dup,
syncStatus: result.front[12].dup
// Column 12 is deltaLink - not set here
syncStatus: result.front[13].dup,
size: result.front[14].dup
};
switch (result.front[3]) {
case "file": item.type = ItemType.file; break;
@ -357,8 +473,7 @@ final class ItemDatabase
// computes the path of the given item id
// the path is relative to the sync directory ex: "Music/Turbo Killer.mp3"
// the trailing slash is not added even if the item is a directory
string computePath(const(char)[] driveId, const(char)[] id)
{
string computePath(const(char)[] driveId, const(char)[] id) {
assert(driveId && id);
string path;
Item item;
@ -416,8 +531,7 @@ final class ItemDatabase
return path;
}
Item[] selectRemoteItems()
{
Item[] selectRemoteItems() {
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL");
auto res = stmt.exec();
@ -428,8 +542,7 @@ final class ItemDatabase
return items;
}
string getDeltaLink(const(char)[] driveId, const(char)[] id)
{
string getDeltaLink(const(char)[] driveId, const(char)[] id) {
assert(driveId && id);
auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
@ -439,8 +552,7 @@ final class ItemDatabase
return res.front[0].dup;
}
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink)
{
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) {
assert(driveId && id);
assert(deltaLink);
auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2");
@ -455,8 +567,7 @@ final class ItemDatabase
// As we query /children to get all children from OneDrive, update anything in the database
// to be flagged as not-in-sync; we can then use that flag to determine what was previously
// in-sync but is now deleted on OneDrive
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id)
{
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) {
assert(driveId);
auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
@ -466,8 +577,7 @@ final class ItemDatabase
// National Cloud Deployments (US and DE) do not support /delta as a query
// Select items that have an out-of-sync flag set
Item[] selectOutOfSyncItems(const(char)[] driveId)
{
Item[] selectOutOfSyncItems(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1");
@ -482,8 +592,7 @@ final class ItemDatabase
// OneDrive Business Folders are stored in the database potentially without a root | parentRoot link
// Select items associated with the provided driveId
Item[] selectByDriveId(const(char)[] driveId)
{
Item[] selectByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL");
@ -496,9 +605,22 @@ final class ItemDatabase
return items;
}
// Select all items associated with the provided driveId
Item[] selectAllItemsByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1");
stmt.bind(1, driveId);
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
// Perform a vacuum on the database, commit WAL / SHM to file
void performVacuum()
{
void performVacuum() {
try {
auto stmt = db.prepare("VACUUM;");
stmt.exec();
@ -510,8 +632,7 @@ final class ItemDatabase
}
// Select distinct driveId items from database
string[] selectDistinctDriveIds()
{
string[] selectDistinctDriveIds() {
string[] driveIdArray;
auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;");
auto res = stmt.exec();
@ -522,4 +643,4 @@ final class ItemDatabase
}
return driveIdArray;
}
}
}
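To show makeDatabaseItem() and the new size handling end to end, a hedged sketch with a trimmed-down driveItem - the JSON values are invented, but the field names are exactly the ones the function above reads:

import std.json : parseJSON;
import itemdb;

// Minimal sketch: invented values, real field names
auto driveItem = parseJSON(`{
    "id": "01ABCDEF",
    "name": "report.pdf",
    "size": 1024,
    "eTag": "anETag",
    "cTag": "aCTag",
    "file": { "hashes": { "quickXorHash": "ZGVhZGJlZWY=" } },
    "fileSystemInfo": { "lastModifiedDateTime": "2023-08-27T09:35:51Z" },
    "parentReference": { "driveId": "b!placeholder", "id": "01PARENT" }
}`);
Item item = makeDatabaseItem(driveItem);
// item.type == ItemType.file, item.size == "1024", item.syncStatus == "Y";
// quickXorHash is present, so the sha256Hash fallback is never consulted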

src/log.d

@ -1,28 +1,36 @@
// What is this module called?
module log;
// What does this module require to function?
import std.stdio;
import std.file;
import std.datetime;
import std.process;
import std.conv;
import core.memory;
import core.sys.posix.pwd, core.sys.posix.unistd, core.stdc.string : strlen;
import core.sys.posix.pwd;
import core.sys.posix.unistd;
import core.stdc.string : strlen;
import std.algorithm : splitter;
version(Notifications) {
import dnotify;
}
// enable verbose logging
// module variables
// verbose logging count
long verbose;
// do we write a log file? ... this should be a config value
bool writeLogFile = false;
// did the log file write fail?
bool logFileWriteFailFlag = false;
private bool doNotifications;
// shared string variable for username
string username;
string logFilePath;
void init(string logDir)
{
void init(string logDir) {
writeLogFile = true;
username = getUserName();
logFilePath = logDir;
@ -41,8 +49,7 @@ void init(string logDir)
}
}
void setNotifications(bool value)
{
void setNotifications(bool value) {
version(Notifications) {
// if we try to enable notifications, check for server availability
// and disable in case dbus server is not reachable
@ -57,8 +64,7 @@ void setNotifications(bool value)
doNotifications = value;
}
void log(T...)(T args)
{
void log(T...)(T args) {
writeln(args);
if(writeLogFile){
// Write to log file
@ -66,22 +72,19 @@ void log(T...)(T args)
}
}
void logAndNotify(T...)(T args)
{
void logAndNotify(T...)(T args) {
notify(args);
log(args);
}
void fileOnly(T...)(T args)
{
void fileOnly(T...)(T args) {
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void vlog(T...)(T args)
{
void vlog(T...)(T args) {
if (verbose >= 1) {
writeln(args);
if(writeLogFile){
@ -91,8 +94,7 @@ void vlog(T...)(T args)
}
}
void vdebug(T...)(T args)
{
void vdebug(T...)(T args) {
if (verbose >= 2) {
writeln("[DEBUG] ", args);
if(writeLogFile){
@ -102,8 +104,7 @@ void vdebug(T...)(T args)
}
}
void vdebugNewLine(T...)(T args)
{
void vdebugNewLine(T...)(T args) {
if (verbose >= 2) {
writeln("\n[DEBUG] ", args);
if(writeLogFile){
@ -113,8 +114,7 @@ void vdebugNewLine(T...)(T args)
}
}
void error(T...)(T args)
{
void error(T...)(T args) {
stderr.writeln(args);
if(writeLogFile){
// Write to log file
@ -122,14 +122,12 @@ void error(T...)(T args)
}
}
void errorAndNotify(T...)(T args)
{
void errorAndNotify(T...)(T args) {
notify(args);
error(args);
}
void notify(T...)(T args)
{
void notify(T...)(T args) {
version(Notifications) {
if (doNotifications) {
string result;
@ -153,8 +151,7 @@ void notify(T...)(T args)
}
}
private void logfileWriteLine(T...)(T args)
{
private void logfileWriteLine(T...)(T args) {
static import std.exception;
// Write to log file
string logFileName = .logFilePath ~ .username ~ ".onedrive.log";
@ -190,8 +187,7 @@ private void logfileWriteLine(T...)(T args)
logFile.close();
}
private string getUserName()
{
private string getUserName() {
auto pw = getpwuid(getuid);
// get required details
@ -216,24 +212,20 @@ private string getUserName()
}
}
void displayMemoryUsagePreGC()
{
void displayMemoryUsagePreGC() {
// Display memory usage
writeln("\nMemory Usage pre GC (bytes)");
writeln("--------------------");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
// uncomment this if required, if not using LDC 1.16 as this does not exist in that version
//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
}
void displayMemoryUsagePostGC()
{
void displayMemoryUsagePostGC() {
// Display memory usage
writeln("\nMemory Usage post GC (bytes)");
writeln("--------------------");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
// uncomment this if required, if not using LDC 1.16 as this does not exist in that version
//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
}
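A hedged usage sketch of the logging tiers above; mapping verbose counts to repeated --verbose flags is an assumption, based on the counter being a long rather than a bool:

import log;

log.verbose = 2;                 // assumed: 1 ~ --verbose, 2 ~ --verbose --verbose
log.init("/var/log/onedrive/");  // enables writing <username>.onedrive.log in that directory
log.log("always written to stdout (and the log file)");
log.vlog("written when verbose >= 1");
log.vdebug("written with a [DEBUG] prefix when verbose >= 2");
log.fileOnly("written only to the log file");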

src/main.d

File diff suppressed because it is too large

src/monitor.d

@ -1,27 +1,48 @@
import core.sys.linux.sys.inotify;
import core.stdc.errno;
import core.sys.posix.poll, core.sys.posix.unistd;
import std.exception, std.file, std.path, std.regex, std.stdio, std.string, std.algorithm;
import core.stdc.stdlib;
import config;
import selective;
import util;
static import log;
// What is this module called?
module monitor;
// relevant inotify events
// What does this module require to function?
import core.stdc.errno;
import core.stdc.stdlib;
import core.sys.linux.sys.inotify;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import std.algorithm;
import std.exception;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
import clientSideFiltering;
// Relevant inotify events
private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW;
class MonitorException: ErrnoException
{
@safe this(string msg, string file = __FILE__, size_t line = __LINE__)
{
class MonitorException: ErrnoException {
@safe this(string msg, string file = __FILE__, size_t line = __LINE__) {
super(msg, file, line);
}
}
final class Monitor
{
bool verbose;
final class Monitor {
// Class variables
ApplicationConfig appConfig;
ClientSideFiltering selectiveSync;
// Are we verbose in logging output
bool verbose = false;
// skip symbolic links
bool skip_symlinks = false;
// check for .nosync if enabled
bool check_nosync = false;
// Configure Private Class Variables
// inotify file descriptor
private int fd;
// map every inotify watch descriptor to its directory
@ -30,29 +51,27 @@ final class Monitor
private string[int] cookieToPath;
// buffer to receive the inotify events
private void[] buffer;
// skip symbolic links
bool skip_symlinks;
// check for .nosync if enabled
bool check_nosync;
private SelectiveSync selectiveSync;
// Configure function delegates
void delegate(string path) onDirCreated;
void delegate(string path) onFileChanged;
void delegate(string path) onDelete;
void delegate(string from, string to) onMove;
this(SelectiveSync selectiveSync)
{
assert(selectiveSync);
// Configure the class variable to consume the application configuration including selective sync
this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
this.appConfig = appConfig;
this.selectiveSync = selectiveSync;
}
void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync)
{
this.verbose = verbose;
this.skip_symlinks = skip_symlinks;
this.check_nosync = check_nosync;
// Initialise the monitor class
void initialise() {
// Configure the variables
skip_symlinks = appConfig.getValueBool("skip_symlinks");
check_nosync = appConfig.getValueBool("check_nosync");
if (appConfig.getValueLong("verbose") > 0) {
verbose = true;
}
assert(onDirCreated && onFileChanged && onDelete && onMove);
fd = inotify_init();
@ -61,9 +80,9 @@ final class Monitor
// from which point do we start watching for changes?
string monitorPath;
if (cfg.getValueString("single_directory") != ""){
// single directory in use, monitor only this
monitorPath = "./" ~ cfg.getValueString("single_directory");
if (appConfig.getValueString("single_directory") != ""){
// single directory in use, monitor only this path
monitorPath = "./" ~ appConfig.getValueString("single_directory");
} else {
// default
monitorPath = ".";
@ -71,14 +90,14 @@ final class Monitor
addRecursive(monitorPath);
}
void shutdown()
{
// Shutdown the monitor class
void shutdown() {
if (fd > 0) close(fd);
wdToDirName = null;
}
private void addRecursive(string dirname)
{
// Recursively add this path to be monitored
private void addRecursive(string dirname) {
// skip non existing/disappeared items
if (!exists(dirname)) {
log.vlog("Not adding non-existing/disappeared directory: ", dirname);
@ -173,8 +192,8 @@ final class Monitor
}
}
private void add(string pathname)
{
// Add this path to be monitored
private void add(string pathname) {
int wd = inotify_add_watch(fd, toStringz(pathname), mask);
if (wd < 0) {
if (errno() == ENOSPC) {
@ -185,7 +204,7 @@ final class Monitor
log.log("sudo sysctl fs.inotify.max_user_watches=524288");
}
if (errno() == 13) {
if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
// no misleading output that we could not add a watch due to permission denied
return;
} else {
@ -206,18 +225,17 @@ final class Monitor
if (isDir(pathname)) {
// This is a directory
// is the path excluded if skip_dotfiles is configured and the path is a .folder?
if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
// no misleading output that we are monitoring this directory
return;
}
// Log that this directory is being monitored
log.vlog("Monitor directory: ", pathname);
log.vlog("Monitoring directory: ", pathname);
}
}
// remove a watch descriptor
private void remove(int wd)
{
// Remove a watch descriptor
private void remove(int wd) {
assert(wd in wdToDirName);
int ret = inotify_rm_watch(fd, wd);
if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
@ -225,9 +243,8 @@ final class Monitor
wdToDirName.remove(wd);
}
// remove the watch descriptors associated to the given path
private void remove(const(char)[] path)
{
// Remove the watch descriptors associated to the given path
private void remove(const(char)[] path) {
path ~= "/";
foreach (wd, dirname; wdToDirName) {
if (dirname.startsWith(path)) {
@ -239,17 +256,17 @@ final class Monitor
}
}
// return the file path from an inotify event
private string getPath(const(inotify_event)* event)
{
// Return the file path from an inotify event
private string getPath(const(inotify_event)* event) {
string path = wdToDirName[event.wd];
if (event.len > 0) path ~= fromStringz(event.name.ptr);
log.vdebug("inotify path event for: ", path);
return path;
}
void update(bool useCallbacks = true)
{
// Process pending inotify events, firing the configured callbacks
void update(bool useCallbacks = true) {
pollfd fds = {
fd: fd,
events: POLLIN
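The remainder of update() is cut off by the diff viewer here. As a hedged wiring sketch of the new interface - appConfig and selectiveSync are assumed to be already-constructed ApplicationConfig and ClientSideFiltering instances:

// Minimal sketch: all four delegates must be assigned before initialise()
auto fsMonitor = new Monitor(appConfig, selectiveSync);
fsMonitor.onDirCreated = (string path) { log.vlog("Directory created: ", path); };
fsMonitor.onFileChanged = (string path) { log.vlog("File changed: ", path); };
fsMonitor.onDelete = (string path) { log.vlog("Item deleted: ", path); };
fsMonitor.onMove = (string from, string to) { log.vlog("Moved: ", from, " -> ", to); };
fsMonitor.initialise(); // asserts the delegates, then watches '.' or single_directory
fsMonitor.update();     // poll inotify and fire the callbacks for any events
fsMonitor.shutdown();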

File diff suppressed because it is too large

src/progress.d

@ -1,5 +1,7 @@
// What is this module called?
module progress;
// What does this module require to function?
import std.stdio;
import std.range;
import std.format;
@ -7,6 +9,8 @@ import std.datetime;
import core.sys.posix.unistd;
import core.sys.posix.sys.ioctl;
// What other modules that we have created do we need to import?
class Progress
{
private:

src/qxor.d

@ -1,7 +1,11 @@
// What is this module called?
module qxor;
// What does this module require to function?
import std.algorithm;
import std.digest;
// implementation of the QuickXorHash algorithm in D
// Implementation of the QuickXorHash algorithm in D
// https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md
struct QuickXor
{
@ -71,18 +75,4 @@ struct QuickXor
}
return tmp;
}
}
unittest
{
assert(isDigest!QuickXor);
}
unittest
{
QuickXor qxor;
qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
}
alias QuickXorDigest = WrapperDigest!(QuickXor);
}
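The in-file unittests and the QuickXorDigest alias are removed by this hunk; the deleted test doubles as a usage sketch of the struct's std.digest API:

import std.digest : toHexString;

QuickXor qxor;
qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");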

src/selective.d (deleted)

@ -1,422 +0,0 @@
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
import util;
import log;
final class SelectiveSync
{
private string[] paths;
private string[] businessSharedFoldersList;
private Regex!char mask;
private Regex!char dirmask;
private bool skipDirStrictMatch = false;
private bool skipDotfiles = false;
// load sync_list file
void load(string filepath)
{
if (exists(filepath)) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch()
{
skipDirStrictMatch = true;
}
// load business_shared_folders file
void loadSharedFolders(string filepath)
{
if (exists(filepath)) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
businessSharedFoldersList ~= buildNormalizedPath(line);
}
file.close();
}
}
void setFileMask(const(char)[] mask)
{
this.mask = wild2regex(mask);
}
void setDirMask(const(char)[] dirmask)
{
this.dirmask = wild2regex(dirmask);
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles()
{
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles()
{
return skipDotfiles;
}
// config file skip_dir parameter
bool isDirNameExcluded(string name)
{
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
log.vdebug("skip_dir evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(dirmask).empty) {
log.vdebug("'!name.matchFirst(dirmask).empty' returned true = matched");
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
log.vdebug("No Strict Matching Enforced");
// Test the entire path working backwards from child
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(dirmask).empty) {
log.vdebug("'!checkPath.matchFirst(dirmask).empty' returned true = matched");
return true;
}
}
}
} else {
log.vdebug("Strict Matching Enforced - No Match");
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name)
{
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
log.vdebug("skip_file evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(mask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(mask).empty) {
return true;
}
}
// no match
return false;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path)
{
// Debug output that we are performing a 'sync_list' inclusion / exclusion test
return .isPathExcluded(path, paths);
}
// Match against skip_dir, skip_file & sync_list entries
bool isPathExcludedMatchAll(string path)
{
return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask);
}
// is the path a dotfile?
bool isDotFile(string path)
{
// always allow the root
if (path == ".") return false;
path = buildNormalizedPath(path);
auto paths = pathSplitter(path);
foreach(base; paths) {
if (startsWith(base, ".")){
return true;
}
}
return false;
}
// is business shared folder matched
bool isSharedFolderMatched(string name)
{
// if there are no shared folder always return false
if (businessSharedFoldersList.empty) return false;
if (!name.matchFirst(businessSharedFoldersList).empty) {
return true;
} else {
// try a direct comparison just in case
foreach (userFolder; businessSharedFoldersList) {
if (userFolder == name) {
// direct match
log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name);
return true;
}
}
return false;
}
}
// is business shared folder included
bool isPathIncluded(string path, string[] allowedPaths)
{
// always allow the root
if (path == ".") return true;
// if there are no allowed paths always return true
if (allowedPaths.empty) return true;
path = buildNormalizedPath(path);
foreach (allowed; allowedPaths) {
auto comm = commonPrefix(path, allowed);
if (comm.length == path.length) {
// the given path is contained in an allowed path
return true;
}
if (comm.length == allowed.length && path[comm.length] == '/') {
// the given path is a subitem of an allowed path
return true;
}
}
return false;
}
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
private bool isPathExcluded(string path, string[] allowedPaths)
{
// function variables
bool exclude = false;
bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
log.vdebug("Evaluation against 'sync_list' for this path: ", path);
log.vdebug("[S]exclude = ", exclude);
log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[S]excludeMatched = ", excludeMatched);
// unless path is an exact match, entire sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is path is an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
log.vdebug("exact path match");
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded");
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag excludeDirectMatch so that a 'wildcard match' will not override this exclude
excludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
log.vdebug("something 'common' matches the input path");
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded");
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem/sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: parental path match");
finalResult = false;
// parental path matches, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' for '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !excludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match");
finalResult = false;
} else {
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
log.vdebug("[F]exclude = ", exclude);
log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[F]excludeMatched = ", excludeMatched);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (excludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED");
} else {
log.vdebug("Evaluation against 'sync_list' final result: included for sync");
}
return finalResult;
}
// test if the given path is matched by the regex expression.
// recursively test up the tree.
private bool isPathMatched(string path, Regex!char mask) {
path = buildNormalizedPath(path);
auto paths = pathSplitter(path);
string prefix = "";
foreach(base; paths) {
prefix ~= base;
if (!prefix.matchFirst(mask).empty) {
// a parental path segment matches something which we should skip
return true;
}
prefix ~= dirSeparator;
}
return false;
}
// unit tests
unittest
{
assert(isPathExcluded("Documents2", ["Documents"]));
assert(!isPathExcluded("Documents", ["Documents"]));
assert(!isPathExcluded("Documents/a.txt", ["Documents"]));
assert(isPathExcluded("Hello/World", ["Hello/John"]));
assert(!isPathExcluded(".", ["Documents"]));
}
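The asserts below are an illustrative extension (not part of this commit) exercising the '!' exclusion, trailing-wildcard and parental-path rules implemented above; the sync_list entries shown are hypothetical:
unittest
{
	assert(isPathExcluded("Documents/secret", ["!/Documents/secret"])); // direct '!' exclusion
	assert(!isPathExcluded("Photos2020", ["Photos*"]));                 // wildcard inclusion
	assert(!isPathExcluded("Documents/sub/file.txt", ["/Documents"]));  // parental path inclusion
}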

View file

@ -1,27 +1,29 @@
// What is this module called?
module sqlite;
// What does this module require to function?
import std.stdio;
import etc.c.sqlite3;
import std.string: fromStringz, toStringz;
import core.stdc.stdlib;
import std.conv;
static import log;
// What other modules that we have created do we need to import?
import log;
extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library
static this()
{
static this() {
if (sqlite3_libversion_number() < 3006019) {
throw new SqliteException("sqlite 3.6.19 or newer is required");
}
}
private string ifromStringz(const(char)* cstr)
{
private string ifromStringz(const(char)* cstr) {
return fromStringz(cstr).dup;
}
class SqliteException: Exception
{
class SqliteException: Exception {
@safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null)
{
super(msg, file, line, next);
@ -33,28 +35,23 @@ class SqliteException: Exception
}
}
struct Database
{
struct Database {
private sqlite3* pDb;
this(const(char)[] filename)
{
this(const(char)[] filename) {
open(filename);
}
~this()
{
~this() {
close();
}
int db_checkpoint()
{
int db_checkpoint() {
return sqlite3_wal_checkpoint(pDb, null);
}
void dump_open_statements()
{
log.log("Dumpint open statements: \n");
void dump_open_statements() {
log.log("Dumping open statements: \n");
auto p = sqlite3_next_stmt(pDb, null);
while (p != null) {
log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n");
@ -63,13 +60,12 @@ struct Database
}
void open(const(char)[] filename)
{
void open(const(char)[] filename) {
// https://www.sqlite.org/c3ref/open.html
int rc = sqlite3_open(toStringz(filename), &pDb);
if (rc == SQLITE_CANTOPEN) {
// Database cannot be opened
log.error("\nThe database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3\n");
log.error("\nThe database cannot be opened. Please check the permissions of " ~ filename ~ "\n");
close();
exit(-1);
}
@ -81,8 +77,7 @@ struct Database
sqlite3_extended_result_codes(pDb, 1); // always use extended result codes
}
void exec(const(char)[] sql)
{
void exec(const(char)[] sql) {
// https://www.sqlite.org/c3ref/exec.html
int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null);
if (rc != SQLITE_OK) {
@ -93,8 +88,7 @@ struct Database
}
}
int getVersion()
{
int getVersion() {
int userVersion;
extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) {
import core.stdc.stdlib: atoi;
@ -108,19 +102,16 @@ struct Database
return userVersion;
}
string getErrorMessage()
{
string getErrorMessage() {
return ifromStringz(sqlite3_errmsg(pDb));
}
void setVersion(int userVersion)
{
void setVersion(int userVersion) {
import std.conv: to;
exec("PRAGMA user_version=" ~ to!string(userVersion));
}
Statement prepare(const(char)[] zSql)
{
Statement prepare(const(char)[] zSql) {
Statement s;
// https://www.sqlite.org/c3ref/prepare.html
int rc = sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null);
@ -130,41 +121,34 @@ struct Database
return s;
}
void close()
{
void close() {
// https://www.sqlite.org/c3ref/close.html
sqlite3_close_v2(pDb);
pDb = null;
}
}
struct Statement
{
struct Result
{
struct Statement {
struct Result {
private sqlite3_stmt* pStmt;
private const(char)[][] row;
private this(sqlite3_stmt* pStmt)
{
private this(sqlite3_stmt* pStmt) {
this.pStmt = pStmt;
step(); // initialize the range
}
@property bool empty()
{
@property bool empty() {
return row.length == 0;
}
@property auto front()
{
@property auto front() {
return row;
}
alias step popFront;
void step()
{
void step() {
// https://www.sqlite.org/c3ref/step.html
int rc = sqlite3_step(pStmt);
if (rc == SQLITE_BUSY) {
@ -194,14 +178,12 @@ struct Statement
private sqlite3_stmt* pStmt;
~this()
{
~this() {
// https://www.sqlite.org/c3ref/finalize.html
sqlite3_finalize(pStmt);
}
void bind(int index, const(char)[] value)
{
void bind(int index, const(char)[] value) {
reset();
// https://www.sqlite.org/c3ref/bind_blob.html
int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC);
@ -210,47 +192,16 @@ struct Statement
}
}
Result exec()
{
Result exec() {
reset();
return Result(pStmt);
}
private void reset()
{
private void reset() {
// https://www.sqlite.org/c3ref/reset.html
int rc = sqlite3_reset(pStmt);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
}
}
}
unittest
{
auto db = Database(":memory:");
db.exec("CREATE TABLE test(
id TEXT PRIMARY KEY,
value TEXT
)");
assert(db.getVersion() == 0);
db.setVersion(1);
assert(db.getVersion() == 1);
auto s = db.prepare("INSERT INTO test VALUES (?, ?)");
s.bind(1, "key1");
s.bind(2, "value");
s.exec();
s.bind(1, "key2");
s.bind(2, null);
s.exec();
s = db.prepare("SELECT * FROM test ORDER BY id ASC");
auto r = s.exec();
assert(r.front[0] == "key1");
r.popFront();
assert(r.front[1] == null);
r.popFront();
assert(r.empty);
}
}
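A minimal usage sketch of the wrapper above (illustrative only; the in-memory database and the demo table are placeholders, not the client's schema):
void databaseUsageSketch() {
	auto db = Database(":memory:");
	db.exec("CREATE TABLE demo (id TEXT PRIMARY KEY, value TEXT)");
	auto insert = db.prepare("INSERT INTO demo VALUES (?, ?)");
	insert.bind(1, "key1");
	insert.bind(2, "value1");
	insert.exec();
	// Result behaves like an input range: front is the current row, popFront advances
	auto select = db.prepare("SELECT value FROM demo WHERE id = ?");
	select.bind(1, "key1");
	for (auto r = select.exec(); !r.empty; r.popFront()) {
		writeln("value = ", r.front[0]);
	}
}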

10956
src/sync.d

File diff suppressed because it is too large

View file

@ -1,302 +0,0 @@
import std.algorithm, std.conv, std.datetime, std.file, std.json;
import std.stdio, core.thread, std.string;
import progress, onedrive, util;
static import log;
private long fragmentSize = 10 * 2^^20; // 10 MiB
struct UploadSession
{
private OneDriveApi onedrive;
private bool verbose;
// https://dev.onedrive.com/resources/uploadSession.htm
private JSONValue session;
// path where to save the session
private string sessionFilePath;
this(OneDriveApi onedrive, string sessionFilePath)
{
assert(onedrive);
this.onedrive = onedrive;
this.sessionFilePath = sessionFilePath;
this.verbose = verbose;
}
JSONValue upload(string localPath, const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null)
{
// Fix https://github.com/abraunegg/onedrive/issues/2
// More Details https://github.com/OneDrive/onedrive-api-docs/issues/778
SysTime localFileLastModifiedTime = timeLastModified(localPath).toUTC();
localFileLastModifiedTime.fracSecs = Duration.zero;
JSONValue fileSystemInfo = [
"item": JSONValue([
"@name.conflictBehavior": JSONValue("replace"),
"fileSystemInfo": JSONValue([
"lastModifiedDateTime": localFileLastModifiedTime.toISOExtString()
])
])
];
// Try to create the upload session for this file
session = onedrive.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo);
if ("uploadUrl" in session){
session["localPath"] = localPath;
save();
return upload();
} else {
// there was an error
log.vlog("Create file upload session failed ... skipping file upload");
// return upload() will return a JSONValue response, create an empty JSONValue response to return
JSONValue response;
return response;
}
}
/* Restore the previous upload session.
* Returns true if the session is valid. Call upload() to resume it.
* Returns false if there is no session or the session is expired. */
bool restore()
{
if (exists(sessionFilePath)) {
log.vlog("Trying to restore the upload session ...");
// We can't use a JSONType.object check, as this is currently a string
// We can't use a try & catch block, as it does not catch std.json.JSONException
auto sessionFileText = readText(sessionFilePath);
if(canFind(sessionFileText,"@odata.context")) {
session = readText(sessionFilePath).parseJSON();
} else {
log.vlog("Upload session resume data is invalid");
remove(sessionFilePath);
return false;
}
// Check the session resume file for expirationDateTime
if ("expirationDateTime" in session){
// expirationDateTime in the file
auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str);
if (expiration < Clock.currTime()) {
log.vlog("The upload session is expired");
return false;
}
if (!exists(session["localPath"].str)) {
log.vlog("The file does not exist anymore");
return false;
}
// Can we read the file - as a permissions issue or file corruption will cause a failure on resume
// https://github.com/abraunegg/onedrive/issues/113
if (readLocalFile(session["localPath"].str)){
// able to read the file
// request the session status
JSONValue response;
try {
response = onedrive.requestUploadStatus(session["uploadUrl"].str);
} catch (OneDriveException e) {
// handle any onedrive error response
if (e.httpStatusCode == 400) {
log.vlog("Upload session not found");
return false;
}
}
// do we have a valid response from OneDrive?
if (response.type() == JSONType.object){
// JSON object
if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)){
// has the elements we need
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
if (session["nextExpectedRanges"].array.length == 0) {
log.vlog("The upload session is completed");
return false;
}
} else {
// bad data
log.vlog("Restore file upload session failed - invalid data response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
} else {
// not a JSON object
log.vlog("Restore file upload session failed - invalid response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
return true;
} else {
// unable to read the local file
log.vlog("Restore file upload session failed - unable to read the local file");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
} else {
// session file contains an error - can't resume
log.vlog("Restore file upload session failed - cleaning up session resume");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
}
return false;
}
JSONValue upload()
{
// Response for upload
JSONValue response;
// session JSON needs to contain valid elements
long offset;
long fileSize;
if ("nextExpectedRanges" in session){
offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long;
}
if ("localPath" in session){
fileSize = getSize(session["localPath"].str);
}
if ("uploadUrl" in session){
// Upload file via session created
// Upload Progress Bar
size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1;
Progress p = new Progress(iteration);
p.title = "Uploading";
long fragmentCount = 0;
long fragSize = 0;
// Initialise the upload progress bar at 0%
p.next();
while (true) {
fragmentCount++;
log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration);
p.next();
log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize );
fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
log.vdebugNewLine("Using fragSize: ", fragSize);
// fragSize must not be a negative value
if (fragSize < 0) {
// Session upload will fail
// not a JSON object - fragment upload failed
log.vlog("File upload session failed - invalid calculation of fragment size");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
// set response to null as error
response = null;
return response;
}
// If the resume upload fails, we need to check for a return code here
try {
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,
offset,
fragSize,
fileSize
);
} catch (OneDriveException e) {
// if a 100 response is generated, continue
if (e.httpStatusCode == 100) {
continue;
}
// there was an error response from OneDrive when uploading the file fragment
// handle 'HTTP request returned status code 429 (Too Many Requests)' first
if (e.httpStatusCode == 429) {
auto retryAfterValue = onedrive.getRetryAfterValue();
log.vdebug("Fragment upload failed - received throttle request response from OneDrive");
log.vdebug("Using Retry-After Value = ", retryAfterValue);
// Sleep thread as per request
log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled");
log.log("Sleeping for ", retryAfterValue, " seconds");
Thread.sleep(dur!"seconds"(retryAfterValue));
log.log("Retrying fragment upload");
} else {
// insert a new line as well, so that the below error is inserted on the console in the right location
log.vlog("\nFragment upload failed - received an exception response from OneDrive");
// display what the error is
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
// retry fragment upload in case error is transient
log.vlog("Retrying fragment upload");
}
try {
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,
offset,
fragSize,
fileSize
);
} catch (OneDriveException e) {
// OneDrive threw another error on retry
log.vlog("Retry to upload fragment failed");
// display what the error is
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
// set response to null as the fragment upload was in error twice
response = null;
}
}
// was the fragment uploaded without issue?
if (response.type() == JSONType.object){
offset += fragmentSize;
if (offset >= fileSize) break;
// update the session details
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
save();
} else {
// not a JSON object - fragment upload failed
log.vlog("File upload session failed - invalid response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
// set response to null as error
response = null;
return response;
}
}
// upload complete
p.next();
writeln();
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return response;
} else {
// session elements were not present
log.vlog("Session has no valid upload URL ... skipping this file upload");
// return an empty JSON response
response = null;
return response;
}
}
string getUploadSessionLocalFilePath() {
// return the session file path
string localPath = "";
if ("localPath" in session){
localPath = session["localPath"].str;
}
return localPath;
}
// save session details to temp file
private void save()
{
std.file.write(sessionFilePath, session.toString());
}
}
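For reference, a sketch of the fragment arithmetic the removed uploader relied on; the file size is hypothetical:
void fragmentMathSketch() {
	import std.stdio : writefln;
	long fragmentSize = 10 * 2^^20; // 10 MiB, as above
	long fileSize = 25 * 2^^20;     // hypothetical 25 MiB file
	long offset = 0;
	while (offset < fileSize) {
		// the final fragment is whatever remains of the file
		long fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
		writefln("uploading bytes %d-%d of %d", offset, offset + fragSize - 1, fileSize);
		offset += fragSize;
	}
	// prints three ranges: two full 10 MiB fragments and a trailing 5 MiB fragment
}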

View file

@ -1,6 +1,11 @@
// What is this module called?
module util;
// What does this module require to function?
import std.base64;
import std.conv;
import std.digest.crc, std.digest.sha;
import std.digest.crc;
import std.digest.sha;
import std.net.curl;
import std.datetime;
import std.file;
@ -13,22 +18,24 @@ import std.algorithm;
import std.uri;
import std.json;
import std.traits;
import qxor;
import core.stdc.stdlib;
import core.thread;
// What other modules that we have created do we need to import?
import log;
import config;
import qxor;
import curlEngine;
// module variables
shared string deviceName;
static this()
{
static this() {
deviceName = Socket.hostName;
}
// gives a new name to the specified file or directory
void safeRename(const(char)[] path)
{
// Creates a safe backup of the given item, and only performs the rename when not in a --dry-run scenario
void safeBackup(const(char)[] path, bool dryRun) {
auto ext = extension(path);
auto newPath = path.chomp(ext) ~ "-" ~ deviceName;
if (exists(newPath ~ ext)) {
@ -41,18 +48,55 @@ void safeRename(const(char)[] path)
newPath = newPath2;
}
newPath ~= ext;
rename(path, newPath);
// Perform the backup
log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath);
if (!dryRun) {
rename(path, newPath);
} else {
log.vdebug("DRY-RUN: Skipping local file backup");
}
}
// Renames the given item, and only performs the rename when not in a --dry-run scenario
void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) {
// Perform the rename
if (!dryRun) {
log.vdebug("Calling rename(oldPath, newPath)");
// rename physical path on disk
rename(oldPath, newPath);
} else {
log.vdebug("DRY-RUN: Skipping local file rename");
}
}
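An illustrative reconstruction of the backup-name derivation in safeBackup above (the hostname is hypothetical, and the collision-counter branch is elided in this diff):
void backupNameSketch() {
	import std.path : extension;
	import std.string : chomp;
	string deviceName = "mypc"; // normally Socket.hostName
	string path = "notes.txt";
	auto ext = extension(path); // ".txt"
	auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext;
	assert(newPath == "notes-mypc.txt");
}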
// deletes the specified file without throwing an exception if it does not exist
void safeRemove(const(char)[] path)
{
void safeRemove(const(char)[] path) {
if (exists(path)) remove(path);
}
// returns the CRC32 hex string of a file
string computeCRC32(string path) {
CRC32 crc;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
crc.put(data);
}
return crc.finish().toHexString().dup;
}
// returns the SHA1 hash hex string of a file
string computeSha1Hash(string path) {
SHA1 sha;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
sha.put(data);
}
return sha.finish().toHexString().dup;
}
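A short illustrative use of the hashing helpers above; the path and expected value are placeholders, not real client data:
void hashVerificationSketch() {
	import std.stdio : writeln;
	string localFile = "/tmp/example.bin";                            // hypothetical path
	string expectedSha1 = "DA39A3EE5E6B4B0D3255BFEF95601890AFD80709"; // hypothetical expected hash
	if (computeSha1Hash(localFile) == expectedSha1) {
		writeln("local file matches the expected hash");
	}
}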
// returns the quickXorHash base64 string of a file
string computeQuickXorHash(string path)
{
string computeQuickXorHash(string path) {
QuickXor qxor;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
@ -72,8 +116,7 @@ string computeSHA256Hash(string path) {
}
// converts wildcards (*, ?) to regex
Regex!char wild2regex(const(char)[] pattern)
{
Regex!char wild2regex(const(char)[] pattern) {
string str;
str.reserve(pattern.length + 2);
str ~= "^";
@ -115,53 +158,91 @@ Regex!char wild2regex(const(char)[] pattern)
return regex(str, "i");
}
// returns true if the network connection is available
bool testNetwork(Config cfg)
{
// Use low level HTTP struct
auto http = HTTP();
http.url = "https://login.microsoftonline.com";
// DNS lookup timeout
http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout")));
// Timeout for connecting
http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout")));
// Data Timeout for HTTPS connections
http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout")));
// maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout")));
// What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6
http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// Test Internet access to Microsoft OneDrive
bool testInternetReachability(ApplicationConfig appConfig) {
// Use preconfigured object with all the correct http values assigned
auto curlEngine = new CurlEngine();
curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version"));
// Configure the remaining items required
// URL to use
curlEngine.http.url = "https://login.microsoftonline.com";
// HTTP connection test method
http.method = HTTP.Method.head;
curlEngine.http.method = HTTP.Method.head;
// Attempt to contact the Microsoft Online Service
try {
log.vdebug("Attempting to contact online service");
http.perform();
log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Online Service");
http.shutdown();
log.vdebug("Attempting to contact Microsoft OneDrive Login Service");
curlEngine.http.perform();
log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Login Service");
curlEngine.http.shutdown();
return true;
} catch (SocketException e) {
// Socket issue
log.vdebug("HTTP Socket Issue");
log.error("Cannot connect to Microsoft OneDrive Service - Socket Issue");
log.error("Cannot connect to Microsoft OneDrive Login Service - Socket Issue");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
return false;
} catch (CurlException e) {
// No network connection to OneDrive Service
log.vdebug("No Network Connection");
log.error("Cannot connect to Microsoft OneDrive Service - Network Connection Issue");
log.error("Cannot connect to Microsoft OneDrive Login Service - Network Connection Issue");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
return false;
}
}
// Retry Internet access test to Microsoft OneDrive
bool retryInternetConnectivtyTest(ApplicationConfig appConfig) {
// retry the network connection to OneDrive
// https://github.com/abraunegg/onedrive/issues/1184
// Back off & retry with incremental delay
int retryCount = 10000;
int retryAttempts = 1;
int backoffInterval = 1;
int maxBackoffInterval = 3600;
bool onlineRetry = false;
bool retrySuccess = false;
while (!retrySuccess){
// retry to access OneDrive API
backoffInterval++;
int thisBackOffInterval = retryAttempts*backoffInterval;
log.vdebug(" Retry Attempt: ", retryAttempts);
if (thisBackOffInterval <= maxBackoffInterval) {
log.vdebug(" Retry In (seconds): ", thisBackOffInterval);
Thread.sleep(dur!"seconds"(thisBackOffInterval));
} else {
log.vdebug(" Retry In (seconds): ", maxBackoffInterval);
Thread.sleep(dur!"seconds"(maxBackoffInterval));
}
// perform the retry
onlineRetry = testInternetReachability(appConfig);
if (onlineRetry) {
// We are now online
log.log("Internet connectivity to Microsoft OneDrive service has been restored");
retrySuccess = true;
} else {
// We are still offline
if (retryAttempts == retryCount) {
// we have attempted to reconnect the maximum number of times
// set this to true to break out of the while loop
retrySuccess = true;
}
}
// Increment & loop around
retryAttempts++;
}
if (!onlineRetry) {
// Not online after 1.2 years of trying
log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts lasting over 1.2 years!");
}
// return the state
return onlineRetry;
}
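The delay schedule the loop above produces grows roughly quadratically until the cap is hit; a sketch of the first few attempts:
void backoffScheduleSketch() {
	import std.stdio : writefln;
	int backoffInterval = 1;
	int maxBackoffInterval = 3600;
	foreach (retryAttempts; 1 .. 6) {
		backoffInterval++;
		int thisBackOffInterval = retryAttempts * backoffInterval;
		// capped at maxBackoffInterval, exactly as in the retry loop above
		writefln("attempt %d: sleep %d seconds", retryAttempts,
			thisBackOffInterval <= maxBackoffInterval ? thisBackOffInterval : maxBackoffInterval);
	}
	// prints: 2, 6, 12, 20, 30 seconds for attempts 1-5
}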
// Can we read the file - as a permissions issue or file corruption will cause a failure
// https://github.com/abraunegg/onedrive/issues/113
// returns true if file can be accessed
bool readLocalFile(string path)
{
bool readLocalFile(string path) {
try {
// attempt to read up to the first 1 byte of the file
// validates we can 'read' the file based on file permissions
@ -175,8 +256,7 @@ bool readLocalFile(string path)
}
// calls globMatch for each string in pattern separated by '|'
bool multiGlobMatch(const(char)[] path, const(char)[] pattern)
{
bool multiGlobMatch(const(char)[] path, const(char)[] pattern) {
foreach (glob; pattern.split('|')) {
if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) {
return true;
@ -185,8 +265,7 @@ bool multiGlobMatch(const(char)[] path, const(char)[] pattern)
return false;
}
bool isValidName(string path)
{
bool isValidName(string path) {
// Restriction and limitations about windows naming files
// https://msdn.microsoft.com/en-us/library/aa365247
// https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders
@ -223,8 +302,7 @@ bool isValidName(string path)
return matched;
}
bool containsBadWhiteSpace(string path)
{
bool containsBadWhiteSpace(string path) {
// allow root item
if (path == ".") {
return true;
@ -248,8 +326,7 @@ bool containsBadWhiteSpace(string path)
return m.empty;
}
bool containsASCIIHTMLCodes(string path)
{
bool containsASCIIHTMLCodes(string path) {
// https://github.com/abraunegg/onedrive/issues/151
// If a filename contains ASCII HTML codes, regardless of if it gets encoded, it generates an error
// Check if the filename contains an ASCII HTML code sequence
@ -265,17 +342,13 @@ bool containsASCIIHTMLCodes(string path)
}
// Parse and display error message received from OneDrive
void displayOneDriveErrorMessage(string message, string callingFunction)
{
void displayOneDriveErrorMessage(string message, string callingFunction) {
writeln();
log.error("ERROR: Microsoft OneDrive API returned an error with the following message:");
auto errorArray = splitLines(message);
log.error(" Error Message: ", errorArray[0]);
// Extract 'message' as the reason
JSONValue errorMessage = parseJSON(replace(message, errorArray[0], ""));
// extra debug
log.vdebug("Raw Error Data: ", message);
log.vdebug("JSON Message: ", errorMessage);
// What is the reason for the error
if (errorMessage.type() == JSONType.object) {
@ -333,11 +406,14 @@ void displayOneDriveErrorMessage(string message, string callingFunction)
// Where in the code was this error generated
log.vlog(" Calling Function: ", callingFunction);
// Extra Debug if we are using --verbose --verbose
log.vdebug("Raw Error Data: ", message);
log.vdebug("JSON Message: ", errorMessage);
}
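For context, the shape of a typical OneDrive API error payload that the parser above expects (illustrative values):
// {
//   "error": {
//     "code": "itemNotFound",
//     "message": "The resource could not be found."
//   }
// }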
// Parse and display error message received from the local file system
void displayFileSystemErrorMessage(string message, string callingFunction)
{
void displayFileSystemErrorMessage(string message, string callingFunction) {
writeln();
log.error("ERROR: The local file system returned an error with the following message:");
auto errorArray = splitLines(message);
@ -353,6 +429,13 @@ void displayFileSystemErrorMessage(string message, string callingFunction)
}
}
// Display the POSIX Error Message
void displayPosixErrorMessage(string message) {
writeln();
log.error("ERROR: Microsoft OneDrive API returned data that highlights a POSIX compliance issue:");
log.error(" Error Message: ", message);
}
// Get the function name that is being called to assist with identifying where an error is being generated
string getFunctionName(alias func)() {
return __traits(identifier, __traits(parent, func)) ~ "()\n";
@ -527,7 +610,7 @@ void checkApplicationVersion() {
thisVersionReleaseGracePeriod = thisVersionReleaseGracePeriod.add!"months"(1);
log.vdebug("thisVersionReleaseGracePeriod: ", thisVersionReleaseGracePeriod);
// is this running version obsolete ?
// Is this running version obsolete ?
if (!displayObsolete) {
// if releaseGracePeriod > currentTime
// display an information warning that there is a new release available
@ -556,54 +639,106 @@ void checkApplicationVersion() {
}
}
// Unit Tests
unittest
{
assert(multiGlobMatch(".hidden", ".*"));
assert(multiGlobMatch(".hidden", "file|.*"));
assert(!multiGlobMatch("foo.bar", "foo|bar"));
// these should detect invalid file/directory names
assert(isValidName("."));
assert(isValidName("./general.file"));
assert(!isValidName("./ leading_white_space"));
assert(!isValidName("./trailing_white_space "));
assert(!isValidName("./trailing_dot."));
assert(!isValidName("./includes<in the path"));
assert(!isValidName("./includes>in the path"));
assert(!isValidName("./includes:in the path"));
assert(!isValidName(`./includes"in the path`));
assert(!isValidName("./includes|in the path"));
assert(!isValidName("./includes?in the path"));
assert(!isValidName("./includes*in the path"));
assert(!isValidName("./includes / in the path"));
assert(!isValidName(`./includes\ in the path`));
assert(!isValidName(`./includes\\ in the path`));
assert(!isValidName(`./includes\\\\ in the path`));
assert(!isValidName("./includes\\ in the path"));
assert(!isValidName("./includes\\\\ in the path"));
assert(!isValidName("./CON"));
assert(!isValidName("./CON.text"));
assert(!isValidName("./PRN"));
assert(!isValidName("./AUX"));
assert(!isValidName("./NUL"));
assert(!isValidName("./COM0"));
assert(!isValidName("./COM1"));
assert(!isValidName("./COM2"));
assert(!isValidName("./COM3"));
assert(!isValidName("./COM4"));
assert(!isValidName("./COM5"));
assert(!isValidName("./COM6"));
assert(!isValidName("./COM7"));
assert(!isValidName("./COM8"));
assert(!isValidName("./COM9"));
assert(!isValidName("./LPT0"));
assert(!isValidName("./LPT1"));
assert(!isValidName("./LPT2"));
assert(!isValidName("./LPT3"));
assert(!isValidName("./LPT4"));
assert(!isValidName("./LPT5"));
assert(!isValidName("./LPT6"));
assert(!isValidName("./LPT7"));
assert(!isValidName("./LPT8"));
assert(!isValidName("./LPT9"));
bool hasId(JSONValue item) {
return ("id" in item) != null;
}
bool hasQuota(JSONValue item) {
return ("quota" in item) != null;
}
bool isItemDeleted(JSONValue item) {
return ("deleted" in item) != null;
}
bool isItemRoot(JSONValue item) {
return ("root" in item) != null;
}
bool hasParentReference(const ref JSONValue item) {
return ("parentReference" in item) != null;
}
bool hasParentReferenceId(JSONValue item) {
return ("id" in item["parentReference"]) != null;
}
bool hasParentReferencePath(JSONValue item) {
return ("path" in item["parentReference"]) != null;
}
bool isFolderItem(const ref JSONValue item) {
return ("folder" in item) != null;
}
bool isFileItem(const ref JSONValue item) {
return ("file" in item) != null;
}
bool isItemRemote(const ref JSONValue item) {
return ("remoteItem" in item) != null;
}
bool isItemFile(const ref JSONValue item) {
return ("file" in item) != null;
}
bool isItemFolder(const ref JSONValue item) {
return ("folder" in item) != null;
}
bool hasFileSize(const ref JSONValue item) {
return ("size" in item) != null;
}
bool isDotFile(const(string) path) {
// always allow the root
if (path == ".") return false;
auto paths = pathSplitter(buildNormalizedPath(path));
foreach(base; paths) {
if (startsWith(base, ".")){
return true;
}
}
return false;
}
bool isMalware(const ref JSONValue item) {
return ("malware" in item) != null;
}
bool hasHashes(const ref JSONValue item) {
return ("hashes" in item["file"]) != null;
}
bool hasQuickXorHash(const ref JSONValue item) {
return ("quickXorHash" in item["file"]["hashes"]) != null;
}
bool hasSHA256Hash(const ref JSONValue item) {
return ("sha256Hash" in item["file"]["hashes"]) != null;
}
bool isMicrosoftOneNoteMimeType1(const ref JSONValue item) {
return (item["file"]["mimeType"].str) == "application/msonenote";
}
bool isMicrosoftOneNoteMimeType2(const ref JSONValue item) {
return (item["file"]["mimeType"].str) == "application/octet-stream";
}
bool hasUploadURL(const ref JSONValue item) {
return ("uploadUrl" in item) != null;
}
bool hasNextExpectedRanges(const ref JSONValue item) {
return ("nextExpectedRanges" in item) != null;
}
bool hasLocalPath(const ref JSONValue item) {
return ("localPath" in item) != null;
}
bool hasETag(const ref JSONValue item) {
return ("eTag" in item) != null;
}
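An illustrative check of the JSON inspection helpers above against a hypothetical DriveItem response:
unittest
{
	// hypothetical API response fragment
	JSONValue item = parseJSON(`{"id": "ABC123", "size": 12, "file": {"hashes": {"quickXorHash": "dGVzdA=="}}}`);
	assert(hasId(item));
	assert(isItemFile(item));
	assert(hasFileSize(item));
	assert(hasHashes(item));
	assert(hasQuickXorHash(item));
	assert(!isItemFolder(item));
	assert(!isItemDeleted(item));
}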