// blame: 2018-12-28 03:19:20 +01:00
import core.stdc.stdlib : EXIT_SUCCESS , EXIT_FAILURE , exit ;
// blame: 2015-09-20 21:21:51 +02:00
import core.memory , core . time , core . thread ;
// blame: 2019-08-24 09:18:58 +02:00
import std.getopt , std . file , std . path , std . process , std . stdio , std . conv , std . algorithm . searching , std . string , std . regex ;
// blame: 2017-03-24 22:30:03 +01:00
import config , itemdb , monitor , onedrive , selective , sync , util ;
// blame: 2018-08-13 23:21:11 +02:00
import std.net.curl : CurlException ;
// blame: 2018-12-28 03:19:20 +01:00
import core.stdc.signal ;
import std.traits ;
// blame: 2016-08-04 23:35:58 +02:00
static import log ;
// blame: 2015-09-01 20:45:34 +02:00
// blame: 2018-12-28 03:19:20 +01:00
// Module-level OneDrive API client; created in main() once configuration is
// loaded and shut down via oneDrive.http.shutdown() on every exit path.
OneDriveApi oneDrive ;
// Module-level handle to the local item state database (items.sqlite3, or
// items-dryrun.sqlite3 when --dry-run is in effect).
ItemDatabase itemDb ;
// blame: 2019-07-06 00:01:04 +02:00
// Process exit code returned when the OneDrive API cannot be initialised
// (see main(): returned after oneDrive.init() fails), distinct from the
// generic EXIT_FAILURE so callers/scripts can detect an authorisation problem.
const int EXIT_UNAUTHORIZED = 3 ;
// blame: 2019-02-26 21:21:23 +01:00
// Logging verbosity selectors. LOG_NORMAL is visibly passed as the log-level
// argument to performSync() in this file; the two MONITOR_* levels are
// presumably used by the --monitor loop further down — TODO confirm.
enum MONITOR_LOG_SILENT = 2 ; // monitor mode: suppress sync log output
enum MONITOR_LOG_QUIET = 1 ; // monitor mode: reduced sync log output
enum LOG_NORMAL = 0 ; // default: full log output
// blame: 2016-08-04 23:35:58 +02:00
int main ( string [ ] args )
2015-09-01 20:45:34 +02:00
{
2018-12-23 01:15:10 +01:00
// Disable buffering on stdout
stdout . setvbuf ( 0 , _IONBF ) ;
2018-04-13 01:33:16 +02:00
2019-01-26 01:03:00 +01:00
// configuration directory
2019-04-11 04:26:20 +02:00
string confdirOption ;
2019-01-26 01:03:00 +01:00
try {
2019-04-11 04:26:20 +02:00
// print the version and exit
bool printVersion = false ;
2019-01-26 01:03:00 +01:00
auto opt = getopt (
args ,
2019-04-11 04:26:20 +02:00
std . getopt . config . passThrough ,
2019-01-26 01:03:00 +01:00
std . getopt . config . bundling ,
std . getopt . config . caseSensitive ,
2019-04-11 04:26:20 +02:00
"confdir" , "Set the directory used to store the configuration files" , & confdirOption ,
2019-03-17 05:10:41 +01:00
"verbose|v+" , "Print more details, useful for debugging (repeat for extra debugging)" , & log . verbose ,
2019-01-26 01:03:00 +01:00
"version" , "Print the version and exit" , & printVersion
) ;
if ( opt . helpWanted ) {
2019-04-11 04:26:20 +02:00
args ~ = "--help" ;
}
if ( printVersion ) {
std . stdio . write ( "onedrive " , import ( "version" ) ) ;
2019-01-26 01:03:00 +01:00
return EXIT_SUCCESS ;
}
} catch ( GetOptException e ) {
log . error ( e . msg ) ;
log . error ( "Try 'onedrive -h' for more information" ) ;
return EXIT_FAILURE ;
} catch ( Exception e ) {
// error
log . error ( e . msg ) ;
log . error ( "Try 'onedrive -h' for more information" ) ;
return EXIT_FAILURE ;
}
2019-08-24 09:18:58 +02:00
2019-04-11 04:26:20 +02:00
// load configuration file if available
auto cfg = new config . Config ( confdirOption ) ;
if ( ! cfg . initialize ( ) ) {
// There was an error loading the configuration
// Error message already printed
return EXIT_FAILURE ;
2019-04-01 19:51:25 +02:00
}
2019-08-24 09:18:58 +02:00
2019-04-11 04:26:20 +02:00
// update configuration from command line args
cfg . update_from_args ( args ) ;
2019-08-24 09:18:58 +02:00
// Has any of our configuration that would require a --resync been changed?
// 1. sync_list file modification
// 2. config file modification - but only if sync_dir, skip_dir, skip_file or drive_id was modified
// 3. CLI input overriding configured config file option
string currentConfigHash ;
string currentSyncListHash ;
string previousConfigHash ;
string previousSyncListHash ;
string configHashFile = cfg . configDirName ~ "/.config.hash" ;
string syncListHashFile = cfg . configDirName ~ "/.sync_list.hash" ;
string configBackupFile = cfg . configDirName ~ "/.config.backup" ;
bool configOptionsDifferent = false ;
bool syncListDifferent = false ;
bool syncDirDifferent = false ;
bool skipFileDifferent = false ;
bool skipDirDifferent = false ;
if ( ( exists ( cfg . configDirName ~ "/config" ) ) & & ( ! exists ( configHashFile ) ) ) {
// Hash of config file needs to be created
std . file . write ( configHashFile , computeQuickXorHash ( cfg . configDirName ~ "/config" ) ) ;
}
if ( ( exists ( cfg . configDirName ~ "/sync_list" ) ) & & ( ! exists ( syncListHashFile ) ) ) {
// Hash of sync_list file needs to be created
std . file . write ( syncListHashFile , computeQuickXorHash ( cfg . configDirName ~ "/sync_list" ) ) ;
}
// If hash files exist, but config files do not ... remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different
if ( ( ! exists ( cfg . configDirName ~ "/config" ) ) & & ( exists ( configHashFile ) ) ) {
// if --resync safe remove config.hash and config.backup
if ( cfg . getValueBool ( "resync" ) ) {
safeRemove ( configHashFile ) ;
safeRemove ( configBackupFile ) ;
}
}
if ( ( ! exists ( cfg . configDirName ~ "/sync_list" ) ) & & ( exists ( syncListHashFile ) ) ) {
// if --resync safe remove sync_list.hash
if ( cfg . getValueBool ( "resync" ) ) safeRemove ( syncListHashFile ) ;
}
// Read config hashes if they exist
if ( exists ( cfg . configDirName ~ "/config" ) ) currentConfigHash = computeQuickXorHash ( cfg . configDirName ~ "/config" ) ;
if ( exists ( cfg . configDirName ~ "/sync_list" ) ) currentSyncListHash = computeQuickXorHash ( cfg . configDirName ~ "/sync_list" ) ;
if ( exists ( configHashFile ) ) previousConfigHash = readText ( configHashFile ) ;
if ( exists ( syncListHashFile ) ) previousSyncListHash = readText ( syncListHashFile ) ;
// Was sync_list updated?
if ( currentSyncListHash ! = previousSyncListHash ) {
// Debugging output to assist what changed
log . vdebug ( "sync_list file has been updated, --resync needed" ) ;
syncListDifferent = true ;
}
// Was config updated?
if ( currentConfigHash ! = previousConfigHash ) {
// config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified
log . vdebug ( "config file has been updated, checking if --resync needed" ) ;
if ( exists ( configBackupFile ) ) {
// check backup config what has changed for these configuration options if anything
// # sync_dir = "~/OneDrive"
// # skip_file = "~*|.~*|*.tmp"
// # skip_dir = ""
// # drive_id = ""
string [ string ] stringValues ;
stringValues [ "sync_dir" ] = "" ;
stringValues [ "skip_file" ] = "" ;
stringValues [ "skip_dir" ] = "" ;
stringValues [ "drive_id" ] = "" ;
auto file = File ( configBackupFile , "r" ) ;
auto r = regex ( `^(\w+)\s*=\s*"(.*)"\s*$` ) ;
foreach ( line ; file . byLine ( ) ) {
line = stripLeft ( line ) ;
if ( line . length = = 0 | | line [ 0 ] = = ';' | | line [ 0 ] = = '#' ) continue ;
auto c = line . matchFirst ( r ) ;
if ( ! c . empty ) {
c . popFront ( ) ; // skip the whole match
string key = c . front . dup ;
auto p = key in stringValues ;
if ( p ) {
c . popFront ( ) ;
// compare this key
if ( ( key = = "sync_dir" ) & & ( c . front . dup ! = cfg . getValueString ( "sync_dir" ) ) ) {
log . vdebug ( key , " was modified since the last time the application was successfully run, --resync needed" ) ;
configOptionsDifferent = true ;
}
if ( ( key = = "skip_file" ) & & ( c . front . dup ! = cfg . getValueString ( "skip_file" ) ) ) {
log . vdebug ( key , " was modified since the last time the application was successfully run, --resync needed" ) ;
configOptionsDifferent = true ;
}
if ( ( key = = "skip_dir" ) & & ( c . front . dup ! = cfg . getValueString ( "skip_dir" ) ) ) {
log . vdebug ( key , " was modified since the last time the application was successfully run, --resync needed" ) ;
configOptionsDifferent = true ;
}
if ( ( key = = "drive_id" ) & & ( c . front . dup ! = cfg . getValueString ( "drive_id" ) ) ) {
log . vdebug ( key , " was modified since the last time the application was successfully run, --resync needed" ) ;
configOptionsDifferent = true ;
}
}
}
}
} else {
// no backup to check
log . vdebug ( "WARNING: no backup config file was found, unable to validate if any changes made" ) ;
}
// If there was a backup, any modified values we need to worry about would been detected
if ( ! cfg . getValueBool ( "display_config" ) ) {
// we are not testing the configuration
if ( ! configOptionsDifferent ) {
// no options are different
if ( ! cfg . getValueBool ( "dry_run" ) ) {
// we are not in a dry-run scenario
// update config hash
log . vdebug ( "updating config hash as it is out of date" ) ;
std . file . write ( configHashFile , computeQuickXorHash ( cfg . configDirName ~ "/config" ) ) ;
// create backup copy of current config file
log . vdebug ( "making backup of config file as it is out of date" ) ;
std . file . copy ( cfg . configDirName ~ "/config" , configBackupFile ) ;
}
}
}
}
// Is there a backup of the config file if the config file exists?
if ( ( exists ( cfg . configDirName ~ "/config" ) ) & & ( ! exists ( configBackupFile ) ) ) {
// create backup copy of current config file
std . file . copy ( cfg . configDirName ~ "/config" , configBackupFile ) ;
}
// config file set options can be changed via CLI input, specifically these will impact sync and --resync will be needed:
// --syncdir ARG
// --skip-file ARG
// --skip-dir ARG
if ( exists ( cfg . configDirName ~ "/config" ) ) {
// config file exists
// was the sync_dir updated by CLI?
if ( cfg . configFileSyncDir ! = "" ) {
// sync_dir was set in config file
if ( cfg . configFileSyncDir ! = cfg . getValueString ( "sync_dir" ) ) {
// config file was set and CLI input changed this
log . vdebug ( "sync_dir: CLI override of config file option, --resync needed" ) ;
syncDirDifferent = true ;
}
}
// was the skip_file updated by CLI?
if ( cfg . configFileSkipFile ! = "" ) {
// skip_file was set in config file
if ( cfg . configFileSkipFile ! = cfg . getValueString ( "skip_file" ) ) {
// config file was set and CLI input changed this
log . vdebug ( "skip_file: CLI override of config file option, --resync needed" ) ;
skipFileDifferent = true ;
}
}
// was the skip_dir updated by CLI?
if ( cfg . configFileSkipDir ! = "" ) {
// skip_dir was set in config file
if ( cfg . configFileSkipDir ! = cfg . getValueString ( "skip_dir" ) ) {
// config file was set and CLI input changed this
log . vdebug ( "skip_dir: CLI override of config file option, --resync needed" ) ;
skipDirDifferent = true ;
}
}
}
// Has anything triggered a --resync requirement?
if ( configOptionsDifferent | | syncListDifferent | | syncDirDifferent | | skipFileDifferent | | skipDirDifferent ) {
// --resync needed, is the user just testing configuration changes?
if ( ! cfg . getValueBool ( "display_config" ) ) {
// not testing configuration changes
if ( ! cfg . getValueBool ( "resync" ) ) {
// --resync not issued, fail fast
log . error ( "An application configuration change has been detected where a --resync is required" ) ;
return EXIT_FAILURE ;
} else {
// --resync issued, update hashes of config files if they exist
if ( ! cfg . getValueBool ( "dry_run" ) ) {
// not doing a dry run, update hash files if config & sync_list exist
if ( exists ( cfg . configDirName ~ "/config" ) ) {
// update hash
log . vdebug ( "updating config hash as --resync issued" ) ;
std . file . write ( configHashFile , computeQuickXorHash ( cfg . configDirName ~ "/config" ) ) ;
// create backup copy of current config file
log . vdebug ( "making backup of config file as --resync issued" ) ;
std . file . copy ( cfg . configDirName ~ "/config" , configBackupFile ) ;
}
if ( exists ( cfg . configDirName ~ "/sync_list" ) ) {
// update sync_list hash
log . vdebug ( "updating sync_list hash as --resync issued" ) ;
std . file . write ( syncListHashFile , computeQuickXorHash ( cfg . configDirName ~ "/sync_list" ) ) ;
}
}
}
}
}
2019-03-17 04:41:26 +01:00
// dry-run notification
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "dry_run" ) ) {
2019-03-17 04:41:26 +01:00
log . log ( "DRY-RUN Configured. Output below shows what 'would' have occurred." ) ;
}
2019-04-11 04:26:20 +02:00
// Are we able to reach the OneDrive Service
bool online = false ;
2019-03-11 07:57:47 +01:00
// dry-run database setup
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "dry_run" ) ) {
2019-03-11 07:57:47 +01:00
// Make a copy of the original items.sqlite3 for use as the dry run copy if it exists
if ( exists ( cfg . databaseFilePath ) ) {
// copy the file
log . vdebug ( "Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations" ) ;
copy ( cfg . databaseFilePath , cfg . databaseFilePathDryRun ) ;
}
}
2018-12-23 01:15:10 +01:00
// sync_dir environment handling to handle ~ expansion properly
2018-12-19 19:42:28 +01:00
string syncDir ;
if ( ( environment . get ( "SHELL" ) = = "" ) & & ( environment . get ( "USER" ) = = "" ) ) {
2018-12-23 01:15:10 +01:00
log . vdebug ( "sync_dir: No SHELL or USER environment variable configuration detected" ) ;
// No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker
// Does the 'currently configured' sync_dir include a ~
2019-04-11 04:26:20 +02:00
if ( canFind ( cfg . getValueString ( "sync_dir" ) , "~" ) ) {
2018-12-23 01:15:10 +01:00
// A ~ was found
log . vdebug ( "sync_dir: A '~' was found in sync_dir, using the calculated 'homePath' to replace '~'" ) ;
2019-04-11 04:26:20 +02:00
syncDir = cfg . homePath ~ strip ( cfg . getValueString ( "sync_dir" ) , "~" ) ;
2018-12-19 19:42:28 +01:00
} else {
2018-12-23 01:15:10 +01:00
// No ~ found in sync_dir, use as is
log . vdebug ( "sync_dir: Getting syncDir from config value sync_dir" ) ;
2019-04-11 04:26:20 +02:00
syncDir = cfg . getValueString ( "sync_dir" ) ;
2018-12-19 19:42:28 +01:00
}
} else {
2018-12-23 01:15:10 +01:00
// A shell and user is set, expand any ~ as this will be expanded correctly if present
log . vdebug ( "sync_dir: Getting syncDir from config value sync_dir" ) ;
2019-04-11 04:26:20 +02:00
if ( canFind ( cfg . getValueString ( "sync_dir" ) , "~" ) ) {
2018-12-23 01:15:10 +01:00
log . vdebug ( "sync_dir: A '~' was found in configured sync_dir, automatically expanding as SHELL and USER environment variable is set" ) ;
2019-04-11 04:26:20 +02:00
syncDir = expandTilde ( cfg . getValueString ( "sync_dir" ) ) ;
2018-12-23 01:15:10 +01:00
} else {
2019-04-11 04:26:20 +02:00
syncDir = cfg . getValueString ( "sync_dir" ) ;
2018-12-23 01:15:10 +01:00
}
2018-12-19 19:42:28 +01:00
}
2018-12-23 01:15:10 +01:00
// vdebug syncDir as set and calculated
log . vdebug ( "syncDir: " , syncDir ) ;
2018-11-23 21:13:16 +01:00
// Configure logging if enabled
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "enable_logging" ) ) {
2018-11-23 21:13:16 +01:00
// Read in a user defined log directory or use the default
2019-04-11 04:26:20 +02:00
string logDir = cfg . getValueString ( "log_dir" ) ;
2018-11-23 21:13:16 +01:00
log . vlog ( "Using logfile dir: " , logDir ) ;
log . init ( logDir ) ;
}
2018-12-05 20:19:00 +01:00
// Configure whether notifications are used
2019-04-11 04:26:20 +02:00
log . setNotifications ( cfg . getValueBool ( "monitor" ) & & ! cfg . getValueBool ( "disable_notifications" ) ) ;
2018-11-23 21:13:16 +01:00
2016-12-25 19:23:33 +01:00
// upgrades
2019-04-11 04:26:20 +02:00
if ( exists ( cfg . configDirName ~ "/items.db" ) ) {
if ( ! cfg . getValueBool ( "dry_run" ) ) {
safeRemove ( cfg . configDirName ~ "/items.db" ) ;
2019-03-11 07:57:47 +01:00
}
2018-12-05 20:19:00 +01:00
log . logAndNotify ( "Database schema changed, resync needed" ) ;
2019-04-11 04:26:20 +02:00
cfg . setValueBool ( "resync" , true ) ;
2016-12-25 19:23:33 +01:00
}
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "resync" ) | | cfg . getValueBool ( "logout" ) ) {
2019-08-24 09:18:58 +02:00
if ( cfg . getValueBool ( "resync" ) ) log . vdebug ( "--resync requested" ) ;
2017-05-28 23:14:37 +02:00
log . vlog ( "Deleting the saved status ..." ) ;
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "dry_run" ) ) {
2019-03-11 07:57:47 +01:00
safeRemove ( cfg . databaseFilePath ) ;
safeRemove ( cfg . deltaLinkFilePath ) ;
safeRemove ( cfg . uploadStateFilePath ) ;
}
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "logout" ) ) {
2019-08-24 09:18:58 +02:00
log . vdebug ( "--logout requested" ) ;
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "dry_run" ) ) {
2019-03-11 07:57:47 +01:00
safeRemove ( cfg . refreshTokenFilePath ) ;
}
2016-08-05 00:12:58 +02:00
}
2015-09-14 19:21:06 +02:00
}
2018-12-19 19:42:28 +01:00
// Display current application configuration, no application initialisation
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "display_config" ) ) {
string userConfigFilePath = cfg . configDirName ~ "/config" ;
string userSyncList = cfg . configDirName ~ "/sync_list" ;
2019-01-05 19:43:44 +01:00
// Display application version
std . stdio . write ( "onedrive version = " , import ( "version" ) ) ;
2018-12-19 19:42:28 +01:00
// Display all of the pertinent configuration options
2019-04-11 04:26:20 +02:00
writeln ( "Config path = " , cfg . configDirName ) ;
2018-12-19 19:42:28 +01:00
// Does a config file exist or are we using application defaults
if ( exists ( userConfigFilePath ) ) {
writeln ( "Config file found in config path = true" ) ;
} else {
writeln ( "Config file found in config path = false" ) ;
}
// Config Options
2019-04-11 04:26:20 +02:00
writeln ( "Config option 'check_nosync' = " , cfg . getValueBool ( "check_nosync" ) ) ;
2018-12-19 19:42:28 +01:00
writeln ( "Config option 'sync_dir' = " , syncDir ) ;
2019-04-11 04:26:20 +02:00
writeln ( "Config option 'skip_dir' = " , cfg . getValueString ( "skip_dir" ) ) ;
writeln ( "Config option 'skip_file' = " , cfg . getValueString ( "skip_file" ) ) ;
writeln ( "Config option 'skip_dotfiles' = " , cfg . getValueBool ( "skip_dotfiles" ) ) ;
writeln ( "Config option 'skip_symlinks' = " , cfg . getValueBool ( "skip_symlinks" ) ) ;
writeln ( "Config option 'monitor_interval' = " , cfg . getValueLong ( "monitor_interval" ) ) ;
2019-04-14 22:28:32 +02:00
writeln ( "Config option 'min_notify_changes' = " , cfg . getValueLong ( "min_notify_changes" ) ) ;
2019-04-11 04:26:20 +02:00
writeln ( "Config option 'log_dir' = " , cfg . getValueString ( "log_dir" ) ) ;
2018-12-19 19:42:28 +01:00
// Is config option drive_id configured?
2019-04-11 04:26:20 +02:00
if ( cfg . getValueString ( "drive_id" ) ! = "" ) {
writeln ( "Config option 'drive_id' = " , cfg . getValueString ( "drive_id" ) ) ;
2018-12-19 19:42:28 +01:00
}
// Is sync_list configured?
if ( exists ( userSyncList ) ) {
2019-05-09 13:18:49 +02:00
writeln ( "Config option 'sync_root_files' = " , cfg . getValueBool ( "sync_root_files" ) ) ;
2018-12-19 19:42:28 +01:00
writeln ( "Selective sync configured = true" ) ;
2019-01-05 19:43:44 +01:00
writeln ( "sync_list contents:" ) ;
// Output the sync_list contents
auto syncListFile = File ( userSyncList ) ;
auto range = syncListFile . byLine ( ) ;
foreach ( line ; range )
{
writeln ( line ) ;
}
2018-12-19 19:42:28 +01:00
} else {
2019-05-09 13:18:49 +02:00
writeln ( "Config option 'sync_root_files' = " , cfg . getValueBool ( "sync_root_files" ) ) ;
2018-12-19 19:42:28 +01:00
writeln ( "Selective sync configured = false" ) ;
}
2019-08-24 09:18:58 +02:00
// exit
2018-12-19 19:42:28 +01:00
return EXIT_SUCCESS ;
}
2019-06-20 17:11:29 +02:00
if ( cfg . getValueBool ( "force_http_11" ) ) {
log . log ( "NOTE: The use of --force-http-1.1 is depreciated" ) ;
}
2016-08-04 23:35:58 +02:00
log . vlog ( "Initializing the OneDrive API ..." ) ;
2018-08-13 23:21:11 +02:00
try {
online = testNetwork ( ) ;
} catch ( CurlException e ) {
// No network connection to OneDrive Service
2019-05-24 11:17:08 +02:00
log . error ( "Cannot connect to Microsoft OneDrive Service" ) ;
log . error ( "Reason: " , e . msg ) ;
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "monitor" ) ) {
2018-12-04 01:15:44 +01:00
return EXIT_FAILURE ;
}
}
2018-05-16 11:19:43 +02:00
// Initialize OneDrive, check for authorization
2019-04-11 04:26:20 +02:00
oneDrive = new OneDriveApi ( cfg ) ;
oneDrive . printAccessToken = cfg . getValueBool ( "print_token" ) ;
2018-12-28 03:19:20 +01:00
if ( ! oneDrive . init ( ) ) {
2017-12-28 15:21:41 +01:00
log . error ( "Could not initialize the OneDrive API" ) ;
2016-08-04 23:35:58 +02:00
// workaround for segfault in std.net.curl.Curl.shutdown() on exit
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2019-07-06 00:01:04 +02:00
return EXIT_UNAUTHORIZED ;
2015-09-01 20:45:34 +02:00
}
2018-12-19 19:42:28 +01:00
2018-04-24 04:14:36 +02:00
// if --synchronize or --monitor not passed in, exit & display help
auto performSyncOK = false ;
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "synchronize" ) | | cfg . getValueBool ( "monitor" ) ) {
2018-04-24 04:14:36 +02:00
performSyncOK = true ;
}
2018-05-16 11:19:43 +02:00
// create-directory, remove-directory, source-directory, destination-directory
// are activities that dont perform a sync no error message for these items either
2019-08-02 10:43:31 +02:00
if ( ( ( cfg . getValueString ( "create_directory" ) ! = "" ) | | ( cfg . getValueString ( "remove_directory" ) ! = "" ) ) | | ( ( cfg . getValueString ( "source_directory" ) ! = "" ) & & ( cfg . getValueString ( "destination_directory" ) ! = "" ) ) | | ( cfg . getValueString ( "get_file_link" ) ! = "" ) | | ( cfg . getValueString ( "get_o365_drive_id" ) ! = "" ) | | cfg . getValueBool ( "display_sync_status" ) ) {
2018-05-16 11:19:43 +02:00
performSyncOK = true ;
}
2018-04-24 04:14:36 +02:00
if ( ! performSyncOK ) {
writeln ( "\n--synchronize or --monitor missing from your command options or use --help for further assistance\n" ) ;
writeln ( "No OneDrive sync will be performed without either of these two arguments being present\n" ) ;
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2018-04-24 04:14:36 +02:00
return EXIT_FAILURE ;
}
2019-03-23 00:20:27 +01:00
// if --synchronize && --monitor passed in, exit & display help as these conflict with each other
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "synchronize" ) & & cfg . getValueBool ( "monitor" ) ) {
2019-03-23 00:20:27 +01:00
writeln ( "\nERROR: --synchronize and --monitor cannot be used together\n" ) ;
writeln ( "Refer to --help to determine which command option you should use.\n" ) ;
oneDrive . http . shutdown ( ) ;
return EXIT_FAILURE ;
}
2019-03-11 07:57:47 +01:00
// Initialize the item database
2016-08-04 23:35:58 +02:00
log . vlog ( "Opening the item database ..." ) ;
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "dry_run" ) ) {
2019-03-11 07:57:47 +01:00
// Load the items.sqlite3 file as the database
2019-08-24 09:18:58 +02:00
log . vdebug ( "Using database file: " , asNormalizedPath ( cfg . databaseFilePath ) ) ;
2019-03-11 07:57:47 +01:00
itemDb = new ItemDatabase ( cfg . databaseFilePath ) ;
} else {
// Load the items-dryrun.sqlite3 file as the database
2019-08-24 09:18:58 +02:00
log . vdebug ( "Using database file: " , asNormalizedPath ( cfg . databaseFilePathDryRun ) ) ;
2019-03-11 07:57:47 +01:00
itemDb = new ItemDatabase ( cfg . databaseFilePathDryRun ) ;
}
2018-03-14 05:43:40 +01:00
2016-08-04 23:35:58 +02:00
log . vlog ( "All operations will be performed in: " , syncDir ) ;
2018-12-23 01:15:10 +01:00
if ( ! exists ( syncDir ) ) {
log . vdebug ( "syncDir: Configured syncDir is missing. Creating: " , syncDir ) ;
2019-07-30 23:20:26 +02:00
try {
// Attempt to create the sync dir we have been configured with
mkdirRecurse ( syncDir ) ;
} catch ( std . file . FileException e ) {
// Creating the sync directory failed
log . error ( "ERROR: Unable to create local OneDrive syncDir - " , e . msg ) ;
oneDrive . http . shutdown ( ) ;
return EXIT_FAILURE ;
}
2018-12-23 01:15:10 +01:00
}
2015-09-27 18:47:41 +02:00
chdir ( syncDir ) ;
2018-03-14 05:43:40 +01:00
2018-05-16 11:19:43 +02:00
// Configure selective sync by parsing and getting a regex for skip_file config component
2017-03-24 22:30:03 +01:00
auto selectiveSync = new SelectiveSync ( ) ;
2019-01-05 19:43:44 +01:00
if ( exists ( cfg . syncListFilePath ) ) {
log . vdebug ( "Loading user configured sync_list file ..." ) ;
// list what will be synced
auto syncListFile = File ( cfg . syncListFilePath ) ;
auto range = syncListFile . byLine ( ) ;
foreach ( line ; range )
{
log . vdebug ( "sync_list: " , line ) ;
}
}
2017-03-24 22:30:03 +01:00
selectiveSync . load ( cfg . syncListFilePath ) ;
2018-05-16 11:19:43 +02:00
2019-03-14 20:55:05 +01:00
// Configure skip_dir & skip_file from config entries
log . vdebug ( "Configuring skip_dir ..." ) ;
2019-04-11 04:26:20 +02:00
log . vdebug ( "skip_dir: " , cfg . getValueString ( "skip_dir" ) ) ;
selectiveSync . setDirMask ( cfg . getValueString ( "skip_dir" ) ) ;
2019-03-14 20:55:05 +01:00
log . vdebug ( "Configuring skip_file ..." ) ;
2019-04-25 03:00:23 +02:00
// Validate skip_file to ensure that this does not contain an invalid configuration
// Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process.
foreach ( entry ; cfg . getValueString ( "skip_file" ) . split ( "|" ) ) {
if ( entry = = ".*" ) {
// invalid entry element detected
log . logAndNotify ( "ERROR: Invalid skip_file entry '.*' detected" ) ;
return EXIT_FAILURE ;
}
}
// valid entry
2019-04-11 04:26:20 +02:00
log . vdebug ( "skip_file: " , cfg . getValueString ( "skip_file" ) ) ;
selectiveSync . setFileMask ( cfg . getValueString ( "skip_file" ) ) ;
2019-04-25 03:00:23 +02:00
2019-03-11 07:57:47 +01:00
// Initialize the sync engine
2019-04-11 04:26:20 +02:00
auto sync = new SyncEngine ( cfg , oneDrive , itemDb , selectiveSync ) ;
2018-07-16 01:58:36 +02:00
try {
2018-12-04 01:15:44 +01:00
if ( ! initSyncEngine ( sync ) ) {
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2018-07-16 01:58:36 +02:00
return EXIT_FAILURE ;
2019-09-11 12:26:21 +02:00
} else {
if ( cfg . getValueString ( "get_file_link" ) = = "" ) {
// Print out that we are initializing the engine only if we are not grabbing the file link
log . logAndNotify ( "Initializing the Synchronization Engine ..." ) ;
}
2018-07-16 01:58:36 +02:00
}
2018-12-04 01:15:44 +01:00
} catch ( CurlException e ) {
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "monitor" ) ) {
2019-09-11 12:26:21 +02:00
log . log ( "\nNo Internet connection." ) ;
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2018-09-24 21:25:40 +02:00
return EXIT_FAILURE ;
}
2018-07-16 01:58:36 +02:00
}
2018-12-04 01:15:44 +01:00
2018-08-09 23:40:17 +02:00
// We should only set noRemoteDelete in an upload-only scenario
2019-04-11 04:26:20 +02:00
if ( ( cfg . getValueBool ( "upload_only" ) ) & & ( cfg . getValueBool ( "no_remote_delete" ) ) ) sync . setNoRemoteDelete ( ) ;
2018-08-09 23:40:17 +02:00
2018-11-23 20:26:30 +01:00
// Do we configure to disable the upload validation routine
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "disable_upload_validation" ) ) sync . setDisableUploadValidation ( ) ;
2018-11-23 20:26:30 +01:00
2018-06-17 00:27:43 +02:00
// Do we need to validate the syncDir to check for the presence of a '.nosync' file
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "check_nomount" ) ) {
2018-06-17 00:27:43 +02:00
// we were asked to check the mounts
if ( exists ( syncDir ~ "/.nosync" ) ) {
2018-12-05 20:19:00 +01:00
log . logAndNotify ( "ERROR: .nosync file found. Aborting synchronization process to safeguard data." ) ;
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2018-06-17 00:27:43 +02:00
return EXIT_FAILURE ;
}
}
2018-03-14 05:43:40 +01:00
// Do we need to create or remove a directory?
2019-04-11 04:26:20 +02:00
if ( ( cfg . getValueString ( "create_directory" ) ! = "" ) | | ( cfg . getValueString ( "remove_directory" ) ! = "" ) ) {
2018-03-14 05:43:40 +01:00
2019-04-11 04:26:20 +02:00
if ( cfg . getValueString ( "create_directory" ) ! = "" ) {
2018-03-14 05:43:40 +01:00
// create a directory on OneDrive
2019-04-11 04:26:20 +02:00
sync . createDirectoryNoSync ( cfg . getValueString ( "create_directory" ) ) ;
2018-03-14 05:43:40 +01:00
}
2019-04-11 04:26:20 +02:00
if ( cfg . getValueString ( "remove_directory" ) ! = "" ) {
2018-03-14 05:43:40 +01:00
// remove a directory on OneDrive
2019-04-11 04:26:20 +02:00
sync . deleteDirectoryNoSync ( cfg . getValueString ( "remove_directory" ) ) ;
2018-03-14 05:43:40 +01:00
}
}
// Are we renaming or moving a directory?
2019-04-11 04:26:20 +02:00
if ( ( cfg . getValueString ( "source_directory" ) ! = "" ) & & ( cfg . getValueString ( "destination_directory" ) ! = "" ) ) {
2018-03-14 05:43:40 +01:00
// We are renaming or moving a directory
2019-04-11 04:26:20 +02:00
sync . renameDirectoryNoSync ( cfg . getValueString ( "source_directory" ) , cfg . getValueString ( "destination_directory" ) ) ;
2018-03-14 05:43:40 +01:00
}
2018-12-04 00:59:23 +01:00
// Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library?
2019-08-02 10:43:31 +02:00
if ( cfg . getValueString ( "get_o365_drive_id" ) ! = "" ) {
2019-04-11 04:26:20 +02:00
sync . querySiteCollectionForDriveID ( cfg . getValueString ( "get_o365_drive_id" ) ) ;
2018-12-04 00:59:23 +01:00
}
2019-08-02 10:43:31 +02:00
// Are we obtaining the URL path for a synced file?
if ( cfg . getValueString ( "get_file_link" ) ! = "" ) {
sync . queryOneDriveForFileURL ( cfg . getValueString ( "get_file_link" ) , syncDir ) ;
}
2018-12-28 02:26:03 +01:00
// Are we displaying the sync status of the client?
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "display_sync_status" ) ) {
2018-12-28 02:26:03 +01:00
string remotePath = "/" ;
string localPath = "." ;
// Are we doing a single directory check?
2019-04-11 04:26:20 +02:00
if ( cfg . getValueString ( "single_directory" ) ! = "" ) {
2018-12-28 02:26:03 +01:00
// Need two different path strings here
2019-04-11 04:26:20 +02:00
remotePath = cfg . getValueString ( "single_directory" ) ;
localPath = cfg . getValueString ( "single_directory" ) ;
2018-12-28 02:26:03 +01:00
}
sync . queryDriveForChanges ( remotePath ) ;
}
2018-03-14 05:43:40 +01:00
// Are we performing a sync, resync or monitor operation?
2019-04-11 04:26:20 +02:00
if ( ( cfg . getValueBool ( "synchronize" ) ) | | ( cfg . getValueBool ( "resync" ) ) | | ( cfg . getValueBool ( "monitor" ) ) ) {
2015-09-01 20:45:34 +02:00
2019-04-11 04:26:20 +02:00
if ( ( cfg . getValueBool ( "synchronize" ) ) | | ( cfg . getValueBool ( "resync" ) ) ) {
2018-03-14 05:43:40 +01:00
if ( online ) {
// Check user entry for local path - the above chdir means we are already in ~/OneDrive/ thus singleDirectory is local to this path
2019-04-11 04:26:20 +02:00
if ( cfg . getValueString ( "single_directory" ) ! = "" ) {
2018-03-14 05:43:40 +01:00
// Does the directory we want to sync actually exist?
2019-04-11 04:26:20 +02:00
if ( ! exists ( cfg . getValueString ( "single_directory" ) ) ) {
2018-03-14 05:43:40 +01:00
// the requested directory does not exist ..
2018-12-05 20:19:00 +01:00
log . logAndNotify ( "ERROR: The requested local directory does not exist. Please check ~/OneDrive/ for requested path" ) ;
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2018-03-14 05:43:40 +01:00
return EXIT_FAILURE ;
}
}
2019-04-11 04:26:20 +02:00
performSync ( sync , cfg . getValueString ( "single_directory" ) , cfg . getValueBool ( "download_only" ) , cfg . getValueBool ( "local_first" ) , cfg . getValueBool ( "upload_only" ) , LOG_NORMAL , true ) ;
2015-09-20 21:21:51 +02:00
}
2018-03-14 05:43:40 +01:00
}
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "monitor" ) ) {
2018-12-05 20:19:00 +01:00
log . logAndNotify ( "Initializing monitor ..." ) ;
2019-04-11 04:26:20 +02:00
log . log ( "OneDrive monitor interval (seconds): " , cfg . getValueLong ( "monitor_interval" ) ) ;
2018-03-14 05:43:40 +01:00
Monitor m = new Monitor ( selectiveSync ) ;
m . onDirCreated = delegate ( string path ) {
log . vlog ( "[M] Directory created: " , path ) ;
try {
sync . scanForDifferences ( path ) ;
2019-02-21 09:51:15 +01:00
} catch ( CurlException e ) {
log . vlog ( "Offline, cannot create remote dir!" ) ;
2018-03-14 05:43:40 +01:00
} catch ( Exception e ) {
2018-12-06 08:05:52 +01:00
log . logAndNotify ( "Cannot create remote directory: " , e . msg ) ;
2018-03-14 05:43:40 +01:00
}
} ;
m . onFileChanged = delegate ( string path ) {
log . vlog ( "[M] File changed: " , path ) ;
try {
sync . scanForDifferences ( path ) ;
2019-02-21 09:51:15 +01:00
} catch ( CurlException e ) {
log . vlog ( "Offline, cannot upload changed item!" ) ;
2018-03-14 05:43:40 +01:00
} catch ( Exception e ) {
2018-12-06 08:05:52 +01:00
log . logAndNotify ( "Cannot upload file changes/creation: " , e . msg ) ;
2018-03-14 05:43:40 +01:00
}
} ;
m . onDelete = delegate ( string path ) {
log . vlog ( "[M] Item deleted: " , path ) ;
try {
sync . deleteByPath ( path ) ;
2019-02-21 09:51:15 +01:00
} catch ( CurlException e ) {
log . vlog ( "Offline, cannot delete item!" ) ;
2018-12-06 00:50:46 +01:00
} catch ( SyncException e ) {
if ( e . msg = = "The item to delete is not in the local database" ) {
log . vlog ( "Item cannot be deleted because not found in database" ) ;
} else {
2018-12-06 08:05:52 +01:00
log . logAndNotify ( "Cannot delete remote item: " , e . msg ) ;
2018-12-06 00:50:46 +01:00
}
2018-03-14 05:43:40 +01:00
} catch ( Exception e ) {
2018-12-06 08:05:52 +01:00
log . logAndNotify ( "Cannot delete remote item: " , e . msg ) ;
2018-03-14 05:43:40 +01:00
}
} ;
m . onMove = delegate ( string from , string to ) {
log . vlog ( "[M] Item moved: " , from , " -> " , to ) ;
try {
sync . uploadMoveItem ( from , to ) ;
2019-02-21 09:51:15 +01:00
} catch ( CurlException e ) {
log . vlog ( "Offline, cannot move item!" ) ;
2018-03-14 05:43:40 +01:00
} catch ( Exception e ) {
2018-12-06 08:05:52 +01:00
log . logAndNotify ( "Cannot move item:, " , e . msg ) ;
2018-03-14 05:43:40 +01:00
}
} ;
2018-12-28 03:19:20 +01:00
signal ( SIGINT , & exitHandler ) ;
signal ( SIGTERM , & exitHandler ) ;
2018-08-17 00:03:21 +02:00
// initialise the monitor class
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "download_only" ) ) m . init ( cfg , cfg . getValueLong ( "verbose" ) > 0 , cfg . getValueBool ( "skip_symlinks" ) , cfg . getValueBool ( "check_nosync" ) ) ;
2018-03-14 05:43:40 +01:00
// monitor loop
2019-04-11 04:26:20 +02:00
immutable auto checkInterval = dur ! "seconds" ( cfg . getValueLong ( "monitor_interval" ) ) ;
immutable auto logInterval = cfg . getValueLong ( "monitor_log_frequency" ) ;
immutable auto fullScanFrequency = cfg . getValueLong ( "monitor_fullscan_frequency" ) ;
2018-03-14 05:43:40 +01:00
auto lastCheckTime = MonoTime . currTime ( ) ;
2019-02-26 21:21:23 +01:00
auto logMonitorCounter = 0 ;
2019-04-01 20:21:02 +02:00
auto fullScanCounter = 0 ;
bool fullScanRequired = true ;
2018-03-14 05:43:40 +01:00
while ( true ) {
2019-04-11 04:26:20 +02:00
if ( ! cfg . getValueBool ( "download_only" ) ) m . update ( online ) ;
2018-03-14 05:43:40 +01:00
auto currTime = MonoTime . currTime ( ) ;
if ( currTime - lastCheckTime > checkInterval ) {
2019-04-01 20:21:02 +02:00
// log monitor output suppression
2019-02-26 21:21:23 +01:00
logMonitorCounter + = 1 ;
if ( logMonitorCounter > logInterval )
logMonitorCounter = 1 ;
2019-04-01 20:21:02 +02:00
// full scan of sync_dir
fullScanCounter + = 1 ;
if ( fullScanCounter > fullScanFrequency ) {
fullScanCounter = 1 ;
fullScanRequired = true ;
}
2018-12-26 08:11:06 +01:00
// log.logAndNotify("DEBUG trying to create checkpoint");
// auto res = itemdb.db_checkpoint();
// log.logAndNotify("Checkpoint return: ", res);
// itemdb.dump_open_statements();
2019-04-01 20:21:02 +02:00
2018-11-29 10:48:24 +01:00
try {
2018-12-04 01:15:44 +01:00
if ( ! initSyncEngine ( sync ) ) {
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2018-12-04 01:15:44 +01:00
return EXIT_FAILURE ;
}
2018-12-12 21:08:18 +01:00
try {
2019-04-11 04:26:20 +02:00
performSync ( sync , cfg . getValueString ( "single_directory" ) , cfg . getValueBool ( "download_only" ) , cfg . getValueBool ( "local_first" ) , cfg . getValueBool ( "upload_only" ) , ( logMonitorCounter = = logInterval ? MONITOR_LOG_QUIET : MONITOR_LOG_SILENT ) , fullScanRequired ) ;
if ( ! cfg . getValueBool ( "download_only" ) ) {
2018-12-12 21:08:18 +01:00
// discard all events that may have been generated by the sync
m . update ( false ) ;
}
} catch ( CurlException e ) {
// we already tried three times in the performSync routine
// if we still have problems, then the sync handle might have
// gone stale and we need to re-initialize the sync engine
2019-02-26 20:49:23 +01:00
log . log ( "Persistent connection errors, reinitializing connection" ) ;
2018-12-12 21:08:18 +01:00
sync . reset ( ) ;
2018-12-04 01:15:44 +01:00
}
2018-11-29 10:48:24 +01:00
} catch ( CurlException e ) {
2018-12-12 21:08:18 +01:00
log . log ( "Cannot initialize connection to OneDrive" ) ;
2017-12-31 17:07:21 +01:00
}
2018-08-07 21:35:18 +02:00
// performSync complete, set lastCheckTime to current time
2019-04-01 20:21:02 +02:00
fullScanRequired = false ;
2018-08-07 21:35:18 +02:00
lastCheckTime = MonoTime . currTime ( ) ;
2018-03-14 05:43:40 +01:00
GC . collect ( ) ;
2018-09-13 00:43:29 +02:00
}
Thread . sleep ( dur ! "msecs" ( 500 ) ) ;
2015-09-17 00:16:23 +02:00
}
}
2015-09-14 19:21:06 +02:00
}
2016-02-24 17:07:59 +01:00
2019-03-11 07:57:47 +01:00
// Workaround for segfault in std.net.curl.Curl.shutdown() on exit
2018-12-28 03:19:20 +01:00
oneDrive . http . shutdown ( ) ;
2019-03-11 07:57:47 +01:00
// Make sure the .wal file is incorporated into the main db before we exit
destroy ( itemDb ) ;
// --dry-run temp database cleanup
2019-04-11 04:26:20 +02:00
if ( cfg . getValueBool ( "dry_run" ) ) {
2019-03-11 07:57:47 +01:00
if ( exists ( cfg . databaseFilePathDryRun ) ) {
// remove the file
log . vdebug ( "Removing items-dryrun.sqlite3 as dry run operations complete" ) ;
safeRemove ( cfg . databaseFilePathDryRun ) ;
}
}
2016-08-04 23:35:58 +02:00
return EXIT_SUCCESS ;
2015-09-01 20:45:34 +02:00
}
2015-09-20 21:21:51 +02:00
2018-12-04 01:15:44 +01:00
// Initialise the sync engine prior to performing any sync operation.
// Returns true when the engine is ready for use; returns false when the
// caller should abort (invalid authorization token or a server-side 5xx
// failure during initialisation).
bool initSyncEngine(SyncEngine sync)
{
	try {
		sync.init();
	} catch (OneDriveException e) {
		immutable status = e.httpStatusCode;
		if (status == 400 || status == 401) {
			// Authorization is invalid
			log.log("\nAuthorization token invalid, use --logout to authorize the client again\n");
			return false;
		} else if (status >= 500) {
			// There was a HTTP 5xx Server Side Error, message already printed
			return false;
		}
		// Any other OneDriveException status code is swallowed here and the
		// engine is treated as initialised (same behavior as before).
	}
	return true;
}
2015-09-20 21:21:51 +02:00
// try to synchronize the folder three times
//
// Performs one sync pass, retrying on failure up to three times before
// rethrowing the last exception.
//
// Params:
//   sync             = initialised sync engine driving the transfer
//   singleDirectory  = when non-empty, restrict the sync to this path
//                      (interpreted both as the remote path and, relative to
//                      the current working directory, the local path)
//   downloadOnly     = do not upload local changes to OneDrive
//   localFirst       = scan/upload local changes before applying remote ones
//   uploadOnly       = do not download remote changes
//   logLevel         = LOG_NORMAL, MONITOR_LOG_QUIET or MONITOR_LOG_SILENT;
//                      higher values suppress more of the progress logging
//   fullScanRequired = when true, walk the entire local tree for changes
//                      (monitor mode skips this on most iterations, relying
//                      on inotify events instead)
void performSync(SyncEngine sync, string singleDirectory, bool downloadOnly, bool localFirst, bool uploadOnly, long logLevel, bool fullScanRequired)
{
	// retry counter; D default-initialises int to 0. Set to -1 as a
	// sentinel meaning "pass completed successfully, leave the loop".
	int count;
	string remotePath = "/";
	string localPath = ".";
	// Are we doing a single directory sync?
	if (singleDirectory != "") {
		// Need two different path strings here
		remotePath = singleDirectory;
		localPath = singleDirectory;
		// Set flag for singleDirectoryScope for change handling
		sync.setSingleDirectoryScope();
	}

	// Due to Microsoft Sharepoint 'enrichment' of files, we try to download the Microsoft modified file automatically
	// Set flag if we are in upload only state to handle this differently
	// See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details
	if (uploadOnly) sync.setUploadOnly();

	do {
		try {
			if (singleDirectory != "") {
				// we were requested to sync a single directory
				log.vlog("Syncing changes from this selected path: ", singleDirectory);
				if (uploadOnly) {
					// Upload Only of selected single directory
					if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path only - NOT syncing data changes from OneDrive ...");
					sync.scanForDifferences(localPath);
				} else {
					// No upload only
					if (localFirst) {
						// Local First
						if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path first before downloading changes from OneDrive ...");
						sync.scanForDifferences(localPath);
						sync.applyDifferencesSingleDirectory(remotePath);
					} else {
						// OneDrive First
						if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected OneDrive path ...");
						sync.applyDifferencesSingleDirectory(remotePath);
						// is this a download only request?
						if (!downloadOnly) {
							// process local changes
							sync.scanForDifferences(localPath);
							// ensure that the current remote state is updated locally
							sync.applyDifferencesSingleDirectory(remotePath);
						}
					}
				}
			} else {
				// no single directory sync
				if (uploadOnly) {
					// Upload Only of entire sync_dir
					if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path only - NOT syncing data changes from OneDrive ...");
					sync.scanForDifferences(localPath);
				} else {
					// No upload only
					if (localFirst) {
						// sync local files first before downloading from OneDrive
						if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path first before downloading changes from OneDrive ...");
						sync.scanForDifferences(localPath);
						sync.applyDifferences();
					} else {
						// sync from OneDrive first before uploading files to OneDrive
						// NOTE: this message is deliberately gated on SILENT (not QUIET)
						// so it still appears at the QUIET monitor log level
						if (logLevel < MONITOR_LOG_SILENT) log.log("Syncing changes from OneDrive ...");
						sync.applyDifferences();
						// Is a full scan of the entire sync_dir required?
						if (fullScanRequired) {
							// is this a download only request?
							if (!downloadOnly) {
								// process local changes walking the entire path checking for changes
								// in monitor mode all local changes are captured via inotify
								// thus scanning every 'monitor_interval' (default 45 seconds) for local changes is excessive and not required
								sync.scanForDifferences(localPath);
								// ensure that the current remote state is updated locally
								sync.applyDifferences();
							}
						}
					}
				}
			}
			// pass completed without throwing; signal the loop to terminate
			count = -1;
		} catch (Exception e) {
			// third consecutive failure: give up and propagate
			if (++count == 3) {
				log.log("Giving up on sync after three attempts: ", e.msg);
				throw e;
			} else
				log.log("Retry sync count: ", count, ": ", e.msg);
		}
	} while (count != -1);
}
2018-12-05 20:19:00 +01:00
2018-12-28 03:19:20 +01:00
// getting around the @nogc problem
// https://p0nce.github.io/d-idioms/#Bypassing-@nogc
//
// Casts a function pointer or delegate so the type system treats it as
// @nogc, letting GC-using code (such as the logging and cleanup performed
// in exitHandler) be invoked from a @nogc context. This only bypasses the
// compile-time check; the callee may still allocate at runtime.
auto assumeNoGC(T)(T t) if (isFunctionPointer!T || isDelegate!T)
{
	// take T's existing attribute set and add @nogc, then cast the
	// callable to that re-attributed type (linkage preserved)
	enum attrs = functionAttributes!T | FunctionAttribute.nogc;
	return cast(SetFunctionAttributes!(T, functionLinkage!T, attrs)) t;
}
// Signal handler installed for SIGINT and SIGTERM in monitor mode.
// Flushes persistent state (db .wal checkpoint via destroy(itemDb)) and
// shuts down the HTTP handle before exiting. The extern(C) nothrow @nogc
// signature is required for core.stdc.signal handlers; assumeNoGC is used
// to call GC-using cleanup code despite the @nogc constraint.
// NOTE(review): log/destroy/shutdown are not async-signal-safe in general —
// appears acceptable here because the process exits immediately afterwards,
// but worth confirming.
extern(C) nothrow @nogc @system void exitHandler(int value) {
	try {
		assumeNoGC(() {
			log.log("Got termination signal, shutting down db connection");
			// make sure the .wal file is incorporated into the main db
			destroy(itemDb);
			// workaround for segfault in std.net.curl.Curl.shutdown() on exit
			oneDrive.http.shutdown();
		})();
	} catch (Exception e) {}
	// terminate with success status regardless of which signal arrived
	exit(0);
}
2018-12-28 10:01:50 +01:00