Merge branch 'master' into norbert/config-options

This commit is contained in:
Norbert Preining 2019-03-17 11:47:48 +09:00
commit 362bf35688
15 changed files with 894 additions and 379 deletions

View file

@ -16,7 +16,12 @@ DEBIAN_MIRROR="http://ftp.us.debian.org/debian"
HOST_DEPENDENCIES="qemu-user-static binfmt-support debootstrap sbuild wget"
# Debian package dependencies for the chrooted environment
GUEST_DEPENDENCIES="build-essential libcurl4-openssl-dev libsqlite3-dev libgnutls-openssl27 git"
GUEST_DEPENDENCIES="build-essential libcurl4-openssl-dev libsqlite3-dev libgnutls-openssl27 git libxml2"
# LDC Version
# Different versions due to https://github.com/ldc-developers/ldc/issues/3027
LDC_VERSION_ARMHF=1.13.0
LDC_VERSION_ARM64=1.14.0
function setup_arm32_chroot {
# Update apt repository details
@ -27,10 +32,10 @@ function setup_arm32_chroot {
# Host dependencies
sudo apt-get install -qq -y ${HOST_DEPENDENCIES}
# Download LDC compiler
wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-armhf.tar.xz
tar -xf ldc2-1.11.0-linux-armhf.tar.xz
mv ldc2-1.11.0-linux-armhf dlang-${ARCH}
rm -rf ldc2-1.11.0-linux-armhf.tar.xz
wget https://github.com/ldc-developers/ldc/releases/download/v${LDC_VERSION_ARMHF}/ldc2-${LDC_VERSION_ARMHF}-linux-armhf.tar.xz
tar -xf ldc2-${LDC_VERSION_ARMHF}-linux-armhf.tar.xz
mv ldc2-${LDC_VERSION_ARMHF}-linux-armhf dlang-${ARCH}
rm -rf ldc2-${LDC_VERSION_ARMHF}-linux-armhf.tar.xz
# Create chrooted environment
sudo mkdir ${CHROOT_DIR}
sudo debootstrap --foreign --no-check-gpg --variant=buildd --arch=${CHROOT_ARCH} ${VERSION} ${CHROOT_DIR} ${DEBIAN_MIRROR}
@ -49,10 +54,10 @@ function setup_arm64_chroot {
# Host dependencies
sudo apt-get install -qq -y ${HOST_DEPENDENCIES}
# Download LDC compiler
wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-aarch64.tar.xz
tar -xf ldc2-1.11.0-linux-aarch64.tar.xz
mv ldc2-1.11.0-linux-aarch64 dlang-${ARCH}
rm -rf ldc2-1.11.0-linux-aarch64.tar.xz
wget https://github.com/ldc-developers/ldc/releases/download/v${LDC_VERSION_ARM64}/ldc2-${LDC_VERSION_ARM64}-linux-aarch64.tar.xz
tar -xf ldc2-${LDC_VERSION_ARM64}-linux-aarch64.tar.xz
mv ldc2-${LDC_VERSION_ARM64}-linux-aarch64 dlang-${ARCH}
rm -rf ldc2-${LDC_VERSION_ARM64}-linux-aarch64.tar.xz
# ARM64 qemu-debootstrap needs to be 1.0.78, Trusty is 1.0.59
#sudo echo "deb http://archive.ubuntu.com/ubuntu xenial main restricted universe multiverse" >> /etc/apt/sources.list

View file

@ -3,6 +3,37 @@
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## 2.2.6 - 2019-03-12
### Fixed
* Resolve application crash when unable to delete remote folders when business retention policies are enabled
* Resolve deprecation warning: loop index implicitly converted from size_t to int
* Resolve warnings regarding 'bashisms'
* Resolve handling of notification failure if dbus server has not started or is not available
* Resolve handling of response JSON to ensure that 'id' key element is always checked for
* Resolve excessive & needless logging in monitor mode
* Resolve compiling with LDC on Alpine as musl lacks some standard interfaces
* Resolve notification issues when offline and cannot act on changes
* Resolve Docker entrypoint.sh to accept command line arguments
* Resolve to create a new upload session on reinit
* Resolve where on OneDrive query failure, default root and drive id is used if a response is not returned
* Resolve Key not found: nextExpectedRanges when attempting session uploads and incorrect response is returned
* Resolve application crash when re-using an authentication URI twice after previous --logout
* Resolve creating a folder on a shared personal folder appears successful but returns a JSON error
* Resolve to treat mv of new file as upload of mv target
* Update Debian i386 build dependencies
* Update handling of --get-O365-drive-id to print out all 'site names' that match the explicit search entry rather than just the last match
* Update Docker readme & documentation
* Update handling of validating local file permissions for new file uploads
### Added
* Add support for install & uninstall on RHEL / CentOS 6.x
* Add support for when notifications are enabled, display the number of OneDrive changes to process if any are found
* Add 'config' option 'min_notif_changes' for minimum number of changes to notify on, default = 5
* Add additional Docker container builds utilising a smaller OS footprint
* Add configurable interval of logging in monitor mode
* Implement new CLI option --skip-dot-files to skip .files and .folders if option is used
* Implement new CLI option --check-for-nosync to ignore folder when special file (.nosync) present
* Implement new CLI option --dry-run
## 2.2.5 - 2019-01-16
### Fixed
* Update handling of HTTP 412 - Precondition Failed errors

125
README.md
View file

@ -122,8 +122,9 @@ sudo pacman -S libnotify
```text
sudo apt-get install libcurl4-openssl-dev
sudo apt-get install libsqlite3-dev
wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-armhf.tar.xz
tar -xvf ldc2-1.11.0-linux-armhf.tar.xz
sudo apt-get install libxml2
wget https://github.com/ldc-developers/ldc/releases/download/v1.13.0/ldc2-1.13.0-linux-armhf.tar.xz
tar -xvf ldc2-1.13.0-linux-armhf.tar.xz
```
For notifications the following is necessary:
```text
@ -134,8 +135,9 @@ sudo apt install libnotify-dev
```text
sudo apt-get install libcurl4-openssl-dev
sudo apt-get install libsqlite3-dev
wget https://github.com/ldc-developers/ldc/releases/download/v1.11.0/ldc2-1.11.0-linux-aarch64.tar.xz
tar -xvf ldc2-1.11.0-linux-aarch64.tar.xz
sudo apt-get install libxml2
wget https://github.com/ldc-developers/ldc/releases/download/v1.14.0/ldc2-1.14.0-linux-aarch64.tar.xz
tar -xvf ldc2-1.14.0-linux-aarch64.tar.xz
```
For notifications the following is necessary:
```text
@ -206,7 +208,7 @@ sudo make install
```text
git clone https://github.com/abraunegg/onedrive.git
cd onedrive
make DC=~/ldc2-1.11.0-linux-armhf/bin/ldmd2
make DC=~/ldc2-1.13.0-linux-armhf/bin/ldmd2
sudo make install
```
@ -214,7 +216,7 @@ sudo make install
```text
git clone https://github.com/abraunegg/onedrive.git
cd onedrive
make DC=~/ldc2-1.11.0-linux-aarch64/bin/ldmd2
make DC=~/ldc2-1.14.0-linux-aarch64/bin/ldmd2
sudo make install
```
@ -253,6 +255,49 @@ If your system utilises curl >= 7.62.0 you may need to use `--force-http-1.1` in
After installing the application you must run it at least once from the terminal to authorize it.
You will be asked to open a specific link using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving the permission, you will be redirected to a blank page. Copy the URI of the blank page into the application.
```text
[user@hostname ~]$ onedrive
Authorize this app visiting:
https://.....
Enter the response uri:
```
### Testing your configuration
You are able to test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded or removed, however the application will display what 'would' have occurred. For example:
```text
onedrive --synchronize --verbose --dry-run
DRY-RUN Configured. Output below shows what 'would' have occurred.
Loading config ...
Using Config Dir: /home/user/.config/onedrive
Initializing the OneDrive API ...
Opening the item database ...
All operations will be performed in: /home/user/OneDrive
Initializing the Synchronization Engine ...
Account Type: personal
Default Drive ID: <redacted>
Default Root ID: <redacted>
Remaining Free Space: 5368709120
Fetching details for OneDrive Root
OneDrive Root exists in the database
Syncing changes from OneDrive ...
Applying changes of Path ID: <redacted>
Uploading differences of .
Processing root
The directory has not changed
Uploading new items of .
OneDrive Client requested to create remote path: ./newdir
The requested directory to create was not found on OneDrive - creating remote directory: ./newdir
Successfully created the remote directory ./newdir on OneDrive
Uploading new file ./newdir/newfile.txt ... done.
Remaining free space: 5368709076
Applying changes of Path ID: <redacted>
```
**Note:** `--dry-run` can only be used with `--synchronize`. It cannot be used with `--monitor` and will be ignored.
### Show your configuration
To validate your configuration the application will use, utilise the following:
@ -264,6 +309,7 @@ This will display all the pertinent runtime interpretation of the options and co
Config path = /home/alex/.config/onedrive
Config file found in config path = false
Config option 'sync_dir' = /home/alex/OneDrive
Config option 'skip_dir' =
Config option 'skip_file' = ~*
Config option 'skip_dotfiles' = false
Config option 'skip_symlinks' = false
@ -383,6 +429,18 @@ If you want to just delete the application key, but keep the items database:
rm -f ~/.config/onedrive/refresh_token
```
### Handling a OneDrive account password change
If you change your OneDrive account password, the client will no longer be authorised to sync, and will generate the following error:
```text
ERROR: OneDrive returned a 'HTTP 401 Unauthorized' - Cannot Initialize Sync Engine
```
To re-authorise the client, follow the steps below:
1. If running the client as a service (init.d or systemd), stop the service
2. Run the command `onedrive --logout`. This will clean up the previous authorisation, and will prompt you to re-authorise as per initial configuration.
3. Restart the client if running as a service or perform a manual sync
The application will now sync with OneDrive with the new credentials.
## Additional Configuration
Additional configuration is optional.
If you want to change the defaults, you can copy and edit the included config file into your `~/.config/onedrive` directory:
@ -409,11 +467,23 @@ Proceed with caution here when changing the default sync dir from ~/OneDrive to
The issue here is around how the client stores the sync_dir path in the database. If the config file is missing, or you don't use the `--syncdir` parameter - what will happen is the client will default back to `~/OneDrive` and 'think' that either all your data has been deleted - thus delete the content on OneDrive, or will start downloading all data from OneDrive into the default location.
### skip_file
Example: `skip_file = "~*|Desktop|Documents/OneNote*|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/config.xlaunch|Documents/WindowsPowerShell"`
### skip_dir
Example: `skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell"`
Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
**Note:** after changing `skip_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
### skip_file
Example: `skip_file = "~*|Documents/OneNote*|Documents/config.xlaunch|myfile.ext"`
Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
Files can be skipped in the following fashion:
* Specify a wildcard, eg: '*.txt' (skip all txt files)
* Explicitly specify the filename and its full path relative to your sync_dir, eg: 'path/to/file/filename.ext'
* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext'
**Note:** after changing `skip_file`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
**Note:** Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process.
@ -456,6 +526,17 @@ Year 2
```
**Note:** after changing the sync_list, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync`
### Skipping directories from syncing
There are several mechanisms available to 'skip' a directory from scanning:
* Utilise 'skip_dir'
* Utilise 'sync_list'
One further method is to add a '.nosync' empty file to any folder. When this file is present, adding `--check-for-nosync` to your command line will now make the sync process skip any folder where the '.nosync' file is present.
To make this a permanent change to always skip folders when a '.nosync' empty file is present, add the following to your config file:
Example: `check_nosync = "true"`
### Shared folders
Folders shared with you can be synced by adding them to your OneDrive. To do that open your OneDrive, go to the Shared files list, right click on the folder you want to sync and then click on "Add to my OneDrive".
@ -553,6 +634,30 @@ systemctl --user start onedrive-work
```
Repeat these steps for each OneDrive account that you wish to use.
### Access OneDrive service through a proxy
If you have a requirement to run the client through a proxy, there are a couple of ways to achieve this:
1. Set proxy configuration in `~/.bashrc` to allow the authorization process and when utilizing `--synchronize`
2. If running as a systemd service, edit the applicable systemd service file to include the proxy configuration information:
```text
[Unit]
Description=OneDrive Free Client
Documentation=https://github.com/abraunegg/onedrive
After=network-online.target
Wants=network-online.target
[Service]
Environment="HTTP_PROXY=http://ip.address:port"
Environment="HTTPS_PROXY=http://ip.address:port"
ExecStart=/usr/local/bin/onedrive --monitor
Restart=on-failure
RestartSec=3
[Install]
WantedBy=default.target
```
**Note:** After modifying the service files, you will need to run `sudo systemctl daemon-reload` to ensure the service file changes are picked up. A restart of the OneDrive service will also be required to pick up the change to send the traffic via the proxy server
## Extra
### Reporting issues
@ -586,6 +691,8 @@ Options:
--check-for-nomount
Check for the presence of .nosync in the syncdir root. If found, do not perform sync.
--check-for-nosync
Check for the presence of .nosync in each directory. If found, skip directory from sync.
--confdir ARG
Set the directory used to store the configuration files
--create-directory ARG
@ -604,6 +711,8 @@ Options:
Only download remote changes
--disable-upload-validation
Disable upload validation when uploading to OneDrive
--dry-run
Perform a trial sync with no changes made
--enable-logging
Enable client activity to a separate log file
--force-http-1.1

View file

@ -1,4 +1,4 @@
.TH ONEDRIVE "1" "January 2019" "2.2.5" "User Commands"
.TH ONEDRIVE "1" "March 2019" "2.2.6" "User Commands"
.SH NAME
onedrive \- folder synchronization with OneDrive
.SH SYNOPSIS
@ -21,6 +21,9 @@ Without any option given, no sync is done and the program exits.
\fB\-\-check\-for\-nomount\fP
Check for the presence of .nosync in the syncdir root. If found, do not perform sync.
.TP
\fB\-\-check\-for\-nosync\fP
Check for the presence of .nosync in each directory. If found, skip directory from sync.
.TP
\fB\-\-confdir\fP ARG
Set the directory used to store the configuration files
.TP
@ -48,6 +51,9 @@ Display the sync status of the client \- no sync will be performed.
\fB\-d \-\-download\-only\fP
Only download remote changes
.TP
\fB\-\-dry\-run\fP
Perform a trial sync with no changes made. Can ONLY be used with --synchronize. Will be ignored for --monitor
.TP
\fB\-\-enable\-logging\fP
Enable client activity to a separate log file
.TP

View file

@ -1,5 +1,5 @@
pkgname=onedrive
pkgver=2.2.5
pkgver=2.2.6
pkgrel=1 #patch-level (Increment this when patch is applied)
pkgdesc="A free OneDrive Client for Linux. This is a fork of the https://github.com/skilion/onedrive repository"
license=("unknown")

View file

@ -6,7 +6,7 @@
%endif
Name: onedrive
Version: 2.2.5
Version: 2.2.6
Release: 1%{?dist}
Summary: Microsoft OneDrive Client
Group: System Environment/Network

View file

@ -9,6 +9,7 @@ final class Config
public string refreshTokenFilePath;
public string deltaLinkFilePath;
public string databaseFilePath;
public string databaseFilePathDryRun;
public string uploadStateFilePath;
public string syncListFilePath;
public string homePath;
@ -28,10 +29,12 @@ final class Config
stringValues["single_directory"] = "";
stringValues["sync_dir"] = "~/OneDrive";
stringValues["skip_file"] = "~*";
stringValues["skip_dir"] = "";
stringValues["log_dir"] = "/var/log/onedrive/";
stringValues["drive_id"] = "";
boolValues["upload_only"] = false;
boolValues["check_for_nomount"] = false;
boolValues["check_nomount"] = false;
boolValues["check_nosync"] = false;
boolValues["download_only"] = false;
boolValues["disable_notifications"] = false;
boolValues["disable_upload_validation"] = false;
@ -41,7 +44,8 @@ final class Config
boolValues["no_remote_delete"] = false;
boolValues["skip_symlinks"] = false;
boolValues["debug_https"] = false;
boolValues["skip_dotfiles"] = false;
boolValues["skip_dotfiles"] = false;
boolValues["dry_run"] = false;
longValues["verbose"] = 0;
longValues["monitor_interval"] = 45,
longValues["min_notif_changes"] = 5;
@ -109,6 +113,7 @@ final class Config
refreshTokenFilePath = configDirName ~ "/refresh_token";
deltaLinkFilePath = configDirName ~ "/delta_link";
databaseFilePath = configDirName ~ "/items.sqlite3";
databaseFilePathDryRun = configDirName ~ "/items-dryrun.sqlite3";
uploadStateFilePath = configDirName ~ "/resume_upload";
userConfigFilePath = configDirName ~ "/config";
syncListFilePath = configDirName ~ "/sync_list";
@ -157,9 +162,12 @@ final class Config
args,
std.getopt.config.bundling,
std.getopt.config.caseSensitive,
"check-for-nomount",
"check-for-nomount",
"Check for the presence of .nosync in the syncdir root. If found, do not perform sync.",
&boolValues["check_for_nomount"],
&boolValues["check_nomount"],
"check-for-nosync",
"Check for the presence of .nosync in each directory. If found, skip directory from sync.",
&boolValues["check_nosync"],
"create-directory",
"Create a directory on OneDrive - no sync will be performed.",
&stringValues["create_directory"],
@ -184,6 +192,9 @@ final class Config
"download-only|d",
"Only download remote changes",
&boolValues["download_only"],
"dry-run",
"Perform a trial sync with no changes made",
&boolValues["dry_run"],
"enable-logging",
"Enable client activity to a separate log file",
&boolValues["enable_logging"],

View file

@ -64,6 +64,16 @@ int main(string[] args)
// Are we able to reach the OneDrive Service
bool online = false;
// dry-run database setup
if (cfg.getValueBool("dry_run")) {
// Make a copy of the original items.sqlite3 for use as the dry run copy if it exists
if (exists(cfg.databaseFilePath)) {
// copy the file
log.vdebug("Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations");
copy(cfg.databaseFilePath,cfg.databaseFilePathDryRun);
}
}
// sync_dir environment handling to handle ~ expansion properly
string syncDir;
@ -107,18 +117,24 @@ int main(string[] args)
// upgrades
if (exists(configDirName ~ "/items.db")) {
remove(configDirName ~ "/items.db");
if (!cfg.getValueBool("dry_run")) {
safeRemove(configDirName ~ "/items.db");
}
log.logAndNotify("Database schema changed, resync needed");
cfg.setValueBool("resync", true);
}
if (cfg.getValueBool("resync") || cfg.getValueBool("logout")) {
log.vlog("Deleting the saved status ...");
safeRemove(cfg.databaseFilePath);
safeRemove(cfg.deltaLinkFilePath);
safeRemove(cfg.uploadStateFilePath);
if (!cfg.getValueBool("dry_run")) {
safeRemove(cfg.databaseFilePath);
safeRemove(cfg.deltaLinkFilePath);
safeRemove(cfg.uploadStateFilePath);
}
if (cfg.getValueBool("logout")) {
safeRemove(cfg.refreshTokenFilePath);
if (!cfg.getValueBool("dry_run")) {
safeRemove(cfg.refreshTokenFilePath);
}
}
}
@ -139,7 +155,9 @@ int main(string[] args)
}
// Config Options
writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync"));
writeln("Config option 'sync_dir' = ", syncDir);
writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir"));
writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file"));
writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles"));
writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks"));
@ -211,9 +229,17 @@ int main(string[] args)
return EXIT_FAILURE;
}
// initialize system
// Initialize the item database
log.vlog("Opening the item database ...");
itemDb = new ItemDatabase(cfg.databaseFilePath);
if (!cfg.getValueBool("dry_run")) {
// Load the items.sqlite3 file as the database
log.vdebug("Using database file: ", cfg.databaseFilePath);
itemDb = new ItemDatabase(cfg.databaseFilePath);
} else {
// Load the items-dryrun.sqlite3 file as the database
log.vdebug("Using database file: ", cfg.databaseFilePathDryRun);
itemDb = new ItemDatabase(cfg.databaseFilePathDryRun);
}
log.vlog("All operations will be performed in: ", syncDir);
if (!exists(syncDir)) {
@ -235,9 +261,16 @@ int main(string[] args)
}
}
selectiveSync.load(cfg.syncListFilePath);
selectiveSync.setMask(cfg.getValueString("skip_file"));
// Initialise the sync engine
// Configure skip_dir & skip_file from config entries
log.vdebug("Configuring skip_dir ...");
log.vdebug("skip_dir: ", cfg.getValueString("skip_dir"));
selectiveSync.setDirMask(cfg.getValueString("skip_dir"));
log.vdebug("Configuring skip_file ...");
log.vdebug("skip_file: ", cfg.getValueString("skip_file"));
selectiveSync.setFileMask(cfg.getValueString("skip_file"));
// Initialize the sync engine
log.logAndNotify("Initializing the Synchronization Engine ...");
auto sync = new SyncEngine(cfg, oneDrive, itemDb, selectiveSync);
@ -280,7 +313,7 @@ int main(string[] args)
if (cfg.getValueString("remove_directory") != "") {
// remove a directory on OneDrive
sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory"));
sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory"));
}
}
@ -384,7 +417,7 @@ int main(string[] args)
signal(SIGTERM, &exitHandler);
// initialise the monitor class
if (!cfg.getValueBool("download_only")) m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks"));
if (!cfg.getValueBool("download_only")) m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks"), cfg.getValueBool("check_nosync"));
// monitor loop
immutable auto checkInterval = dur!"seconds"(cfg.getValueLong("monitor_interval"));
immutable auto logInterval = cfg.getValueLong("monitor_log_frequency");
@ -431,8 +464,21 @@ int main(string[] args)
}
}
// workaround for segfault in std.net.curl.Curl.shutdown() on exit
// Workaround for segfault in std.net.curl.Curl.shutdown() on exit
oneDrive.http.shutdown();
// Make sure the .wal file is incorporated into the main db before we exit
destroy(itemDb);
// --dry-run temp database cleanup
if (cfg.getValueBool("dry_run")) {
if (exists(cfg.databaseFilePathDryRun)) {
// remove the file
log.vdebug("Removing items-dryrun.sqlite3 as dry run operations complete");
safeRemove(cfg.databaseFilePathDryRun);
}
}
return EXIT_SUCCESS;
}

View file

@ -32,6 +32,8 @@ final class Monitor
private void[] buffer;
// skip symbolic links
bool skip_symlinks;
// check for .nosync if enabled
bool check_nosync;
private SelectiveSync selectiveSync;
@ -46,10 +48,11 @@ final class Monitor
this.selectiveSync = selectiveSync;
}
void init(Config cfg, bool verbose, bool skip_symlinks)
void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync)
{
this.verbose = verbose;
this.skip_symlinks = skip_symlinks;
this.check_nosync = check_nosync;
assert(onDirCreated && onFileChanged && onDelete && onMove);
fd = inotify_init();
@ -74,7 +77,10 @@ final class Monitor
// skip filtered items
if (dirname != ".") {
if (selectiveSync.isNameExcluded(baseName(dirname))) {
if (selectiveSync.isDirNameExcluded(strip(dirname,"./"))) {
return;
}
if (selectiveSync.isFileNameExcluded(baseName(dirname))) {
return;
}
if (selectiveSync.isPathExcluded(buildNormalizedPath(dirname))) {
@ -91,6 +97,14 @@ final class Monitor
}
}
// Do we need to check for .nosync? Only if check_nosync is true
if (check_nosync) {
if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) {
log.vlog("Skipping watching path - .nosync found & --check-for-nosync enabled: ", buildNormalizedPath(dirname));
return;
}
}
add(dirname);
foreach(DirEntry entry; dirEntries(dirname, SpanMode.shallow, false)) {
if (entry.isDir) {
@ -178,7 +192,10 @@ final class Monitor
// skip filtered items
path = getPath(event);
if (selectiveSync.isNameExcluded(baseName(path))) {
if (selectiveSync.isDirNameExcluded(strip(path,"./"))) {
goto skip;
}
if (selectiveSync.isFileNameExcluded(strip(path,"./"))) {
goto skip;
}
if (selectiveSync.isPathExcluded(path)) {

View file

@ -8,6 +8,8 @@ import progress;
import config;
static import log;
shared bool debugResponse = false;
private bool dryRun = false;
private bool simulateNoRefreshTokenFile = false;
private immutable {
// Client Identifier
@ -104,6 +106,14 @@ final class OneDriveApi
// Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1
http.handle.set(CurlOption.http_version,2);
}
// Do we set the dryRun handlers?
if (cfg.getValueBool("dry_run")) {
.dryRun = true;
if (cfg.getValueBool("logout")) {
.simulateNoRefreshTokenFile = true;
}
}
}
bool init()
@ -112,17 +122,33 @@ final class OneDriveApi
driveId = cfg.getValueString("drive_id");
if (driveId.length) {
driveUrl = driveByIdUrl ~ driveId;
itemByIdUrl = driveUrl ~ "/items";
itemByPathUrl = driveUrl ~ "/root:/";
itemByIdUrl = driveUrl ~ "/items";
itemByPathUrl = driveUrl ~ "/root:/";
}
} catch (Exception e) {}
try {
refreshToken = readText(cfg.refreshTokenFilePath);
} catch (FileException e) {
return authorize();
if (!.dryRun) {
// original code
try {
refreshToken = readText(cfg.refreshTokenFilePath);
} catch (FileException e) {
return authorize();
}
return true;
} else {
// --dry-run
if (!.simulateNoRefreshTokenFile) {
try {
refreshToken = readText(cfg.refreshTokenFilePath);
} catch (FileException e) {
return authorize();
}
return true;
} else {
// --dry-run & --logout
return authorize();
}
}
return true;
}
bool authorize()
@ -358,11 +384,19 @@ final class OneDriveApi
private void acquireToken(const(char)[] postData)
{
JSONValue response = post(tokenUrl, postData);
accessToken = "bearer " ~ response["access_token"].str();
refreshToken = response["refresh_token"].str();
accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer());
std.file.write(cfg.refreshTokenFilePath, refreshToken);
if (printAccessToken) writeln("New access token: ", accessToken);
if ("access_token" in response){
accessToken = "bearer " ~ response["access_token"].str();
refreshToken = response["refresh_token"].str();
accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer());
if (!.dryRun) {
std.file.write(cfg.refreshTokenFilePath, refreshToken);
}
if (printAccessToken) writeln("New access token: ", accessToken);
} else {
log.error("\nInvalid authentication response from OneDrive. Please check the response uri\n");
// re-authorize
authorize();
}
}
private void checkAccessTokenExpired()
@ -692,6 +726,12 @@ final class OneDriveApi
{
switch(http.statusLine.code)
{
// 400 - Bad Request
case 400:
// Bad Request .. how should we act?
log.vlog("OneDrive returned a 'HTTP 400 - Bad Request' - gracefully handling error");
break;
// 412 - Precondition Failed
case 412:
log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error");

View file

@ -10,6 +10,7 @@ final class SelectiveSync
{
private string[] paths;
private Regex!char mask;
private Regex!char dirmask;
void load(string filepath)
{
@ -22,20 +23,46 @@ final class SelectiveSync
}
}
void setMask(const(char)[] mask)
void setFileMask(const(char)[] mask)
{
this.mask = wild2regex(mask);
}
// config file skip_file parameter
bool isNameExcluded(string name)
void setDirMask(const(char)[] dirmask)
{
// Does the file match skip_file config entry?
// Returns true if the file matches a skip_file config entry
// Returns false if no match
return !name.matchFirst(mask).empty;
this.dirmask = wild2regex(dirmask);
}
// config file skip_dir parameter
bool isDirNameExcluded(string name)
{
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
return !name.matchFirst(dirmask).empty;
}
// config file skip_file parameter
bool isFileNameExcluded(string name)
{
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
// Try full path match first
if (!name.matchFirst(mask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(mask).empty) {
return true;
}
}
// no match
return false;
}
// config sync_list file handling
bool isPathExcluded(string path)
{

File diff suppressed because it is too large Load diff

View file

@ -111,8 +111,16 @@ struct UploadSession
JSONValue upload()
{
long offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long;
long fileSize = getSize(session["localPath"].str);
long offset;
long fileSize;
if ("nextExpectedRanges" in session){
offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long;
}
if ("localPath" in session){
fileSize = getSize(session["localPath"].str);
}
// Upload Progress Bar
size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1;

View file

@ -14,7 +14,7 @@ import std.uri;
import qxor;
static import log;
private string deviceName;
shared string deviceName;
static this()
{
@ -136,8 +136,9 @@ bool testNetwork()
bool readLocalFile(string path)
{
try {
// attempt to read the first 10MB of the file
read(path,10000000);
// attempt to read up to the first 1 byte of the file
// validates we can 'read' the file based on file permissions
read(path,1);
} catch (std.file.FileException e) {
// unable to read the new local file
log.log("Skipping uploading this file as it cannot be read (file permissions or file corruption): ", path);