Merge branch 'master' into norbert/config-options

Norbert Preining 2019-03-25 09:41:27 +09:00
commit a433b44b56
11 changed files with 151 additions and 58 deletions


@@ -3,6 +3,21 @@
 The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+## 2.3.0 - 2019-03-25
+### Fixed
+* Resolve application crash if no 'size' value is returned when uploading a new file
+* Resolve application crash if a 5xx error is returned when uploading a new file
+* Resolve not 'refreshing' version file when rebuilding
+* Resolve unexpected application processing by preventing use of --synchronize & --monitor together
+* Resolve high CPU usage when performing DB reads
+* Update error logging around directory case-insensitive match
+* Update Travis CI and ARM dependencies for LDC 1.14.0
+* Update Makefile due to build failure if building from release archive file
+* Update logging as to why a OneDrive object was skipped
+### Added
+* Implement config option 'skip_dir'
 ## 2.2.6 - 2019-03-12
 ### Fixed
 * Resolve application crash when unable to delete remote folders when business retention policies are enabled
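As context for the new 'skip_dir' entry listed under Added: it lives in the client's config file next to the existing 'skip_file' option. A minimal sketch, with purely illustrative directory names, assuming 'skip_dir' takes the same quoted, '|'-separated pattern form as 'skip_file':

```text
# ~/.config/onedrive/config  (values are illustrative)
skip_dir = "Temp|Junk/Drafts"
skip_file = "~*"
```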


@@ -1,7 +1,8 @@
 DC ?= dmd
+RELEASEVER = v2.3.0
 pkgconfig := $(shell if [ $(PKGCONFIG) ] && [ "$(PKGCONFIG)" != 0 ] ; then echo 1 ; else echo "" ; fi)
 notifications := $(shell if [ $(NOTIFICATIONS) ] && [ "$(NOTIFICATIONS)" != 0 ] ; then echo 1 ; else echo "" ; fi)
+gitversion := $(shell if [ -f .git/HEAD ] ; then echo 1 ; else echo "" ; fi)
 ifeq ($(pkgconfig),1)
 LIBS = $(shell pkg-config --libs sqlite3 libcurl)
@@ -26,6 +27,14 @@ ifeq ($(notdir $(DC)),ldc2)
 	NOTIF_VERSIONS := $(addprefix -d,$(NOTIF_VERSIONS))
 endif
+ifeq ($(DEBUG),1)
+ifeq ($(notdir $(DC)),ldc2)
+	DFLAGS += -d-debug -gc
+else
+	DFLAGS += -debug -gs
+endif
+endif
 DFLAGS += -w -g -ofonedrive -O $(NOTIF_VERSIONS) $(LIBS) -J.
 PREFIX ?= /usr/local
@@ -126,5 +135,12 @@ endif
 	for i in $(DOCFILES) ; do rm -f $(DESTDIR)$(DOCDIR)/$$i ; done
 	rm -f $(DESTDIR)$(MANDIR)/onedrive.1
-version: .git/HEAD .git/index
+version:
+ifeq ($(gitversion),1)
 	echo $(shell git describe --tags) > version
+else
+	echo $(RELEASEVER) > version
+endif
+.PHONY: version
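The reworked version target above falls back to RELEASEVER when no .git/HEAD is present, which is the "build failure if building from release archive file" fix noted in the changelog. Roughly, with illustrative output:

```text
# Inside a git clone (.git/HEAD present): the version file comes from `git describe --tags`
$ make version && cat version
v2.3.0-5-gabc1234    <- illustrative

# From an extracted release tarball (no .git directory): falls back to RELEASEVER
$ make version && cat version
v2.3.0
```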


@@ -195,6 +195,9 @@ By passing `NOTIFICATIONS=1` to the `make` call, notifications via
 libnotify are enabled. If `pkg-config` is not used (see above), the necessary
 libraries are `gmodule-2.0`, `glib-2.0`, and `notify`.
+By passing `DEBUG=1` to the `make` call, `onedrive` is built with additional debug
+information, useful (for example) when profiling with `perf`.
 ### Building using a different compiler (for example [LDC](https://wiki.dlang.org/LDC))
 #### Debian - i386 / i686
 ```text
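To illustrate the new `DEBUG=1` switch documented above, one possible profiling workflow is sketched below; the `perf` commands are an example only and not part of the project's own instructions:

```text
make clean; make DEBUG=1
perf record -g ./onedrive --synchronize --verbose
perf report
```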


@@ -1,4 +1,4 @@
-.TH ONEDRIVE "1" "March 2019" "2.2.6" "User Commands"
+.TH ONEDRIVE "1" "March 2019" "2.3.0" "User Commands"
 .SH NAME
 onedrive \- folder synchronization with OneDrive
 .SH SYNOPSIS


@@ -1,5 +1,5 @@
 pkgname=onedrive
-pkgver=2.2.6
+pkgver=2.3.0
 pkgrel=1 #patch-level (Increment this when patch is applied)
 pkgdesc="A free OneDrive Client for Linux. This is a fork of the https://github.com/skilion/onedrive repository"
 license=("unknown")


@@ -6,7 +6,7 @@
 %endif
 Name:       onedrive
-Version:    2.2.6
+Version:    2.3.0
 Release:    1%{?dist}
 Summary:    Microsoft OneDrive Client
 Group:      System Environment/Network


@@ -31,7 +31,7 @@ struct Item {
 final class ItemDatabase
 {
     // increment this for every change in the db schema
-    immutable int itemDatabaseVersion = 7;
+    immutable int itemDatabaseVersion = 8;
     Database db;
     string insertItemStmt;
@@ -59,9 +59,28 @@ final class ItemDatabase
             db.exec("DROP TABLE item");
             createTable();
         }
-        db.exec("PRAGMA foreign_keys = ON");
-        db.exec("PRAGMA recursive_triggers = ON");
+        // Set the enforcement of foreign key constraints.
+        // https://www.sqlite.org/pragma.html#pragma_foreign_keys
+        // PRAGMA foreign_keys = boolean;
+        db.exec("PRAGMA foreign_keys = TRUE");
+        // Set the recursive trigger capability
+        // https://www.sqlite.org/pragma.html#pragma_recursive_triggers
+        // PRAGMA recursive_triggers = boolean;
+        db.exec("PRAGMA recursive_triggers = TRUE");
+        // Set the journal mode for databases associated with the current connection
+        // https://www.sqlite.org/pragma.html#pragma_journal_mode
         db.exec("PRAGMA journal_mode = WAL");
+        // Automatic indexing is enabled by default as of version 3.7.17
+        // https://www.sqlite.org/pragma.html#pragma_automatic_index
+        // PRAGMA automatic_index = boolean;
+        db.exec("PRAGMA automatic_index = FALSE");
+        // Tell SQLite to store temporary tables in memory. This will speed up many read operations that rely on temporary tables, indices, and views.
+        // https://www.sqlite.org/pragma.html#pragma_temp_store
        db.exec("PRAGMA temp_store = MEMORY");
+        // Tell SQLite to clean up database table size
+        // https://www.sqlite.org/pragma.html#pragma_auto_vacuum
+        // PRAGMA schema.auto_vacuum = 0 | NONE | 1 | FULL | 2 | INCREMENTAL;
+        db.exec("PRAGMA auto_vacuum = FULL");
         insertItemStmt = "
             INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, crc32Hash, sha1Hash, quickXorHash, remoteDriveId, remoteId)
@@ -106,6 +125,8 @@ final class ItemDatabase
             )");
         db.exec("CREATE INDEX name_idx ON item (name)");
         db.exec("CREATE INDEX remote_idx ON item (remoteDriveId, remoteId)");
+        db.exec("CREATE INDEX item_children_idx ON item (driveId, parentId)");
+        db.exec("CREATE INDEX selectByPath_idx ON item (name, driveId, parentId)");
         db.setVersion(itemDatabaseVersion);
     }
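The pragma and index changes above can be checked from the sqlite3 shell against an existing client database. The database path below is the usual default and is assumed here, and the expected output wording is approximate:

```text
# Journal mode set by the PRAGMA above (expected answer: wal)
sqlite3 ~/.config/onedrive/items.sqlite3 "PRAGMA journal_mode;"

# Child lookups should be able to use the new index instead of a full table scan
sqlite3 ~/.config/onedrive/items.sqlite3 \
  "EXPLAIN QUERY PLAN SELECT id FROM item WHERE driveId = 'x' AND parentId = 'y';"
# expected to report something like: SEARCH TABLE item USING INDEX item_children_idx
```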


@@ -236,6 +236,14 @@ int main(string[] args)
         return EXIT_FAILURE;
     }
+    // If --synchronize and --monitor are both passed in, exit and display help, as these options conflict with each other
+    if (synchronize && monitor) {
+        writeln("\nERROR: --synchronize and --monitor cannot be used together\n");
+        writeln("Refer to --help to determine which command option you should use.\n");
+        oneDrive.http.shutdown();
+        return EXIT_FAILURE;
+    }
     // Initialize the item database
     log.vlog("Opening the item database ...");
     if (!cfg.getValueBool("dry_run")) {
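Based on the writeln calls added above, passing both options now aborts the client immediately; the output looks roughly like this:

```text
$ onedrive --synchronize --monitor

ERROR: --synchronize and --monitor cannot be used together

Refer to --help to determine which command option you should use.
```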


@@ -39,7 +39,19 @@ final class SelectiveSync
         // Does the directory name match skip_dir config entry?
         // Returns true if the name matches a skip_dir config entry
         // Returns false if no match
-        return !name.matchFirst(dirmask).empty;
+        // Try full path match first
+        if (!name.matchFirst(dirmask).empty) {
+            return true;
+        } else {
+            // check just the file name
+            string filename = baseName(name);
+            if(!filename.matchFirst(dirmask).empty) {
+                return true;
+            }
+        }
+        // no match
+        return false;
     }
     // config file skip_file parameter
@@ -66,7 +78,7 @@ final class SelectiveSync
     // config sync_list file handling
     bool isPathExcluded(string path)
     {
-        return .isPathExcluded(path, paths) || .isPathMatched(path, mask);
+        return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask);
     }
 }
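The reworked isDirNameExcluded above tries the full path against the skip_dir pattern first and only then the basename. A walk-through, assuming a config entry of skip_dir = "Temp" whose compiled pattern matches whole names only:

```text
isDirNameExcluded("Projects/Temp")
  1. "Projects/Temp" vs skip_dir pattern   -> no match (full-path check)
  2. baseName("Projects/Temp") == "Temp"   -> match, directory is skipped

isDirNameExcluded("Projects/Temporary")
  1. full path -> no match
  2. basename "Temporary" -> no match, directory is not skipped by skip_dir
```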


@@ -754,9 +754,17 @@ final class SyncEngine
             bool unwanted;
             unwanted |= skippedItems.find(item.parentId).length != 0;
             if (unwanted) log.vdebug("Flagging as unwanted: find(item.parentId).length != 0");
-            unwanted |= selectiveSync.isFileNameExcluded(item.name);
-            if (unwanted) log.vdebug("Flagging as unwanted: item name is excluded: ", item.name);
+            // Check if this is a directory to skip
+            if (!unwanted) {
+                unwanted = selectiveSync.isDirNameExcluded(item.name);
+                if (unwanted) log.vlog("Skipping item - excluded by skip_dir config: ", item.name);
+            }
+            // Check if this is a file to skip
+            if (!unwanted) {
+                unwanted = selectiveSync.isFileNameExcluded(item.name);
+                if (unwanted) log.vlog("Skipping item - excluded by skip_file config: ", item.name);
+            }
             // check the item type
             if (!unwanted) {
                 if (isItemFile(driveItem)) {
@@ -1140,9 +1148,15 @@ final class SyncEngine
         bool unwanted = false;
         string path;
-        // Is item.name or the path excluded
-        unwanted = selectiveSync.isFileNameExcluded(item.name);
+        // Is the path excluded?
+        unwanted = selectiveSync.isDirNameExcluded(item.name);
+        // If the path is not excluded, is the filename excluded?
+        if (!unwanted) {
+            unwanted = selectiveSync.isFileNameExcluded(item.name);
+        }
+        // If neither the path nor the filename is excluded, is this item excluded by selective sync (sync_list)?
         if (!unwanted) {
             path = itemdb.computePath(item.driveId, item.id);
             unwanted = selectiveSync.isPathExcluded(path);
@@ -1485,12 +1499,14 @@ final class SyncEngine
         // filter out user configured items to skip
         if (path != ".") {
             if (isDir(path)) {
+                log.vdebug("Checking path: ", path);
                 if (selectiveSync.isDirNameExcluded(strip(path,"./"))) {
                     log.vlog("Skipping item - excluded by skip_dir config: ", path);
                     return;
                 }
             }
             if (isFile(path)) {
+                log.vdebug("Checking file: ", path);
                 if (selectiveSync.isFileNameExcluded(strip(path,"./"))) {
                     log.vlog("Skipping item - excluded by skip_file config: ", path);
                     return;
@@ -1682,6 +1698,8 @@ final class SyncEngine
         auto maxUploadFileSize = 16106127360; // 15GB
         //auto maxUploadFileSize = 21474836480; // 20GB
         auto thisFileSize = getSize(path);
+        // To avoid a 409 Conflict error - does the file actually exist on OneDrive already?
+        JSONValue fileDetailsFromOneDrive;
         // Can we read the file - as a permissions issue or file corruption will cause a failure
         // https://github.com/abraunegg/onedrive/issues/113
@@ -1689,10 +1707,6 @@ final class SyncEngine
         // able to read the file
         if (thisFileSize <= maxUploadFileSize){
             // Resolves: https://github.com/skilion/onedrive/issues/121, https://github.com/skilion/onedrive/issues/294, https://github.com/skilion/onedrive/issues/329
-            // To avoid a 409 Conflict error - does the file actually exist on OneDrive already?
-            JSONValue fileDetailsFromOneDrive;
             // Does this 'file' already exist on OneDrive?
             try {
                 // test if the local path exists on OneDrive
@@ -1771,47 +1785,50 @@ final class SyncEngine
                 // Log action to log file
                 log.fileOnly("Uploading new file ", path, " ... done.");
-                // The file was uploaded
+                // The file was uploaded, or a 4xx / 5xx error was generated
+                if ("size" in response){
+                    // The response JSON contains size, high likelihood valid response returned
                     ulong uploadFileSize = response["size"].integer;
                     // In some cases the file that was uploaded was not complete, but 'completed' without errors on OneDrive
                     // This has been seen with PNG / JPG files mainly, which then contributes to generating a 412 error when we attempt to update the metadata
                     // Validate here that the file uploaded, at least in size, matches in the response to what the size is on disk
                     if (thisFileSize != uploadFileSize){
                         if(disableUploadValidation){
                             // Print a warning message
                             log.log("WARNING: Uploaded file size does not match local file - skipping upload validation");
                         } else {
                             // OK .. the uploaded file does not match and we did not disable this validation
                             log.log("Uploaded file size does not match local file - upload failure - retrying");
                             // Delete uploaded bad file
                             onedrive.deleteById(response["parentReference"]["driveId"].str, response["id"].str, response["eTag"].str);
                             // Re-upload
                             uploadNewFile(path);
                             return;
                         }
                     }
                     // File validation is OK
                     if ((accountType == "personal") || (thisFileSize == 0)){
                         // Update the item's metadata on OneDrive
                         string id = response["id"].str;
                         string cTag = response["cTag"].str;
                         if (exists(path)) {
                             SysTime mtime = timeLastModified(path).toUTC();
                             // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded
                             uploadLastModifiedTime(parent.driveId, id, cTag, mtime);
                         } else {
                             // will be removed in different event!
                             log.log("File disappeared after upload: ", path);
                         }
                         return;
                     } else {
                         // OneDrive Business Account - always use a session to upload
                         // The session includes a Request Body element containing lastModifiedDateTime
                         // which negates the need for a modify event against OneDrive
                         saveItem(response);
                         return;
                     }
+                }
             } else {
                 // we are --dry-run - simulate the file upload
@@ -1836,7 +1853,8 @@
             // even though some file systems (such as a POSIX-compliant file system) may consider them as different.
             // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior.
-            if (fileDetailsFromOneDrive["name"].str == baseName(path)){
+            // Check that 'name' is in the JSON response (validates data) and that 'name' == the path we are looking for
+            if (("name" in fileDetailsFromOneDrive) && (fileDetailsFromOneDrive["name"].str == baseName(path))) {
                 // OneDrive 'name' matches local path name
                 log.vlog("Requested file to upload exists on OneDrive - local database is out of sync for this file: ", path);