From a2298be257d60338212abd873edfb69cc832e754 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 19:28:10 +0200 Subject: [PATCH 01/33] QuickXor implementation --- src/qxor.d | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 src/qxor.d diff --git a/src/qxor.d b/src/qxor.d new file mode 100644 index 00000000..d59673e3 --- /dev/null +++ b/src/qxor.d @@ -0,0 +1,88 @@ +import std.algorithm; +import std.digest.digest; + +// implementation of the QuickXorHash algorithm in D +// https://github.com/OneDrive/onedrive-api-docs/blob/master/snippets/quickxorhash.md +struct QuickXor +{ + private immutable int widthInBits = 160; + private immutable size_t lengthInBytes = (widthInBits - 1) / 8 + 1; + private immutable size_t lengthInQWords = (widthInBits - 1) / 64 + 1; + private immutable int bitsInLastCell = widthInBits % 64; // 32 + private immutable int shift = 11; + + private ulong[lengthInQWords] _data; + private ulong _lengthSoFar; + private int _shiftSoFar; + + nothrow @safe void put(scope const(ubyte)[] array...) + { + int vectorArrayIndex = _shiftSoFar / 64; + int vectorOffset = _shiftSoFar % 64; + size_t iterations = min(array.length, widthInBits); + + for (size_t i = 0; i < iterations; i++) { + bool isLastCell = vectorArrayIndex == _data.length - 1; + int bitsInVectorCell = isLastCell ? bitsInLastCell : 64; + + if (vectorOffset <= bitsInVectorCell - 8) { + for (size_t j = i; j < array.length; j += widthInBits) { + _data[vectorArrayIndex] ^= cast(ulong) array[j] << vectorOffset; + } + } else { + int index1 = vectorArrayIndex; + int index2 = isLastCell ? 
0 : (vectorArrayIndex + 1); + byte low = cast(byte) (bitsInVectorCell - vectorOffset); + + byte xoredByte = 0; + for (int j = i; j < array.length; j += widthInBits) { + xoredByte ^= array[j]; + } + + _data[index1] ^= cast(ulong) xoredByte << vectorOffset; + _data[index2] ^= cast(ulong) xoredByte >> low; + } + + vectorOffset += shift; + if (vectorOffset >= bitsInVectorCell) { + vectorArrayIndex = isLastCell ? 0 : vectorArrayIndex + 1; + vectorOffset -= bitsInVectorCell; + } + } + + _shiftSoFar += (_shiftSoFar + shift * array.length) % widthInBits; + _lengthSoFar += array.length; + + } + + nothrow @safe void start() + { + _data = _data.init; + _shiftSoFar = 0; + _lengthSoFar = 0; + } + + nothrow @trusted ubyte[lengthInBytes] finish() + { + ubyte[lengthInBytes] tmp; + tmp[0 .. lengthInBytes] = (cast(ubyte*) _data)[0 .. lengthInBytes]; + for (size_t i = 0; i < 8; i++) { + tmp[lengthInBytes - 8 + i] ^= (cast(ubyte*) &_lengthSoFar)[i]; + } + return tmp; + } +} + +unittest +{ + assert(isDigest!QuickXor); +} + +unittest +{ + QuickXor qxor; + qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog"); + assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE"); +} + +alias QuickXorDigest = WrapperDigest!(QuickXor); \ No newline at end of file From f19b86e4c72dc14ec84cd9f2177af98ac6440a42 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 19:37:51 +0200 Subject: [PATCH 02/33] QuickXor utility function --- src/util.d | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/util.d b/src/util.d index f7512f22..163ffad7 100644 --- a/src/util.d +++ b/src/util.d @@ -1,3 +1,4 @@ +import std.base64; import std.conv; import std.digest.crc; import std.file; @@ -7,6 +8,7 @@ import std.regex; import std.socket; import std.stdio; import std.string; +import qxor; private string deviceName; @@ -50,6 +52,17 @@ string computeCrc32(string path) return crc.finish().toHexString().dup; } +// return the quickXorHash base64 string of a file 
+string computeQuickXorHash(string path) +{ + QuickXor qxor; + auto file = File(path, "rb"); + foreach (ubyte[] data; chunks(file, 4096)) { + qxor.put(data); + } + return Base64.encode(qxor.finish()); +} + // convert wildcards (*, ?) to regex Regex!char wild2regex(const(char)[] pattern) { From 9030a107b7aaed29e4bba82d9e52ae6f2e6770c0 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 19:49:55 +0200 Subject: [PATCH 03/33] fix third person in comments --- src/util.d | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/util.d b/src/util.d index 163ffad7..30721563 100644 --- a/src/util.d +++ b/src/util.d @@ -17,7 +17,7 @@ static this() deviceName = Socket.hostName; } -// give a new name to the specified file or directory +// gives a new name to the specified file or directory void safeRename(const(char)[] path) { auto ext = extension(path); @@ -35,13 +35,13 @@ void safeRename(const(char)[] path) rename(path, newPath); } -// delete the specified file without throwing an exception if it does not exists +// deletes the specified file without throwing an exception if it does not exists void safeRemove(const(char)[] path) { if (exists(path)) remove(path); } -// return the crc32 hex string of a file +// returns the crc32 hex string of a file string computeCrc32(string path) { CRC32 crc; @@ -52,7 +52,7 @@ string computeCrc32(string path) return crc.finish().toHexString().dup; } -// return the quickXorHash base64 string of a file +// returns the quickXorHash base64 string of a file string computeQuickXorHash(string path) { QuickXor qxor; @@ -63,7 +63,7 @@ string computeQuickXorHash(string path) return Base64.encode(qxor.finish()); } -// convert wildcards (*, ?) to regex +// converts wildcards (*, ?) 
to regex Regex!char wild2regex(const(char)[] pattern) { string str; @@ -92,7 +92,7 @@ Regex!char wild2regex(const(char)[] pattern) return regex(str, "i"); } -// return true if the network connection is available +// returns true if the network connection is available bool testNetwork() { HTTP http = HTTP("https://login.microsoftonline.com"); @@ -100,7 +100,7 @@ bool testNetwork() return http.perform(ThrowOnError.no) == 0; } -// call globMatch for each string in pattern separated by '|' +// calls globMatch for each string in pattern separated by '|' bool multiGlobMatch(const(char)[] path, const(char)[] pattern) { foreach (glob; pattern.split('|')) { From 7fec22bef11d78c837339e9985669a7d688c6c55 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 19:51:02 +0200 Subject: [PATCH 04/33] fix variable type in for loop --- src/qxor.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qxor.d b/src/qxor.d index d59673e3..b7d03f07 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -35,7 +35,7 @@ struct QuickXor byte low = cast(byte) (bitsInVectorCell - vectorOffset); byte xoredByte = 0; - for (int j = i; j < array.length; j += widthInBits) { + for (size_t j = i; j < array.length; j += widthInBits) { xoredByte ^= array[j]; } From f2209c0591453af31a8f1464e36f6748b224f54e Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 19:53:39 +0200 Subject: [PATCH 05/33] add qxor.d to Makefile --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 363ff7fe..c20133e5 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,7 @@ SOURCES = \ src/main.d \ src/monitor.d \ src/onedrive.d \ + src/qxor.d \ src/selective.d \ src/sqlite.d \ src/sync.d \ From 80c60beef98d29410482f08e00d8008d0e4784ad Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 19:55:00 +0200 Subject: [PATCH 06/33] added newline at the end --- src/qxor.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qxor.d b/src/qxor.d index 
b7d03f07..451901aa 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -85,4 +85,4 @@ unittest assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE"); } -alias QuickXorDigest = WrapperDigest!(QuickXor); \ No newline at end of file +alias QuickXorDigest = WrapperDigest!(QuickXor); From 3d8daa086dc47a062cb526ed875c97caee529b7b Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 20:14:50 +0200 Subject: [PATCH 07/33] added --print-token --- src/main.d | 6 +++++- src/onedrive.d | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/main.d b/src/main.d index 0de12d25..26dfef8e 100644 --- a/src/main.d +++ b/src/main.d @@ -16,6 +16,8 @@ int main(string[] args) bool logout; // enable verbose logging bool verbose; + // print the access token + bool printAccessToken; try { auto opt = getopt( @@ -25,7 +27,8 @@ int main(string[] args) "resync", "Forget the last saved state, perform a full sync.", &resync, "logout", "Logout the current user.", &logout, "confdir", "Set the directory to use to store the configuration files.", &configDirName, - "verbose|v", "Print more details, useful for debugging.", &log.verbose + "verbose|v", "Print more details, useful for debugging.", &log.verbose, + "print-token", "Print the access token, useful for debugging.", &printAccessToken ); if (opt.helpWanted) { defaultGetoptPrinter( @@ -71,6 +74,7 @@ int main(string[] args) return EXIT_FAILURE; } auto onedrive = new OneDriveApi(cfg); + onedrive.printAccessToken = printAccessToken; if (!onedrive.init()) { log.log("Could not initialize the OneDrive API"); // workaround for segfault in std.net.curl.Curl.shutdown() on exit diff --git a/src/onedrive.d b/src/onedrive.d index b400f2b9..dec6126f 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -50,6 +50,9 @@ final class OneDriveApi private SysTime accessTokenExpiration; /* private */ HTTP http; + // if true, every new access token is printed + bool printAccessToken; + this(Config cfg) { this.cfg = cfg; @@ 
-244,6 +247,7 @@ final class OneDriveApi refreshToken = response["refresh_token"].str(); accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); std.file.write(cfg.refreshTokenFilePath, refreshToken); + if (printAccessToken) writeln("New access token: ", accessToken); } private void checkAccessTokenExpired() From 691862b18ff648b7a770b68f748e8fd6ea58a0d5 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 20:54:57 +0200 Subject: [PATCH 08/33] implement the recommended way to enumerate changes --- src/config.d | 4 ++-- src/main.d | 2 +- src/onedrive.d | 8 ++++---- src/sync.d | 37 ++++++++++++++++--------------------- 4 files changed, 23 insertions(+), 28 deletions(-) diff --git a/src/config.d b/src/config.d index 653982d9..e7a25dbe 100644 --- a/src/config.d +++ b/src/config.d @@ -5,7 +5,7 @@ static import log; final class Config { public string refreshTokenFilePath; - public string statusTokenFilePath; + public string deltaLinkFilePath; public string databaseFilePath; public string uploadStateFilePath; public string syncListFilePath; @@ -17,7 +17,7 @@ final class Config this(string configDirName) { refreshTokenFilePath = configDirName ~ "/refresh_token"; - statusTokenFilePath = configDirName ~ "/status_token"; + deltaLinkFilePath = configDirName ~ "/delta_link"; databaseFilePath = configDirName ~ "/items.sqlite3"; uploadStateFilePath = configDirName ~ "/resume_upload"; userConfigFilePath = configDirName ~ "/config"; diff --git a/src/main.d b/src/main.d index 26dfef8e..5709a87c 100644 --- a/src/main.d +++ b/src/main.d @@ -60,7 +60,7 @@ int main(string[] args) if (resync || logout) { log.log("Deleting the saved status ..."); safeRemove(cfg.databaseFilePath); - safeRemove(cfg.statusTokenFilePath); + safeRemove(cfg.deltaLinkFilePath); safeRemove(cfg.uploadStateFilePath); if (logout) { safeRemove(cfg.refreshTokenFilePath); diff --git a/src/onedrive.d b/src/onedrive.d index dec6126f..3f601fcf 100644 --- a/src/onedrive.d +++ 
b/src/onedrive.d @@ -104,24 +104,24 @@ final class OneDriveApi } // https://dev.onedrive.com/items/view_delta.htm - JSONValue viewChangesById(const(char)[] id, const(char)[] statusToken) + JSONValue viewChangesById(const(char)[] id, const(char)[] deltaLink) { checkAccessTokenExpired(); + if (deltaLink) return get(deltaLink); const(char)[] url = itemByIdUrl ~ id ~ "/delta"; url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference"; - if (statusToken) url ~= "&token=" ~ statusToken; return get(url); } // https://dev.onedrive.com/items/view_delta.htm - JSONValue viewChangesByPath(const(char)[] path, const(char)[] statusToken) + JSONValue viewChangesByPath(const(char)[] path, const(char)[] deltaLink) { checkAccessTokenExpired(); + if (deltaLink) return get(deltaLink); string url = itemByPathUrl ~ encodeComponent(path) ~ ":/delta"; // HACK if (path == ".") url = driveUrl ~ "/root/delta"; url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference"; - if (statusToken) url ~= "&token=" ~ statusToken; return get(url); } diff --git a/src/sync.d b/src/sync.d index 0554b4f6..8916309f 100644 --- a/src/sync.d +++ b/src/sync.d @@ -60,8 +60,6 @@ final class SyncEngine private ItemDatabase itemdb; private UploadSession session; private SelectiveSync selectiveSync; - // token representing the last status correctly synced - private string statusToken; // list of items to skip while applying the changes private string[] skippedItems; // list of items to delete after the changes has been downloaded @@ -79,12 +77,6 @@ final class SyncEngine void init() { - // restore the previous status token - try { - statusToken = readText(cfg.statusTokenFilePath); - } catch (FileException e) { - // swallow exception - } // check if there is an interrupted upload session if (session.restore()) { log.log("Continuing the upload session ..."); @@ -96,33 +88,36 @@ final class SyncEngine void applyDifferences() { 
log.vlog("Applying differences ..."); + + // restore the last known state + string deltaLink; + try { + deltaLink = readText(cfg.deltaLinkFilePath); + } catch (FileException e) { + // swallow exception + } + try { JSONValue changes; do { // get changes from the server try { - changes = onedrive.viewChangesByPath(".", statusToken); + changes = onedrive.viewChangesByPath(".", deltaLink); } catch (OneDriveException e) { if (e.httpStatusCode == 410) { - log.log("Status token expired, resyncing"); - statusToken = null; + log.log("Delta link expired, resyncing"); + deltaLink = null; continue; - } - else { + } else { throw e; } } foreach (item; changes["value"].array) { applyDifference(item); } - // hack to reuse old code - string url; - if ("@odata.nextLink" in changes) url = changes["@odata.nextLink"].str; - if ("@odata.deltaLink" in changes) url = changes["@odata.deltaLink"].str; - auto c = matchFirst(url, r"(?:token=)([\w\d]+)"); - c.popFront(); // skip the whole match - statusToken = c.front; - std.file.write(cfg.statusTokenFilePath, statusToken); + if ("@odata.nextLink" in changes) deltaLink = changes["@odata.nextLink"].str; + if ("@odata.deltaLink" in changes) deltaLink = changes["@odata.deltaLink"].str; + std.file.write(cfg.deltaLinkFilePath, deltaLink); } while ("@odata.nextLink" in changes); } catch (ErrnoException e) { throw new SyncException(e.msg, e); From 44fc36fd8d023028ddf30c45cd863cf89ac7b606 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 22:13:19 +0200 Subject: [PATCH 09/33] support for SHA1 and QuickXor hash algorithms --- src/itemdb.d | 41 +++++++++++-------- src/sync.d | 108 +++++++++++++++++++++++++-------------------------- src/util.d | 13 ++++++- 3 files changed, 89 insertions(+), 73 deletions(-) diff --git a/src/itemdb.d b/src/itemdb.d index 556aa0de..2a290adf 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -16,13 +16,15 @@ struct Item string cTag; SysTime mtime; string parentId; - string crc32; + string crc32Hash; + string 
sha1Hash; + string quickXorHash; } final class ItemDatabase { // increment this for every change in the db schema - immutable int itemDatabaseVersion = 3; + immutable int itemDatabaseVersion = 4; Database db; Statement insertItemStmt; @@ -35,14 +37,16 @@ final class ItemDatabase db = Database(filename); if (db.getVersion() == 0) { db.exec("CREATE TABLE item ( - id TEXT NOT NULL PRIMARY KEY, - name TEXT NOT NULL, - type TEXT NOT NULL, - eTag TEXT, - cTag TEXT, - mtime TEXT NOT NULL, - parentId TEXT, - crc32 TEXT, + id TEXT NOT NULL PRIMARY KEY, + name TEXT NOT NULL, + type TEXT NOT NULL, + eTag TEXT, + cTag TEXT, + mtime TEXT NOT NULL, + parentId TEXT, + crc32Hash TEXT, + sha1Hash TEXT, + quickXorHash TEXT, FOREIGN KEY (parentId) REFERENCES item (id) ON DELETE CASCADE )"); db.exec("CREATE INDEX name_idx ON item (name)"); @@ -52,14 +56,13 @@ final class ItemDatabase } db.exec("PRAGMA foreign_keys = ON"); db.exec("PRAGMA recursive_triggers = ON"); - - insertItemStmt = db.prepare("INSERT OR REPLACE INTO item (id, name, type, eTag, cTag, mtime, parentId, crc32) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"); + insertItemStmt = db.prepare("INSERT OR REPLACE INTO item (id, name, type, eTag, cTag, mtime, parentId, crc32Hash, sha1Hash, quickXorHash) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); updateItemStmt = db.prepare(" UPDATE item - SET name = ?2, type = ?3, eTag = ?4, cTag = ?5, mtime = ?6, parentId = ?7, crc32 = ?8 + SET name = ?2, type = ?3, eTag = ?4, cTag = ?5, mtime = ?6, parentId = ?7, crc32Hash = ?8, sha1Hash = ?9, quickXorHash = ?10 WHERE id = ?1 "); - selectItemByIdStmt = db.prepare("SELECT id, name, type, eTag, cTag, mtime, parentId, crc32 FROM item WHERE id = ?"); + selectItemByIdStmt = db.prepare("SELECT id, name, type, eTag, cTag, mtime, parentId, crc32Hash, sha1Hash, quickXorHash FROM item WHERE id = ?"); selectItemByParentIdStmt = db.prepare("SELECT id FROM item WHERE parentId = ?"); } @@ -201,13 +204,15 @@ final class ItemDatabase bind(5, cTag); bind(6, 
mtime.toISOExtString()); bind(7, parentId); - bind(8, crc32); + bind(8, crc32Hash); + bind(9, sha1Hash); + bind(10, quickXorHash); } } private Item buildItem(Statement.Result result) { - assert(!result.empty && result.front.length == 8); + assert(!result.empty && result.front.length == 10); Item item = { id: result.front[0].dup, name: result.front[1].dup, @@ -215,7 +220,9 @@ final class ItemDatabase cTag: result.front[4].dup, mtime: SysTime.fromISOExtString(result.front[5]), parentId: result.front[6].dup, - crc32: result.front[7].dup + crc32Hash: result.front[7].dup, + sha1Hash: result.front[8].dup, + quickXorHash: result.front[9].dup }; switch (result.front[2]) { case "file": item.type = ItemType.file; break; diff --git a/src/sync.d b/src/sync.d index 8916309f..0cdd3a5c 100644 --- a/src/sync.d +++ b/src/sync.d @@ -31,11 +31,55 @@ private bool isItemRoot(const ref JSONValue item) return ("root" in item) != null; } -private bool testCrc32(string path, const(char)[] crc32) +private Item makeItem(const ref JSONValue jsonItem) { - if (crc32) { - string localCrc32 = computeCrc32(path); - if (crc32 == localCrc32) return true; + ItemType type; + if (isItemFile(jsonItem)) { + type = ItemType.file; + } else if (isItemFolder(jsonItem)) { + type = ItemType.dir; + } else { + assert(0); + } + + Item item = { + id: jsonItem["id"].str, + name: jsonItem["name"].str, + type: type, + eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned if for the root in OneDrive Biz + cTag: isItemFolder(jsonItem) ? null : jsonItem["cTag"].str, + mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), + parentId: isItemRoot(jsonItem) ? 
null : jsonItem["parentReference"]["id"].str + }; + + // extract the file hash + if (type == ItemType.file) { + if ("hashes" in jsonItem["file"]) { + if ("crc32Hash" in jsonItem["file"]["hashes"]) { + item.crc32Hash = jsonItem["file"]["hashes"]["crc32Hash"].str; + } else if ("sha1Hash" in jsonItem["file"]["hashes"]) { + item.sha1Hash = jsonItem["file"]["hashes"]["sha1Hash"].str; + } else if ("quickXorHash" in jsonItem["file"]["hashes"]) { + item.quickXorHash = jsonItem["file"]["hashes"]["quickXorHash"].str; + } else { + log.vlog("The file does not have any hash"); + } + } else { + log.vlog("No hashes in the file facet"); + } + } + + return item; +} + +private bool testFileHash(string path, const ref Item item) +{ + if (item.crc32Hash) { + if (item.crc32Hash == computeCrc32(path)) return true; + } else if (item.sha1Hash) { + if (item.sha1Hash == computeSha1Hash(path)) return true; + } else if (item.quickXorHash) { + if (item.quickXorHash == computeQuickXorHash(path)) return true; } return false; } @@ -193,36 +237,13 @@ final class SyncEngine } } - ItemType type; - if (isItemFile(item)) { - type = ItemType.file; - } else if (isItemFolder(item)) { - type = ItemType.dir; - } else { + if (!isItemFile(item) && !isItemFolder(item)) { log.vlog("The item is neither a file nor a directory, skipping"); skippedItems ~= id; return; } - string crc32; - if (type == ItemType.file) { - try { - crc32 = item["file"]["hashes"]["crc32Hash"].str; - } catch (JSONException e) { - log.vlog("The hash is not available"); - } - } - - Item newItem = { - id: id, - name: name, - type: type, - eTag: eTag, - cTag: "cTag" in item ? 
item["cTag"].str : null, - mtime: SysTime.fromISOExtString(item["fileSystemInfo"]["lastModifiedDateTime"].str), - parentId: parentId, - crc32: crc32 - }; + Item newItem = makeItem(item); if (!cached) { applyNewItem(newItem, path); @@ -305,7 +326,7 @@ final class SyncEngine } else { log.vlog("The local item has a different modified time ", localModifiedTime, " remote is ", item.mtime); } - if (testCrc32(path, item.crc32)) { + if (testFileHash(path, item)) { return true; } else { log.vlog("The local item has a different hash"); @@ -428,7 +449,7 @@ final class SyncEngine log.vlog("The file last modified time has changed"); string id = item.id; string eTag = item.eTag; - if (!testCrc32(path, item.crc32)) { + if (!testFileHash(path, item)) { log.vlog("The file content has changed"); log.log("Uploading: ", path); JSONValue response; @@ -546,30 +567,7 @@ final class SyncEngine private void saveItem(JSONValue jsonItem) { - ItemType type; - if (isItemFile(jsonItem)) { - type = ItemType.file; - } else if (isItemFolder(jsonItem)) { - type = ItemType.dir; - } else { - assert(0); - } - Item item = { - id: jsonItem["id"].str, - name: jsonItem["name"].str, - type: type, - eTag: jsonItem["eTag"].str, - cTag: "cTag" in jsonItem ? 
jsonItem["cTag"].str : null, - mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), - parentId: jsonItem["parentReference"]["id"].str - }; - if (type == ItemType.file) { - try { - item.crc32 = jsonItem["file"]["hashes"]["crc32Hash"].str; - } catch (JSONException e) { - log.vlog("The hash is not available"); - } - } + Item item = makeItem(jsonItem); itemdb.upsert(item); } diff --git a/src/util.d b/src/util.d index 30721563..49150dc7 100644 --- a/src/util.d +++ b/src/util.d @@ -1,6 +1,6 @@ import std.base64; import std.conv; -import std.digest.crc; +import std.digest.crc, std.digest.sha; import std.file; import std.net.curl; import std.path; @@ -52,6 +52,17 @@ string computeCrc32(string path) return crc.finish().toHexString().dup; } +// returns the sha1 hash hex string of a file +string computeSha1Hash(string path) +{ + SHA1 sha; + auto file = File(path, "rb"); + foreach (ubyte[] data; chunks(file, 4096)) { + sha.put(data); + } + return sha.finish().toHexString().dup; +} + // returns the quickXorHash base64 string of a file string computeQuickXorHash(string path) { From 8360d6508d3f27ded68d6aafee463f60aa5a2361 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 22:17:21 +0200 Subject: [PATCH 10/33] align message --- src/main.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.d b/src/main.d index 5709a87c..b5d64719 100644 --- a/src/main.d +++ b/src/main.d @@ -33,7 +33,7 @@ int main(string[] args) if (opt.helpWanted) { defaultGetoptPrinter( "Usage: onedrive [OPTION]...\n\n" ~ - "no option Sync and exit.", + "no option Sync and exit.", opt.options ); return EXIT_SUCCESS; From 809ccde5627f9a2f2c08fb1e8906d4c12d34cd66 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 22:45:09 +0200 Subject: [PATCH 11/33] workaround for old OneDrive files --- src/sync.d | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/sync.d b/src/sync.d index 0cdd3a5c..5bdb2d17 100644 --- 
a/src/sync.d +++ b/src/sync.d @@ -47,7 +47,7 @@ private Item makeItem(const ref JSONValue jsonItem) name: jsonItem["name"].str, type: type, eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned if for the root in OneDrive Biz - cTag: isItemFolder(jsonItem) ? null : jsonItem["cTag"].str, + cTag: /*isItemFolder(jsonItem)*/ "cTag" !in jsonItem ? null : jsonItem["cTag"].str, // 'cTag' is missing in old files mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), parentId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["id"].str }; @@ -65,7 +65,8 @@ private Item makeItem(const ref JSONValue jsonItem) log.vlog("The file does not have any hash"); } } else { - log.vlog("No hashes in the file facet"); + // 'hashes' is missing in old files + log.vlog("No hashes facet"); } } From 305242d8a1570b62ca6b00bd510b4af39038bb90 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 23:01:58 +0200 Subject: [PATCH 12/33] fix typo --- src/upload.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/upload.d b/src/upload.d index ead0ec81..52a01029 100644 --- a/src/upload.d +++ b/src/upload.d @@ -43,7 +43,7 @@ struct UploadSession return false; } if (!exists(session["localPath"].str)) { - log.vlog("The file do not exist anymore"); + log.vlog("The file does not exist anymore"); return false; } // request the session status From 8b6c99b0d91b13016567adab679ae6d67654b09b Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 23:14:37 +0200 Subject: [PATCH 13/33] notify deletion in verbose mode --- src/main.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.d b/src/main.d index b5d64719..85c08483 100644 --- a/src/main.d +++ b/src/main.d @@ -58,7 +58,7 @@ int main(string[] args) } if (resync || logout) { - log.log("Deleting the saved status ..."); + log.vlog("Deleting the saved status ..."); safeRemove(cfg.databaseFilePath); safeRemove(cfg.deltaLinkFilePath); 
safeRemove(cfg.uploadStateFilePath); From b5cbaa13cc0cf384ff3ed39305776c5961f801d8 Mon Sep 17 00:00:00 2001 From: skilion Date: Sun, 28 May 2017 23:15:03 +0200 Subject: [PATCH 14/33] cleaned comments --- src/sync.d | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sync.d b/src/sync.d index 5bdb2d17..40f92330 100644 --- a/src/sync.d +++ b/src/sync.d @@ -46,8 +46,8 @@ private Item makeItem(const ref JSONValue jsonItem) id: jsonItem["id"].str, name: jsonItem["name"].str, type: type, - eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned if for the root in OneDrive Biz - cTag: /*isItemFolder(jsonItem)*/ "cTag" !in jsonItem ? null : jsonItem["cTag"].str, // 'cTag' is missing in old files + eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned for the root in OneDrive Biz + cTag: "cTag" !in jsonItem ? null : jsonItem["cTag"].str, // cTag is missing in old files (plus all folders) mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), parentId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["id"].str }; From ad5441ccdc8a8f03ba2ada17d768296e65a24ce6 Mon Sep 17 00:00:00 2001 From: skilion Date: Mon, 29 May 2017 00:23:55 +0200 Subject: [PATCH 15/33] fix indentation --- src/qxor.d | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/qxor.d b/src/qxor.d index 451901aa..f164a342 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -16,10 +16,10 @@ struct QuickXor private int _shiftSoFar; nothrow @safe void put(scope const(ubyte)[] array...) - { + { int vectorArrayIndex = _shiftSoFar / 64; int vectorOffset = _shiftSoFar % 64; - size_t iterations = min(array.length, widthInBits); + size_t iterations = min(array.length, widthInBits); for (size_t i = 0; i < iterations; i++) { bool isLastCell = vectorArrayIndex == _data.length - 1; @@ -67,7 +67,7 @@ struct QuickXor ubyte[lengthInBytes] tmp; tmp[0 .. lengthInBytes] = (cast(ubyte*) _data)[0 .. 
lengthInBytes]; for (size_t i = 0; i < 8; i++) { - tmp[lengthInBytes - 8 + i] ^= (cast(ubyte*) &_lengthSoFar)[i]; + tmp[lengthInBytes - 8 + i] ^= (cast(ubyte*) &_lengthSoFar)[i]; } return tmp; } From c2815d649813686d644c261c724b8cd074649922 Mon Sep 17 00:00:00 2001 From: skilion Date: Mon, 29 May 2017 01:14:22 +0200 Subject: [PATCH 16/33] fix indentation --- src/onedrive.d | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/onedrive.d b/src/onedrive.d index 3f601fcf..07a2d14a 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -21,10 +21,10 @@ class OneDriveException: Exception // https://dev.onedrive.com/misc/errors.htm JSONValue error; - @nogc @safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__) - { - super(msg, file, line, next); - } + @nogc @safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__) + { + super(msg, file, line, next); + } @safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) { From 83a958df598a8457cb618d659a7104608aaf14a0 Mon Sep 17 00:00:00 2001 From: skilion Date: Mon, 29 May 2017 01:37:09 +0200 Subject: [PATCH 17/33] added DESTDIR and PREFIX --- Makefile | 26 ++++++++++++++----------- onedrive.service => onedrive.service.in | 2 +- 2 files changed, 16 insertions(+), 12 deletions(-) rename onedrive.service => onedrive.service.in (79%) diff --git a/Makefile b/Makefile index c20133e5..58761d45 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ -DC = dmd DFLAGS = -ofonedrive -L-lcurl -L-lsqlite3 -L-ldl -DESTDIR = /usr/local/bin +PREFIX = /usr/local SOURCES = \ src/config.d \ @@ -16,22 +15,27 @@ SOURCES = \ src/upload.d \ src/util.d +all: onedrive onedrive.service + onedrive: $(SOURCES) - $(DC) -O -release -inline -boundscheck=off $(DFLAGS) $(SOURCES) + dmd -g -inline -O -release $(DFLAGS) $(SOURCES) + +onedrive.service: + sed "s|@PREFIX@|$(PREFIX)|g" onedrive.service.in > onedrive.service 
debug: $(SOURCES) - $(DC) -debug -g -gs $(DFLAGS) $(SOURCES) + dmd -debug -g -gs $(DFLAGS) $(SOURCES) unittest: $(SOURCES) - $(DC) -unittest -debug -g -gs $(DFLAGS) $(SOURCES) + dmd -debug -g -gs -unittest $(DFLAGS) $(SOURCES) clean: - rm -f onedrive.o onedrive + rm -f onedrive onedrive.o onedrive.service -install: onedrive onedrive.service - install onedrive $(DESTDIR)/onedrive - install -m 644 onedrive.service /usr/lib/systemd/user +install: all + install -D onedrive $(DESTDIR)$(PREFIX)/bin/onedrive + install -D -m 644 onedrive.service $(DESTDIR)/usr/lib/systemd/user uninstall: - rm -f $(DESTDIR)/onedrive - rm -f /usr/lib/systemd/user/onedrive.service + rm -f $(DESTDIR)$(PREFIX)/bin/onedrive + rm -f $(DESTDIR)/usr/lib/systemd/user/onedrive.service diff --git a/onedrive.service b/onedrive.service.in similarity index 79% rename from onedrive.service rename to onedrive.service.in index 71db5ae6..1c014f39 100644 --- a/onedrive.service +++ b/onedrive.service.in @@ -3,7 +3,7 @@ Description=OneDrive Free Client Documentation=https://github.com/skilion/onedrive [Service] -ExecStart=/usr/local/bin/onedrive -m +ExecStart=@PREFIX@/bin/onedrive -m Restart=no [Install] From ac6b43c28d658782d9e48879afcebd4a4ea4274e Mon Sep 17 00:00:00 2001 From: Doug Newgard Date: Sun, 28 May 2017 21:12:35 -0500 Subject: [PATCH 18/33] Add service filename to Makefile When the dir the file is being installed to doesn't exist (like when setting DESTDIR), "install" will take the last part as the filename. A file called "/usr/lib/systemd/user" doesn't work correctly. 
--- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 58761d45..f6286df6 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ clean: install: all install -D onedrive $(DESTDIR)$(PREFIX)/bin/onedrive - install -D -m 644 onedrive.service $(DESTDIR)/usr/lib/systemd/user + install -D -m 644 onedrive.service $(DESTDIR)/usr/lib/systemd/user/onedrive.service uninstall: rm -f $(DESTDIR)$(PREFIX)/bin/onedrive From 4f542800a79407351651c0a8df078976acb99fcf Mon Sep 17 00:00:00 2001 From: skilion Date: Mon, 12 Jun 2017 16:46:10 +0200 Subject: [PATCH 19/33] fix bug when using personalized config dir --- src/main.d | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.d b/src/main.d index 85c08483..087f19ba 100644 --- a/src/main.d +++ b/src/main.d @@ -7,7 +7,7 @@ static import log; int main(string[] args) { // configuration directory - string configDirName = expandTilde(environment.get("XDG_CONFIG_HOME", "~/.config")) ~ "/onedrive"; + string configDirName = environment.get("XDG_CONFIG_HOME", "~/.config") ~ "/onedrive"; // enable monitor mode bool monitor; // force a full resync @@ -45,7 +45,7 @@ int main(string[] args) } log.vlog("Loading config ..."); - configDirName = expandTilde(configDirName); + configDirName = configDirName.expandTilde().absolutePath(); if (!exists(configDirName)) mkdir(configDirName); auto cfg = new config.Config(configDirName); cfg.init(); From c5a9b8b4802701247ce5075e4acdbe3ca03ae5e5 Mon Sep 17 00:00:00 2001 From: skilion Date: Mon, 12 Jun 2017 16:53:15 +0200 Subject: [PATCH 20/33] fix bug where remote items are wrongly identified as deleted --- src/sync.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync.d b/src/sync.d index 40f92330..a481ffba 100644 --- a/src/sync.d +++ b/src/sync.d @@ -23,7 +23,7 @@ private bool isItemFile(const ref JSONValue item) private bool isItemDeleted(const ref JSONValue item) { // HACK: fix for 
https://github.com/skilion/onedrive/issues/157 - return ("deleted" in item) || ("fileSystemInfo" !in item); + return ("deleted" in item) || ("fileSystemInfo" !in item && "remoteItem" !in item); } private bool isItemRoot(const ref JSONValue item) From 74c931cdba09a7a70dd216e433e45abb7ea6e70e Mon Sep 17 00:00:00 2001 From: skilion Date: Wed, 14 Jun 2017 15:48:15 +0200 Subject: [PATCH 21/33] added test for NULL values --- src/sqlite.d | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/sqlite.d b/src/sqlite.d index e42b7dcd..219a973e 100644 --- a/src/sqlite.d +++ b/src/sqlite.d @@ -197,17 +197,17 @@ unittest auto s = db.prepare("INSERT INTO test VALUES (?, ?)"); s.bind(1, "key1"); - s.bind(2, "value1"); + s.bind(2, "value"); s.exec(); s.bind(1, "key2"); - s.bind(2, "value2"); + s.bind(2, null); s.exec(); s = db.prepare("SELECT * FROM test ORDER BY id ASC"); auto r = s.exec(); assert(r.front[0] == "key1"); r.popFront(); - assert(r.front[1] == "value2"); + assert(r.front[1] == null); r.popFront(); assert(r.empty); } From 35f81b21158932cd3051ddcb845c61afa63a191b Mon Sep 17 00:00:00 2001 From: skilion Date: Wed, 14 Jun 2017 15:50:02 +0200 Subject: [PATCH 22/33] itemdb support for remote items --- src/itemdb.d | 244 ++++++++++++++++++++++++--------------------------- src/sync.d | 36 +++++--- 2 files changed, 139 insertions(+), 141 deletions(-) diff --git a/src/itemdb.d b/src/itemdb.d index 2a290adf..73c791dc 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -4,17 +4,20 @@ import sqlite; enum ItemType { file, - dir + dir, + remote } struct Item { + string driveId; string id; string name; ItemType type; string eTag; string cTag; SysTime mtime; + string parentDriveId; string parentId; string crc32Hash; string sha1Hash; @@ -24,30 +27,37 @@ struct Item final class ItemDatabase { // increment this for every change in the db schema - immutable int itemDatabaseVersion = 4; + immutable int itemDatabaseVersion = 5; Database db; Statement insertItemStmt; 
Statement updateItemStmt; Statement selectItemByIdStmt; Statement selectItemByParentIdStmt; + Statement deleteItemByIdStmt; this(const(char)[] filename) { db = Database(filename); if (db.getVersion() == 0) { db.exec("CREATE TABLE item ( - id TEXT NOT NULL PRIMARY KEY, - name TEXT NOT NULL, - type TEXT NOT NULL, - eTag TEXT, - cTag TEXT, - mtime TEXT NOT NULL, - parentId TEXT, - crc32Hash TEXT, - sha1Hash TEXT, - quickXorHash TEXT, - FOREIGN KEY (parentId) REFERENCES item (id) ON DELETE CASCADE + driveId TEXT NOT NULL, + id TEXT NOT NULL, + name TEXT NOT NULL, + type TEXT NOT NULL, + eTag TEXT, + cTag TEXT, + mtime TEXT NOT NULL, + parentDriveId TEXT, + parentId TEXT, + crc32Hash TEXT, + sha1Hash TEXT, + quickXorHash TEXT, + PRIMARY KEY (driveId, id), + FOREIGN KEY (parentDriveId, parentId) + REFERENCES item (driveId, id) + ON DELETE CASCADE + ON UPDATE RESTRICT )"); db.exec("CREATE INDEX name_idx ON item (name)"); db.setVersion(itemDatabaseVersion); @@ -56,14 +66,22 @@ final class ItemDatabase } db.exec("PRAGMA foreign_keys = ON"); db.exec("PRAGMA recursive_triggers = ON"); - insertItemStmt = db.prepare("INSERT OR REPLACE INTO item (id, name, type, eTag, cTag, mtime, parentId, crc32Hash, sha1Hash, quickXorHash) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"); + insertItemStmt = db.prepare(" + INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentDriveId, parentId, crc32Hash, sha1Hash, quickXorHash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ "); updateItemStmt = db.prepare(" UPDATE item - SET name = ?2, type = ?3, eTag = ?4, cTag = ?5, mtime = ?6, parentId = ?7, crc32Hash = ?8, sha1Hash = ?9, quickXorHash = ?10 - WHERE id = ?1 + SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentDriveId = ?8, parentId = ?9, crc32Hash = ?10, sha1Hash = ?11, quickXorHash = ?12 + WHERE driveId = ?1 AND id = ?2 "); - selectItemByIdStmt = db.prepare("SELECT id, name, type, eTag, cTag, mtime, parentId, crc32Hash, sha1Hash, quickXorHash FROM item WHERE id = ?"); - selectItemByParentIdStmt = db.prepare("SELECT id FROM item WHERE parentId = ?"); + selectItemByIdStmt = db.prepare(" + SELECT * + FROM item + WHERE driveId = ?1 AND id = ?2 + "); + selectItemByParentIdStmt = db.prepare("SELECT driveId, id FROM item WHERE parentId = ? AND id = ?"); + deleteItemByIdStmt = db.prepare("DELETE FROM item WHERE driveId = ? AND id = ?"); } void insert(const ref Item item) @@ -80,8 +98,9 @@ final class ItemDatabase void upsert(const ref Item item) { - auto s = db.prepare("SELECT COUNT(*) FROM item WHERE id = ?"); - s.bind(1, item.id); + auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? 
AND id = ?"); + s.bind(1, item.driveId); + s.bind(2, item.id); auto r = s.exec(); Statement* stmt; if (r.front[0] == "0") stmt = &insertItemStmt; @@ -90,23 +109,25 @@ final class ItemDatabase stmt.exec(); } - Item[] selectChildren(const(char)[] id) + Item[] selectChildren(const(char)[] driveId, const(char)[] id) { - selectItemByParentIdStmt.bind(1, id); + selectItemByParentIdStmt.bind(1, driveId); + selectItemByParentIdStmt.bind(2, id); auto res = selectItemByParentIdStmt.exec(); Item[] items; foreach (row; res) { Item item; - bool found = selectById(row[0], item); - assert(found); + bool found = selectById(row[0], row[1], item); + assert(found, "Could not select the child of the item"); items ~= item; } return items; } - bool selectById(const(char)[] id, out Item item) + bool selectById(const(char)[] driveId, const(char)[] id, out Item item) { - selectItemByIdStmt.bind(1, id); + selectItemByIdStmt.bind(1, driveId); + selectItemByIdStmt.bind(2, id); auto r = selectItemByIdStmt.exec(); if (!r.empty) { item = buildItem(r); @@ -115,143 +136,110 @@ final class ItemDatabase return false; } + // returns the item with the given path + // the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3" bool selectByPath(const(char)[] path, out Item item) { - // prefix with the root dir + Item currItem; path = "root/" ~ path.chompPrefix("."); - - // initialize the search - string[2][] candidates; // [id, parentId] - auto s = db.prepare("SELECT id, parentId FROM item WHERE name = ?"); - s.bind(1, baseName(path)); - auto r = s.exec(); - foreach (row; r) candidates ~= [row[0].dup, row[1].dup]; - path = dirName(path); - - if (path != ".") { - s = db.prepare("SELECT parentId FROM item WHERE id = ? 
AND name = ?"); - // discard the candidates that do not have the correct parent - do { - s.bind(2, baseName(path)); - string[2][] newCandidates; - newCandidates.reserve(candidates.length); - foreach (candidate; candidates) { - s.bind(1, candidate[1]); - r = s.exec(); - if (!r.empty) { - string[2] c = [candidate[0], r.front[0].idup]; - newCandidates ~= c; - } - } - candidates = newCandidates; - path = dirName(path); - } while (path != "."); - } - - // reached the root - string[2][] newCandidates; - foreach (candidate; candidates) { - if (!candidate[1]) { - newCandidates ~= candidate; - } - } - candidates = newCandidates; - assert(candidates.length <= 1); - - if (candidates.length == 1) return selectById(candidates[0][0], item); - return false; - } - - void deleteById(const(char)[] id) - { - auto s = db.prepare("DELETE FROM item WHERE id = ?"); - s.bind(1, id); - s.exec(); - } - - // returns true if the item has the specified parent - bool hasParent(T)(const(char)[] itemId, T parentId) - if (is(T : const(char)[]) || is(T : const(char[])[])) - { - auto s = db.prepare("SELECT parentId FROM item WHERE id = ?"); - while (true) { - s.bind(1, itemId); + auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND parentDriveId IS ?2 AND parentId IS ?3"); + foreach (name; pathSplitter(path)) { + s.bind(1, name); + s.bind(2, currItem.driveId); + s.bind(3, currItem.id); auto r = s.exec(); - if (r.empty) break; - auto currParentId = r.front[0]; - static if (is(T : const(char)[])) { - if (currParentId == parentId) return true; - } else { - foreach (id; parentId) if (currParentId == id) return true; + if (r.empty) return false; + currItem = buildItem(r); + // if the item of type remote jump to the child + if (currItem.type == ItemType.remote) { + auto children = selectChildren(currItem.driveId, currItem.id); + enforce(children.length == 1, "The remote item has more than 1 child"); + currItem = children[0]; } - itemId = currParentId.dup; } - return false; + item = currItem; + 
return true; + } + + void deleteById(const(char)[] driveId, const(char)[] id) + { + deleteItemByIdStmt.bind(1, driveId); + deleteItemByIdStmt.bind(2, id); + deleteItemByIdStmt.exec(); } private void bindItem(const ref Item item, ref Statement stmt) { with (stmt) with (item) { - bind(1, id); - bind(2, name); + bind(1, driveId); + bind(2, id); + bind(3, name); string typeStr = null; final switch (type) with (ItemType) { - case file: typeStr = "file"; break; - case dir: typeStr = "dir"; break; + case file: typeStr = "file"; break; + case dir: typeStr = "dir"; break; + case remote: typeStr = "remote"; break; } - bind(3, typeStr); - bind(4, eTag); - bind(5, cTag); - bind(6, mtime.toISOExtString()); - bind(7, parentId); - bind(8, crc32Hash); - bind(9, sha1Hash); - bind(10, quickXorHash); + bind(4, typeStr); + bind(5, eTag); + bind(6, cTag); + bind(7, mtime.toISOExtString()); + bind(8, parentDriveId); + bind(9, parentId); + bind(10, crc32Hash); + bind(11, sha1Hash); + bind(12, quickXorHash); } } private Item buildItem(Statement.Result result) { - assert(!result.empty && result.front.length == 10); + assert(!result.empty, "The result must not be empty"); + assert(result.front.length == 12, "The result must have 12 columns"); Item item = { - id: result.front[0].dup, - name: result.front[1].dup, - eTag: result.front[3].dup, - cTag: result.front[4].dup, - mtime: SysTime.fromISOExtString(result.front[5]), - parentId: result.front[6].dup, - crc32Hash: result.front[7].dup, - sha1Hash: result.front[8].dup, - quickXorHash: result.front[9].dup + driveId: result.front[0].dup, + id: result.front[1].dup, + name: result.front[2].dup, + eTag: result.front[4].dup, + cTag: result.front[5].dup, + mtime: SysTime.fromISOExtString(result.front[6]), + parentDriveId: result.front[7].dup, + parentId: result.front[8].dup, + crc32Hash: result.front[9].dup, + sha1Hash: result.front[10].dup, + quickXorHash: result.front[11].dup }; - switch (result.front[2]) { - case "file": item.type = 
ItemType.file; break; - case "dir": item.type = ItemType.dir; break; + switch (result.front[3]) { + case "file": item.type = ItemType.file; break; + case "dir": item.type = ItemType.dir; break; + case "remote": item.type = ItemType.remote; break; default: assert(0); } return item; } // computes the path of the given item id - // the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3" - // a trailing slash is never added - string computePath(const(char)[] id) + // the path is relative to the sync directory ex: "Music/Turbo Killer.mp3" + // a trailing slash is not added if the item is a directory + string computePath(const(char)[] driveId, const(char)[] id) { string path; - auto s = db.prepare("SELECT name, parentId FROM item WHERE id = ?"); + Item item; while (true) { - s.bind(1, id); - auto r = s.exec(); - enforce(!r.empty, "Unknow item id"); - if (r.front[1]) { - if (path) path = r.front[0].idup ~ "/" ~ path; - else path = r.front[0].idup; + enforce(selectById(driveId, id, item), "Unknow item id"); + if (item.type == ItemType.remote) { + // substitute the last name with the current + path = item.name ~ path[indexOf(path, '/') .. $]; + } else if (item.parentId) { + if (path) path = item.name ~ "/" ~ path; + else path = item.name; } else { // root if (!path) path = "."; break; } - id = r.front[1].dup; + driveId = item.parentDriveId; + id = item.parentId; } return path; } diff --git a/src/sync.d b/src/sync.d index a481ffba..fee2bd69 100644 --- a/src/sync.d +++ b/src/sync.d @@ -43,12 +43,14 @@ private Item makeItem(const ref JSONValue jsonItem) } Item item = { + driveId: jsonItem["parentReference"]["driveId"].str, id: jsonItem["id"].str, name: jsonItem["name"].str, type: type, eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned for the root in OneDrive Biz cTag: "cTag" !in jsonItem ? 
null : jsonItem["cTag"].str, // cTag is missing in old files (plus all folders) mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), + parentDriveId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["driveId"].str, parentId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["id"].str }; @@ -108,7 +110,7 @@ final class SyncEngine // list of items to skip while applying the changes private string[] skippedItems; // list of items to delete after the changes has been downloaded - private string[] idsToDelete; + private string[2][] idsToDelete; this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, SelectiveSync selectiveSync) { @@ -182,6 +184,7 @@ final class SyncEngine private void applyDifference(JSONValue item) { + string driveId = item["parentReference"]["driveId"].str; string id = item["id"].str; string name = item["name"].str; @@ -209,9 +212,9 @@ final class SyncEngine // rename the local item if it is unsynced and there is a new version of it Item oldItem; string oldPath; - bool cached = itemdb.selectById(id, oldItem); + bool cached = itemdb.selectById(driveId, id, oldItem); if (cached && eTag != oldItem.eTag) { - oldPath = itemdb.computePath(id); + oldPath = itemdb.computePath(driveId, id); if (!isItemSynced(oldItem, oldPath)) { log.vlog("The local item is unsynced, renaming"); if (exists(oldPath)) safeRename(oldPath); @@ -222,14 +225,14 @@ final class SyncEngine // check if the item is to be deleted if (isItemDeleted(item)) { log.vlog("The item is marked for deletion"); - if (cached) idsToDelete ~= id; + if (cached) idsToDelete ~= [driveId, id]; return; } // compute the path of the item string path = "."; if (parentId) { - path = itemdb.computePath(parentId) ~ "/" ~ name; + path = itemdb.computePath(driveId, parentId) ~ "/" ~ name; // selective sync if (selectiveSync.isPathExcluded(path)) { log.vlog("Filtered out: ", path); @@ -281,18 +284,21 @@ final class SyncEngine case ItemType.dir: 
log.log("Creating directory: ", path); mkdir(path); - break; + break; + case ItemType.remote: + assert(0); } setTimes(path, item.mtime, item.mtime); } private void applyChangedItem(Item oldItem, Item newItem, string newPath) { + assert(oldItem.driveId == newItem.driveId); assert(oldItem.id == newItem.id); assert(oldItem.type == newItem.type); if (oldItem.eTag != newItem.eTag) { - string oldPath = itemdb.computePath(oldItem.id); + string oldPath = itemdb.computePath(oldItem.driveId, oldItem.id); if (oldPath != newPath) { log.log("Moving: ", oldPath, " -> ", newPath); if (exists(newPath)) { @@ -343,6 +349,8 @@ final class SyncEngine log.vlog("The local item is a file but should be a directory"); } break; + case ItemType.remote: + assert(0); } return false; } @@ -350,9 +358,9 @@ final class SyncEngine private void deleteItems() { log.vlog("Deleting files ..."); - foreach_reverse (id; idsToDelete) { - string path = itemdb.computePath(id); - itemdb.deleteById(id); + foreach_reverse (i; idsToDelete) { + string path = itemdb.computePath(i[0], i[1]); + itemdb.deleteById(i[0], i[1]); if (exists(path)) { if (isFile(path)) { remove(path); @@ -400,7 +408,7 @@ final class SyncEngine log.vlog("Filtered out"); return; } - string path = itemdb.computePath(item.id); + string path = itemdb.computePath(item.driveId, item.id); if (selectiveSync.isPathExcluded(path)) { log.vlog("Filtered out: ", path); return; @@ -413,6 +421,8 @@ final class SyncEngine case ItemType.file: uploadFileDifferences(item, path); break; + case ItemType.remote: + assert(0); } } @@ -427,7 +437,7 @@ final class SyncEngine } else { log.vlog("The directory has not changed"); // loop trough the children - foreach (Item child; itemdb.selectChildren(item.id)) { + foreach (Item child; itemdb.selectChildren(item.driveId, item.id)) { uploadDifferences(child); } } @@ -552,7 +562,7 @@ final class SyncEngine if (e.httpStatusCode == 404) log.log(e.msg); else throw e; } - itemdb.deleteById(item.id); + 
itemdb.deleteById(item.driveId, item.id); } private void uploadLastModifiedTime(const(char)[] id, const(char)[] eTag, SysTime mtime) From 7de0d97e6ec24564cc80b7dd1539b96772407979 Mon Sep 17 00:00:00 2001 From: skilion Date: Wed, 14 Jun 2017 22:30:29 +0200 Subject: [PATCH 23/33] fix error message --- src/itemdb.d | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/itemdb.d b/src/itemdb.d index 73c791dc..11677e88 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -153,7 +153,11 @@ final class ItemDatabase // if the item of type remote jump to the child if (currItem.type == ItemType.remote) { auto children = selectChildren(currItem.driveId, currItem.id); - enforce(children.length == 1, "The remote item has more than 1 child"); + enforce(children.length == 1, "The remote item does not have exactly 1 child"); + // keep some characteristics of the remote item + children[0].name = currItem.name; + children[0].eTag = currItem.eTag; + children[0].cTag = currItem.cTag; currItem = children[0]; } } From 65bb14ad3769df2c8d6072eeb4b59ae7c3d06304 Mon Sep 17 00:00:00 2001 From: skilion Date: Wed, 14 Jun 2017 22:39:36 +0200 Subject: [PATCH 24/33] WIP for remote items support --- src/itemdb.d | 6 +-- src/sync.d | 118 +++++++++++++++++++++++++-------------------------- 2 files changed, 59 insertions(+), 65 deletions(-) diff --git a/src/itemdb.d b/src/itemdb.d index 11677e88..07a75506 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -150,14 +150,12 @@ final class ItemDatabase auto r = s.exec(); if (r.empty) return false; currItem = buildItem(r); - // if the item of type remote jump to the child + // if the item is of type remote substitute it with the child if (currItem.type == ItemType.remote) { auto children = selectChildren(currItem.driveId, currItem.id); enforce(children.length == 1, "The remote item does not have exactly 1 child"); - // keep some characteristics of the remote item + // keep the name of the remote item children[0].name = currItem.name; - 
children[0].eTag = currItem.eTag; - children[0].cTag = currItem.cTag; currItem = children[0]; } } diff --git a/src/sync.d b/src/sync.d index fee2bd69..ae907b32 100644 --- a/src/sync.d +++ b/src/sync.d @@ -31,6 +31,11 @@ private bool isItemRoot(const ref JSONValue item) return ("root" in item) != null; } +private bool isItemRemote(const ref JSONValue item) +{ + return ("remoteItem" in item) != null; +} + private Item makeItem(const ref JSONValue jsonItem) { ItemType type; @@ -38,8 +43,8 @@ private Item makeItem(const ref JSONValue jsonItem) type = ItemType.file; } else if (isItemFolder(jsonItem)) { type = ItemType.dir; - } else { - assert(0); + } else if (isItemRemote(jsonItem)) { + type = ItemType.remote; } Item item = { @@ -49,7 +54,7 @@ private Item makeItem(const ref JSONValue jsonItem) type: type, eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned for the root in OneDrive Biz cTag: "cTag" !in jsonItem ? null : jsonItem["cTag"].str, // cTag is missing in old files (plus all folders) - mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), + mtime: isItemRemote(jsonItem) ? SysTime(0) : SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), parentDriveId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["driveId"].str, parentId: isItemRoot(jsonItem) ? 
null : jsonItem["parentReference"]["id"].str }; @@ -182,39 +187,56 @@ final class SyncEngine assumeSafeAppend(skippedItems); } - private void applyDifference(JSONValue item) + private void applyDifference(JSONValue jsonItem) { - string driveId = item["parentReference"]["driveId"].str; - string id = item["id"].str; - string name = item["name"].str; - - log.vlog(id, " ", name); - - // eTag and parentId do not exists for the root in OneDrive Biz - string eTag, parentId; - if (!isItemRoot(item)) { - eTag = item["eTag"].str; - parentId = item["parentReference"]["id"].str; - } + Item item = makeItem(jsonItem); + log.vlog(item.id, " ", item.name); // skip unwanted items early - if (skippedItems.find(parentId).length != 0) { + bool unwanted; + unwanted |= skippedItems.find(item.parentId).length != 0; + unwanted |= selectiveSync.isNameExcluded(item.name); + unwanted |= selectiveSync.isPathExcluded(path); + if (unwanted) { log.vlog("Filtered out"); - skippedItems ~= id; + skippedItems ~= item.id; return; } - if (selectiveSync.isNameExcluded(name)) { - log.vlog("Filtered out"); - skippedItems ~= id; + + // compute the path of the item + string path = "."; + if (!isItemRoot(jsonItem)) { + path = itemdb.computePath(item.driveId, item.parentId) ~ "/" ~ item.name; + } + + // check if the item is to be deleted + if (isItemDeleted(jsonItem)) { + log.vlog("The item is marked for deletion"); + idsToDelete ~= [item.driveId, item.id]; + return; + } + + // check the item type + if (isItemRemote(jsonItem)) { + // TODO + // check name change + // scan the children later + // fix child references + log.vlog("Remote items are not supported yet"); + skippedItems ~= item.id; + return; + } else if (!isItemFile(jsonItem) && !isItemFolder(jsonItem)) { + log.vlog("The item is neither a file nor a directory, skipping"); + skippedItems ~= item.id; return; } // rename the local item if it is unsynced and there is a new version of it Item oldItem; string oldPath; - bool cached = 
itemdb.selectById(driveId, id, oldItem); - if (cached && eTag != oldItem.eTag) { - oldPath = itemdb.computePath(driveId, id); + bool cached = itemdb.selectById(item.driveId, item.id, oldItem); + if (cached && item.eTag != oldItem.eTag) { + oldPath = itemdb.computePath(item.driveId, item.id); if (!isItemSynced(oldItem, oldPath)) { log.vlog("The local item is unsynced, renaming"); if (exists(oldPath)) safeRename(oldPath); @@ -222,44 +244,17 @@ final class SyncEngine } } - // check if the item is to be deleted - if (isItemDeleted(item)) { - log.vlog("The item is marked for deletion"); - if (cached) idsToDelete ~= [driveId, id]; - return; - } - - // compute the path of the item - string path = "."; - if (parentId) { - path = itemdb.computePath(driveId, parentId) ~ "/" ~ name; - // selective sync - if (selectiveSync.isPathExcluded(path)) { - log.vlog("Filtered out: ", path); - skippedItems ~= id; - return; - } - } - - if (!isItemFile(item) && !isItemFolder(item)) { - log.vlog("The item is neither a file nor a directory, skipping"); - skippedItems ~= id; - return; - } - - Item newItem = makeItem(item); - if (!cached) { - applyNewItem(newItem, path); + applyNewItem(item, path); } else { - applyChangedItem(oldItem, newItem, path); + applyChangedItem(oldItem, oldPath, item, path); } // save the item in the db if (oldItem.id) { - itemdb.update(newItem); + itemdb.update(item); } else { - itemdb.insert(newItem); + itemdb.insert(item); } } @@ -272,7 +267,7 @@ final class SyncEngine setTimes(path, item.mtime, item.mtime); return; } else { - log.vlog("The local item is out of sync, renaming ..."); + log.vlog("The local item is out of sync, renaming..."); safeRename(path); } } @@ -285,24 +280,25 @@ final class SyncEngine log.log("Creating directory: ", path); mkdir(path); break; - case ItemType.remote: - assert(0); + case ItemType.remote: + assert(0); } setTimes(path, item.mtime, item.mtime); } - private void applyChangedItem(Item oldItem, Item newItem, string newPath) + // update 
a local item + // the local item is assumed to be in sync with the local db + private void applyChangedItem(Item oldItem, string oldPath, Item newItem, string newPath) { assert(oldItem.driveId == newItem.driveId); assert(oldItem.id == newItem.id); assert(oldItem.type == newItem.type); if (oldItem.eTag != newItem.eTag) { - string oldPath = itemdb.computePath(oldItem.driveId, oldItem.id); if (oldPath != newPath) { log.log("Moving: ", oldPath, " -> ", newPath); if (exists(newPath)) { - log.vlog("The destination is occupied, renaming ..."); + log.vlog("The destination is occupied, renaming the conflicting file..."); safeRename(newPath); } rename(oldPath, newPath); From b703a824c719c4ba949f443bbea18f6f635ff10c Mon Sep 17 00:00:00 2001 From: skilion Date: Thu, 15 Jun 2017 12:59:33 +0200 Subject: [PATCH 25/33] compute path before performing selective sync --- src/sync.d | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/sync.d b/src/sync.d index ae907b32..f7c883f4 100644 --- a/src/sync.d +++ b/src/sync.d @@ -192,24 +192,26 @@ final class SyncEngine Item item = makeItem(jsonItem); log.vlog(item.id, " ", item.name); - // skip unwanted items early + string path = "."; bool unwanted; unwanted |= skippedItems.find(item.parentId).length != 0; unwanted |= selectiveSync.isNameExcluded(item.name); - unwanted |= selectiveSync.isPathExcluded(path); + + if (!unwanted && !isItemRoot(jsonItem)) { + // delay path computation after assuring the item parent is not excluded + path = itemdb.computePath(item.parentDriveId, item.parentId) ~ "/" ~ item.name; + // selective sync + unwanted |= selectiveSync.isPathExcluded(path); + } + + // skip unwanted items early if (unwanted) { log.vlog("Filtered out"); skippedItems ~= item.id; return; } - // compute the path of the item - string path = "."; - if (!isItemRoot(jsonItem)) { - path = itemdb.computePath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - - // check if the item is to be deleted + // check 
if the item is going to be deleted if (isItemDeleted(jsonItem)) { log.vlog("The item is marked for deletion"); idsToDelete ~= [item.driveId, item.id]; From 9e2123249aa9db794327871d3a48a5e543ed63e5 Mon Sep 17 00:00:00 2001 From: skilion Date: Thu, 15 Jun 2017 13:02:04 +0200 Subject: [PATCH 26/33] do not check token status during initialization --- src/onedrive.d | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/onedrive.d b/src/onedrive.d index 07a2d14a..14f96ab3 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -13,6 +13,7 @@ private immutable { string driveUrl = "https://graph.microsoft.com/v1.0/me/drive"; string itemByIdUrl = "https://graph.microsoft.com/v1.0/me/drive/items/"; string itemByPathUrl = "https://graph.microsoft.com/v1.0/me/drive/root:/"; + string driveByIdUrl = "https://graph.microsoft.com/v1.0/me/drives/"; } class OneDriveException: Exception @@ -64,15 +65,8 @@ final class OneDriveApi { try { refreshToken = readText(cfg.refreshTokenFilePath); - getDefaultDrive(); } catch (FileException e) { return authorize(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { - log.log("Refresh token invalid"); - return authorize(); - } - throw e; } return true; } @@ -104,11 +98,11 @@ final class OneDriveApi } // https://dev.onedrive.com/items/view_delta.htm - JSONValue viewChangesById(const(char)[] id, const(char)[] deltaLink) + JSONValue viewChangesById(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) { checkAccessTokenExpired(); if (deltaLink) return get(deltaLink); - const(char)[] url = itemByIdUrl ~ id ~ "/delta"; + const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta"; url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference"; return get(url); } @@ -252,8 +246,15 @@ final class OneDriveApi private void checkAccessTokenExpired() { - if (Clock.currTime() >= accessTokenExpiration) { - newToken(); 
+ try { + if (Clock.currTime() >= accessTokenExpiration) { + newToken(); + } + } catch (OneDriveException e) { + if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { + e.msg ~= "\nRefresh token invalid, use --logout to authorize the client again"; + } + throw e; } } From 3529a4f78b5a8cfdbd4f05c51034eb5a4a595cfe Mon Sep 17 00:00:00 2001 From: skilion Date: Thu, 15 Jun 2017 13:45:14 +0200 Subject: [PATCH 27/33] make makeItem() more resiliant --- src/sync.d | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/sync.d b/src/sync.d index f7c883f4..1f2b068d 100644 --- a/src/sync.d +++ b/src/sync.d @@ -54,7 +54,7 @@ private Item makeItem(const ref JSONValue jsonItem) type: type, eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned for the root in OneDrive Biz cTag: "cTag" !in jsonItem ? null : jsonItem["cTag"].str, // cTag is missing in old files (plus all folders) - mtime: isItemRemote(jsonItem) ? SysTime(0) : SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str), + mtime: "fileSystemInfo" in jsonItem ? SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str) : SysTime(0), parentDriveId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["driveId"].str, parentId: isItemRoot(jsonItem) ? 
null : jsonItem["parentReference"]["id"].str }; @@ -71,9 +71,6 @@ private Item makeItem(const ref JSONValue jsonItem) } else { log.vlog("The file does not have any hash"); } - } else { - // 'hashes' is missing in old files - log.vlog("No hashes facet"); } } @@ -148,7 +145,7 @@ final class SyncEngine } catch (FileException e) { // swallow exception } - + try { JSONValue changes; do { @@ -189,8 +186,8 @@ final class SyncEngine private void applyDifference(JSONValue jsonItem) { + log.vlog(jsonItem["id"].str, " ", jsonItem["name"].str); Item item = makeItem(jsonItem); - log.vlog(item.id, " ", item.name); string path = "."; bool unwanted; From 126f2719b838ef3ffb085591bb9fff62bd17e4f8 Mon Sep 17 00:00:00 2001 From: skilion Date: Thu, 15 Jun 2017 14:19:23 +0200 Subject: [PATCH 28/33] hack for onedrive biz --- src/sync.d | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/sync.d b/src/sync.d index 1f2b068d..a6f7f10c 100644 --- a/src/sync.d +++ b/src/sync.d @@ -36,6 +36,9 @@ private bool isItemRemote(const ref JSONValue item) return ("remoteItem" in item) != null; } +// HACK: OneDrive Biz does not return parentReference for the root +string defaultDriveId; + private Item makeItem(const ref JSONValue jsonItem) { ItemType type; @@ -48,7 +51,7 @@ private Item makeItem(const ref JSONValue jsonItem) } Item item = { - driveId: jsonItem["parentReference"]["driveId"].str, + driveId: isItemRoot(jsonItem) ? 
defaultDriveId : jsonItem["parentReference"]["driveId"].str, id: jsonItem["id"].str, name: jsonItem["name"].str, type: type, @@ -147,6 +150,7 @@ final class SyncEngine } try { + defaultDriveId = onedrive.getDefaultDrive()["id"].str; JSONValue changes; do { // get changes from the server From 974be0884b62864d38897ccdac2a1e7f8fee7255 Mon Sep 17 00:00:00 2001 From: skilion Date: Thu, 15 Jun 2017 15:17:47 +0200 Subject: [PATCH 29/33] fixed bug where an item is incorrectly assumed to be a file --- src/sync.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync.d b/src/sync.d index a6f7f10c..48a90307 100644 --- a/src/sync.d +++ b/src/sync.d @@ -63,7 +63,7 @@ private Item makeItem(const ref JSONValue jsonItem) }; // extract the file hash - if (type == ItemType.file) { + if (isItemFile(jsonItem)) { if ("hashes" in jsonItem["file"]) { if ("crc32Hash" in jsonItem["file"]["hashes"]) { item.crc32Hash = jsonItem["file"]["hashes"]["crc32Hash"].str; From 1581c6989aff571b694473c3e24e4438db9675e1 Mon Sep 17 00:00:00 2001 From: skilion Date: Fri, 16 Jun 2017 11:39:28 +0200 Subject: [PATCH 30/33] fix error in QuickXorHash implementation --- src/qxor.d | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/qxor.d b/src/qxor.d index f164a342..b9e9fb6d 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -19,11 +19,11 @@ struct QuickXor { int vectorArrayIndex = _shiftSoFar / 64; int vectorOffset = _shiftSoFar % 64; - size_t iterations = min(array.length, widthInBits); + immutable size_t iterations = min(array.length, widthInBits); for (size_t i = 0; i < iterations; i++) { - bool isLastCell = vectorArrayIndex == _data.length - 1; - int bitsInVectorCell = isLastCell ? bitsInLastCell : 64; + immutable bool isLastCell = vectorArrayIndex == _data.length - 1; + immutable int bitsInVectorCell = isLastCell ? 
bitsInLastCell : 64; if (vectorOffset <= bitsInVectorCell - 8) { for (size_t j = i; j < array.length; j += widthInBits) { @@ -50,7 +50,7 @@ struct QuickXor } } - _shiftSoFar += (_shiftSoFar + shift * array.length) % widthInBits; + _shiftSoFar = cast(int) (_shiftSoFar + shift * (array.length % widthInBits)) % widthInBits; _lengthSoFar += array.length; } From f00a80bcac8d6b66ebd8d53c31d2fc70dddc9aac Mon Sep 17 00:00:00 2001 From: skilion Date: Fri, 16 Jun 2017 16:44:48 +0200 Subject: [PATCH 31/33] update documentation --- README.md | 119 +++++++++++++++++++++++++++++++++++------------------- 1 file changed, 77 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 882c0225..9b619b68 100644 --- a/README.md +++ b/README.md @@ -1,97 +1,132 @@ -OneDrive Free Client -==================== +# OneDrive Free Client +###### A complete tool to interact with OneDrive on Linux. Built following the UNIX philosophy. ### Features: * State caching * Real-Time file monitoring with Inotify * Resumable uploads +* Support OneDrive for Business (part of Office 365) ### What's missing: -* OneDrive for business is not supported -* While local changes are uploaded right away, remote changes are delayed. 
+* Shared folders are not supported
+* While local changes are uploaded right away, remote changes are delayed
 * No GUI

+## Setup
+
 ### Dependencies
 * [libcurl](http://curl.haxx.se/libcurl/)
 * [SQLite 3](https://www.sqlite.org/)
 * [Digital Mars D Compiler (DMD)](http://dlang.org/download.html)

-### Dependencies: Ubuntu
-```
-sudo apt-get install libcurl-dev
+### Dependencies: Ubuntu/Debian
+```sh
+sudo apt-get install libcurl4-openssl-dev
 sudo apt-get install libsqlite3-dev
+# install DMD
 sudo wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list
 wget -qO - http://dlang.org/d-keyring.gpg | sudo apt-key add -
 sudo apt-get update && sudo apt-get install dmd-bin
 ```

 ### Installation
-```
-git clone git@github.com:skilion/onedrive.git
+```sh
+git clone https://github.com/skilion/onedrive.git
 cd onedrive
 make
 sudo make install
 ```

-### Configuration:
-You should copy the default config file into your home directory before making changes:
+### First run :zap:
+After installing the application you must run it at least once from the terminal to authorize it. The procedure requires a web browser.
+You will be asked to open a specific link where you will have to log in to your Microsoft Account and give the application the permission to access your files. After giving the permission, you will be redirected to a blank page. Copy the URI of the blank page into the application.
+
+### Uninstall
+```sh
+sudo make uninstall
+# delete the application state
+rm -rf ~/.config/onedrive
 ```
+
+## Configuration
+Configuration is optional. By default all files are downloaded in `~/OneDrive` and only hidden files are skipped. 
+If you want to change the defaults, you can copy and edit the included config file into your `~/.config/onedrive` directory: +```sh mkdir -p ~/.config/onedrive cp ./config ~/.config/onedrive/config +nano ~/.config/onedrive/config ``` Available options: * `sync_dir`: directory where the files will be synced -* `skip_file`: any files or directories that match this pattern will be skipped during sync +* `skip_file`: any files or directories that match this pattern will be skipped during sync. -Pattern are case insensitive. -`*` and `?` [wildcards characters][1] are supported. -Use `|` to separate multiple patterns. +Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. -[1]: https://technet.microsoft.com/en-us/library/bb490639.aspx +Note: after changing `skip_file`, you must perform a full synchronization by executing `onedrive --resync` -### Selective sync +### Selective sync :zap: Selective sync allows you to sync only specific files and directories. To enable selective sync create a file named `sync_list` in `~/.config/onedrive`. -Each line represents a path to a file or directory relative from your `sync_dir`. -``` -$ cat ~/.config/onedrive/sync_list +Each line of the file represents a path to a file or directory relative from your `sync_dir`. +Here is an example: +```text Backup -Documents/report.odt +Documents/latest_report.docx Work/ProjectX notes.txt ``` +Note: after changing the sync list, you must perform a full synchronization by executing `onedrive --resync` -### First run -The first time you run the program you will be asked to sign in. The procedure requires a web browser. 
-
-### Service
+### OneDrive service
 If you want to sync your files automatically, enable and start the systemd service:
-```
+```sh
 systemctl --user enable onedrive
 systemctl --user start onedrive
 ```
 To see the logs run:
-```
+```sh
 journalctl --user-unit onedrive -f
 ```

-### Usage:
-```
-onedrive [OPTION]...
-
--m --monitor Keep monitoring for local and remote changes.
- --resync Forget the last saved state, perform a full sync.
- --logout Logout the current user.
- --confdir Set the directory to use to store the configuration files.
--v --verbose Print more details, useful for debugging.
--h --help This help information.
+### Using multiple accounts
+You can run multiple instances of the application specifying a different config directory in order to handle multiple OneDrive accounts.
+To do this you can use the `--confdir` parameter.
+Here is an example:
+```sh
+onedrive --monitor --confdir="~/.config/onedrivePersonal" &
+onedrive --monitor --confdir="~/.config/onedriveWork" &
 ```

-### Notes:
-* After changing `skip_file` in your configs or the sync list, you must execute `onedrive --resync`
-* [Windows naming conventions][2] apply
-* Use `make debug` to generate an executable for debugging
+`--monitor` keeps the application running and monitoring for changes

-[2]: https://msdn.microsoft.com/en-us/library/aa365247
+
+`&` puts the application in background and leaves the terminal interactive
+
+## Extra
+
+### Reporting issues
+If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
+
+1. Have compiled the application in debug mode with `make debug`
+2. Run the application in verbose mode `onedrive --verbose`
+3. Have the log of the error (preferably uploaded on an external website such as [pastebin](https://pastebin.com/))
+4. Collect any information that you think is relevant to the error (such as the steps to trigger it)
+
+### All available commands:
+```text
+Usage: onedrive [OPTION]... 
+
+no option Sync and exit.
+-m --monitor Keep monitoring for local and remote changes.
+ --resync Forget the last saved state, perform a full sync.
+ --logout Logout the current user.
+ --confdir Set the directory to use to store the configuration files.
+-v --verbose Print more details, useful for debugging.
+ --print-token Print the access token, useful for debugging.
+-h --help This help information.
+```
+
+### File naming
+The files and directories in the synchronization directory must follow the [Windows naming conventions](https://msdn.microsoft.com/en-us/library/aa365247).
+The application will crash for example if you have two files with the same name but different case. This is expected behavior and won't be fixed.

From ce4802a434dd75571f13cc988319cadbb296d27b Mon Sep 17 00:00:00 2001
From: skilion
Date: Fri, 16 Jun 2017 18:41:57 +0200
Subject: [PATCH 32/33] added fedora/centos dependencies

---
 README.md | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 9b619b68..18865ce9 100644
--- a/README.md
+++ b/README.md
@@ -23,10 +23,14 @@
 ```sh
 sudo apt-get install libcurl4-openssl-dev
 sudo apt-get install libsqlite3-dev
-# install DMD
-sudo wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list
-wget -qO - http://dlang.org/d-keyring.gpg | sudo apt-key add -
-sudo apt-get update && sudo apt-get install dmd-bin
+curl -fsS https://dlang.org/install.sh | bash -s dmd
+```
+
+### Dependencies: Fedora/CentOS
+```sh
+sudo yum install libcurl-devel
+sudo yum install sqlite-devel
+curl -fsS https://dlang.org/install.sh | bash -s dmd
+```

 ### Installation

From a61abc4e1569121b14886de2d3904ebf7d74a4d3 Mon Sep 17 00:00:00 2001
From: skilion
Date: Tue, 20 Jun 2017 22:01:57 +0200
Subject: [PATCH 33/33] fix issue #202

---
 src/sync.d | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/sync.d b/src/sync.d
index 48a90307..5f12767f 100644
--- 
a/src/sync.d +++ b/src/sync.d @@ -53,10 +53,10 @@ private Item makeItem(const ref JSONValue jsonItem) Item item = { driveId: isItemRoot(jsonItem) ? defaultDriveId : jsonItem["parentReference"]["driveId"].str, id: jsonItem["id"].str, - name: jsonItem["name"].str, + name: "name" in jsonItem ? jsonItem["name"].str : null, // name may be missing for deleted files in OneDrive Biz type: type, - eTag: isItemRoot(jsonItem) ? null : jsonItem["eTag"].str, // eTag is not returned for the root in OneDrive Biz - cTag: "cTag" !in jsonItem ? null : jsonItem["cTag"].str, // cTag is missing in old files (plus all folders) + eTag: "eTag" in jsonItem ? jsonItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Biz + cTag: "cTag" in jsonItem ? jsonItem["cTag"].str : null, // cTag is missing in old files (and all folders) mtime: "fileSystemInfo" in jsonItem ? SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str) : SysTime(0), parentDriveId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["driveId"].str, parentId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["id"].str @@ -190,7 +190,7 @@ final class SyncEngine private void applyDifference(JSONValue jsonItem) { - log.vlog(jsonItem["id"].str, " ", jsonItem["name"].str); + log.vlog(jsonItem["id"].str, " ", "name" in jsonItem ? jsonItem["name"].str : null); Item item = makeItem(jsonItem); string path = ".";