Merge branch 'master' of github.com:skilion/onedrive

This commit is contained in:
lanhin 2017-06-22 22:25:04 +08:00
commit 8f5f54f6a1
12 changed files with 517 additions and 352 deletions

View file

@ -1,6 +1,5 @@
DC = dmd
DFLAGS = -ofonedrive -L-lcurl -L-lsqlite3 -L-ldl
DESTDIR = /usr/local/bin
PREFIX = /usr/local
SOURCES = \
src/config.d \
@ -9,28 +8,34 @@ SOURCES = \
src/main.d \
src/monitor.d \
src/onedrive.d \
src/qxor.d \
src/selective.d \
src/sqlite.d \
src/sync.d \
src/upload.d \
src/util.d
all: onedrive onedrive.service
onedrive: $(SOURCES)
$(DC) -O -release -inline -boundscheck=off $(DFLAGS) $(SOURCES)
dmd -g -inline -O -release $(DFLAGS) $(SOURCES)
onedrive.service:
sed "s|@PREFIX@|$(PREFIX)|g" onedrive.service.in > onedrive.service
debug: $(SOURCES)
$(DC) -debug -g -gs $(DFLAGS) $(SOURCES)
dmd -debug -g -gs $(DFLAGS) $(SOURCES)
unittest: $(SOURCES)
$(DC) -unittest -debug -g -gs $(DFLAGS) $(SOURCES)
dmd -debug -g -gs -unittest $(DFLAGS) $(SOURCES)
clean:
rm -f onedrive.o onedrive
rm -f onedrive onedrive.o onedrive.service
install: onedrive onedrive.service
install onedrive $(DESTDIR)/onedrive
install -m 644 onedrive.service /usr/lib/systemd/user
install: all
install -D onedrive $(DESTDIR)$(PREFIX)/bin/onedrive
install -D -m 644 onedrive.service $(DESTDIR)/usr/lib/systemd/user/onedrive.service
uninstall:
rm -f $(DESTDIR)/onedrive
rm -f /usr/lib/systemd/user/onedrive.service
rm -f $(DESTDIR)$(PREFIX)/bin/onedrive
rm -f $(DESTDIR)/usr/lib/systemd/user/onedrive.service

129
README.md
View file

@ -1,97 +1,136 @@
OneDrive Free Client
====================
# OneDrive Free Client
###### A complete tool to interact with OneDrive on Linux. Built following the UNIX philosophy.
### Features:
* State caching
* Real-Time file monitoring with Inotify
* Resumable uploads
* Supports OneDrive for Business (part of Office 365)
### What's missing:
* OneDrive for business is not supported
* While local changes are uploaded right away, remote changes are delayed.
* Shared folders are not supported
* While local changes are uploaded right away, remote changes are delayed
* No GUI
## Setup
### Dependencies
* [libcurl](http://curl.haxx.se/libcurl/)
* [SQLite 3](https://www.sqlite.org/)
* [Digital Mars D Compiler (DMD)](http://dlang.org/download.html)
### Dependencies: Ubuntu
```
sudo apt-get install libcurl-dev
### Dependencies: Ubuntu/Debian
```sh
sudo apt-get install libcurl4-openssl-dev
sudo apt-get install libsqlite3-dev
sudo wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list
wget -qO - http://dlang.org/d-keyring.gpg | sudo apt-key add -
sudo apt-get update && sudo apt-get install dmd-bin
curl -fsS https://dlang.org/install.sh | bash -s dmd
```
### Dependencies: Fedora/CentOS
```sh
sudo yum install libcurl-devel
sudo yum install sqlite-devel
curl -fsS https://dlang.org/install.sh | bash -s dmd
```
### Installation
```
git clone git@github.com:skilion/onedrive.git
```sh
git clone https://github.com/skilion/onedrive.git
cd onedrive
make
sudo make install
```
### Configuration:
You should copy the default config file into your home directory before making changes:
### First run :zap:
After installing the application, you must run it at least once from the terminal to authorize it. The procedure requires a web browser.
You will be asked to open a specific link where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving the permission, you will be redirected to a blank page. Copy the URI of the blank page into the application.
### Uninstall
```sh
sudo make uninstall
# delete the application state
rm -rf ~/.config/onedrive
```
## Configuration
Configuration is optional. By default all files are downloaded in `~/OneDrive` and only hidden files are skipped.
If you want to change the defaults, you can copy and edit the included config file into your `~/.config/onedrive` directory:
```sh
mkdir -p ~/.config/onedrive
cp ./config ~/.config/onedrive/config
nano ~/.config/onedrive/config
```
Available options:
* `sync_dir`: directory where the files will be synced
* `skip_file`: any files or directories that match this pattern will be skipped during sync
* `skip_file`: any files or directories that match this pattern will be skipped during sync.
Patterns are case insensitive.
`*` and `?` [wildcards characters][1] are supported.
Use `|` to separate multiple patterns.
Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
[1]: https://technet.microsoft.com/en-us/library/bb490639.aspx
Note: after changing `skip_file`, you must perform a full synchronization by executing `onedrive --resync`
### Selective sync
### Selective sync :zap:
Selective sync allows you to sync only specific files and directories.
To enable selective sync create a file named `sync_list` in `~/.config/onedrive`.
Each line represents a path to a file or directory relative from your `sync_dir`.
```
$ cat ~/.config/onedrive/sync_list
Each line of the file represents a path to a file or directory relative from your `sync_dir`.
Here is an example:
```text
Backup
Documents/report.odt
Documents/latest_report.docx
Work/ProjectX
notes.txt
```
Note: after changing the sync list, you must perform a full synchronization by executing `onedrive --resync`
### First run
The first time you run the program you will be asked to sign in. The procedure requires a web browser.
### Service
### OneDrive service
If you want to sync your files automatically, enable and start the systemd service:
```
```sh
systemctl --user enable onedrive
systemctl --user start onedrive
```
To see the logs run:
```
```sh
journalctl --user-unit onedrive -f
```
### Usage:
```
onedrive [OPTION]...
-m --monitor Keep monitoring for local and remote changes.
--resync Forget the last saved state, perform a full sync.
--logout Logout the current user.
--confdir Set the directory to use to store the configuration files.
-v --verbose Print more details, useful for debugging.
-h --help This help information.
### Using multiple accounts
You can run multiple instances of the application specifying a different config directory in order to handle multiple OneDrive accounts.
To do this you can use the `--confdir` parameter.
Here is an example:
```sh
onedrive --monitor --confdir="~/.config/onedrivePersonal" &
onedrive --monitor --confdir="~/.config/onedriveWork" &
```
### Notes:
* After changing `skip_file` in your configs or the sync list, you must execute `onedrive --resync`
* [Windows naming conventions][2] apply
* Use `make debug` to generate an executable for debugging
`--monitor` keeps the application running and monitoring for changes
[2]: https://msdn.microsoft.com/en-us/library/aa365247
`&` puts the application in background and leaves the terminal interactive
## Extra
### Reporting issues
If you encounter any bugs you can report them here on Github. Before filing an issue be sure to:
1. Have compiled the application in debug mode with `make debug`
2. Run the application in verbose mode `onedrive --verbose`
3. Have the log of the error (preferably uploaded on an external website such as [pastebin](https://pastebin.com/))
4. Collect any information that you think is relevant to the error (such as the steps to trigger it)
### All available commands:
```text
Usage: onedrive [OPTION]...
no option Sync and exit.
-m --monitor Keep monitoring for local and remote changes.
--resync Forget the last saved state, perform a full sync.
--logout Logout the current user.
--confdir Set the directory to use to store the configuration files.
-v --verbose Print more details, useful for debugging.
--print-token Print the access token, useful for debugging.
-h --help This help information.
```
### File naming
The files and directories in the synchronization directory must follow the [Windows naming conventions](https://msdn.microsoft.com/en-us/library/aa365247).
The application will crash for example if you have two files with the same name but different case. This is expected behavior and won't be fixed.

View file

@ -3,7 +3,7 @@ Description=OneDrive Free Client
Documentation=https://github.com/skilion/onedrive
[Service]
ExecStart=/usr/local/bin/onedrive -m
ExecStart=@PREFIX@/bin/onedrive -m
Restart=no
[Install]

View file

@ -5,7 +5,7 @@ static import log;
final class Config
{
public string refreshTokenFilePath;
public string statusTokenFilePath;
public string deltaLinkFilePath;
public string databaseFilePath;
public string uploadStateFilePath;
public string syncListFilePath;
@ -17,7 +17,7 @@ final class Config
this(string configDirName)
{
refreshTokenFilePath = configDirName ~ "/refresh_token";
statusTokenFilePath = configDirName ~ "/status_token";
deltaLinkFilePath = configDirName ~ "/delta_link";
databaseFilePath = configDirName ~ "/items.sqlite3";
uploadStateFilePath = configDirName ~ "/resume_upload";
userConfigFilePath = configDirName ~ "/config";

View file

@ -4,46 +4,60 @@ import sqlite;
enum ItemType
{
file,
dir
dir,
remote
}
struct Item
{
string driveId;
string id;
string name;
ItemType type;
string eTag;
string cTag;
SysTime mtime;
string parentDriveId;
string parentId;
string crc32;
string crc32Hash;
string sha1Hash;
string quickXorHash;
}
final class ItemDatabase
{
// increment this for every change in the db schema
immutable int itemDatabaseVersion = 3;
immutable int itemDatabaseVersion = 5;
Database db;
Statement insertItemStmt;
Statement updateItemStmt;
Statement selectItemByIdStmt;
Statement selectItemByParentIdStmt;
Statement deleteItemByIdStmt;
this(const(char)[] filename)
{
db = Database(filename);
if (db.getVersion() == 0) {
db.exec("CREATE TABLE item (
id TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
type TEXT NOT NULL,
eTag TEXT,
cTag TEXT,
mtime TEXT NOT NULL,
parentId TEXT,
crc32 TEXT,
FOREIGN KEY (parentId) REFERENCES item (id) ON DELETE CASCADE
driveId TEXT NOT NULL,
id TEXT NOT NULL,
name TEXT NOT NULL,
type TEXT NOT NULL,
eTag TEXT,
cTag TEXT,
mtime TEXT NOT NULL,
parentDriveId TEXT,
parentId TEXT,
crc32Hash TEXT,
sha1Hash TEXT,
quickXorHash TEXT,
PRIMARY KEY (driveId, id),
FOREIGN KEY (parentDriveId, parentId)
REFERENCES item (driveId, id)
ON DELETE CASCADE
ON UPDATE RESTRICT
)");
db.exec("CREATE INDEX name_idx ON item (name)");
db.setVersion(itemDatabaseVersion);
@ -52,15 +66,22 @@ final class ItemDatabase
}
db.exec("PRAGMA foreign_keys = ON");
db.exec("PRAGMA recursive_triggers = ON");
insertItemStmt = db.prepare("INSERT OR REPLACE INTO item (id, name, type, eTag, cTag, mtime, parentId, crc32) VALUES (?, ?, ?, ?, ?, ?, ?, ?)");
insertItemStmt = db.prepare("
INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentDriveId, parentId, crc32Hash, sha1Hash, quickXorHash)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
");
updateItemStmt = db.prepare("
UPDATE item
SET name = ?2, type = ?3, eTag = ?4, cTag = ?5, mtime = ?6, parentId = ?7, crc32 = ?8
WHERE id = ?1
SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentDriveId = ?8, parentId = ?9, crc32Hash = ?10, sha1Hash = ?11, quickXorHash = ?12
WHERE driveId = ?1 AND id = ?2
");
selectItemByIdStmt = db.prepare("SELECT id, name, type, eTag, cTag, mtime, parentId, crc32 FROM item WHERE id = ?");
selectItemByParentIdStmt = db.prepare("SELECT id FROM item WHERE parentId = ?");
selectItemByIdStmt = db.prepare("
SELECT *
FROM item
WHERE driveId = ?1 AND id = ?2
");
selectItemByParentIdStmt = db.prepare("SELECT driveId, id FROM item WHERE parentId = ? AND id = ?");
deleteItemByIdStmt = db.prepare("DELETE FROM item WHERE driveId = ? AND id = ?");
}
void insert(const ref Item item)
@ -77,8 +98,9 @@ final class ItemDatabase
void upsert(const ref Item item)
{
auto s = db.prepare("SELECT COUNT(*) FROM item WHERE id = ?");
s.bind(1, item.id);
auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?");
s.bind(1, item.driveId);
s.bind(2, item.id);
auto r = s.exec();
Statement* stmt;
if (r.front[0] == "0") stmt = &insertItemStmt;
@ -87,23 +109,25 @@ final class ItemDatabase
stmt.exec();
}
Item[] selectChildren(const(char)[] id)
Item[] selectChildren(const(char)[] driveId, const(char)[] id)
{
selectItemByParentIdStmt.bind(1, id);
selectItemByParentIdStmt.bind(1, driveId);
selectItemByParentIdStmt.bind(2, id);
auto res = selectItemByParentIdStmt.exec();
Item[] items;
foreach (row; res) {
Item item;
bool found = selectById(row[0], item);
assert(found);
bool found = selectById(row[0], row[1], item);
assert(found, "Could not select the child of the item");
items ~= item;
}
return items;
}
bool selectById(const(char)[] id, out Item item)
bool selectById(const(char)[] driveId, const(char)[] id, out Item item)
{
selectItemByIdStmt.bind(1, id);
selectItemByIdStmt.bind(1, driveId);
selectItemByIdStmt.bind(2, id);
auto r = selectItemByIdStmt.exec();
if (!r.empty) {
item = buildItem(r);
@ -112,139 +136,112 @@ final class ItemDatabase
return false;
}
// returns the item with the given path
// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
bool selectByPath(const(char)[] path, out Item item)
{
// prefix with the root dir
Item currItem;
path = "root/" ~ path.chompPrefix(".");
// initialize the search
string[2][] candidates; // [id, parentId]
auto s = db.prepare("SELECT id, parentId FROM item WHERE name = ?");
s.bind(1, baseName(path));
auto r = s.exec();
foreach (row; r) candidates ~= [row[0].dup, row[1].dup];
path = dirName(path);
if (path != ".") {
s = db.prepare("SELECT parentId FROM item WHERE id = ? AND name = ?");
// discard the candidates that do not have the correct parent
do {
s.bind(2, baseName(path));
string[2][] newCandidates;
newCandidates.reserve(candidates.length);
foreach (candidate; candidates) {
s.bind(1, candidate[1]);
r = s.exec();
if (!r.empty) {
string[2] c = [candidate[0], r.front[0].idup];
newCandidates ~= c;
}
}
candidates = newCandidates;
path = dirName(path);
} while (path != ".");
}
// reached the root
string[2][] newCandidates;
foreach (candidate; candidates) {
if (!candidate[1]) {
newCandidates ~= candidate;
}
}
candidates = newCandidates;
assert(candidates.length <= 1);
if (candidates.length == 1) return selectById(candidates[0][0], item);
return false;
}
void deleteById(const(char)[] id)
{
auto s = db.prepare("DELETE FROM item WHERE id = ?");
s.bind(1, id);
s.exec();
}
// returns true if the item has the specified parent
bool hasParent(T)(const(char)[] itemId, T parentId)
if (is(T : const(char)[]) || is(T : const(char[])[]))
{
auto s = db.prepare("SELECT parentId FROM item WHERE id = ?");
while (true) {
s.bind(1, itemId);
auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND parentDriveId IS ?2 AND parentId IS ?3");
foreach (name; pathSplitter(path)) {
s.bind(1, name);
s.bind(2, currItem.driveId);
s.bind(3, currItem.id);
auto r = s.exec();
if (r.empty) break;
auto currParentId = r.front[0];
static if (is(T : const(char)[])) {
if (currParentId == parentId) return true;
} else {
foreach (id; parentId) if (currParentId == id) return true;
if (r.empty) return false;
currItem = buildItem(r);
// if the item is of type remote substitute it with the child
if (currItem.type == ItemType.remote) {
auto children = selectChildren(currItem.driveId, currItem.id);
enforce(children.length == 1, "The remote item does not have exactly 1 child");
// keep the name of the remote item
children[0].name = currItem.name;
currItem = children[0];
}
itemId = currParentId.dup;
}
return false;
item = currItem;
return true;
}
void deleteById(const(char)[] driveId, const(char)[] id)
{
deleteItemByIdStmt.bind(1, driveId);
deleteItemByIdStmt.bind(2, id);
deleteItemByIdStmt.exec();
}
private void bindItem(const ref Item item, ref Statement stmt)
{
with (stmt) with (item) {
bind(1, id);
bind(2, name);
bind(1, driveId);
bind(2, id);
bind(3, name);
string typeStr = null;
final switch (type) with (ItemType) {
case file: typeStr = "file"; break;
case dir: typeStr = "dir"; break;
case file: typeStr = "file"; break;
case dir: typeStr = "dir"; break;
case remote: typeStr = "remote"; break;
}
bind(3, typeStr);
bind(4, eTag);
bind(5, cTag);
bind(6, mtime.toISOExtString());
bind(7, parentId);
bind(8, crc32);
bind(4, typeStr);
bind(5, eTag);
bind(6, cTag);
bind(7, mtime.toISOExtString());
bind(8, parentDriveId);
bind(9, parentId);
bind(10, crc32Hash);
bind(11, sha1Hash);
bind(12, quickXorHash);
}
}
private Item buildItem(Statement.Result result)
{
assert(!result.empty && result.front.length == 8);
assert(!result.empty, "The result must not be empty");
assert(result.front.length == 12, "The result must have 12 columns");
Item item = {
id: result.front[0].dup,
name: result.front[1].dup,
eTag: result.front[3].dup,
cTag: result.front[4].dup,
mtime: SysTime.fromISOExtString(result.front[5]),
parentId: result.front[6].dup,
crc32: result.front[7].dup
driveId: result.front[0].dup,
id: result.front[1].dup,
name: result.front[2].dup,
eTag: result.front[4].dup,
cTag: result.front[5].dup,
mtime: SysTime.fromISOExtString(result.front[6]),
parentDriveId: result.front[7].dup,
parentId: result.front[8].dup,
crc32Hash: result.front[9].dup,
sha1Hash: result.front[10].dup,
quickXorHash: result.front[11].dup
};
switch (result.front[2]) {
case "file": item.type = ItemType.file; break;
case "dir": item.type = ItemType.dir; break;
switch (result.front[3]) {
case "file": item.type = ItemType.file; break;
case "dir": item.type = ItemType.dir; break;
case "remote": item.type = ItemType.remote; break;
default: assert(0);
}
return item;
}
// computes the path of the given item id
// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
// a trailing slash is never added
string computePath(const(char)[] id)
// the path is relative to the sync directory ex: "Music/Turbo Killer.mp3"
// a trailing slash is not added if the item is a directory
string computePath(const(char)[] driveId, const(char)[] id)
{
string path;
auto s = db.prepare("SELECT name, parentId FROM item WHERE id = ?");
Item item;
while (true) {
s.bind(1, id);
auto r = s.exec();
enforce(!r.empty, "Unknow item id");
if (r.front[1]) {
if (path) path = r.front[0].idup ~ "/" ~ path;
else path = r.front[0].idup;
enforce(selectById(driveId, id, item), "Unknow item id");
if (item.type == ItemType.remote) {
// substitute the last name with the current
path = item.name ~ path[indexOf(path, '/') .. $];
} else if (item.parentId) {
if (path) path = item.name ~ "/" ~ path;
else path = item.name;
} else {
// root
if (!path) path = ".";
break;
}
id = r.front[1].dup;
driveId = item.parentDriveId;
id = item.parentId;
}
return path;
}

View file

@ -7,7 +7,7 @@ static import log;
int main(string[] args)
{
// configuration directory
string configDirName = expandTilde(environment.get("XDG_CONFIG_HOME", "~/.config")) ~ "/onedrive";
string configDirName = environment.get("XDG_CONFIG_HOME", "~/.config") ~ "/onedrive";
// enable monitor mode
bool monitor;
// force a full resync
@ -16,6 +16,8 @@ int main(string[] args)
bool logout;
// enable verbose logging
bool verbose;
// print the access token
bool printAccessToken;
try {
auto opt = getopt(
@ -25,12 +27,13 @@ int main(string[] args)
"resync", "Forget the last saved state, perform a full sync.", &resync,
"logout", "Logout the current user.", &logout,
"confdir", "Set the directory to use to store the configuration files.", &configDirName,
"verbose|v", "Print more details, useful for debugging.", &log.verbose
"verbose|v", "Print more details, useful for debugging.", &log.verbose,
"print-token", "Print the access token, useful for debugging.", &printAccessToken
);
if (opt.helpWanted) {
defaultGetoptPrinter(
"Usage: onedrive [OPTION]...\n\n" ~
"no option Sync and exit.",
"no option Sync and exit.",
opt.options
);
return EXIT_SUCCESS;
@ -42,7 +45,7 @@ int main(string[] args)
}
log.vlog("Loading config ...");
configDirName = expandTilde(configDirName);
configDirName = configDirName.expandTilde().absolutePath();
if (!exists(configDirName)) mkdir(configDirName);
auto cfg = new config.Config(configDirName);
cfg.init();
@ -55,9 +58,9 @@ int main(string[] args)
}
if (resync || logout) {
log.log("Deleting the saved status ...");
log.vlog("Deleting the saved status ...");
safeRemove(cfg.databaseFilePath);
safeRemove(cfg.statusTokenFilePath);
safeRemove(cfg.deltaLinkFilePath);
safeRemove(cfg.uploadStateFilePath);
if (logout) {
safeRemove(cfg.refreshTokenFilePath);
@ -71,6 +74,7 @@ int main(string[] args)
return EXIT_FAILURE;
}
auto onedrive = new OneDriveApi(cfg);
onedrive.printAccessToken = printAccessToken;
if (!onedrive.init()) {
log.log("Could not initialize the OneDrive API");
// workaround for segfault in std.net.curl.Curl.shutdown() on exit

View file

@ -13,6 +13,7 @@ private immutable {
string driveUrl = "https://graph.microsoft.com/v1.0/me/drive";
string itemByIdUrl = "https://graph.microsoft.com/v1.0/me/drive/items/";
string itemByPathUrl = "https://graph.microsoft.com/v1.0/me/drive/root:/";
string driveByIdUrl = "https://graph.microsoft.com/v1.0/me/drives/";
}
class OneDriveException: Exception
@ -21,10 +22,10 @@ class OneDriveException: Exception
// https://dev.onedrive.com/misc/errors.htm
JSONValue error;
@nogc @safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__)
{
super(msg, file, line, next);
}
@nogc @safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__)
{
super(msg, file, line, next);
}
@safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__)
{
@ -50,6 +51,9 @@ final class OneDriveApi
private SysTime accessTokenExpiration;
/* private */ HTTP http;
// if true, every new access token is printed
bool printAccessToken;
this(Config cfg)
{
this.cfg = cfg;
@ -61,15 +65,8 @@ final class OneDriveApi
{
try {
refreshToken = readText(cfg.refreshTokenFilePath);
getDefaultDrive();
} catch (FileException e) {
return authorize();
} catch (OneDriveException e) {
if (e.httpStatusCode == 400 || e.httpStatusCode == 401) {
log.log("Refresh token invalid");
return authorize();
}
throw e;
}
return true;
}
@ -101,24 +98,24 @@ final class OneDriveApi
}
// https://dev.onedrive.com/items/view_delta.htm
JSONValue viewChangesById(const(char)[] id, const(char)[] statusToken)
JSONValue viewChangesById(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink)
{
checkAccessTokenExpired();
const(char)[] url = itemByIdUrl ~ id ~ "/delta";
if (deltaLink) return get(deltaLink);
const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta";
url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference";
if (statusToken) url ~= "&token=" ~ statusToken;
return get(url);
}
// https://dev.onedrive.com/items/view_delta.htm
JSONValue viewChangesByPath(const(char)[] path, const(char)[] statusToken)
JSONValue viewChangesByPath(const(char)[] path, const(char)[] deltaLink)
{
checkAccessTokenExpired();
if (deltaLink) return get(deltaLink);
string url = itemByPathUrl ~ encodeComponent(path) ~ ":/delta";
// HACK
if (path == ".") url = driveUrl ~ "/root/delta";
url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference";
if (statusToken) url ~= "&token=" ~ statusToken;
return get(url);
}
@ -248,12 +245,20 @@ final class OneDriveApi
refreshToken = response["refresh_token"].str();
accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer());
std.file.write(cfg.refreshTokenFilePath, refreshToken);
if (printAccessToken) writeln("New access token: ", accessToken);
}
private void checkAccessTokenExpired()
{
if (Clock.currTime() >= accessTokenExpiration) {
newToken();
try {
if (Clock.currTime() >= accessTokenExpiration) {
newToken();
}
} catch (OneDriveException e) {
if (e.httpStatusCode == 400 || e.httpStatusCode == 401) {
e.msg ~= "\nRefresh token invalid, use --logout to authorize the client again";
}
throw e;
}
}

88
src/qxor.d Normal file
View file

@ -0,0 +1,88 @@
import std.algorithm;
import std.digest.digest;
// implementation of the QuickXorHash algorithm in D
// https://github.com/OneDrive/onedrive-api-docs/blob/master/snippets/quickxorhash.md
// Implementation of Microsoft's QuickXorHash (see the OneDrive API docs link
// above): a non-cryptographic 160-bit checksum. Each input byte is XOR-ed
// into a 160-bit bit-vector at a bit position that advances by `shift` bits
// per byte, wrapping around the vector; finish() then folds the total input
// length into the last 8 bytes of the digest.
struct QuickXor
{
    private immutable int widthInBits = 160;                              // hash width -> 20-byte digest
    private immutable size_t lengthInBytes = (widthInBits - 1) / 8 + 1;   // 20
    private immutable size_t lengthInQWords = (widthInBits - 1) / 64 + 1; // 3 ulong cells
    private immutable int bitsInLastCell = widthInBits % 64; // 32
    private immutable int shift = 11;                        // bit-position advance per input byte

    private ulong[lengthInQWords] _data; // the 160-bit state vector (only 32 bits of the last cell are used)
    private ulong _lengthSoFar;          // total number of bytes consumed so far
    private int _shiftSoFar;             // current bit offset into the vector, modulo widthInBits

    // Mixes `array` into the hash state. Input bytes that are widthInBits
    // positions apart land on the same bit offset, so each inner
    // `j += widthInBits` loop XOR-s them together before merging them into
    // the state; the outer loop therefore runs at most widthInBits times.
    nothrow @safe void put(scope const(ubyte)[] array...)
    {
        int vectorArrayIndex = _shiftSoFar / 64; // which ulong cell is written
        int vectorOffset = _shiftSoFar % 64;     // bit offset inside that cell
        immutable size_t iterations = min(array.length, widthInBits);

        for (size_t i = 0; i < iterations; i++) {
            immutable bool isLastCell = vectorArrayIndex == _data.length - 1;
            immutable int bitsInVectorCell = isLastCell ? bitsInLastCell : 64;

            if (vectorOffset <= bitsInVectorCell - 8) {
                // the byte fits entirely inside the current cell
                for (size_t j = i; j < array.length; j += widthInBits) {
                    _data[vectorArrayIndex] ^= cast(ulong) array[j] << vectorOffset;
                }
            } else {
                // the byte straddles the cell boundary: split its bits between
                // the current cell and the next one (wrapping to cell 0 after
                // the last cell)
                int index1 = vectorArrayIndex;
                int index2 = isLastCell ? 0 : (vectorArrayIndex + 1);
                byte low = cast(byte) (bitsInVectorCell - vectorOffset);

                byte xoredByte = 0;
                for (size_t j = i; j < array.length; j += widthInBits) {
                    xoredByte ^= array[j];
                }

                _data[index1] ^= cast(ulong) xoredByte << vectorOffset;
                _data[index2] ^= cast(ulong) xoredByte >> low;
            }

            // advance the write position by `shift` bits, moving to the next
            // cell (or back to cell 0) when the current cell is exhausted
            vectorOffset += shift;
            if (vectorOffset >= bitsInVectorCell) {
                vectorArrayIndex = isLastCell ? 0 : vectorArrayIndex + 1;
                vectorOffset -= bitsInVectorCell;
            }
        }

        _shiftSoFar = cast(int) (_shiftSoFar + shift * (array.length % widthInBits)) % widthInBits;
        _lengthSoFar += array.length;
    }

    // Resets the hash state so the struct can be reused for a new input.
    nothrow @safe void start()
    {
        _data = _data.init;
        _shiftSoFar = 0;
        _lengthSoFar = 0;
    }

    // Returns the final 20-byte digest: the raw bytes of the state vector
    // with the total input length XOR-ed into the last 8 bytes.
    // @trusted because of the raw pointer casts over _data and _lengthSoFar.
    nothrow @trusted ubyte[lengthInBytes] finish()
    {
        ubyte[lengthInBytes] tmp;
        tmp[0 .. lengthInBytes] = (cast(ubyte*) _data)[0 .. lengthInBytes];
        for (size_t i = 0; i < 8; i++) {
            tmp[lengthInBytes - 8 + i] ^= (cast(ubyte*) &_lengthSoFar)[i];
        }
        return tmp;
    }
}
// verify that QuickXor satisfies the std.digest API contract
unittest
{
    assert(isDigest!QuickXor);
}

// known-answer test for a fixed input string
unittest
{
    QuickXor qxor;
    qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
    assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
}

// class-based wrapper (std.digest WrapperDigest) around the QuickXor struct
alias QuickXorDigest = WrapperDigest!(QuickXor);

View file

@ -197,17 +197,17 @@ unittest
auto s = db.prepare("INSERT INTO test VALUES (?, ?)");
s.bind(1, "key1");
s.bind(2, "value1");
s.bind(2, "value");
s.exec();
s.bind(1, "key2");
s.bind(2, "value2");
s.bind(2, null);
s.exec();
s = db.prepare("SELECT * FROM test ORDER BY id ASC");
auto r = s.exec();
assert(r.front[0] == "key1");
r.popFront();
assert(r.front[1] == "value2");
assert(r.front[1] == null);
r.popFront();
assert(r.empty);
}

View file

@ -23,7 +23,7 @@ private bool isItemFile(const ref JSONValue item)
private bool isItemDeleted(const ref JSONValue item)
{
// HACK: fix for https://github.com/skilion/onedrive/issues/157
return ("deleted" in item) || ("fileSystemInfo" !in item);
return ("deleted" in item) || ("fileSystemInfo" !in item && "remoteItem" !in item);
}
private bool isItemRoot(const ref JSONValue item)
@ -31,11 +31,63 @@ private bool isItemRoot(const ref JSONValue item)
return ("root" in item) != null;
}
private bool testCrc32(string path, const(char)[] crc32)
private bool isItemRemote(const ref JSONValue item)
{
if (crc32) {
string localCrc32 = computeCrc32(path);
if (crc32 == localCrc32) return true;
return ("remoteItem" in item) != null;
}
// HACK: OneDrive Biz does not return parentReference for the root
string defaultDriveId;
private Item makeItem(const ref JSONValue jsonItem)
{
ItemType type;
if (isItemFile(jsonItem)) {
type = ItemType.file;
} else if (isItemFolder(jsonItem)) {
type = ItemType.dir;
} else if (isItemRemote(jsonItem)) {
type = ItemType.remote;
}
Item item = {
driveId: isItemRoot(jsonItem) ? defaultDriveId : jsonItem["parentReference"]["driveId"].str,
id: jsonItem["id"].str,
name: "name" in jsonItem ? jsonItem["name"].str : null, // name may be missing for deleted files in OneDrive Biz
type: type,
eTag: "eTag" in jsonItem ? jsonItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Biz
cTag: "cTag" in jsonItem ? jsonItem["cTag"].str : null, // cTag is missing in old files (and all folders)
mtime: "fileSystemInfo" in jsonItem ? SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str) : SysTime(0),
parentDriveId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["driveId"].str,
parentId: isItemRoot(jsonItem) ? null : jsonItem["parentReference"]["id"].str
};
// extract the file hash
if (isItemFile(jsonItem)) {
if ("hashes" in jsonItem["file"]) {
if ("crc32Hash" in jsonItem["file"]["hashes"]) {
item.crc32Hash = jsonItem["file"]["hashes"]["crc32Hash"].str;
} else if ("sha1Hash" in jsonItem["file"]["hashes"]) {
item.sha1Hash = jsonItem["file"]["hashes"]["sha1Hash"].str;
} else if ("quickXorHash" in jsonItem["file"]["hashes"]) {
item.quickXorHash = jsonItem["file"]["hashes"]["quickXorHash"].str;
} else {
log.vlog("The file does not have any hash");
}
}
}
return item;
}
private bool testFileHash(string path, const ref Item item)
{
if (item.crc32Hash) {
if (item.crc32Hash == computeCrc32(path)) return true;
} else if (item.sha1Hash) {
if (item.sha1Hash == computeSha1Hash(path)) return true;
} else if (item.quickXorHash) {
if (item.quickXorHash == computeQuickXorHash(path)) return true;
}
return false;
}
@ -60,12 +112,10 @@ final class SyncEngine
private ItemDatabase itemdb;
private UploadSession session;
private SelectiveSync selectiveSync;
// token representing the last status correctly synced
private string statusToken;
// list of items to skip while applying the changes
private string[] skippedItems;
// list of items to delete after the changes has been downloaded
private string[] idsToDelete;
private string[2][] idsToDelete;
this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, SelectiveSync selectiveSync)
{
@ -79,12 +129,6 @@ final class SyncEngine
void init()
{
// restore the previous status token
try {
statusToken = readText(cfg.statusTokenFilePath);
} catch (FileException e) {
// swallow exception
}
// check if there is an interrupted upload session
if (session.restore()) {
log.log("Continuing the upload session ...");
@ -96,33 +140,37 @@ final class SyncEngine
void applyDifferences()
{
log.vlog("Applying differences ...");
// restore the last known state
string deltaLink;
try {
deltaLink = readText(cfg.deltaLinkFilePath);
} catch (FileException e) {
// swallow exception
}
try {
defaultDriveId = onedrive.getDefaultDrive()["id"].str;
JSONValue changes;
do {
// get changes from the server
try {
changes = onedrive.viewChangesByPath(".", statusToken);
changes = onedrive.viewChangesByPath(".", deltaLink);
} catch (OneDriveException e) {
if (e.httpStatusCode == 410) {
log.log("Status token expired, resyncing");
statusToken = null;
log.log("Delta link expired, resyncing");
deltaLink = null;
continue;
}
else {
} else {
throw e;
}
}
foreach (item; changes["value"].array) {
applyDifference(item);
}
// hack to reuse old code
string url;
if ("@odata.nextLink" in changes) url = changes["@odata.nextLink"].str;
if ("@odata.deltaLink" in changes) url = changes["@odata.deltaLink"].str;
auto c = matchFirst(url, r"(?:token=)([\w\d]+)");
c.popFront(); // skip the whole match
statusToken = c.front;
std.file.write(cfg.statusTokenFilePath, statusToken);
if ("@odata.nextLink" in changes) deltaLink = changes["@odata.nextLink"].str;
if ("@odata.deltaLink" in changes) deltaLink = changes["@odata.deltaLink"].str;
std.file.write(cfg.deltaLinkFilePath, deltaLink);
} while ("@odata.nextLink" in changes);
} catch (ErrnoException e) {
throw new SyncException(e.msg, e);
@ -140,38 +188,58 @@ final class SyncEngine
assumeSafeAppend(skippedItems);
}
private void applyDifference(JSONValue item)
private void applyDifference(JSONValue jsonItem)
{
string id = item["id"].str;
string name = item["name"].str;
log.vlog(jsonItem["id"].str, " ", "name" in jsonItem ? jsonItem["name"].str : null);
Item item = makeItem(jsonItem);
log.vlog(id, " ", name);
string path = ".";
bool unwanted;
unwanted |= skippedItems.find(item.parentId).length != 0;
unwanted |= selectiveSync.isNameExcluded(item.name);
// eTag and parentId do not exists for the root in OneDrive Biz
string eTag, parentId;
if (!isItemRoot(item)) {
eTag = item["eTag"].str;
parentId = item["parentReference"]["id"].str;
if (!unwanted && !isItemRoot(jsonItem)) {
// delay path computation after assuring the item parent is not excluded
path = itemdb.computePath(item.parentDriveId, item.parentId) ~ "/" ~ item.name;
// selective sync
unwanted |= selectiveSync.isPathExcluded(path);
}
// skip unwanted items early
if (skippedItems.find(parentId).length != 0) {
if (unwanted) {
log.vlog("Filtered out");
skippedItems ~= id;
skippedItems ~= item.id;
return;
}
if (selectiveSync.isNameExcluded(name)) {
log.vlog("Filtered out");
skippedItems ~= id;
// check if the item is going to be deleted
if (isItemDeleted(jsonItem)) {
log.vlog("The item is marked for deletion");
idsToDelete ~= [item.driveId, item.id];
return;
}
// check the item type
if (isItemRemote(jsonItem)) {
// TODO
// check name change
// scan the children later
// fix child references
log.vlog("Remote items are not supported yet");
skippedItems ~= item.id;
return;
} else if (!isItemFile(jsonItem) && !isItemFolder(jsonItem)) {
log.vlog("The item is neither a file nor a directory, skipping");
skippedItems ~= item.id;
return;
}
// rename the local item if it is unsynced and there is a new version of it
Item oldItem;
string oldPath;
bool cached = itemdb.selectById(id, oldItem);
if (cached && eTag != oldItem.eTag) {
oldPath = itemdb.computePath(id);
bool cached = itemdb.selectById(item.driveId, item.id, oldItem);
if (cached && item.eTag != oldItem.eTag) {
oldPath = itemdb.computePath(item.driveId, item.id);
if (!isItemSynced(oldItem, oldPath)) {
log.vlog("The local item is unsynced, renaming");
if (exists(oldPath)) safeRename(oldPath);
@ -179,67 +247,17 @@ final class SyncEngine
}
}
// check if the item is to be deleted
if (isItemDeleted(item)) {
log.vlog("The item is marked for deletion");
if (cached) idsToDelete ~= id;
return;
}
// compute the path of the item
string path = ".";
if (parentId) {
path = itemdb.computePath(parentId) ~ "/" ~ name;
// selective sync
if (selectiveSync.isPathExcluded(path)) {
log.vlog("Filtered out: ", path);
skippedItems ~= id;
return;
}
}
ItemType type;
if (isItemFile(item)) {
type = ItemType.file;
} else if (isItemFolder(item)) {
type = ItemType.dir;
} else {
log.vlog("The item is neither a file nor a directory, skipping");
skippedItems ~= id;
return;
}
string crc32;
if (type == ItemType.file) {
try {
crc32 = item["file"]["hashes"]["crc32Hash"].str;
} catch (JSONException e) {
log.vlog("The hash is not available");
}
}
Item newItem = {
id: id,
name: name,
type: type,
eTag: eTag,
cTag: "cTag" in item ? item["cTag"].str : null,
mtime: SysTime.fromISOExtString(item["fileSystemInfo"]["lastModifiedDateTime"].str),
parentId: parentId,
crc32: crc32
};
if (!cached) {
applyNewItem(newItem, path);
applyNewItem(item, path);
} else {
applyChangedItem(oldItem, newItem, path);
applyChangedItem(oldItem, oldPath, item, path);
}
// save the item in the db
if (oldItem.id) {
itemdb.update(newItem);
itemdb.update(item);
} else {
itemdb.insert(newItem);
itemdb.insert(item);
}
}
@ -252,7 +270,7 @@ final class SyncEngine
setTimes(path, item.mtime, item.mtime);
return;
} else {
log.vlog("The local item is out of sync, renaming ...");
log.vlog("The local item is out of sync, renaming...");
safeRename(path);
}
}
@ -266,21 +284,25 @@ final class SyncEngine
// Use mkdirRecurse to create nested directories
mkdirRecurse(path);
break;
case ItemType.remote:
assert(0);
}
setTimes(path, item.mtime, item.mtime);
}
private void applyChangedItem(Item oldItem, Item newItem, string newPath)
// update a local item
// the local item is assumed to be in sync with the local db
private void applyChangedItem(Item oldItem, string oldPath, Item newItem, string newPath)
{
assert(oldItem.driveId == newItem.driveId);
assert(oldItem.id == newItem.id);
assert(oldItem.type == newItem.type);
if (oldItem.eTag != newItem.eTag) {
string oldPath = itemdb.computePath(oldItem.id);
if (oldPath != newPath) {
log.log("Moving: ", oldPath, " -> ", newPath);
if (exists(newPath)) {
log.vlog("The destination is occupied, renaming ...");
log.vlog("The destination is occupied, renaming the conflicting file...");
safeRename(newPath);
}
rename(oldPath, newPath);
@ -311,7 +333,7 @@ final class SyncEngine
} else {
log.vlog("The local item has a different modified time ", localModifiedTime, " remote is ", item.mtime);
}
if (testCrc32(path, item.crc32)) {
if (testFileHash(path, item)) {
return true;
} else {
log.vlog("The local item has a different hash");
@ -327,6 +349,8 @@ final class SyncEngine
log.vlog("The local item is a file but should be a directory");
}
break;
case ItemType.remote:
assert(0);
}
return false;
}
@ -334,9 +358,9 @@ final class SyncEngine
private void deleteItems()
{
log.vlog("Deleting files ...");
foreach_reverse (id; idsToDelete) {
string path = itemdb.computePath(id);
itemdb.deleteById(id);
foreach_reverse (i; idsToDelete) {
string path = itemdb.computePath(i[0], i[1]);
itemdb.deleteById(i[0], i[1]);
if (exists(path)) {
if (isFile(path)) {
remove(path);
@ -384,7 +408,7 @@ final class SyncEngine
log.vlog("Filtered out");
return;
}
string path = itemdb.computePath(item.id);
string path = itemdb.computePath(item.driveId, item.id);
if (selectiveSync.isPathExcluded(path)) {
log.vlog("Filtered out: ", path);
return;
@ -397,6 +421,8 @@ final class SyncEngine
case ItemType.file:
uploadFileDifferences(item, path);
break;
case ItemType.remote:
assert(0);
}
}
@ -411,7 +437,7 @@ final class SyncEngine
} else {
log.vlog("The directory has not changed");
// loop trough the children
foreach (Item child; itemdb.selectChildren(item.id)) {
foreach (Item child; itemdb.selectChildren(item.driveId, item.id)) {
uploadDifferences(child);
}
}
@ -434,7 +460,7 @@ final class SyncEngine
log.vlog("The file last modified time has changed");
string id = item.id;
string eTag = item.eTag;
if (!testCrc32(path, item.crc32)) {
if (!testFileHash(path, item)) {
log.vlog("The file content has changed");
log.log("Uploading: ", path);
JSONValue response;
@ -536,7 +562,7 @@ final class SyncEngine
if (e.httpStatusCode == 404) log.log(e.msg);
else throw e;
}
itemdb.deleteById(item.id);
itemdb.deleteById(item.driveId, item.id);
}
private void uploadLastModifiedTime(const(char)[] id, const(char)[] eTag, SysTime mtime)
@ -552,30 +578,7 @@ final class SyncEngine
private void saveItem(JSONValue jsonItem)
{
ItemType type;
if (isItemFile(jsonItem)) {
type = ItemType.file;
} else if (isItemFolder(jsonItem)) {
type = ItemType.dir;
} else {
assert(0);
}
Item item = {
id: jsonItem["id"].str,
name: jsonItem["name"].str,
type: type,
eTag: jsonItem["eTag"].str,
cTag: "cTag" in jsonItem ? jsonItem["cTag"].str : null,
mtime: SysTime.fromISOExtString(jsonItem["fileSystemInfo"]["lastModifiedDateTime"].str),
parentId: jsonItem["parentReference"]["id"].str
};
if (type == ItemType.file) {
try {
item.crc32 = jsonItem["file"]["hashes"]["crc32Hash"].str;
} catch (JSONException e) {
log.vlog("The hash is not available");
}
}
Item item = makeItem(jsonItem);
itemdb.upsert(item);
}

View file

@ -43,7 +43,7 @@ struct UploadSession
return false;
}
if (!exists(session["localPath"].str)) {
log.vlog("The file do not exist anymore");
log.vlog("The file does not exist anymore");
return false;
}
// request the session status

View file

@ -1,5 +1,6 @@
import std.base64;
import std.conv;
import std.digest.crc;
import std.digest.crc, std.digest.sha;
import std.file;
import std.net.curl;
import std.path;
@ -7,6 +8,7 @@ import std.regex;
import std.socket;
import std.stdio;
import std.string;
import qxor;
private string deviceName;
@ -15,7 +17,7 @@ static this()
deviceName = Socket.hostName;
}
// give a new name to the specified file or directory
// gives a new name to the specified file or directory
void safeRename(const(char)[] path)
{
auto ext = extension(path);
@ -33,13 +35,13 @@ void safeRename(const(char)[] path)
rename(path, newPath);
}
// delete the specified file without throwing an exception if it does not exists
// deletes the specified file without throwing an exception if it does not exist
void safeRemove(const(char)[] path)
{
if (exists(path)) remove(path);
}
// return the crc32 hex string of a file
// returns the crc32 hex string of a file
string computeCrc32(string path)
{
CRC32 crc;
@ -50,7 +52,29 @@ string computeCrc32(string path)
return crc.finish().toHexString().dup;
}
// convert wildcards (*, ?) to regex
// returns the SHA-1 digest of the file at `path` as an uppercase hex string
string computeSha1Hash(string path)
{
	SHA1 hasher;
	auto input = File(path, "rb");
	// stream the file through the digest in 4 KiB chunks to bound memory use
	foreach (chunk; input.byChunk(4096)) {
		hasher.put(chunk);
	}
	return hasher.finish().toHexString().dup;
}
// returns the quickXorHash of the file at `path`, encoded as a base64 string
// (the digest itself comes from the project-local QuickXor implementation)
string computeQuickXorHash(string path)
{
	QuickXor hasher;
	auto input = File(path, "rb");
	// feed the file through the digest in 4 KiB chunks
	foreach (chunk; input.byChunk(4096)) {
		hasher.put(chunk);
	}
	return Base64.encode(hasher.finish());
}
// converts wildcards (*, ?) to regex
Regex!char wild2regex(const(char)[] pattern)
{
string str;
@ -79,7 +103,7 @@ Regex!char wild2regex(const(char)[] pattern)
return regex(str, "i");
}
// return true if the network connection is available
// returns true if the network connection is available
bool testNetwork()
{
HTTP http = HTTP("https://login.microsoftonline.com");
@ -87,7 +111,7 @@ bool testNetwork()
return http.perform(ThrowOnError.no) == 0;
}
// call globMatch for each string in pattern separated by '|'
// calls globMatch for each string in pattern separated by '|'
bool multiGlobMatch(const(char)[] path, const(char)[] pattern)
{
foreach (glob; pattern.split('|')) {