Refactoring

- unified configs
- logging module
- new command-line option to change the config directory
This commit is contained in:
skilion 2016-08-04 23:35:58 +02:00
parent 7d700e1a4c
commit 738536736a
8 changed files with 202 additions and 193 deletions

View file

@ -6,6 +6,7 @@ CONFDIR = /usr/local/etc
SOURCES = \
src/config.d \
src/itemdb.d \
src/log.d \
src/main.d \
src/monitor.d \
src/onedrive.d \

View file

@ -1,22 +1,36 @@
import std.file, std.regex, std.stdio;
static import log;
struct Config
final class Config
{
public string refreshTokenFilePath;
public string statusTokenFilePath;
public string databaseFilePath;
public string uploadStateFilePath;
private string userConfigFilePath;
// hashmap for the values found in the user config file
private string[string] values;
this(string[] filenames...)
this(string configDirName)
{
refreshTokenFilePath = configDirName ~ "/refresh_token";
statusTokenFilePath = configDirName ~ "/status_token";
databaseFilePath = configDirName ~ "/items.db";
uploadStateFilePath = configDirName ~ "/resume_upload";
userConfigFilePath = configDirName ~ "/config";
}
void init()
{
bool found = false;
foreach (filename; filenames) {
if (exists(filename)) {
found = true;
load(filename);
}
}
found |= load("/etc/onedrive.conf");
found |= load("/usr/local/etc/onedrive.conf");
found |= load(userConfigFilePath);
if (!found) throw new Exception("No config file found");
}
string get(string key)
string getValue(string key)
{
auto p = key in values;
if (p) {
@ -26,8 +40,9 @@ struct Config
}
}
private void load(string filename)
private bool load(string filename)
{
scope(failure) return false;
auto file = File(filename, "r");
auto r = regex(`^\s*(\w+)\s*=\s*"(.*)"\s*$`);
foreach (line; file.byLine()) {
@ -38,23 +53,16 @@ struct Config
c.popFront();
values[key] = c.front.dup;
} else {
writeln("Malformed config line: ", line);
log.log("Malformed config line: ", line);
}
}
return true;
}
}
unittest
{
auto cfg = Config("empty", "onedrive.conf");
assert(cfg.get("sync_dir") == "~/OneDrive");
}
unittest
{
try {
auto cfg = Config("empty");
assert(0);
} catch (Exception e) {
}
auto cfg = new Config("");
cfg.load("onedrive.conf");
assert(cfg.getValue("sync_dir") == "~/OneDrive");
}

14
src/log.d Normal file
View file

@ -0,0 +1,14 @@
import std.stdio;
// enable verbose logging
bool verbose;
void log(T...)(T args)
{
stderr.writeln(args);
}
void vlog(T...)(T args)
{
if (verbose) stderr.writeln(args);
}

View file

@ -1,20 +1,28 @@
import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE;
import core.memory, core.time, core.thread;
import std.getopt, std.file, std.path, std.process, std.stdio;
import std.getopt, std.file, std.path, std.process;
import config, itemdb, monitor, onedrive, sync, util;
static import log;
void main(string[] args)
int main(string[] args)
{
// always print log messages
stdout = stderr;
// configuration directory
string configDirName = expandTilde(environment.get("XDG_CONFIG_HOME", "~/.config")) ~ "/onedrive";
// enable monitor mode
bool monitor;
// force a full resync
bool resync;
// enable verbose logging
bool verbose;
bool monitor, resync, verbose;
try {
auto opt = getopt(
args,
std.getopt.config.bundling,
"monitor|m", "Keep monitoring for local and remote changes.", &monitor,
"resync", "Forget the last saved state, perform a full sync.", &resync,
"verbose|v", "Print more details, useful for debugging.", &verbose
"confdir", "Directory to use to store the configuration files.", &configDirName,
"verbose|v", "Print more details, useful for debugging.", &log.verbose
);
if (opt.helpWanted) {
defaultGetoptPrinter(
@ -22,109 +30,84 @@ void main(string[] args)
"no option Sync and exit.",
opt.options
);
return;
return EXIT_SUCCESS;
}
} catch (GetOptException e) {
writeln(e.msg);
writeln("Try 'onedrive -h' for more information.");
return;
log.log(e.msg);
log.log("Try 'onedrive -h' for more information.");
return EXIT_FAILURE;
}
string configDirName = expandTilde(environment.get("XDG_CONFIG_HOME", "~/.config")) ~ "/onedrive";
string configFile1Path = "/etc/onedrive.conf";
string configFile2Path = "/usr/local/etc/onedrive.conf";
string configFile3Path = configDirName ~ "/config";
string refreshTokenFilePath = configDirName ~ "/refresh_token";
string statusTokenFilePath = configDirName ~ "/status_token";
string databaseFilePath = configDirName ~ "/items.db";
log.vlog("Loading config ...");
if (!exists(configDirName)) mkdir(configDirName);
auto cfg = new config.Config(configDirName);
cfg.init();
if (resync) {
if (verbose) writeln("Deleting the saved status ...");
if (exists(databaseFilePath)) remove(databaseFilePath);
if (exists(statusTokenFilePath)) remove(statusTokenFilePath);
log.log("Deleting the saved status ...");
if (exists(cfg.databaseFilePath)) remove(cfg.databaseFilePath);
if (exists(cfg.statusTokenFilePath)) remove(cfg.statusTokenFilePath);
}
if (verbose) writeln("Loading config ...");
auto cfg = config.Config(configFile1Path, configFile2Path, configFile3Path);
if (verbose) writeln("Initializing the OneDrive API ...");
log.vlog("Initializing the OneDrive API ...");
bool online = testNetwork();
if (!online && !monitor) {
writeln("No network connection");
return;
log.log("No network connection");
return EXIT_FAILURE;
}
auto onedrive = new OneDriveApi(cfg, verbose);
onedrive.onRefreshToken = (string refreshToken) {
std.file.write(refreshTokenFilePath, refreshToken);
};
try {
string refreshToken = readText(refreshTokenFilePath);
onedrive.setRefreshToken(refreshToken);
} catch (FileException e) {
if (!onedrive.authorize()) {
// workaround for segfault in std.net.curl.Curl.shutdown() on exit
onedrive.http.shutdown();
return;
}
auto onedrive = new OneDriveApi(cfg);
if (!onedrive.init()) {
log.log("Could not initialize the OneDrive API");
// workaround for segfault in std.net.curl.Curl.shutdown() on exit
onedrive.http.shutdown();
return EXIT_FAILURE;
}
if (verbose) writeln("Opening the item database ...");
auto itemdb = new ItemDatabase(databaseFilePath);
log.vlog("Opening the item database ...");
auto itemdb = new ItemDatabase(cfg.databaseFilePath);
string syncDir = expandTilde(cfg.get("sync_dir"));
if (verbose) writeln("All operations will be performed in: ", syncDir);
string syncDir = expandTilde(cfg.getValue("sync_dir"));
log.vlog("All operations will be performed in: ", syncDir);
if (!exists(syncDir)) mkdir(syncDir);
chdir(syncDir);
if (verbose) writeln("Initializing the Synchronization Engine ...");
auto sync = new SyncEngine(cfg, onedrive, itemdb, configDirName, verbose);
sync.onStatusToken = (string statusToken) {
std.file.write(statusTokenFilePath, statusToken);
};
string statusToken;
try {
statusToken = readText(statusTokenFilePath);
} catch (FileException e) {
// swallow exception
}
sync.init(statusToken);
log.vlog("Initializing the Synchronization Engine ...");
auto sync = new SyncEngine(cfg, onedrive, itemdb);
sync.init();
if (online) performSync(sync);
if (monitor) {
if (verbose) writeln("Initializing monitor ...");
log.vlog("Initializing monitor ...");
Monitor m;
m.onDirCreated = delegate(string path) {
if (verbose) writeln("[M] Directory created: ", path);
log.vlog("[M] Directory created: ", path);
try {
sync.scanForDifferences(path);
} catch(SyncException e) {
writeln(e.msg);
log.log(e.msg);
}
};
m.onFileChanged = delegate(string path) {
if (verbose) writeln("[M] File changed: ", path);
log.vlog("[M] File changed: ", path);
try {
sync.scanForDifferences(path);
} catch(SyncException e) {
writeln(e.msg);
log.log(e.msg);
}
};
m.onDelete = delegate(string path) {
if (verbose) writeln("[M] Item deleted: ", path);
log.vlog("[M] Item deleted: ", path);
try {
sync.deleteByPath(path);
} catch(SyncException e) {
writeln(e.msg);
log.log(e.msg);
}
};
m.onMove = delegate(string from, string to) {
if (verbose) writeln("[M] Item moved: ", from, " -> ", to);
log.vlog("[M] Item moved: ", from, " -> ", to);
try {
sync.uploadMoveItem(from, to);
} catch(SyncException e) {
writeln(e.msg);
log.log(e.msg);
}
};
m.init(cfg, verbose);
@ -151,6 +134,7 @@ void main(string[] args)
// workaround for segfault in std.net.curl.Curl.shutdown() on exit
onedrive.http.shutdown();
return EXIT_SUCCESS;
}
// try to synchronize the folder three times
@ -164,7 +148,7 @@ void performSync(SyncEngine sync)
count = -1;
} catch (SyncException e) {
if (++count == 3) throw e;
else writeln(e.msg);
else log.log(e.msg);
}
} while (count != -1);
}

View file

@ -1,9 +1,9 @@
import core.sys.linux.sys.inotify;
import core.stdc.errno;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import core.sys.posix.poll, core.sys.posix.unistd;
import std.exception, std.file, std.path, std.regex, std.stdio, std.string;
import config, util;
static import log;
// relevant inotify events
private immutable uint32_t mask = IN_ATTRIB | IN_CLOSE_WRITE | IN_CREATE |
@ -41,8 +41,8 @@ struct Monitor
void init(Config cfg, bool verbose)
{
this.verbose = verbose;
skipDir = wild2regex(cfg.get("skip_dir"));
skipFile = wild2regex(cfg.get("skip_file"));
skipDir = wild2regex(cfg.getValue("skip_dir"));
skipFile = wild2regex(cfg.getValue("skip_file"));
fd = inotify_init();
if (fd == -1) throw new MonitorException("inotify_init failed");
if (!buffer) buffer = new void[4096];
@ -72,21 +72,21 @@ struct Monitor
int wd = inotify_add_watch(fd, toStringz(dirname), mask);
if (wd == -1) {
if (errno() == ENOSPC) {
writeln("The maximum number of inotify wathches is probably too low.");
writeln("");
writeln("To see the current max number of watches run");
writeln("");
writeln(" sysctl fs.inotify.max_user_watches");
writeln("");
writeln("To change the current max number of watches to 32768 run");
writeln("");
writeln(" sudo sysctl fs.inotify.max_user_watches=32768");
writeln("");
log.log("The maximum number of inotify wathches is probably too low.");
log.log("");
log.log("To see the current max number of watches run");
log.log("");
log.log(" sysctl fs.inotify.max_user_watches");
log.log("");
log.log("To change the current max number of watches to 32768 run");
log.log("");
log.log(" sudo sysctl fs.inotify.max_user_watches=32768");
log.log("");
}
throw new MonitorException("inotify_add_watch failed");
}
wdToDirName[wd] = dirname ~ "/";
if (verbose) writeln("Monitor directory: ", dirname);
log.vlog("Monitor directory: ", dirname);
}
// remove a watch descriptor
@ -95,7 +95,7 @@ struct Monitor
assert(wd in wdToDirName);
int ret = inotify_rm_watch(fd, wd);
if (ret == -1) throw new MonitorException("inotify_rm_watch failed");
if (verbose) writeln("Monitored directory removed: ", wdToDirName[wd]);
log.vlog("Monitored directory removed: ", wdToDirName[wd]);
wdToDirName.remove(wd);
}
@ -108,7 +108,7 @@ struct Monitor
int ret = inotify_rm_watch(fd, wd);
if (ret == -1) throw new MonitorException("inotify_rm_watch failed");
wdToDirName.remove(wd);
if (verbose) writeln("Monitored directory removed: ", dirname);
log.vlog("Monitored directory removed: ", dirname);
}
}
}
@ -190,7 +190,7 @@ struct Monitor
if (useCallbacks) onFileChanged(path);
}
} else {
writeln("Unknow inotify event: ", format("%#x", event.mask));
log.log("Unknow inotify event: ", format("%#x", event.mask));
}
skip:

View file

@ -1,7 +1,8 @@
import std.net.curl: CurlException, HTTP;
import std.datetime, std.exception, std.json, std.path;
import std.datetime, std.exception, std.file, std.json, std.path;
import std.stdio, std.string, std.uni, std.uri;
import config;
static import log;
private immutable {
@ -32,18 +33,28 @@ class OneDriveException: Exception
final class OneDriveApi
{
private Config cfg;
private string clientId;
private string refreshToken, accessToken;
private SysTime accessTokenExpiration;
/* private */ HTTP http;
void delegate(string) onRefreshToken; // called when a new refresh_token is received
this(Config cfg, bool verbose)
this(Config cfg)
{
this.clientId = cfg.get("client_id");
this.cfg = cfg;
this.clientId = cfg.getValue("client_id");
http = HTTP();
//http.verbose = verbose;
//http.verbose = true;
}
bool init()
{
try {
refreshToken = readText(cfg.refreshTokenFilePath);
} catch (FileException e) {
return authorize();
}
return true;
}
bool authorize()
@ -51,13 +62,13 @@ final class OneDriveApi
import std.stdio, std.regex;
char[] response;
string url = authUrl ~ "?client_id=" ~ clientId ~ "&scope=onedrive.readwrite%20offline_access&response_type=code&redirect_uri=" ~ redirectUrl;
writeln("Authorize this app visiting:\n");
log.log("Authorize this app visiting:\n");
write(url, "\n\n", "Enter the response uri: ");
readln(response);
// match the authorization code
auto c = matchFirst(response, r"(?:code=)(([\w\d]+-){4}[\w\d]+)");
if (c.empty) {
writeln("Invalid uri");
log.log("Invalid uri");
return false;
}
c.popFront(); // skip the whole match
@ -65,11 +76,6 @@ final class OneDriveApi
return true;
}
void setRefreshToken(string refreshToken)
{
this.refreshToken = refreshToken;
}
// https://dev.onedrive.com/items/view_delta.htm
JSONValue viewChangesById(const(char)[] id, const(char)[] statusToken)
{
@ -206,7 +212,7 @@ final class OneDriveApi
accessToken = "bearer " ~ response["access_token"].str();
refreshToken = response["refresh_token"].str();
accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer());
if (onRefreshToken) onRefreshToken(refreshToken);
std.file.write(cfg.refreshTokenFilePath, refreshToken);
}
private void checkAccessTokenExpired()

View file

@ -2,10 +2,10 @@ import std.exception: ErrnoException;
import std.algorithm, std.datetime, std.file, std.json, std.path, std.regex;
import std.stdio, std.string;
import config, itemdb, onedrive, upload, util;
static import log;
private string uploadStateFileName = "resume_upload";
// threshold after which files will be uploaded using an upload session
private long thresholdFileSize = 10 * 2^^20; // 10 Mib
private long thresholdFileSize = 10 * 2^^20; // 10 MiB
private bool isItemFolder(const ref JSONValue item)
{
@ -49,7 +49,6 @@ final class SyncEngine
private Config cfg;
private OneDriveApi onedrive;
private ItemDatabase itemdb;
private bool verbose;
private Regex!char skipDir, skipFile;
private UploadSession session;
// token representing the last status correctly synced
@ -59,27 +58,28 @@ final class SyncEngine
// list of items to delete after the changes has been downloaded
private string[] pathsToDelete;
void delegate(string) onStatusToken;
this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, string configDirName, bool verbose)
this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb)
{
assert(onedrive && itemdb);
this.cfg = cfg;
this.onedrive = onedrive;
this.itemdb = itemdb;
//this.configDirName = configDirName;
this.verbose = verbose;
skipDir = wild2regex(cfg.get("skip_dir"));
skipFile = wild2regex(cfg.get("skip_file"));
session = UploadSession(onedrive, configDirName ~ "/" ~ uploadStateFileName, verbose);
skipDir = wild2regex(cfg.getValue("skip_dir"));
skipFile = wild2regex(cfg.getValue("skip_file"));
session = UploadSession(onedrive, cfg.uploadStateFilePath);
}
void init(string statusToken = null)
void init()
{
this.statusToken = statusToken;
// restore the previous status token
try {
statusToken = readText(cfg.statusTokenFilePath);
} catch (FileException e) {
// swallow exception
}
// check if there is an interrupted upload session
if (session.restore()) {
writeln("Continuing the upload session ...");
log.log("Continuing the upload session ...");
auto item = session.upload();
saveItem(item);
}
@ -87,7 +87,7 @@ final class SyncEngine
void applyDifferences()
{
if (verbose) writeln("Applying differences ...");
log.vlog("Applying differences ...");
try {
JSONValue changes;
do {
@ -96,7 +96,7 @@ final class SyncEngine
applyDifference(item);
}
statusToken = changes["@delta.token"].str;
onStatusToken(statusToken);
std.file.write(cfg.statusTokenFilePath, statusToken);
} while (("@odata.nextLink" in changes.object) !is null);
} catch (ErrnoException e) {
throw new SyncException(e.msg, e);
@ -130,7 +130,7 @@ final class SyncEngine
return;
}
if (verbose) writeln(id, " ", name);
log.vlog(id, " ", name);
// rename the local item if it is unsynced and there is a new version of it
Item oldItem;
@ -139,7 +139,7 @@ final class SyncEngine
if (cached && eTag != oldItem.eTag) {
oldPath = itemdb.computePath(id);
if (!isItemSynced(oldItem, oldPath)) {
if (verbose) writeln("The local item is unsynced, renaming");
log.vlog("The local item is unsynced, renaming");
if (exists(oldPath)) safeRename(oldPath);
cached = false;
}
@ -153,7 +153,7 @@ final class SyncEngine
ItemType type;
if (isItemDeleted(item)) {
if (verbose) writeln("The item is marked for deletion");
log.vlog("The item is marked for deletion");
if (cached) {
itemdb.deleteById(id);
pathsToDelete ~= oldPath;
@ -162,18 +162,18 @@ final class SyncEngine
} else if (isItemFile(item)) {
type = ItemType.file;
if (!path.matchFirst(skipFile).empty) {
if (verbose) writeln("Filtered out");
log.vlog("Filtered out");
return;
}
} else if (isItemFolder(item)) {
type = ItemType.dir;
if (!path.matchFirst(skipDir).empty) {
if (verbose) writeln("Filtered out");
log.vlog("Filtered out");
skippedItems ~= id;
return;
}
} else {
if (verbose) writeln("The item is neither a file nor a directory, skipping");
log.vlog("The item is neither a file nor a directory, skipping");
skippedItems ~= id;
return;
}
@ -194,7 +194,7 @@ final class SyncEngine
try {
crc32 = item["file"]["hashes"]["crc32Hash"].str;
} catch (JSONException e) {
if (verbose) writeln("The hash is not available");
log.vlog("The hash is not available");
}
}
@ -227,22 +227,22 @@ final class SyncEngine
{
if (exists(path)) {
if (isItemSynced(item, path)) {
if (verbose) writeln("The item is already present");
log.vlog("The item is already present");
// ensure the modified time is correct
setTimes(path, item.mtime, item.mtime);
return;
} else {
if (verbose) writeln("The local item is out of sync, renaming ...");
log.vlog("The local item is out of sync, renaming ...");
safeRename(path);
}
}
final switch (item.type) {
case ItemType.file:
writeln("Downloading: ", path);
log.log("Downloading: ", path);
onedrive.downloadById(item.id, path);
break;
case ItemType.dir:
writeln("Creating directory: ", path);
log.log("Creating directory: ", path);
mkdir(path);
break;
}
@ -257,20 +257,20 @@ final class SyncEngine
if (oldItem.eTag != newItem.eTag) {
string oldPath = itemdb.computePath(oldItem.id);
if (oldPath != newPath) {
writeln("Moving: ", oldPath, " -> ", newPath);
log.log("Moving: ", oldPath, " -> ", newPath);
if (exists(newPath)) {
if (verbose) writeln("The destination is occupied, renaming ...");
log.vlog("The destination is occupied, renaming ...");
safeRename(newPath);
}
rename(oldPath, newPath);
}
if (newItem.type == ItemType.file && oldItem.cTag != newItem.cTag) {
writeln("Downloading: ", newPath);
log.log("Downloading: ", newPath);
onedrive.downloadById(newItem.id, newPath);
}
setTimes(newPath, newItem.mtime, newItem.mtime);
} else {
if (verbose) writeln("The item has not changed");
log.vlog("The item has not changed");
}
}
@ -287,22 +287,22 @@ final class SyncEngine
if (localModifiedTime == item.mtime) {
return true;
} else {
if (verbose) writeln("The local item has a different modified time ", localModifiedTime, " remote is ", item.mtime);
log.vlog("The local item has a different modified time ", localModifiedTime, " remote is ", item.mtime);
}
if (testCrc32(path, item.crc32)) {
return true;
} else {
if (verbose) writeln("The local item has a different hash");
log.vlog("The local item has a different hash");
}
} else {
if (verbose) writeln("The local item is a directory but should be a file");
log.vlog("The local item is a directory but should be a file");
}
break;
case ItemType.dir:
if (isDir(path)) {
return true;
} else {
if (verbose) writeln("The local item is a file but should be a directory");
log.vlog("The local item is a file but should be a directory");
}
break;
}
@ -311,16 +311,16 @@ final class SyncEngine
private void deleteItems()
{
if (verbose) writeln("Deleting files ...");
log.vlog("Deleting files ...");
foreach_reverse (path; pathsToDelete) {
if (exists(path)) {
if (isFile(path)) {
remove(path);
writeln("Deleted file: ", path);
log.log("Deleted file: ", path);
} else {
try {
rmdir(path);
writeln("Deleted directory: ", path);
log.log("Deleted directory: ", path);
} catch (FileException e) {
// directory not empty
}
@ -335,12 +335,12 @@ final class SyncEngine
public void scanForDifferences(string path)
{
try {
if (verbose) writeln("Uploading differences ...");
log.vlog("Uploading differences ...");
Item item;
if (itemdb.selectByPath(path, item)) {
uploadDifferences(item);
}
if (verbose) writeln("Uploading new items ...");
log.vlog("Uploading new items ...");
uploadNewItems(path);
} catch (ErrnoException e) {
throw new SyncException(e.msg, e);
@ -353,19 +353,19 @@ final class SyncEngine
private void uploadDifferences(Item item)
{
if (verbose) writeln(item.id, " ", item.name);
log.vlog(item.id, " ", item.name);
string path = itemdb.computePath(item.id);
final switch (item.type) {
case ItemType.dir:
if (!path.matchFirst(skipDir).empty) {
if (verbose) writeln("Filtered out");
log.vlog("Filtered out");
break;
}
uploadDirDifferences(item, path);
break;
case ItemType.file:
if (!path.matchFirst(skipFile).empty) {
if (verbose) writeln("Filtered out");
log.vlog("Filtered out");
break;
}
uploadFileDifferences(item, path);
@ -378,18 +378,18 @@ final class SyncEngine
assert(item.type == ItemType.dir);
if (exists(path)) {
if (!isDir(path)) {
if (verbose) writeln("The item was a directory but now is a file");
log.vlog("The item was a directory but now is a file");
uploadDeleteItem(item, path);
uploadNewFile(path);
} else {
if (verbose) writeln("The directory has not changed");
log.vlog("The directory has not changed");
// loop trough the children
foreach (Item child; itemdb.selectChildren(item.id)) {
uploadDifferences(child);
}
}
} else {
if (verbose) writeln("The directory has been deleted");
log.vlog("The directory has been deleted");
uploadDeleteItem(item, path);
}
}
@ -403,12 +403,12 @@ final class SyncEngine
import core.time: Duration;
item.mtime.fracSecs = Duration.zero; // HACK
if (localModifiedTime != item.mtime) {
if (verbose) writeln("The file last modified time has changed");
log.vlog("The file last modified time has changed");
string id = item.id;
string eTag = item.eTag;
if (!testCrc32(path, item.crc32)) {
if (verbose) writeln("The file content has changed");
writeln("Uploading: ", path);
log.vlog("The file content has changed");
log.log("Uploading: ", path);
JSONValue response;
if (getSize(path) <= thresholdFileSize) {
response = onedrive.simpleUpload(path, path, eTag);
@ -424,15 +424,15 @@ final class SyncEngine
}
uploadLastModifiedTime(id, eTag, localModifiedTime.toUTC());
} else {
if (verbose) writeln("The file has not changed");
log.vlog("The file has not changed");
}
} else {
if (verbose) writeln("The item was a file but now is a directory");
log.vlog("The item was a file but now is a directory");
uploadDeleteItem(item, path);
uploadCreateDir(path);
}
} else {
if (verbose) writeln("The file has been deleted");
log.vlog("The file has been deleted");
uploadDeleteItem(item, path);
}
}
@ -462,7 +462,7 @@ final class SyncEngine
private void uploadCreateDir(const(char)[] path)
{
writeln("Creating remote directory: ", path);
log.log("Creating remote directory: ", path);
JSONValue item = ["name": baseName(path).idup];
item["folder"] = parseJSON("{}");
auto res = onedrive.createByPath(path.dirName ~ "/", item);
@ -471,7 +471,7 @@ final class SyncEngine
private void uploadNewFile(string path)
{
writeln("Uploading: ", path);
log.log("Uploading: ", path);
JSONValue response;
if (getSize(path) <= thresholdFileSize) {
response = onedrive.simpleUpload(path, path);
@ -490,11 +490,11 @@ final class SyncEngine
private void uploadDeleteItem(Item item, const(char)[] path)
{
writeln("Deleting remote item: ", path);
log.log("Deleting remote item: ", path);
try {
onedrive.deleteById(item.id, item.eTag);
} catch (OneDriveException e) {
if (e.code == 404) writeln(e.msg);
if (e.code == 404) log.log(e.msg);
else throw e;
}
itemdb.deleteById(item.id);
@ -540,7 +540,7 @@ final class SyncEngine
void uploadMoveItem(string from, string to)
{
writeln("Moving remote item: ", from, " -> ", to);
log.log("Moving remote item: ", from, " -> ", to);
Item fromItem, toItem, parentItem;
if (!itemdb.selectByPath(from, fromItem)) {
throw new SyncException("Can't move an unsynced item");
@ -572,7 +572,7 @@ final class SyncEngine
try {
uploadDeleteItem(item, path);
} catch (OneDriveException e) {
if (e.code == 404) writeln(e.msg);
if (e.code == 404) log.log(e.msg);
else throw e;
}
}

View file

@ -1,12 +1,8 @@
import std.algorithm;
import std.conv;
import std.datetime;
import std.file;
import std.json;
import std.stdio;
import std.algorithm, std.conv, std.datetime, std.file, std.json;
import onedrive;
static import log;
private long fragmentSize = 10 * 2^^20; // 10 Mib
private long fragmentSize = 10 * 2^^20; // 10 MiB
struct UploadSession
{
@ -17,7 +13,7 @@ struct UploadSession
// path where to save the session
private string sessionFilePath;
this(OneDriveApi onedrive, string sessionFilePath, bool verbose)
this(OneDriveApi onedrive, string sessionFilePath)
{
assert(onedrive);
this.onedrive = onedrive;
@ -39,15 +35,15 @@ struct UploadSession
bool restore()
{
if (exists(sessionFilePath)) {
if (verbose) writeln("Trying to restore the upload session ...");
log.vlog("Trying to restore the upload session ...");
session = readText(sessionFilePath).parseJSON();
auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str);
if (expiration < Clock.currTime()) {
if (verbose) writeln("The upload session is expired");
log.vlog("The upload session is expired");
return false;
}
if (!exists(session["localPath"].str)) {
if (verbose) writeln("The file do not exist anymore");
log.vlog("The file do not exist anymore");
return false;
}
// request the session status
@ -55,7 +51,7 @@ struct UploadSession
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
if (session["nextExpectedRanges"].array.length == 0) {
if (verbose) writeln("The upload session is completed");
log.vlog("The upload session is completed");
return false;
}
return true;
@ -70,7 +66,7 @@ struct UploadSession
JSONValue response;
while (true) {
long fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
if (verbose) writeln("Uploading fragment: ", offset, "-", offset + fragSize, "/", fileSize);
log.vlog("Uploading fragment: ", offset, "-", offset + fragSize, "/", fileSize);
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,