2024-01-08 23:13:17 +01:00
// What is this module called?
module itemdb ;
// What does this module require to function?
2018-01-02 13:41:56 +01:00
import std.datetime ;
import std.exception ;
import std.path ;
import std.string ;
2020-06-27 11:10:37 +02:00
import std.stdio ;
import std.algorithm.searching ;
2018-08-02 00:28:52 +02:00
import core.stdc.stdlib ;
2024-01-08 23:13:17 +01:00
import std.json ;
import std.conv ;
// What other modules that we have created do we need to import?
2015-09-14 23:56:14 +02:00
import sqlite ;
2024-01-08 23:13:17 +01:00
import util ;
import log ;
2015-09-14 23:56:14 +02:00
2018-01-02 15:05:32 +01:00
// Classification of a database item record.
enum ItemType {
	none,     // no type recorded (e.g. a NULL 'type' column)
	file,     // a regular file
	dir,      // a directory
	remote,   // an item that lives on a different drive id (shared folder link)
	unknown   // an object the JSON classifiers did not recognise
}
2018-01-02 15:05:32 +01:00
// One row of the 'item' table: the locally-tracked state of a OneDrive drive item.
struct Item {
	string   driveId;        // drive that owns this item
	string   id;             // item id within that drive
	string   name;           // item name as used locally
	string   remoteName;     // online name when it differs from the local shared-folder name
	ItemType type;           // file | dir | remote | unknown | none
	string   eTag;           // OneDrive eTag (not returned for the root in OneDrive Business)
	string   cTag;           // OneDrive cTag (missing on old files and Business folders)
	SysTime  mtime;          // last-modified timestamp
	string   parentId;       // id of the parent item, if any
	string   quickXorHash;   // preferred file hash
	string   sha256Hash;     // fallback file hash when quickXorHash is unavailable
	string   remoteDriveId;  // for remote items: drive id the real item lives on
	string   remoteParentId; // for remote items: parent id on the remote drive
	string   remoteId;       // for remote items: id on the remote drive
	ItemType remoteType;     // for remote items: type of the real item
	string   syncStatus;     // 'Y' or 'N' - used by the non-/delta operational modes
	string   size;           // file size as a string; "0" when the API supplied no size
}
// Construct an Item struct from a JSON driveItem as returned by the OneDrive API.
// Populates identity, timestamps, type, parent linkage, hashes/size and, for remote
// items, the remote* fields; always marks the new record as in-sync ("Y").
Item makeDatabaseItem(JSONValue driveItem) {

	Item item = {
		id: driveItem["id"].str,
		name: ("name" in driveItem) ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business
		eTag: ("eTag" in driveItem) ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business
		cTag: ("cTag" in driveItem) ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business)
		remoteName: ("actualOnlineName" in driveItem) ? driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders
	};

	// OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834
	// OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive
	if (isItemDeleted(driveItem)) {
		// Use the epoch sentinel SysTime(0) for deleted items
		item.mtime = SysTime(0);
	} else {
		// Item is not in a deleted state
		// Resolve 'Key not found: fileSystemInfo' when the item is a remote item
		// https://github.com/abraunegg/onedrive/issues/11
		if (isItemRemote(driveItem)) {
			// A remoteItem is a OneDrive object on a 'different' drive id than the account default.
			// Normally 'remoteItem' carries 'fileSystemInfo'; however a 'link' created via the
			// 'Add Shortcut ..' option in the OneDrive WebUI does not have it in the expected
			// place, which previously crashed the application.
			// See: https://github.com/abraunegg/onedrive/issues/1533
			if ("fileSystemInfo" in driveItem["remoteItem"]) {
				// 'fileSystemInfo' is in 'remoteItem' - the majority of cases
				item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
			} else if ("fileSystemInfo" in driveItem) {
				// remote item, but 'fileSystemInfo' only exists at the top level
				item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
			}
			// NOTE(review): if neither location has 'fileSystemInfo', mtime stays SysTime.init
		} else {
			// Does fileSystemInfo exist at all?
			if ("fileSystemInfo" in driveItem) {
				item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
			}
		}
	}

	// Set this item object type. A later match overrides an earlier one, so a JSON
	// object that is both a folder and a remote ends up flagged as remote.
	// (The original code also maintained a 'typeSet' flag here that was written but
	// never read - removed as dead code.)
	if (isItemFile(driveItem)) {
		// 'file' object exists in the JSON
		addLogEntry("Flagging object as a file", ["debug"]);
		item.type = ItemType.file;
	}
	if (isItemFolder(driveItem)) {
		// 'folder' object exists in the JSON
		addLogEntry("Flagging object as a directory", ["debug"]);
		item.type = ItemType.dir;
	}
	if (isItemRemote(driveItem)) {
		// 'remote' object exists in the JSON
		addLogEntry("Flagging object as a remote", ["debug"]);
		item.type = ItemType.remote;
	}

	// root and remote items do not have parentReference
	if (!isItemRoot(driveItem) && (("parentReference" in driveItem) != null)) {
		item.driveId = driveItem["parentReference"]["driveId"].str;
		if (hasParentReferenceId(driveItem)) {
			item.parentId = driveItem["parentReference"]["id"].str;
		}
	}

	// extract the file hashes and file size
	if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) {
		if (hasFileSize(driveItem)) {
			// Get file size
			item.size = to!string(driveItem["size"].integer);
			// Get quickXorHash as default
			if ("quickXorHash" in driveItem["file"]["hashes"]) {
				item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str;
			} else {
				addLogEntry("quickXorHash is missing from " ~ driveItem["id"].str, ["debug"]);
			}
			// Fall back to sha256Hash when quickXorHash was not provided
			if (item.quickXorHash.empty) {
				if ("sha256Hash" in driveItem["file"]["hashes"]) {
					item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str;
				} else {
					addLogEntry("sha256Hash is missing from " ~ driveItem["id"].str, ["debug"]);
				}
			}
		} else {
			// The API provided no 'size' data for this file item; record a zero value
			item.size = "0";
		}
	}

	// Is the object a remote drive item - living on another driveId?
	if (isItemRemote(driveItem)) {
		// Check and assign remoteDriveId
		if ("parentReference" in driveItem["remoteItem"] && "driveId" in driveItem["remoteItem"]["parentReference"]) {
			item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str;
		}
		// Check and assign remoteParentId
		if ("parentReference" in driveItem["remoteItem"] && "id" in driveItem["remoteItem"]["parentReference"]) {
			item.remoteParentId = driveItem["remoteItem"]["parentReference"]["id"].str;
		}
		// Check and assign remoteId
		if ("id" in driveItem["remoteItem"]) {
			item.remoteId = driveItem["remoteItem"]["id"].str;
		}
		// Check and assign remoteType
		if ("file" in driveItem["remoteItem"].object) {
			item.remoteType = ItemType.file;
		} else {
			item.remoteType = ItemType.dir;
		}
	}

	// We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not:
	// - National Cloud Deployments do not support /delta as a query
	// - When using --single-directory
	// - When using --download-only --cleanup-local-files
	//
	// Thus we need to track in the database that this item is in sync
	// As we are making an item, set the syncStatus to Y
	// ONLY when either of the three modes above are being used, all the existing DB entries will get set to N
	// so when processing /children, it can be identified what the 'deleted' difference is
	item.syncStatus = "Y";

	// Return the created item
	return item;
}
2024-01-08 23:13:17 +01:00
// SQLite-backed store for Item records ('item' table), including path resolution,
// delta-link tracking and the sync-status bookkeeping used by the non-/delta modes.
final class ItemDatabase {
	// increment this for every change in the db schema
	immutable int itemDatabaseVersion = 13;

	Database db;
	// Prepared-statement SQL text, built once in the constructor
	string insertItemStmt;
	string updateItemStmt;
	string selectItemByIdStmt;
	string selectItemByRemoteIdStmt;
	string selectItemByParentIdStmt;
	string deleteItemByIdStmt;
	// flag that the database is accessible and we have control
	bool databaseInitialised = false;

	// Open (or create) the database at 'filename', validate the schema version,
	// and configure the connection PRAGMAs. On any SQLite error the constructor
	// logs guidance and returns with databaseInitialised left false.
	this(const(char)[] filename) {
		db = Database(filename);
		int dbVersion;
		try {
			dbVersion = db.getVersion();
		} catch (SqliteException e) {
			// An error was generated - what was the error?
			if (e.msg == "database is locked") {
				addLogEntry();
				addLogEntry("ERROR: The 'onedrive' application is already running - please check system process list for active application instances");
				addLogEntry(" - Use 'sudo ps aufxw | grep onedrive' to potentially determine active running process");
				addLogEntry();
			} else {
				// A different error .. detail the message, detail the actual SQLite Error Code to assist with troubleshooting
				addLogEntry();
				addLogEntry("ERROR: An internal database error occurred: " ~ e.msg ~ " (SQLite Error Code: " ~ to!string(e.errorCode) ~ ")");
				addLogEntry();
				// Give the user some additional information and pointers on this error
				// The below list is based on user issue / discussion reports since 2018
				switch (e.errorCode) {
					case 7: // SQLITE_NOMEM
						addLogEntry("The operation could not be completed due to insufficient memory. Please close unnecessary applications to free up memory and try again.");
						break;
					case 10: // SQLITE_IOERR
						addLogEntry("A disk I/O error occurred. This could be due to issues with the storage medium (e.g., disk full, hardware failure, filesystem corruption). Please check your disk's health using a disk utility tool, ensure there is enough free space, and check the filesystem for errors.");
						break;
					case 11: // SQLITE_CORRUPT
						addLogEntry("The database file appears to be corrupt. This could be due to incomplete or failed writes, hardware issues, or unexpected interruptions during database operations. Please perform a --resync operation.");
						break;
					case 14: // SQLITE_CANTOPEN
						addLogEntry("The database file could not be opened. Please check that the database file exists, has the correct permissions, and is not being blocked by another process or security software.");
						break;
					case 26: // SQLITE_NOTADB
						addLogEntry("The file attempted to be opened does not appear to be a valid SQLite database, or it may have been corrupted to a point where it's no longer recognizable. Please check your application configuration directory and/or perform a --resync operation.");
						break;
					default:
						addLogEntry("An unexpected error occurred. Please consult the application documentation or support to resolve this issue.");
						break;
				}
				// Blank line before exit
				addLogEntry();
			}
			return;
		}

		if (dbVersion == 0) {
			createTable();
		} else if (dbVersion != itemDatabaseVersion) {
			// reuse dbVersion rather than querying the database a second time
			addLogEntry("The item database is incompatible, re-creating database table structures");
			db.exec("DROP TABLE item");
			createTable();
		}

		// What is the threadsafe value
		auto threadsafeValue = db.getThreadsafeValue();
		addLogEntry("Threadsafe database value: " ~ to!string(threadsafeValue), ["debug"]);

		// Set the enforcement of foreign key constraints.
		// https://www.sqlite.org/pragma.html#pragma_foreign_keys
		db.exec("PRAGMA foreign_keys = TRUE");
		// Set the recursive trigger capability
		// https://www.sqlite.org/pragma.html#pragma_recursive_triggers
		db.exec("PRAGMA recursive_triggers = TRUE");
		// Set the journal mode for databases associated with the current connection
		// https://www.sqlite.org/pragma.html#pragma_journal_mode
		db.exec("PRAGMA journal_mode = WAL");
		// Automatic indexing is enabled by default as of version 3.7.17
		// https://www.sqlite.org/pragma.html#pragma_automatic_index
		db.exec("PRAGMA automatic_index = FALSE");
		// Store temporary tables in memory; speeds up reads relying on temp tables, indices and views
		// https://www.sqlite.org/pragma.html#pragma_temp_store
		db.exec("PRAGMA temp_store = MEMORY");
		// Tell SQLite to cleanup database table size
		// https://www.sqlite.org/pragma.html#pragma_auto_vacuum
		db.exec("PRAGMA auto_vacuum = FULL");
		// Set the database connection locking-mode (NORMAL or EXCLUSIVE)
		// https://www.sqlite.org/pragma.html#pragma_locking_mode
		db.exec("PRAGMA locking_mode = EXCLUSIVE");

		insertItemStmt = "
			INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteParentId, remoteId, remoteType, syncStatus, size)
			VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)
		";
		updateItemStmt = "
			UPDATE item
			SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteParentId = ?13, remoteId = ?14, remoteType = ?15, syncStatus = ?16, size = ?17
			WHERE driveId = ?1 AND id = ?2
		";
		selectItemByIdStmt = "
			SELECT *
			FROM item
			WHERE driveId = ?1 AND id = ?2
		";
		selectItemByRemoteIdStmt = "
			SELECT *
			FROM item
			WHERE remoteDriveId = ?1 AND remoteId = ?2
		";
		selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?";
		deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? AND id = ?";

		// flag that the database is accessible and we have control
		databaseInitialised = true;
	}

	// Did the constructor complete successfully?
	bool isDatabaseInitialised() {
		return databaseInitialised;
	}

	// Create the 'item' table, its indexes, and stamp the schema version.
	void createTable() {
		db.exec("CREATE TABLE item (
			driveId          TEXT NOT NULL,
			id               TEXT NOT NULL,
			name             TEXT NOT NULL,
			remoteName       TEXT,
			type             TEXT NOT NULL,
			eTag             TEXT,
			cTag             TEXT,
			mtime            TEXT NOT NULL,
			parentId         TEXT,
			quickXorHash     TEXT,
			sha256Hash       TEXT,
			remoteDriveId    TEXT,
			remoteParentId   TEXT,
			remoteId         TEXT,
			remoteType       TEXT,
			deltaLink        TEXT,
			syncStatus       TEXT,
			size             TEXT,
			PRIMARY KEY (driveId, id),
			FOREIGN KEY (driveId, parentId)
			REFERENCES item (driveId, id)
			ON DELETE CASCADE
			ON UPDATE RESTRICT
		)");
		db.exec("CREATE INDEX name_idx ON item (name)");
		db.exec("CREATE INDEX remote_idx ON item (remoteDriveId, remoteId)");
		db.exec("CREATE INDEX item_children_idx ON item (driveId, parentId)");
		db.exec("CREATE INDEX selectByPath_idx ON item (name, driveId, parentId)");
		db.setVersion(itemDatabaseVersion);
	}

	// Insert (or replace) the given item.
	void insert(const ref Item item) {
		auto p = db.prepare(insertItemStmt);
		bindItem(item, p);
		p.exec();
	}

	// Update the existing row keyed by (driveId, id).
	void update(const ref Item item) {
		auto p = db.prepare(updateItemStmt);
		bindItem(item, p);
		p.exec();
	}

	// Debug aid: dump any open SQLite statements.
	void dump_open_statements() {
		db.dump_open_statements();
	}

	// Force a WAL checkpoint; returns the underlying SQLite result code.
	int db_checkpoint() {
		return db.db_checkpoint();
	}

	// Insert the item if it does not exist, otherwise update it.
	void upsert(const ref Item item) {
		auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?");
		s.bind(1, item.driveId);
		s.bind(2, item.id);
		auto r = s.exec();
		Statement stmt;
		if (r.front[0] == "0") stmt = db.prepare(insertItemStmt);
		else stmt = db.prepare(updateItemStmt);
		bindItem(item, stmt);
		stmt.exec();
	}

	// Return all direct children of (driveId, id).
	Item[] selectChildren(const(char)[] driveId, const(char)[] id) {
		auto p = db.prepare(selectItemByParentIdStmt);
		p.bind(1, driveId);
		p.bind(2, id);
		auto res = p.exec();
		Item[] items;
		while (!res.empty) {
			items ~= buildItem(res);
			res.step();
		}
		return items;
	}

	// Look up an item by (driveId, id); returns true and fills 'item' on success.
	bool selectById(const(char)[] driveId, const(char)[] id, out Item item) {
		auto p = db.prepare(selectItemByIdStmt);
		p.bind(1, driveId);
		p.bind(2, id);
		auto r = p.exec();
		if (!r.empty) {
			item = buildItem(r);
			return true;
		}
		return false;
	}

	// Look up an item by its remote identity (remoteDriveId, remoteId).
	bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) {
		auto p = db.prepare(selectItemByRemoteIdStmt);
		p.bind(1, remoteDriveId);
		p.bind(2, remoteId);
		auto r = p.exec();
		if (!r.empty) {
			item = buildItem(r);
			return true;
		}
		return false;
	}

	// returns true if an item id is in the database
	bool idInLocalDatabase(const(string) driveId, const(string) id) {
		auto p = db.prepare(selectItemByIdStmt);
		p.bind(1, driveId);
		p.bind(2, id);
		auto r = p.exec();
		return !r.empty;
	}

	// returns the item with the given path
	// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
	// Remote items are transparently substituted with the item they point to.
	bool selectByPath(const(char)[] path, string rootDriveId, out Item item) {
		Item currItem = { driveId: rootDriveId };

		// Issue https://github.com/abraunegg/onedrive/issues/578
		path = "root/" ~ (startsWith(path, "./") || path == "." ? path.chompPrefix(".") : path);

		auto s = db.prepare("SELECT * FROM item WHERE name = ?1 AND driveId IS ?2 AND parentId IS ?3");
		foreach (name; pathSplitter(path)) {
			s.bind(1, name);
			s.bind(2, currItem.driveId);
			s.bind(3, currItem.id);
			auto r = s.exec();
			if (r.empty) return false;
			currItem = buildItem(r);

			// If the item is of type remote substitute it with the child
			if (currItem.type == ItemType.remote) {
				addLogEntry("Record is a Remote Object: " ~ to!string(currItem), ["debug"]);
				Item child;
				if (selectById(currItem.remoteDriveId, currItem.remoteId, child)) {
					assert(child.type != ItemType.remote, "The type of the child cannot be remote");
					currItem = child;
					addLogEntry("Selecting Record that is NOT Remote Object: " ~ to!string(currItem), ["debug"]);
				}
			}
		}
		item = currItem;
		return true;
	}

	// same as selectByPath() but it does not traverse remote folders, returns the remote element if that is what is required
	bool selectByPathIncludingRemoteItems(const(char)[] path, string rootDriveId, out Item item) {
		Item currItem = { driveId: rootDriveId };

		// Issue https://github.com/abraunegg/onedrive/issues/578
		path = "root/" ~ (startsWith(path, "./") || path == "." ? path.chompPrefix(".") : path);

		auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND driveId IS ?2 AND parentId IS ?3");
		foreach (name; pathSplitter(path)) {
			s.bind(1, name);
			s.bind(2, currItem.driveId);
			s.bind(3, currItem.id);
			auto r = s.exec();
			if (r.empty) return false;
			currItem = buildItem(r);
		}
		if (currItem.type == ItemType.remote) {
			addLogEntry("Record selected is a Remote Object: " ~ to!string(currItem), ["debug"]);
		}
		item = currItem;
		return true;
	}

	// Delete the row keyed by (driveId, id); children cascade via the foreign key.
	void deleteById(const(char)[] driveId, const(char)[] id) {
		auto p = db.prepare(deleteItemByIdStmt);
		p.bind(1, driveId);
		p.bind(2, id);
		p.exec();
	}

	// Bind every Item field to the 17 parameters shared by insertItemStmt / updateItemStmt.
	private void bindItem(const ref Item item, ref Statement stmt) {
		with (stmt) with (item) {
			bind(1, driveId);
			bind(2, id);
			bind(3, name);
			bind(4, remoteName);
			// type handling: ItemType.none is stored as NULL
			string typeStr = null;
			final switch (type) with (ItemType) {
				case file:    typeStr = "file";    break;
				case dir:     typeStr = "dir";     break;
				case remote:  typeStr = "remote";  break;
				case unknown: typeStr = "unknown"; break;
				case none:    typeStr = null;      break;
			}
			bind(5, typeStr);
			bind(6, eTag);
			bind(7, cTag);
			bind(8, mtime.toISOExtString());
			bind(9, parentId);
			bind(10, quickXorHash);
			bind(11, sha256Hash);
			bind(12, remoteDriveId);
			bind(13, remoteParentId);
			bind(14, remoteId);
			// remoteType handling: ItemType.none is stored as NULL
			string remoteTypeStr = null;
			final switch (remoteType) with (ItemType) {
				case file:    remoteTypeStr = "file";    break;
				case dir:     remoteTypeStr = "dir";     break;
				case remote:  remoteTypeStr = "remote";  break;
				case unknown: remoteTypeStr = "unknown"; break;
				case none:    remoteTypeStr = null;      break;
			}
			bind(15, remoteTypeStr);
			bind(16, syncStatus);
			bind(17, size);
		}
	}

	// Materialise an Item from the current result row (18 columns, 'SELECT *' order).
	private Item buildItem(Statement.Result result) {
		assert(!result.empty, "The result must not be empty");
		assert(result.front.length == 18, "The result must have 18 columns");
		Item item = {
			// column 0:  driveId
			// column 1:  id
			// column 2:  name
			// column 3:  remoteName - only used when there is a difference in the local name & remote shared folder name
			// column 4:  type
			// column 5:  eTag
			// column 6:  cTag
			// column 7:  mtime
			// column 8:  parentId
			// column 9:  quickXorHash
			// column 10: sha256Hash
			// column 11: remoteDriveId
			// column 12: remoteParentId
			// column 13: remoteId
			// column 14: remoteType
			// column 15: deltaLink
			// column 16: syncStatus
			// column 17: size
			driveId: result.front[0].dup,
			id: result.front[1].dup,
			name: result.front[2].dup,
			remoteName: result.front[3].dup,
			// Column 4 is type - set below
			eTag: result.front[5].dup,
			cTag: result.front[6].dup,
			mtime: SysTime.fromISOExtString(result.front[7]),
			parentId: result.front[8].dup,
			quickXorHash: result.front[9].dup,
			sha256Hash: result.front[10].dup,
			remoteDriveId: result.front[11].dup,
			remoteParentId: result.front[12].dup,
			remoteId: result.front[13].dup,
			// Column 14 is remoteType - set below
			// Column 15 is deltaLink - not part of Item
			syncStatus: result.front[16].dup,
			size: result.front[17].dup
		};
		// Configure item.type
		// bindItem() can legitimately store 'unknown' or NULL (ItemType.none) in this
		// column, so map those back instead of asserting (previously assert(0) here
		// crashed when such a record was read back).
		switch (result.front[4]) {
			case "file":    item.type = ItemType.file;    break;
			case "dir":     item.type = ItemType.dir;     break;
			case "remote":  item.type = ItemType.remote;  break;
			case "unknown": item.type = ItemType.unknown; break;
			default:        item.type = ItemType.none;    break;
		}
		// Configure item.remoteType
		switch (result.front[14]) {
			// We only care about 'dir' and 'file' for 'remote' items
			case "file": item.remoteType = ItemType.file; break;
			case "dir":  item.remoteType = ItemType.dir;  break;
			default:     item.remoteType = ItemType.none; break; // Default to ItemType.none
		}
		// Return item
		return item;
	}

	// computes the path of the given item id
	// the path is relative to the sync directory ex: "Music/Turbo Killer.mp3"
	// the trailing slash is not added even if the item is a directory
	string computePath(const(char)[] driveId, const(char)[] id) {
		assert(driveId && id);
		string path;
		Item item;
		auto s = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND id = ?2");
		auto s2 = db.prepare("SELECT driveId, id FROM item WHERE remoteDriveId = ?1 AND remoteId = ?2");
		while (true) {
			s.bind(1, driveId);
			s.bind(2, id);
			auto r = s.exec();
			if (!r.empty) {
				item = buildItem(r);
				if (item.type == ItemType.remote) {
					// substitute the last name with the current
					ptrdiff_t idx = indexOf(path, '/');
					path = idx >= 0 ? item.name ~ path[idx .. $] : item.name;
				} else {
					if (path) path = item.name ~ "/" ~ path;
					else path = item.name;
				}
				id = item.parentId;
			} else {
				if (id == null) {
					// check for remoteItem
					s2.bind(1, item.driveId);
					s2.bind(2, item.id);
					auto r2 = s2.exec();
					if (r2.empty) {
						// root reached
						assert(path.length >= 4);
						// remove "root/" from path string if it exists
						if (path.length >= 5) {
							if (canFind(path, "root/")) {
								path = path[5 .. $];
							}
						} else {
							path = path[4 .. $];
						}
						// special case of computing the path of the root itself
						if (path.length == 0) path = ".";
						break;
					} else {
						// remote folder
						driveId = r2.front[0].dup;
						id = r2.front[1].dup;
					}
				} else {
					// broken tree
					addLogEntry("The following generated a broken tree query:", ["debug"]);
					addLogEntry("Drive ID: " ~ to!string(driveId), ["debug"]);
					addLogEntry("Item ID: " ~ to!string(id), ["debug"]);
					assert(0);
				}
			}
		}
		return path;
	}

	// Return all items that carry a remote identity (remoteDriveId populated).
	Item[] selectRemoteItems() {
		Item[] items;
		auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL");
		auto res = stmt.exec();
		while (!res.empty) {
			items ~= buildItem(res);
			res.step();
		}
		return items;
	}

	// Return the stored deltaLink for (driveId, id), or null when none is recorded.
	string getDeltaLink(const(char)[] driveId, const(char)[] id) {
		// Log what we received
		addLogEntry("DeltaLink Query (driveId): " ~ to!string(driveId), ["debug"]);
		addLogEntry("DeltaLink Query (id): " ~ to!string(id), ["debug"]);
		assert(driveId && id);
		auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2");
		stmt.bind(1, driveId);
		stmt.bind(2, id);
		auto res = stmt.exec();
		if (res.empty) return null;
		return res.front[0].dup;
	}

	// Persist the deltaLink on the row keyed by (driveId, id).
	void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) {
		assert(driveId && id);
		assert(deltaLink);
		auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2");
		stmt.bind(1, driveId);
		stmt.bind(2, id);
		stmt.bind(3, deltaLink);
		stmt.exec();
	}

	// National Cloud Deployments (US and DE) do not support /delta as a query
	// We need to track in the database that this item is in sync
	// As we query /children to get all children from OneDrive, update anything in the database
	// to be flagged as not-in-sync, thus, we can use that flag to determine what was previously
	// in-sync, but now deleted on OneDrive
	void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) {
		assert(driveId);
		auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2");
		stmt.bind(1, driveId);
		stmt.bind(2, id);
		stmt.exec();
	}

	// National Cloud Deployments (US and DE) do not support /delta as a query
	// Select items that have a out-of-sync flag set
	Item[] selectOutOfSyncItems(const(char)[] driveId) {
		assert(driveId);
		Item[] items;
		auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1");
		stmt.bind(1, driveId);
		auto res = stmt.exec();
		while (!res.empty) {
			items ~= buildItem(res);
			res.step();
		}
		return items;
	}

	// OneDrive Business Folders are stored in the database potentially without a root | parentRoot link
	// Select items associated with the provided driveId that have no parent link
	Item[] selectByDriveId(const(char)[] driveId) {
		assert(driveId);
		Item[] items;
		auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL");
		stmt.bind(1, driveId);
		auto res = stmt.exec();
		while (!res.empty) {
			items ~= buildItem(res);
			res.step();
		}
		return items;
	}

	// Select all items associated with the provided driveId
	Item[] selectAllItemsByDriveId(const(char)[] driveId) {
		assert(driveId);
		Item[] items;
		auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1");
		stmt.bind(1, driveId);
		auto res = stmt.exec();
		while (!res.empty) {
			items ~= buildItem(res);
			res.step();
		}
		return items;
	}

	// Perform a vacuum on the database, commit WAL / SHM to file
	void performVacuum() {
		addLogEntry("Attempting to perform a database vacuum to merge any temporary data", ["debug"]);
		try {
			auto stmt = db.prepare("VACUUM;");
			stmt.exec();
			addLogEntry("Database vacuum is complete", ["debug"]);
		} catch (SqliteException e) {
			addLogEntry();
			addLogEntry("ERROR: Unable to perform a database vacuum: " ~ e.msg);
			addLogEntry();
		}
	}

	// Select distinct driveId items from database
	string[] selectDistinctDriveIds() {
		string[] driveIdArray;
		auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;");
		auto res = stmt.exec();
		if (res.empty) return driveIdArray;
		while (!res.empty) {
			driveIdArray ~= res.front[0].dup;
			res.step();
		}
		return driveIdArray;
	}
}