diff --git a/Makefile.in b/Makefile.in index a6ba389a..9081bf12 100644 --- a/Makefile.in +++ b/Makefile.in @@ -55,7 +55,7 @@ endif system_unit_files = contrib/systemd/onedrive@.service user_unit_files = contrib/systemd/onedrive.service -DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-folders.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md +DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-items.md docs/client-architecture.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md docs/known-issues.md ifneq ("$(wildcard /etc/redhat-release)","") RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l) @@ -74,6 +74,7 @@ SOURCES = \ src/qxor.d \ src/curlEngine.d \ src/onedrive.d \ + src/webhook.d \ src/sync.d \ src/itemdb.d \ src/sqlite.d \ diff --git a/docs/application-security.md b/docs/application-security.md index e0fad5a2..96d07566 100644 --- a/docs/application-security.md +++ b/docs/application-security.md @@ -71,9 +71,9 @@ When using the OneDrive Client for Linux, the above authentication scopes will b This is similar to the Microsoft Windows OneDrive Client: -![Linux Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png) +![Windows Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png) -In a business environment, where IT Staff need to 'approve' the OneDrive Client for Linux, can do so knowing that the client is safe to use. 
The only concernt that the IT Staff should have is how is the client device, where the OneDrive Client for Linux is running, is being secured, as in a corporate setting, Windows would be controlled by Active Directory and applicable Group Policy Objects (GPO's) to ensure the security of corporate data on the client device. It is out of scope for this client to handle how Linux devices are being secure. +In a business setting, IT staff who need to authorise the use of the OneDrive Client for Linux in their environment can be assured of its safety. The primary concern for IT staff should be securing the device running the OneDrive Client for Linux. Unlike in a corporate environment where Windows devices are secured through Active Directory and Group Policy Objects (GPOs) to protect corporate data on the device, it is beyond the responsibility of this client to manage security on Linux devices. ## Configuring read-only access to your OneDrive data In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation. diff --git a/docs/business-shared-folders.md b/docs/business-shared-folders.md deleted file mode 100644 index 4282f4ac..00000000 --- a/docs/business-shared-folders.md +++ /dev/null @@ -1,40 +0,0 @@ -# How to configure OneDrive Business Shared Folder Sync -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Important Note -This feature has been 100% re-written from v2.5.0 onwards. 
A pre-requesite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to: -* Removing `sync_business_shared_folders = "true|false"` from your 'config' file -* Removing the 'business_shared_folders' file -* Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues. - -## Process Overview -Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: -1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you. -2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder -3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement. -4. Test the configuration using '--dry-run' -5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required - - -**NOTE:** This documentation will be updated as this feature progresses. - - -### Enable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_items = "true" -``` - -### Disable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_items = "false" -``` - -## Known Issues -Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. 
- -Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: - -![shared_with_me](./images/shared_with_me.JPG) - -This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/business-shared-items.md b/docs/business-shared-items.md new file mode 100644 index 00000000..bce09a2f --- /dev/null +++ b/docs/business-shared-items.md @@ -0,0 +1,251 @@ +# How to sync OneDrive Business Shared Items +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Important Note +This feature has been 100% re-written from v2.5.0 onwards. A prerequisite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to: +* Removing `sync_business_shared_folders = "true|false"` from your 'config' file +* Removing the 'business_shared_folders' file +* Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues. +* Removing any configuration online that might be related to using this feature prior to v2.5.0 + +## Process Overview +Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: +1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you. +2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder +3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. 
Adding this option will trigger a `--resync` requirement. +4. Test the configuration using '--dry-run' +5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required + + +**NOTE:** This documentation will be updated as this feature progresses. + + +### Enable syncing of OneDrive Business Shared Items via config file +```text +sync_business_shared_items = "true" +``` + +### Disable syncing of OneDrive Business Shared Items via config file +```text +sync_business_shared_items = "false" +``` + +## Syncing OneDrive Business Shared Folders +Use the following steps to add a OneDrive Business Shared Folder to your account: +1. Login to Microsoft OneDrive online, and navigate to 'Shared' from the left hand side pane + +![objects_shared_with_me](./images/objects_shared_with_me.png) + +2. Select the respective folder you wish to sync, and click the 'Add shortcut to My files' at the top of the page + +![add_shared_folder](./images/add_shared_folder.png) + +3. The final result online will look like this: + +![shared_folder_added](./images/shared_folder_added.png) + +When using Microsoft Windows, this shared folder will appear as the following: + +![windows_view_shared_folders](./images/windows_view_shared_folders.png) + +4. Sync your data using `onedrive --sync --verbose`. If you have just enabled the `sync_business_shared_items = "true"` configuration option, you will be required to perform a resync. During the sync, the selected shared folder will be downloaded: + +``` +... +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 4 +Finished processing /delta JSON response from the OneDrive API +Processing 3 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Creating local directory: ./my_shared_folder +Quota information is restricted or not available for this drive. 
+Syncing this OneDrive Business Shared Folder: my_shared_folder +Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 6 +Finished processing /delta JSON response from the OneDrive API +Processing 6 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Creating local directory: ./my_shared_folder/asdf +Creating local directory: ./my_shared_folder/original_data +Number of items to download from OneDrive: 3 +Downloading file: my_shared_folder/asdf/asdfasdfhashdkfasdf.txt ... done +Downloading file: my_shared_folder/asdf/asdfasdf.txt ... done +Downloading file: my_shared_folder/original_data/file1.data ... done +Performing a database consistency and integrity check on locally stored data +... +``` + +When this is viewed locally, on Linux, this shared folder is seen as the following: + +![linux_shared_folder_view](./images/linux_shared_folder_view.png) + +Any shared folder you add can utilise any 'client side filtering' rules that you have created. + + +## Syncing OneDrive Business Shared Files +There are two methods to support the syncing OneDrive Business Shared Files with the OneDrive Application +1. Add a 'shortcut' to your 'My Files' for the file, which creates a URL shortcut to the file which can be followed when using a Linux Window Manager (Gnome, KDE etc) and the link will open up in a browser. Microsoft Windows only supports this option. +2. Use `--sync-shared-files` option to sync all files shared with you to your local disk. If you use this method, you can utilise any 'client side filtering' rules that you have created to filter out files you do not want locally. This option will create a new folder locally, with sub-folders named after the person who shared the data with you. 
+ +### Syncing OneDrive Business Shared Files using Option 1 +1. As per the above method for adding folders, select the shared file, then select to 'Add shortcut' to the file + +![add_shared_file_shortcut](./images/add_shared_file_shortcut.png) + +2. The final result online will look like this: + +![add_shared_file_shortcut_added](./images/online_shared_file_link.png) + +When using Microsoft Windows, this shared file will appear as the following: + +![windows_view_shared_file_link](./images/windows_view_shared_file_link.png) + +3. Sync your data using `onedrive --sync --verbose`. If you have just enabled the `sync_business_shared_items = "true"` configuration option, you will be required to perform a resync. +``` +... +All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file: ./file to share.docx.url ... done +Syncing this OneDrive Business Shared Folder: my_shared_folder +Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 0 +Finished processing /delta JSON response from the OneDrive API +No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive +Quota information is restricted or not available for this drive. 
+Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT +Quota information is restricted or not available for this drive. +... +``` + +When this is viewed locally, on Linux, this shared folder is seen as the following: + +![linux_view_shared_file_link](./images/linux_view_shared_file_link.png) + +Any shared file link you add can utilise any 'client side filtering' rules that you have created. + + +### Syncing OneDrive Business Shared Files using Option 2 + +**NOTE:** When using option 2, all files that have been shared with you will be downloaded by default. To reduce this, first use `--list-shared-items` to list all shared items with your account, then use 'client side filtering' rules such as 'sync_list' configuration to selectively sync all the files to your local system. + +1. Review all items that have been shared with you by using `onedrive --list-shared-items`. This should display output similar to the following: +``` +... 
+Listing available OneDrive Business Shared Items: + +----------------------------------------------------------------------------------- +Shared File: large_document_shared.docx +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared File: no_download_access.docx +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared File: online_access_only.txt +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared File: read_only.txt +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared File: qewrqwerwqer.txt +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared File: dummy_file_to_share.docx +Shared By: testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared Folder: Sub Folder 2 +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared File: file to share.docx +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared Folder: Top Folder +Shared By: test user (testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared Folder: my_shared_folder +Shared By: testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +Shared Folder: Jenkins +Shared By: test user 
(testuser@mynasau3.onmicrosoft.com) +----------------------------------------------------------------------------------- +... +``` + +2. If applicable, add entries to a 'sync_list' file, to only sync the shared files that are of importance to you. + +3. Run the command `onedrive --sync --verbose --sync-shared-files` to sync the shared files to your local file system. This will create a new local folder called 'Files Shared With Me', and will contain sub-directories named after the entity account that has shared the file with you. In that folder will reside the shared file: + +``` +... +Finished processing /delta JSON response from the OneDrive API +No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive +Syncing this OneDrive Business Shared Folder: my_shared_folder +Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 0 +Finished processing /delta JSON response from the OneDrive API +No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive +Quota information is restricted or not available for this drive. +Creating the OneDrive Business Shared Files Local Directory: /home/alex/OneDrive/Files Shared With Me +Checking for any applicable OneDrive Business Shared Files which need to be synced locally +Creating the OneDrive Business Shared File Users Local Directory: /home/alex/OneDrive/Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com) +Creating the OneDrive Business Shared File Users Local Directory: /home/alex/OneDrive/Files Shared With Me/testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com) +Number of items to download from OneDrive: 7 +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/file to share.docx ... 
done +OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error +Unable to download this file as this was shared as read-only without download permission: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/no_download_access.docx +ERROR: File failed to download. Increase logging verbosity to determine why. +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/no_download_access.docx ... failed! +Downloading file: Files Shared With Me/testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)/dummy_file_to_share.docx ... done +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 0% | ETA --:--:-- +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/online_access_only.txt ... done +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/read_only.txt ... done +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/qewrqwerwqer.txt ... done +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 5% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 10% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 15% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 20% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 25% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 30% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 
35% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 40% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 45% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 50% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 55% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 60% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 65% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 70% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 75% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 80% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 85% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 90% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 95% | ETA 00:00:00 +Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 100% | DONE in 00:00:00 +Quota information is restricted or not available for this drive. +Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... done +Quota information is restricted or not available for this drive. 
+Quota information is restricted or not available for this drive. +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT +Quota information is restricted or not available for this drive. +... +``` + +When this is viewed locally, on Linux, this 'Files Shared With Me' and content is seen as the following: + +![files_shared_with_me_folder](./images/files_shared_with_me_folder.png) + +Unfortunately there is no Microsoft Windows equivalent for this capability. + +## Known Issues +Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. + +Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: + +![shared_with_me](./images/shared_with_me.JPG) + +This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/client-architecture.md b/docs/client-architecture.md new file mode 100644 index 00000000..8c2ee5dc --- /dev/null +++ b/docs/client-architecture.md @@ -0,0 +1,331 @@ +# OneDrive Client for Linux Application Architecture + +## How does the client work at a high level? +The client utilises the 'libcurl' library to communicate with the Microsoft Authentication Service and the Microsoft Graph API. 
The diagram below shows this high level interaction with the Microsoft services online: + +![client_use_of_libcurl](./puml/client_use_of_libcurl.png) + +Depending on your operational environment, it is possible to 'tweak' the following options which will modify how libcurl operates with its interaction with Microsoft OneDrive services: + +* Downgrade all HTTPS operations to use HTTP1.1 ('force_http_11') +* Control how long a specific transfer should take before it is considered too slow and aborted ('operation_timeout') +* Control libcurl handling of DNS Cache Timeout ('dns_timeout') +* Control the maximum time allowed for the connection to be established ('connect_timeout') +* Control the timeout for activity on an established HTTPS connection ('data_timeout') +* Control what IP protocol version should be used when communicating with OneDrive ('ip_protocol_version') +* Control what User Agent is presented to Microsoft services ('user_agent') + +**Note:** The default 'user_agent' value conforms to specific Microsoft requirements to identify as an ISV that complies with OneDrive traffic decoration requirements. Changing this value potentially will impact how Microsoft sees your client, thus your traffic may get throttled. For further information please read: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online + +Diving a little deeper into how the client operates, the diagram below outlines at a high level the operational workflow of the OneDrive Client for Linux, demonstrating how it interacts with the OneDrive API to maintain synchronisation, manage local and cloud data integrity, and ensure that user data is accurately mirrored between the local filesystem and OneDrive cloud storage. + +![High Level Application Sequence](./puml/high_level_operational_process.png) + +The application operational processes have several high level key stages: + +1. 
**Access Token Validation:** Initially, the client validates its access and the existing access token, refreshing it if necessary. This step ensures that the client has the required permissions to interact with the OneDrive API. + +2. **Query Microsoft OneDrive API:** The client queries the /delta API endpoint of Microsoft OneDrive, which returns JSON responses. The /delta endpoint is particularly used for syncing changes, helping the client to identify any updates in the OneDrive storage. + +3. **Process JSON Responses:** The client processes each JSON response to determine if it represents a 'root' or 'deleted' item. Items not marked as 'root' or 'deleted' are temporarily stored for further processing. For 'root' or 'deleted' items, the client processes them immediately, otherwise, the client evaluates the items against client-side filtering rules to decide whether to discard them or to process and save them in the local database cache for actions like creating directories or downloading files. + +4. **Local Cache Database Processing for Data Integrity:** The client processes its local cache database to check for data integrity and differences compared to the OneDrive storage. If differences are found, such as a file or folder change including deletions, the client uploads these changes to OneDrive. Responses from the API, including item metadata, are saved to the local cache database. + +5. **Local Filesystem Scanning:** The client scans the local filesystem for new files or folders. Each new item is checked against client-side filtering rules. If an item passes the filtering, it is uploaded to OneDrive. Otherwise, it is discarded if it doesn't meet the filtering criteria. + +6. **Final Data True-Up:** Lastly, the client queries the /delta link for a final true-up, processing any further online JSON changes if required. This ensures that the local and OneDrive storages are fully synchronised. + +## What are the operational modes of the client? 
+ +There are 2 main operational modes that the client can utilise: + +1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. This method is used when you utilise `--sync`. +2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive and utilises 'inotify' to watch for local system changes. This method is used when you utilise `--monitor`. + +By default, both modes consider all data stored online within Microsoft OneDrive as the 'source-of-truth' - that is, what is online, is the correct data (file version, file content, file timestamp, folder structure and so on). This consideration also matches how the Microsoft OneDrive Client for Windows operates. + +However, in standalone mode (`--sync`), you can *change* what reference the client will use as the 'source-of-truth' for your data by using the `--local-first` option so that the application will look at your local files *first* and consider your local files as your 'source-of-truth' to replicate that directory structure to Microsoft OneDrive. + +**Critical Advisory:** Please be aware that if you designate a network mount point (such as NFS, Windows Network Share, or Samba Network Share) as your `sync_dir`, this setup inherently lacks 'inotify' support. Support for 'inotify' is essential for real-time tracking of file changes, which means that the client's 'Monitor Mode' cannot immediately detect changes in files located on these network shares. Instead, synchronisation between your local filesystem and Microsoft OneDrive will occur at intervals specified by the `monitor_interval` setting. This limitation regarding 'inotify' support on network mount points like NFS or Samba is beyond the control of this client. 
+ +## OneDrive Client for Linux High Level Activity Flows + +The diagrams below show the high level process flow and decision making when running the application + +### Main functional activity flows +![Main Activity](./puml/main_activity_flows.png) + +### Processing a potentially new local item +![applyPotentiallyNewLocalItem](./puml/applyPotentiallyNewLocalItem.png) + +### Processing a potentially changed local item +![applyPotentiallyChangedItem](./puml/applyPotentiallyChangedItem.png) + +### Download a file from Microsoft OneDrive +![downloadFile](./puml/downloadFile.png) + +### Upload a modified file to Microsoft OneDrive +![uploadModifiedFile](./puml/uploadModifiedFile.png) + +### Upload a new local file to Microsoft OneDrive +![uploadFile](./puml/uploadFile.png) + +### Determining if an 'item' is synchronised between Microsoft OneDrive and the local file system +![Item Sync Determination](./puml/is_item_in_sync.png) + +### Determining if an 'item' is excluded due to 'Client Side Filtering' rules + +By default, the OneDrive Client for Linux will sync all files and folders between Microsoft OneDrive and the local filesystem. + +Client Side Filtering in the context of this client refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this: + +* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process. + +* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local. 
+ +* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage. + +* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync. + +This exclusion process can be illustrated by the following activity diagram. A 'true' return value means that the path being evaluated needs to be excluded: + +![Client Side Filtering Determination](./puml/client_side_filtering_rules.png) + +## File conflict handling - default operational modes + +When using the default operational modes (`--sync` or `--monitor`) the client application is conforming to how the Microsoft Windows OneDrive client operates in terms of resolving conflicts for files. + +Additionally, when using `--resync` this conflict resolution can differ slightly, as, when using `--resync` you are *deleting* the known application state, thus, the application has zero reference as to what was previously in sync with the local file system. + +Due to this factor, when using `--resync` the online source is always going to be considered accurate and the source-of-truth, regardless of the local file state, file timestamp or file hash. + +### Default Operational Modes - Conflict Handling + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync` +3. Modify file online +4. Modify file locally with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync` + +![conflict_handling_default](./puml/conflict_handling_default.png) + +#### Evidence of Conflict Handling +``` +... 
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +The local file to replace (./1.txt) has been modified locally since the last download. Renaming it to avoid potential local data loss. +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Downloading file ./1.txt ... done +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Processing α +... +The file has not changed +Processing เอกสาร +The directory has not changed +Processing 1.txt +The file has not changed +Scanning the local file system '~/OneDrive' for new data to upload +... +New items to upload to OneDrive: 1 +Total New Data to Upload: 52 Bytes +Uploading new file ./1-onedrive-client-dev.txt ... done. 
+Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + +### Default Operational Modes - Conflict Handling with --resync + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync` +3. Modify file online +4. Modify file locally with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --resync` + +![conflict_handling_default_resync](./puml/conflict_handling_default_resync.png) + +#### Evidence of Conflict Handling +``` +... +Deleting the saved application sync status ... +Using IPv4 and IPv6 (if configured) for all network operations +Checking Application Version ... +... +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 14 +Finished processing /delta JSON response from the OneDrive API +Processing 13 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Local file time discrepancy detected: ./1.txt +This local file has a different modified time 2024-Feb-19 19:32:55Z (UTC) when compared to remote modified time 2024-Feb-19 19:32:36Z (UTC) +The local file has a different hash when compared to remote file hash +Local item does not exist in local database - replacing with file from OneDrive - failed download? 
+The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Processing α +... +Processing เอกสาร +The directory has not changed +Processing 1.txt +The file has not changed +Scanning the local file system '~/OneDrive' for new data to upload +... +New items to upload to OneDrive: 1 +Total New Data to Upload: 52 Bytes +Uploading new file ./1-onedrive-client-dev.txt ... done. +Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + +## File conflict handling - local-first operational mode + +When using `--local-first` as your operational parameter the client application is now using your local filesystem data as the 'source-of-truth' as to what should be stored online. + +However - Microsoft OneDrive itself, has *zero* acknowledgement of this concept, thus, conflict handling needs to be aligned to how Microsoft OneDrive on other platforms operate, that is, rename the local offending file. 
+ +Additionally, when using `--resync` you are *deleting* the known application state, thus, the application has zero reference as to what was previously in sync with the local file system. + +Due to this factor, when using `--resync` the online source is always going to be considered accurate and the source-of-truth, regardless of the local file state, file timestamp or file hash or use of `--local-first`. + +### Local First Operational Modes - Conflict Handling + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first` +3. Modify file locally with different data|contents +4. Modify file online with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first` + +![conflict_handling_local-first_default](./puml/conflict_handling_local-first_default.png) + +#### Evidence of Conflict Handling +``` +Reading configuration file: /home/alex/.config/onedrive/config +... +Using IPv4 and IPv6 (if configured) for all network operations +Checking Application Version ... +... +Sync Engine Initialised with new Onedrive API instance +All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Processing α +The directory has not changed +... 
+The file has not changed +Processing เอกสาร +The directory has not changed +Processing 1.txt +Local file time discrepancy detected: 1.txt +The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive +Changed local items to upload to OneDrive: 1 +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: 1.txt -> 1-onedrive-client-dev.txt +Uploading new file 1-onedrive-client-dev.txt ... done. +Scanning the local file system '~/OneDrive' for new data to upload +... +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 3 +Finished processing /delta JSON response from the OneDrive API +Processing 2 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + + +### Local First Operational Modes - Conflict Handling with --resync + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first` +3. Modify file locally with different data|contents +4. Modify file online with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first --resync` + +![conflict_handling_local-first_resync](./puml/conflict_handling_local-first_resync.png) + +#### Evidence of Conflict Handling +``` +... + +The usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist. 
+This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss. +If in doubt, backup your local data before using --resync + +Are you sure you wish to proceed with --resync? [Y/N] y + +Deleting the saved application sync status ... +Using IPv4 and IPv6 (if configured) for all network operations +... +Sync Engine Initialised with new Onedrive API instance +All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Scanning the local file system '~/OneDrive' for new data to upload +Skipping item - excluded by sync_list config: ./random_25k_files +OneDrive Client requested to create this directory online: ./α +The requested directory to create was found on OneDrive - skipping creating the directory: ./α +... +New items to upload to OneDrive: 9 +Total New Data to Upload: 49 KB +... +The file we are attemtping to upload as a new file already exists on Microsoft OneDrive: ./1.txt +Skipping uploading this item as a new file, will upload as a modified file (online file already exists): ./1.txt +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Uploading new file ./1-onedrive-client-dev.txt ... done. 
+Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 15 +Finished processing /delta JSON response from the OneDrive API +Processing 14 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + +## Client Functional Component Architecture Relationships + +The diagram below shows the main functional relationship of application code components, and how these relate to each relevant code module within this application: + +![Functional Code Components](./puml/code_functional_component_relationships.png) + +## Database Schema + +The diagram below shows the database schema that is used within the application + +![Database Schema](./puml/database_schema.png) diff --git a/docs/images/add_shared_file_shortcut.png b/docs/images/add_shared_file_shortcut.png new file mode 100644 index 00000000..2d16abb4 Binary files /dev/null and b/docs/images/add_shared_file_shortcut.png differ diff --git a/docs/images/add_shared_folder.png b/docs/images/add_shared_folder.png new file mode 100644 index 00000000..a34f089b Binary files /dev/null and b/docs/images/add_shared_folder.png differ diff --git a/docs/images/files_shared_with_me_folder.png b/docs/images/files_shared_with_me_folder.png new file mode 100644 index 00000000..9f9462c2 Binary files /dev/null and b/docs/images/files_shared_with_me_folder.png differ diff --git a/docs/images/linux_shared_folder_view.png b/docs/images/linux_shared_folder_view.png new file mode 100644 index 00000000..2fa0a192 Binary files /dev/null and b/docs/images/linux_shared_folder_view.png differ diff --git 
a/docs/images/linux_view_shared_file_link.png b/docs/images/linux_view_shared_file_link.png new file mode 100644 index 00000000..eb2dfea9 Binary files /dev/null and b/docs/images/linux_view_shared_file_link.png differ diff --git a/docs/images/objects_shared_with_me.png b/docs/images/objects_shared_with_me.png new file mode 100644 index 00000000..1327c535 Binary files /dev/null and b/docs/images/objects_shared_with_me.png differ diff --git a/docs/images/online_shared_file_link.png b/docs/images/online_shared_file_link.png new file mode 100644 index 00000000..c264b105 Binary files /dev/null and b/docs/images/online_shared_file_link.png differ diff --git a/docs/images/shared_folder_added.png b/docs/images/shared_folder_added.png new file mode 100644 index 00000000..3677de1b Binary files /dev/null and b/docs/images/shared_folder_added.png differ diff --git a/docs/images/windows_view_shared_file_link.png b/docs/images/windows_view_shared_file_link.png new file mode 100644 index 00000000..d6bfb02f Binary files /dev/null and b/docs/images/windows_view_shared_file_link.png differ diff --git a/docs/images/windows_view_shared_folders.png b/docs/images/windows_view_shared_folders.png new file mode 100644 index 00000000..9432a62c Binary files /dev/null and b/docs/images/windows_view_shared_folders.png differ diff --git a/docs/install.md b/docs/install.md index f5338122..33bf4e04 100644 --- a/docs/install.md +++ b/docs/install.md @@ -12,23 +12,25 @@ Distribution packages may be of an older release when compared to the latest rel 
|---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | | | Arch Linux

Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)

**Note:** You must first install 'base-devel' as this is a pre-requisite for using the AUR

**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'

**Note:** System must have at least 1GB of memory & 1GB swap space -| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | -| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| CentOS 8 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 8 package|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first | +| CentOS 9 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 9 package|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first | +| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| | | Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |Fedora Rawhide package|✔|✔|✔|✔| | | Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| | | Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | Homebrew package |❌|✔|❌|❌| | -| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories

It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories

It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | | NixOS | [onedrive](https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive)|nixpkgs unstable package|❌|✔|❌|❌| Use package `onedrive` either by adding it to `configuration.nix` or by using the command `nix-env -iA .onedrive`. This does not install a service. To install a service, use unstable channel (will stabilize in 20.09) and add `services.onedrive.enable=true` in `configuration.nix`. You can also add a custom package using the `services.onedrive.package` option (recommended since package lags upstream). Enabling the service installs a default package too (based on the channel). You can also add multiple onedrive accounts trivially, see [documentation](https://github.com/NixOS/nixpkgs/pull/77734#issuecomment-575874225). | | OpenSuSE | [onedrive](https://software.opensuse.org/package/onedrive) |openSUSE Tumbleweed package|✔|✔|❌|❌| | | OpenSuSE Build Service | [onedrive](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive) | No API Available |✔|✔|✔|✔| Package Build Service for Debian and Ubuntu | -| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |Raspbian Stable package |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories

It is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |Raspbian Stable package |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories as the package is obsolete and is not supported

For a supported application version, it is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Slackware | [onedrive](https://slackbuilds.org/result/?search=onedrive&sv=) |SlackBuilds package|✔|✔|❌|❌| | | Solus | [onedrive](https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R) |Solus package|✔|✔|❌|❌| | -| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |Ubuntu 23.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | +| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |Ubuntu 23.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported

For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | | Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |Void Linux x86_64 package|✔|✔|❌|❌| | #### Important information for all Ubuntu and Ubuntu based distribution users: @@ -102,7 +104,7 @@ For notifications the following is also necessary: sudo yum install libnotify-devel ``` -### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x +### Dependencies: Fedora > Version 18 / CentOS 8.x / CentOS 9.x/ RHEL 8.x / RHEL 9.x ```text sudo dnf groupinstall 'Development Tools' sudo dnf install libcurl-devel sqlite-devel diff --git a/docs/puml/applyPotentiallyChangedItem.png b/docs/puml/applyPotentiallyChangedItem.png new file mode 100644 index 00000000..cbb7f956 Binary files /dev/null and b/docs/puml/applyPotentiallyChangedItem.png differ diff --git a/docs/puml/applyPotentiallyChangedItem.puml b/docs/puml/applyPotentiallyChangedItem.puml new file mode 100644 index 00000000..58ed2d53 --- /dev/null +++ b/docs/puml/applyPotentiallyChangedItem.puml @@ -0,0 +1,48 @@ +@startuml +start +partition "applyPotentiallyChangedItem" { + :Check if existing item path differs from changed item path; + if (itemWasMoved) then (yes) + :Log moving item; + if (destination exists) then (yes) + if (item in database) then (yes) + :Check if item is synced; + if (item is synced) then (yes) + :Log destination is in sync; + else (no) + :Log destination occupied with a different item; + :Backup conflicting file; + note right: Local data loss prevention + endif + else (no) + :Log destination occupied by an un-synced file; + :Backup conflicting file; + note right: Local data loss prevention + endif + endif + :Try to rename path; + if (dry run) then (yes) + :Track as faked id item; + :Track path not renamed; + else (no) + :Rename item; + :Flag item as moved; + if (item is a 
file) then (yes) + :Set local timestamp to match online; + endif + endif + else (no) + endif + :Check if eTag changed; + if (eTag changed) then (yes) + if (item is a file and not moved) then (yes) + :Decide if to download based on hash; + else (no) + :Update database; + endif + else (no) + :Update database if timestamp differs or in specific operational mode; + endif +} +stop +@enduml diff --git a/docs/puml/applyPotentiallyNewLocalItem.png b/docs/puml/applyPotentiallyNewLocalItem.png new file mode 100644 index 00000000..59705f75 Binary files /dev/null and b/docs/puml/applyPotentiallyNewLocalItem.png differ diff --git a/docs/puml/applyPotentiallyNewLocalItem.puml b/docs/puml/applyPotentiallyNewLocalItem.puml new file mode 100644 index 00000000..b900f3ef --- /dev/null +++ b/docs/puml/applyPotentiallyNewLocalItem.puml @@ -0,0 +1,90 @@ +@startuml +start +partition "applyPotentiallyNewLocalItem" { + :Check if path exists; + + if (Path exists?) then (yes) + :Log "Path on local disk already exists"; + + if (Is symbolic link?) then (yes) + :Log "Path is a symbolic link"; + + if (Can read symbolic link?) then (no) + :Log "Reading symbolic link failed"; + :Log "Skipping item - invalid symbolic link"; + stop + endif + endif + + :Determine if item is in-sync; + note right: Execute 'isItemSynced()' function + if (Is item in-sync?) then (yes) + :Log "Item in-sync"; + :Update/Insert item in DB; + stop + else (no) + :Log "Item not in-sync"; + :Compare local & remote modification times; + + if (Local time > Remote time?) then (yes) + if (ID in database?) then (yes) + :Log "Local file is newer & ID in DB"; + :Fetch latest DB record; + if (Times equal?) then (yes) + :Log "Times match, keeping local file"; + else (no) + :Log "Local time newer, keeping file"; + note right: Online item has an 'older' modified timestamp wise than the local file\nIt is assumed that the local file is the file to keep + endif + stop + else (no) + :Log "Local item not in DB"; + if (Bypass data protection?) 
then (yes) + :Log "WARNING: Data protection disabled"; + else (no) + :Safe backup local file; + note right: Local data loss prevention + endif + stop + endif + else (no) + if (Remote time > Local time?) then (yes) + :Log "Remote item is newer"; + if (Bypass data protection?) then (yes) + :Log "WARNING: Data protection disabled"; + else (no) + :Safe backup local file; + note right: Local data loss prevention + endif + endif + + if (Times equal?) then (yes) + note left: Specific handling if timestamp was\nadjusted by isItemSynced() + :Log "Times equal, no action required"; + :Update/Insert item in DB; + stop + endif + endif + endif + + else (no) + :Handle as potentially new item; + switch (Item type) + case (File) + :Add to download queue; + case (Directory) + :Log "Creating local directory"; + if (Dry run?) then (no) + :Create directory & set attributes; + :Save item to DB; + else + :Log "Dry run, faking directory creation"; + :Save item to dry-run DB; + endif + case (Unknown) + :Log "Unknown type, no action"; + endswitch + endif +} +stop +@enduml diff --git a/docs/puml/client_side_filtering_rules.png b/docs/puml/client_side_filtering_rules.png new file mode 100644 index 00000000..2a71d76a Binary files /dev/null and b/docs/puml/client_side_filtering_rules.png differ diff --git a/docs/puml/client_side_filtering_rules.puml b/docs/puml/client_side_filtering_rules.puml new file mode 100644 index 00000000..2e3ed1a6 --- /dev/null +++ b/docs/puml/client_side_filtering_rules.puml @@ -0,0 +1,71 @@ +@startuml +start +:Start; +partition "checkPathAgainstClientSideFiltering" { + :Get localFilePath; + + if (Does path exist?) then (no) + :Return false; + stop + endif + + if (Check .nosync?) then (yes) + :Check for .nosync file; + if (.nosync found) then (yes) + :Log and return true; + stop + endif + endif + + if (Skip dotfiles?) then (yes) + :Check if dotfile; + if (Is dotfile) then (yes) + :Log and return true; + stop + endif + endif + + if (Skip symlinks?) 
then (yes) + :Check if symlink; + if (Is symlink) then (yes) + if (Config says skip?) then (yes) + :Log and return true; + stop + elseif (Unexisting symlink?) then (yes) + :Check if relative link works; + if (Relative link ok) then (no) + :Log and return true; + stop + endif + endif + endif + endif + + if (Skip dir or file?) then (yes) + :Check dir or file exclusion; + if (Excluded by config?) then (yes) + :Log and return true; + stop + endif + endif + + if (Use sync_list?) then (yes) + :Check sync_list exclusions; + if (Excluded by sync_list?) then (yes) + :Log and return true; + stop + endif + endif + + if (Check file size?) then (yes) + :Check for file size limit; + if (File size exceeds limit?) then (yes) + :Log and return true; + stop + endif + endif + + :Return false; +} +stop +@enduml diff --git a/docs/puml/client_use_of_libcurl.png b/docs/puml/client_use_of_libcurl.png new file mode 100644 index 00000000..03fe31d8 Binary files /dev/null and b/docs/puml/client_use_of_libcurl.png differ diff --git a/docs/puml/client_use_of_libcurl.puml b/docs/puml/client_use_of_libcurl.puml new file mode 100644 index 00000000..aa8980ff --- /dev/null +++ b/docs/puml/client_use_of_libcurl.puml @@ -0,0 +1,25 @@ +@startuml +participant "OneDrive Client\nfor Linux" as od +participant "libcurl" as lc +participant "Microsoft Authentication Service\n(OAuth 2.0 Endpoint)" as oauth +participant "Microsoft Graph API" as graph + +activate od +activate lc + +od->oauth: Request access token +activate oauth +oauth-->od: Access token +deactivate oauth + +loop API Communication + od->lc: Construct HTTPS request (with token) + activate lc + lc->graph: API Request + activate graph + graph-->lc: API Response + deactivate graph + lc-->od: Process response + deactivate lc +end +@enduml \ No newline at end of file diff --git a/docs/puml/code_functional_component_relationships.png b/docs/puml/code_functional_component_relationships.png new file mode 100644 index 00000000..1a2a2854 Binary files 
/dev/null and b/docs/puml/code_functional_component_relationships.png differ diff --git a/docs/puml/code_functional_component_relationships.puml b/docs/puml/code_functional_component_relationships.puml new file mode 100644 index 00000000..ede5bff0 --- /dev/null +++ b/docs/puml/code_functional_component_relationships.puml @@ -0,0 +1,78 @@ +@startuml +!define DATABASE_ENTITY(x) entity x +component main { +} +component config { +} +component log { +} +component curlEngine { +} +component util { +} +component onedrive { +} +component syncEngine { +} +component itemdb { +} +component clientSideFiltering { +} +component monitor { +} +component sqlite { +} +component qxor { +} + +DATABASE_ENTITY("Database") + +main --> config +main --> log +main --> curlEngine +main --> util +main --> onedrive +main --> syncEngine +main --> itemdb +main --> clientSideFiltering +main --> monitor + +config --> log +config --> util + +clientSideFiltering --> config +clientSideFiltering --> util +clientSideFiltering --> log + +syncEngine --> config +syncEngine --> log +syncEngine --> util +syncEngine --> onedrive +syncEngine --> itemdb +syncEngine --> clientSideFiltering + +util --> log +util --> config +util --> qxor +util --> curlEngine + +sqlite --> log +sqlite -> "Database" : uses + +onedrive --> config +onedrive --> log +onedrive --> util +onedrive --> curlEngine + +monitor --> config +monitor --> util +monitor --> log +monitor --> clientSideFiltering +monitor .> syncEngine : inotify event + +itemdb --> sqlite +itemdb --> util +itemdb --> log + +curlEngine --> log +@enduml diff --git a/docs/puml/conflict_handling_default.png b/docs/puml/conflict_handling_default.png new file mode 100644 index 00000000..6e8b0226 Binary files /dev/null and b/docs/puml/conflict_handling_default.png differ diff --git a/docs/puml/conflict_handling_default.puml b/docs/puml/conflict_handling_default.puml new file mode 100644 index 00000000..e7f8eb3f --- /dev/null +++ b/docs/puml/conflict_handling_default.puml 
@@ -0,0 +1,31 @@ +@startuml +start +note left: Operational Mode 'onedrive --sync' +:Query OneDrive /delta API for online changes; +note left: This data is considered the 'source-of-truth'\nLocal data should be a 'replica' of this data +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + :Compute relevant file hashes; + :Check DB for file record; + if (DB record found) then (yes) + :Compare file hash with DB hash; + if (Is the hash different) then (yes) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + else (no) + endif + else (no) + endif + else (no) + endif +:Download file (as per online JSON item) as required; +else (no) + :Other handling for directories | root objects | deleted items; +endif +:Performing a database consistency and\nintegrity check on locally stored data; +:Scan file system for any new data to upload; +note left: The file that was renamed will be uploaded here +stop +@enduml \ No newline at end of file diff --git a/docs/puml/conflict_handling_default_resync.png b/docs/puml/conflict_handling_default_resync.png new file mode 100644 index 00000000..0d969c29 Binary files /dev/null and b/docs/puml/conflict_handling_default_resync.png differ diff --git a/docs/puml/conflict_handling_default_resync.puml b/docs/puml/conflict_handling_default_resync.puml new file mode 100644 index 00000000..c1061a87 --- /dev/null +++ b/docs/puml/conflict_handling_default_resync.puml @@ -0,0 +1,35 @@ +@startuml +start +note left: Operational Mode 'onedrive -sync --resync' +:Query OneDrive /delta API for online changes; +note left: This data is considered the 'source-of-truth'\nLocal data should be a 'replica' of this data +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + note left: In a
--resync scenario there are no DB\nrecords that can be used or referenced\nuntil the JSON item is processed and\nadded to the local database cache + if (Can the file be read) then (yes) + :Compute UTC timestamp data from local file and JSON data; + if (timestamps are equal) then (yes) + else (no) + :Log that a local file time discrepancy was detected; + if (Do file hashes match) then (yes) + :Correct the offending timestamp as hashes match; + else (no) + :Local file is technically different; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + endif + endif + else (no) + endif + else (no) + endif +:Download file (as per online JSON item) as required; +else (no) + :Other handling for directories | root objects | deleted items; +endif +:Performing a database consistency and\nintegrity check on locally stored data; +:Scan file system for any new data to upload; +note left: The file that was renamed will be uploaded here +stop +@enduml \ No newline at end of file diff --git a/docs/puml/conflict_handling_local-first_default.png b/docs/puml/conflict_handling_local-first_default.png new file mode 100644 index 00000000..475bf0e6 Binary files /dev/null and b/docs/puml/conflict_handling_local-first_default.png differ diff --git a/docs/puml/conflict_handling_local-first_default.puml b/docs/puml/conflict_handling_local-first_default.puml new file mode 100644 index 00000000..cb48f8c2 --- /dev/null +++ b/docs/puml/conflict_handling_local-first_default.puml @@ -0,0 +1,62 @@ +@startuml +start +note left: Operational Mode 'onedrive -sync -local-first' +:Performing a database consistency and\nintegrity check on locally stored data; +note left: This data is considered the 'source-of-truth'\nOnline data should be a 'replica' of this data +repeat + :Process each DB record; + if (Is the DB record in sync with local file) then (yes) + + else (no) + + :Log reason for discrepancy; + :Flag item to
be processed as a modified local file; + + endif +repeat while + +:Process modified items to upload; + +if (Does local file DB record match current latest online JSON data) then (yes) + +else (no) + + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + :Upload renamed local file as new file; + +endif + +:Upload modified file; + +:Scan file system for any new data to upload; + +:Query OneDrive /delta API for online changes; + +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + :Compute relevant file hashes; + :Check DB for file record; + if (DB record found) then (yes) + :Compare file hash with DB hash; + if (Is the hash different) then (yes) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + else (no) + endif + else (no) + + endif + else (no) + endif + + :Download file (as per online JSON item) as required; + +else (no) + :Other handling for directories | root ojects | deleted items; +endif +stop +@enduml \ No newline at end of file diff --git a/docs/puml/conflict_handling_local-first_resync.png b/docs/puml/conflict_handling_local-first_resync.png new file mode 100644 index 00000000..63b57d5b Binary files /dev/null and b/docs/puml/conflict_handling_local-first_resync.png differ diff --git a/docs/puml/conflict_handling_local-first_resync.puml b/docs/puml/conflict_handling_local-first_resync.puml new file mode 100644 index 00000000..aa35ac58 --- /dev/null +++ b/docs/puml/conflict_handling_local-first_resync.puml @@ -0,0 +1,70 @@ +@startuml +start +note left: Operational Mode 'onedrive -sync -local-first -resync' +:Query OneDrive API and create new database with default root account objects; 
+:Performing a database consistency and\nintegrity check on locally stored data; +note left: This data is considered the 'source-of-truth'\nOnline data should be a 'replica' of this data\nHowever the database has only 1 record currently +:Scan file system for any new data to upload; +note left: This is where in this specific mode all local\n content is assessed for applicability for\nupload to Microsoft OneDrive + +repeat + :For each new local item; + if (Is the item a directory) then (yes) + if (Is Directory found online) then (yes) + :Save directory details from online in local database; + else (no) + :Create directory online; + :Save details in local database; + endif + else (no) + :Flag file as a potentially new item to upload; + endif +repeat while + +:Process potential new items to upload; + +repeat + :For each potential file to upload; + if (Is File found online) then (yes) + if (Does the online JSON data match local file) then (yes) + :Save details in local database; + else (no) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + :Upload renamed local file as new file; + endif + else (no) + :Upload new file; + endif +repeat while + +:Query OneDrive /delta API for online changes; +:Process received JSON data; +if (JSON item is a file) then (yes) + if (Does the file exist locally) then (yes) + :Compute relevant file hashes; + :Check DB for file record; + if (DB record found) then (yes) + :Compare file hash with DB hash; + if (Is the hash different) then (yes) + :Log that the local file was modified locally since last sync; + :Renaming local file to avoid potential local data loss; + note left: Local data loss prevention\nRenamed file will be uploaded as new file + else (no) + endif + else (no) + + endif + else (no) + endif + +:Download file (as per online JSON item) as required; + +else (no) + :Other 
handling for directories | root objects | deleted items; +endif + + +stop +@enduml \ No newline at end of file diff --git a/docs/puml/database_schema.png b/docs/puml/database_schema.png new file mode 100644 index 00000000..b45eb526 Binary files /dev/null and b/docs/puml/database_schema.png differ diff --git a/docs/puml/database_schema.puml b/docs/puml/database_schema.puml new file mode 100644 index 00000000..225a03e3 --- /dev/null +++ b/docs/puml/database_schema.puml @@ -0,0 +1,39 @@ +@startuml + +class item { + driveId: TEXT + id: TEXT + name: TEXT + remoteName: TEXT + type: TEXT + eTag: TEXT + cTag: TEXT + mtime: TEXT + parentId: TEXT + quickXorHash: TEXT + sha256Hash: TEXT + remoteDriveId: TEXT + remoteParentId: TEXT + remoteId: TEXT + remoteType: TEXT + deltaLink: TEXT + syncStatus: TEXT + size: TEXT +} + +note right of item::driveId + PRIMARY KEY (driveId, id) + FOREIGN KEY (driveId, parentId) REFERENCES item +end note + +item --|> item : parentId + +note "Indexes" as N1 +note left of N1 + name_idx ON item (name) + remote_idx ON item (remoteDriveId, remoteId) + item_children_idx ON item (driveId, parentId) + selectByPath_idx ON item (name, driveId, parentId) +end note + +@enduml \ No newline at end of file diff --git a/docs/puml/downloadFile.png b/docs/puml/downloadFile.png new file mode 100644 index 00000000..4ab2322e Binary files /dev/null and b/docs/puml/downloadFile.png differ diff --git a/docs/puml/downloadFile.puml b/docs/puml/downloadFile.puml new file mode 100644 index 00000000..e61aab6c --- /dev/null +++ b/docs/puml/downloadFile.puml @@ -0,0 +1,63 @@ +@startuml +start + +partition "Download File" { + + :Get item specifics from JSON; + :Calculate item's path; + + if (Is item malware?)
then (yes) + :Log malware detected; + stop + else (no) + :Check for file size in JSON; + if (File size missing) then (yes) + :Log error; + stop + endif + + :Configure hashes for comparison; + if (Hashes missing) then (yes) + :Log error; + stop + endif + + if (Does file exist locally?) then (yes) + :Check DB for item; + if (DB hash match?) then (no) + :Log modification; Perform safe backup; + note left: Local data loss prevention + endif + endif + + :Check local disk space; + if (Insufficient space?) then (yes) + :Log insufficient space; + stop + else (no) + if (Dry run?) then (yes) + :Fake download process; + else (no) + :Attempt to download file; + if (Download exception occurs?) then (yes) + :Handle exceptions; Retry download or log error; + endif + + if (File downloaded successfully?) then (yes) + :Validate download; + if (Validation passes?) then (yes) + :Log success; Update DB; + else (no) + :Log validation failure; Remove file; + endif + else (no) + :Log download failed; + endif + endif + endif + endif + +} + +stop +@enduml diff --git a/docs/puml/high_level_operational_process.png b/docs/puml/high_level_operational_process.png new file mode 100644 index 00000000..19c90602 Binary files /dev/null and b/docs/puml/high_level_operational_process.png differ diff --git a/docs/puml/high_level_operational_process.puml b/docs/puml/high_level_operational_process.puml new file mode 100644 index 00000000..37d76b99 --- /dev/null +++ b/docs/puml/high_level_operational_process.puml @@ -0,0 +1,55 @@ +@startuml + +participant "OneDrive Client\nfor Linux" as Client +participant "Microsoft OneDrive\nAPI" as API + +== Access Token Validation == +Client -> Client: Validate access and\nexisting access token\nRefresh if needed + +== Query Microsoft OneDrive /delta API == +Client -> API: Query /delta API +API -> Client: JSON responses + +== Process JSON Responses == +loop for each JSON response + Client -> Client: Determine if JSON is 'root'\nor 'deleted' item\nElse, push into 
temporary array for further processing + alt if 'root' or 'deleted' + Client -> Client: Process 'root' or 'deleted' items + else + Client -> Client: Evaluate against 'Client Side Filtering' rules + alt if unwanted + Client -> Client: Discard JSON + else + Client -> Client: Process JSON (create dir/download file) + Client -> Client: Save in local database cache + end + end +end + +== Local Cache Database Processing for Data Integrity == +Client -> Client: Process local cache database\nto check local data integrity and for differences +alt if difference found + Client -> API: Upload file/folder change including deletion + API -> Client: Response with item metadata + Client -> Client: Save response to local cache database +end + +== Local Filesystem Scanning == +Client -> Client: Scan local filesystem\nfor new files/folders + +loop for each new item + Client -> Client: Check item against 'Client Side Filtering' rules + alt if item passes filtering + Client -> API: Upload new file/folder change including deletion + API -> Client: Response with item metadata + Client -> Client: Save response in local\ncache database + else + Client -> Client: Discard item\n(Does not meet filtering criteria) + end +end + +== Final Data True-Up == +Client -> API: Query /delta link for true-up +API -> Client: Process further online JSON changes if required + +@enduml diff --git a/docs/puml/is_item_in_sync.png b/docs/puml/is_item_in_sync.png new file mode 100644 index 00000000..4f6a55ba Binary files /dev/null and b/docs/puml/is_item_in_sync.png differ diff --git a/docs/puml/is_item_in_sync.puml b/docs/puml/is_item_in_sync.puml new file mode 100644 index 00000000..d3fe40a2 --- /dev/null +++ b/docs/puml/is_item_in_sync.puml @@ -0,0 +1,79 @@ +@startuml +start +partition "Is item in sync" { + :Check if path exists; + if (path does not exist) then (no) + :Return false; + stop + else (yes) + endif + + :Identify item type; + switch (item type) + case (file) + + :Check if path is a file; + if (path 
is not a file) then (no) + :Log "item is a directory but should be a file"; + :Return false; + stop + else (yes) + endif + + :Attempt to read local file; + if (file is unreadable) then (no) + :Log "file cannot be read"; + :Return false; + stop + else (yes) + endif + + :Get local and input item modified time; + note right: The 'input item' could be a database reference object, or the online JSON object\nas provided by the Microsoft OneDrive API + :Reduce time resolution to seconds; + + if (localModifiedTime == itemModifiedTime) then (yes) + :Return true; + stop + else (no) + :Log time discrepancy; + endif + + :Check if file hash is the same; + if (hash is the same) then (yes) + :Log "hash match, correcting timestamp"; + if (local time > item time) then (yes) + if (download only mode) then (no) + :Correct timestamp online if not dryRun; + else (yes) + :Correct local timestamp if not dryRun; + endif + else (no) + :Correct local timestamp if not dryRun; + endif + :Return false; + note right: Specifically return false here as we performed a time correction\nApplication logic will then perform additional handling based on this very specific response. 
+ stop + else (no) + :Log "different hash"; + :Return false; + stop + endif + + case (dir or remote) + :Check if path is a directory; + if (path is a directory) then (yes) + :Return true; + stop + else (no) + :Log "item is a file but should be a directory"; + :Return false; + stop + endif + + case (unknown) + :Return true but do not sync; + stop + endswitch +} +@enduml diff --git a/docs/puml/main_activity_flows.png b/docs/puml/main_activity_flows.png new file mode 100644 index 00000000..2205109c Binary files /dev/null and b/docs/puml/main_activity_flows.png differ diff --git a/docs/puml/main_activity_flows.puml b/docs/puml/main_activity_flows.puml new file mode 100644 index 00000000..d5a70080 --- /dev/null +++ b/docs/puml/main_activity_flows.puml @@ -0,0 +1,81 @@ +@startuml + +start + +:Validate access and existing access token\nRefresh if needed; + +:Query /delta API; +note right: Query Microsoft OneDrive /delta API +:Receive JSON responses; + +:Process JSON Responses; +partition "Process /delta JSON Responses" { + while (for each JSON response) is (yes) + :Determine if JSON is 'root'\nor 'deleted' item; + if ('root' or 'deleted') then (yes) + :Process 'root' or 'deleted' items; + if ('root' object) then (yes) + :Process 'root' JSON; + else (no) + if (Is 'deleted' object in sync) then (yes) + :Process deletion of local item; + else (no) + :Rename local file as it is not in sync; + note right: Deletion event conflict handling\nLocal data loss prevention + endif + endif + else (no) + :Evaluate against 'Client Side Filtering' rules; + if (unwanted) then (yes) + :Discard JSON; + else (no) + :Process JSON (create dir/download file); + if (Is the 'JSON' item in the local cache) then (yes) + :Process JSON as a potentially changed local item; + note left: Run 'applyPotentiallyChangedItem' function + else (no) + :Process JSON as potentially new local item; + note right: Run 'applyPotentiallyNewLocalItem' function + endif + :Process objects in download queue; + :Download
File; + note left: Download file from Microsoft OneDrive (Multi Threaded Download) + :Save in local database cache; + endif + endif + endwhile +} + +partition "Perform data integrity check based on local cache database" { + :Process local cache database\nto check local data integrity and for differences; + if (difference found) then (yes) + :Upload file/folder change including deletion; + note right: Upload local change to Microsoft OneDrive + :Receive response with item metadata; + :Save response to local cache database; + else (no) + endif +} + +partition "Local Filesystem Scanning" { + :Scan local filesystem\nfor new files/folders; + while (for each new item) is (yes) + :Check item against 'Client Side Filtering' rules; + if (item passes filtering) then (yes) + :Upload new file/folder change including deletion; + note right: Upload to Microsoft OneDrive + :Receive response with item metadata; + :Save response in local\ncache database; + else (no) + :Discard item\n(Does not meet filtering criteria); + endif + endwhile +} + +partition "Final True-Up" { + :Query /delta link for true-up; + note right: Final Data True-Up + :Process further online JSON changes if required; +} +stop +@enduml \ No newline at end of file diff --git a/docs/puml/uploadFile.png b/docs/puml/uploadFile.png new file mode 100644 index 00000000..84f60d9c Binary files /dev/null and b/docs/puml/uploadFile.png differ diff --git a/docs/puml/uploadFile.puml b/docs/puml/uploadFile.puml new file mode 100644 index 00000000..b7c1218f --- /dev/null +++ b/docs/puml/uploadFile.puml @@ -0,0 +1,62 @@ +@startuml +start +partition "Upload File" { + :Log "fileToUpload"; + :Check database for parent path; + if (parent path found?) then (yes) + if (drive ID not empty?) then (yes) + :Proceed; + else (no) + :Use defaultDriveId; + endif + else (no) + stop + endif + :Check if file exists locally; + if (file exists?) then (yes) + :Read local file; + if (can read file?) then (yes) + if (parent path in DB?) 
then (yes) + :Get file size; + if (file size <= max?) then (yes) + :Check available space on OneDrive; + if (space available?) then (yes) + :Check if file exists on OneDrive; + if (file exists online?) then (yes) + :Save online metadata only; + if (if local file newer) then (yes) + :Local file is newer; + :Upload file as changed local file; + else (no) + :Remote file is newer; + :Perform safe backup; + note right: Local data loss prevention + :Upload renamed file as new file; + endif + else (no) + :Attempt upload; + endif + else (no) + :Log "Insufficient space"; + endif + else (no) + :Log "File too large"; + endif + else (no) + :Log "Parent path issue"; + endif + else (no) + :Log "Cannot read file"; + endif + else (no) + :Log "File disappeared locally"; + endif + :Upload success or failure; + if (upload failed?) then (yes) + :Log failure; + else (no) + :Update cache; + endif +} +stop +@enduml diff --git a/docs/puml/uploadModifiedFile.png b/docs/puml/uploadModifiedFile.png new file mode 100644 index 00000000..6b72220d Binary files /dev/null and b/docs/puml/uploadModifiedFile.png differ diff --git a/docs/puml/uploadModifiedFile.puml b/docs/puml/uploadModifiedFile.puml new file mode 100644 index 00000000..4e30b368 --- /dev/null +++ b/docs/puml/uploadModifiedFile.puml @@ -0,0 +1,56 @@ +@startuml +start +partition "Upload Modified File" { + :Initialize API Instance; + :Check for Dry Run; + if (Is Dry Run?) then (yes) + :Create Fake Response; + else (no) + :Get Current Online Data; + if (Error Fetching Data) then (yes) + :Handle Errors; + if (Retryable Error?) then (yes) + :Retry Fetching Data; + detach + else (no) + :Log and Display Error; + endif + endif + if (filesize > 0 and valid latest online data) then (yes) + if (is online file newer) then (yes) + :Log that online is newer; + :Perform safe backup; + note left: Local data loss prevention + :Upload renamed local file as new file; + endif + endif + :Determine Upload Method; + if (Use Simple Upload?) 
then (yes) + :Perform Simple Upload; + if (Upload Error) then (yes) + :Handle Upload Errors and Retries; + if (Retryable Upload Error?) then (yes) + :Retry Upload; + detach + else (no) + :Log and Display Upload Error; + endif + endif + else (no) + :Create Upload Session; + :Perform Upload via Session; + if (Session Upload Error) then (yes) + :Handle Session Upload Errors and Retries; + if (Retryable Session Error?) then (yes) + :Retry Session Upload; + detach + else (no) + :Log and Display Session Error; + endif + endif + endif + endif + :Finalize; +} +stop +@enduml diff --git a/docs/usage.md b/docs/usage.md index 880de952..972d9270 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -634,7 +634,7 @@ By default, the location where your Microsoft OneDrive data is stored, is within To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored. -**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. +**Critical Advisory:** Please be aware that if you designate a network mount point (such as NFS, Windows Network Share, or Samba Network Share) as your `sync_dir`, this setup inherently lacks 'inotify' support. Support for 'inotify' is essential for real-time tracking of file changes, which means that the client's 'Monitor Mode' cannot immediately detect changes in files located on these network shares. 
Instead, synchronisation between your local filesystem and Microsoft OneDrive will occur at intervals specified by the `monitor_interval` setting. This limitation regarding 'inotify' support on network mount points like NFS or Samba is beyond the control of this client. ### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive? The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive: diff --git a/readme.md b/readme.md index 602e7232..d082bf85 100644 --- a/readme.md +++ b/readme.md @@ -14,22 +14,21 @@ Originally derived as a 'fork' from the [skilion](https://github.com/skilion/one This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018. ## Features -* Supports 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive -* Sync State Caching -* Real-Time local file monitoring with inotify -* Real-Time syncing of remote updates via webhooks -* File upload / download validation to ensure data integrity -* Resumable uploads -* Support OneDrive for Business (part of Office 365) -* Shared Folder support for OneDrive Personal and OneDrive Business accounts -* SharePoint / Office365 Shared Libraries -* Desktop notifications via libnotify -* Dry-run capability to test configuration changes -* Prevent major OneDrive accidental data deletion after configuration change -* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China) -* Supports single & multi-tenanted applications -* Supports rate limiting of traffic -* Supports multi-threaded uploads and downloads +* Compatible with OneDrive Personal, OneDrive for Business including accessing 
Microsoft SharePoint Libraries +* Provides rules for client-side filtering to select data for syncing with Microsoft OneDrive accounts +* Caches sync state for efficiency +* Supports a dry-run option for safe configuration testing +* Validates file transfers to ensure data integrity +* Monitors local files in real-time using inotify +* Supports interrupted uploads for completion at a later time +* Capability to sync remote updates immediately via webhooks +* Enhanced synchronisation speed with multi-threaded file transfers +* Manages traffic bandwidth use with rate limiting +* Supports seamless access to shared folders and files across both OneDrive Personal and OneDrive for Business accounts +* Supports national cloud deployments including Microsoft Cloud for US Government, Microsoft Cloud Germany and Azure and Office 365 operated by 21Vianet in China +* Supports sending desktop alerts using libnotify +* Protects against significant data loss on OneDrive after configuration changes +* Works with both single and multi-tenant applications ## What's missing * Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive @@ -68,8 +67,8 @@ Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/doc ### Configuration and Usage Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md) -### Configure OneDrive Business Shared Folders -Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md) +### Configure OneDrive Business Shared Items +Refer to [docs/business-shared-items.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-items.md) ### Configure SharePoint / Office 365 Shared Libraries (Business or Education) Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md) diff --git a/src/arsd/cgi.d b/src/arsd/cgi.d index 79f5feaa..d9a3e6bd
100644 --- a/src/arsd/cgi.d +++ b/src/arsd/cgi.d @@ -683,6 +683,7 @@ enum long defaultMaxContentLength = 5_000_000; public import std.string; public import std.stdio; public import std.conv; +import std.concurrency; import std.uri; import std.uni; import std.algorithm.comparison; @@ -3910,14 +3911,16 @@ struct RequestServer { If you want the forking worker process server, you do need to compile with the embedded_httpd_processes config though. +/ - void serveEmbeddedHttp(alias fun, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(ThisFor!fun _this) { + shared void serveEmbeddedHttp(alias fun, T, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(shared T _this) { globalStopFlag = false; static if(__traits(isStaticFunction, fun)) - alias funToUse = fun; + void funToUse(CustomCgi cgi) { + fun(_this, cgi); + } else void funToUse(CustomCgi cgi) { static if(__VERSION__ > 2097) - __traits(child, _this, fun)(cgi); + __traits(child, _inst_this, fun)(_inst_this, cgi); else static assert(0, "Not implemented in your compiler version!"); } auto manager = new ListeningConnectionManager(listeningHost, listeningPort, &doThreadHttpConnection!(CustomCgi, funToUse), null, useFork, numberOfThreads); diff --git a/src/clientSideFiltering.d b/src/clientSideFiltering.d index d20ba2f8..5e9a70fc 100644 --- a/src/clientSideFiltering.d +++ b/src/clientSideFiltering.d @@ -20,7 +20,6 @@ class ClientSideFiltering { // Class variables ApplicationConfig appConfig; string[] paths; - string[] businessSharedItemsList; Regex!char fileMask; Regex!char directoryMask; bool skipDirStrictMatch = false; @@ -41,11 +40,6 @@ class ClientSideFiltering { loadSyncList(appConfig.syncListFilePath); } - // Load the Business Shared Items file if it exists - if (exists(appConfig.businessSharedItemsFilePath)){ - loadBusinessSharedItems(appConfig.businessSharedItemsFilePath); - } - // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries // Handle 
skip_dir configuration in config file addLogEntry("Configuring skip_dir ...", ["debug"]); @@ -91,7 +85,6 @@ class ClientSideFiltering { void shutdown() { object.destroy(appConfig); object.destroy(paths); - object.destroy(businessSharedItemsList); object.destroy(fileMask); object.destroy(directoryMask); } @@ -109,19 +102,6 @@ class ClientSideFiltering { file.close(); } - // load business_shared_folders file - void loadBusinessSharedItems(string filepath) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedItemsList ~= buildNormalizedPath(line); - } - file.close(); - } - // Configure the regex that will be used for 'skip_file' void setFileMask(const(char)[] mask) { fileMask = wild2regex(mask); diff --git a/src/config.d b/src/config.d index 2166c132..e94e9b1a 100644 --- a/src/config.d +++ b/src/config.d @@ -41,6 +41,8 @@ class ApplicationConfig { immutable string defaultLogFileDir = "/var/log/onedrive"; // - Default configuration directory immutable string defaultConfigDirName = "~/.config/onedrive"; + // - Default 'OneDrive Business Shared Files' Folder Name + immutable string defaultBusinessSharedFilesDirectoryName = "Files Shared With Me"; // Microsoft Requirements // - Default Application ID (abraunegg) @@ -106,7 +108,6 @@ class ApplicationConfig { bool debugLogging = false; long verbosityCount = 0; - // Was the application just authorised - paste of response uri bool applicationAuthorizeResponseUri = false; @@ -121,6 +122,7 @@ class ApplicationConfig { // Store the 'session_upload.CRC32-HASH' file path string uploadSessionFilePath = ""; + // API initialisation flags bool apiWasInitialised = false; bool syncEngineWasInitialised = false; @@ -161,25 +163,23 @@ class ApplicationConfig { private string applicableConfigFilePath = ""; // - Store the 'sync_list' file path string 
syncListFilePath = ""; - // - Store the 'business_shared_items' file path - string businessSharedItemsFilePath = ""; + // OneDrive Business Shared File handling - what directory will be used? + string configuredBusinessSharedFilesDirectoryName = ""; + // Hash files so that we can detect when the configuration has changed, in items that will require a --resync private string configHashFile = ""; private string configBackupFile = ""; private string syncListHashFile = ""; - private string businessSharedItemsHashFile = ""; // Store the actual 'runtime' hash private string currentConfigHash = ""; private string currentSyncListHash = ""; - private string currentBusinessSharedItemsHash = ""; // Store the previous config files hash values (file contents) private string previousConfigHash = ""; private string previousSyncListHash = ""; - private string previousBusinessSharedItemsHash = ""; - + // Store items that come in from the 'config' file, otherwise these need to be set the the defaults private string configFileSyncDir = defaultSyncDir; private string configFileSkipFile = defaultSkipFile; @@ -197,7 +197,6 @@ class ApplicationConfig { string[string] stringValues; long[string] longValues; bool[string] boolValues; - bool shellEnvironmentSet = false; // Initialise the application configuration @@ -275,7 +274,7 @@ class ApplicationConfig { longValues["ip_protocol_version"] = defaultIpProtocol; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only // Number of concurrent threads - longValues["threads"] = defaultConcurrentThreads; // Default is 8, user can increase or decrease + longValues["threads"] = defaultConcurrentThreads; // Default is 8, user can increase to max of 16 or decrease // - Do we wish to upload only? 
boolValues["upload_only"] = false; @@ -469,9 +468,6 @@ class ApplicationConfig { // - What is the full path for the system 'config' file if it is required systemConfigFilePath = buildNormalizedPath(buildPath(systemConfigDirName, "config")); - // - What is the full path for the 'business_shared_items' - businessSharedItemsFilePath = buildNormalizedPath(buildPath(configDirName, "business_shared_items")); - // To determine if any configuration items has changed, where a --resync would be required, we need to have a hash file for the following items // - 'config.backup' file // - applicable 'config' file @@ -480,8 +476,7 @@ class ApplicationConfig { configBackupFile = buildNormalizedPath(buildPath(configDirName, ".config.backup")); configHashFile = buildNormalizedPath(buildPath(configDirName, ".config.hash")); syncListHashFile = buildNormalizedPath(buildPath(configDirName, ".sync_list.hash")); - businessSharedItemsHashFile = buildNormalizedPath(buildPath(configDirName, ".business_shared_items.hash")); - + // Debug Output for application set variables based on configDirName addLogEntry("refreshTokenFilePath = " ~ refreshTokenFilePath, ["debug"]); addLogEntry("deltaLinkFilePath = " ~ deltaLinkFilePath, ["debug"]); @@ -494,8 +489,6 @@ class ApplicationConfig { addLogEntry("configBackupFile = " ~ configBackupFile, ["debug"]); addLogEntry("configHashFile = " ~ configHashFile, ["debug"]); addLogEntry("syncListHashFile = " ~ syncListHashFile, ["debug"]); - addLogEntry("businessSharedItemsFilePath = " ~ businessSharedItemsFilePath, ["debug"]); - addLogEntry("businessSharedItemsHashFile = " ~ businessSharedItemsHashFile, ["debug"]); // Configure the Hash and Backup File Permission Value string valueToConvert = to!string(defaultFilePermissionMode); @@ -900,6 +893,7 @@ class ApplicationConfig { boolValues["synchronize"] = false; boolValues["force"] = false; boolValues["list_business_shared_items"] = false; + boolValues["sync_business_shared_files"] = false; 
boolValues["force_sync"] = false; boolValues["with_editing_perms"] = false; @@ -995,6 +989,12 @@ class ApplicationConfig { "get-O365-drive-id", "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library (DEPRECIATED)", &stringValues["sharepoint_library_name"], + "list-shared-items", + "List OneDrive Business Shared Items", + &boolValues["list_business_shared_items"], + "sync-shared-files", + "Sync OneDrive Business Shared Files to the local filesystem", + &boolValues["sync_business_shared_files"], "local-first", "Synchronize from the local directory source first, before downloading changes from OneDrive.", &boolValues["local_first"], @@ -1365,20 +1365,7 @@ class ApplicationConfig { // Is sync_business_shared_items enabled and configured ? addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering addLogEntry("Config option 'sync_business_shared_items' = " ~ to!string(getValueBool("sync_business_shared_items"))); - - if (exists(businessSharedItemsFilePath)){ - addLogEntry("Selective Business Shared Items configured = true"); - addLogEntry("sync_business_shared_items contents:"); - // Output the sync_business_shared_items contents - auto businessSharedItemsFileList = File(businessSharedItemsFilePath, "r"); - auto range = businessSharedItemsFileList.byLine(); - foreach (line; range) - { - addLogEntry(to!string(line)); - } - } else { - addLogEntry("Selective Business Shared Items configured = false"); - } + addLogEntry("Config option 'Shared Files Directory' = " ~ configuredBusinessSharedFilesDirectoryName); // Are webhooks enabled? 
addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering @@ -1518,9 +1505,6 @@ class ApplicationConfig { if (currentSyncListHash != previousSyncListHash) logAndSetDifference("sync_list file has been updated, --resync needed", 0); - if (currentBusinessSharedItemsHash != previousBusinessSharedItemsHash) - logAndSetDifference("business_shared_folders file has been updated, --resync needed", 1); - // Check for updates in the config file if (currentConfigHash != previousConfigHash) { addLogEntry("Application configuration file has been updated, checking if --resync needed"); @@ -1665,7 +1649,14 @@ class ApplicationConfig { break; } } - + + // Final override + // In certain situations, regardless of config 'resync' needed status, ignore this so that the application can display 'non-syncable' information + // Options that should now be looked at are: + // --list-shared-items + if (getValueBool("list_business_shared_items")) resyncRequired = false; + + // Return the calculated boolean return resyncRequired; } @@ -1676,7 +1667,6 @@ class ApplicationConfig { addLogEntry("Cleaning up configuration hash files", ["debug"]); safeRemove(configHashFile); safeRemove(syncListHashFile); - safeRemove(businessSharedItemsHashFile); } else { // --dry-run scenario ... technically we should not be making any local file changes ....... 
addLogEntry("DRY RUN: Not removing hash files as --dry-run has been used"); @@ -1704,17 +1694,6 @@ class ApplicationConfig { // Hash file should only be readable by the user who created it - 0600 permissions needed syncListHashFile.setAttributes(convertedPermissionValue); } - - - // Update 'update business_shared_items' files - if (exists(businessSharedItemsFilePath)) { - // update business_shared_folders hash - addLogEntry("Updating business_shared_items hash", ["debug"]); - std.file.write(businessSharedItemsHashFile, computeQuickXorHash(businessSharedItemsFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedItemsHashFile.setAttributes(convertedPermissionValue); - } - } else { // --dry-run scenario ... technically we should not be making any local file changes ....... addLogEntry("DRY RUN: Not updating hash files as --dry-run has been used"); @@ -1746,18 +1725,6 @@ class ApplicationConfig { // Generate the runtime hash for the 'sync_list' file currentSyncListHash = computeQuickXorHash(syncListFilePath); } - - // Does a 'business_shared_items' file exist with a valid hash file - if (exists(businessSharedItemsFilePath)) { - if (!exists(businessSharedItemsHashFile)) { - // no existing hash file exists - std.file.write(businessSharedItemsHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedItemsHashFile.setAttributes(convertedPermissionValue); - } - // Generate the runtime hash for the 'sync_list' file - currentBusinessSharedItemsHash = computeQuickXorHash(businessSharedItemsFilePath); - } } // Read in the text values of the previous configurations @@ -1783,16 +1750,7 @@ class ApplicationConfig { return EXIT_FAILURE; } } - if (exists(businessSharedItemsHashFile)) { - try { - previousBusinessSharedItemsHash = readText(businessSharedItemsHashFile); - } catch (std.file.FileException e) { - // Unable to access required hash 
file - addLogEntry("ERROR: Unable to access " ~ e.msg); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } + return 0; } @@ -1850,10 +1808,22 @@ class ApplicationConfig { // --list-shared-folders cannot be used with --resync and/or --resync-auth if ((getValueBool("list_business_shared_items")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { - addLogEntry("ERROR: --list-shared-folders cannot be used with --resync or --resync-auth"); + addLogEntry("ERROR: --list-shared-items cannot be used with --resync or --resync-auth"); operationalConflictDetected = true; } + // --list-shared-items cannot be used with --sync or --monitor + if ((getValueBool("list_business_shared_items")) && ((getValueBool("synchronize")) || (getValueBool("monitor")))) { + addLogEntry("ERROR: --list-shared-items cannot be used with --sync or --monitor"); + operationalConflictDetected = true; + } + + // --sync-shared-files can ONLY be used with sync_business_shared_items + if ((getValueBool("sync_business_shared_files")) && (!getValueBool("sync_business_shared_items"))) { + addLogEntry("ERROR: The --sync-shared-files option can only be utilised if the 'sync_business_shared_items' configuration setting is enabled."); + operationalConflictDetected = true; + } + // --display-sync-status cannot be used with --resync and/or --resync-auth if ((getValueBool("display_sync_status")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { addLogEntry("ERROR: --display-sync-status cannot be used with --resync or --resync-auth"); operationalConflictDetected = true; } @@ -2057,6 +2027,9 @@ class ApplicationConfig { // What will runtimeSyncDirectory be actually set to? 
addLogEntry("sync_dir: runtimeSyncDirectory set to: " ~ runtimeSyncDirectory, ["debug"]); + // Configure configuredBusinessSharedFilesDirectoryName + configuredBusinessSharedFilesDirectoryName = buildNormalizedPath(buildPath(runtimeSyncDirectory, defaultBusinessSharedFilesDirectoryName)); + return runtimeSyncDirectory; } diff --git a/src/itemdb.d b/src/itemdb.d index c6659054..7ebfdfe1 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -18,6 +18,7 @@ import util; import log; enum ItemType { + none, file, dir, remote, @@ -37,7 +38,9 @@ struct Item { string quickXorHash; string sha256Hash; string remoteDriveId; + string remoteParentId; string remoteId; + ItemType remoteType; string syncStatus; string size; } @@ -144,8 +147,27 @@ Item makeDatabaseItem(JSONValue driveItem) { // Is the object a remote drive item - living on another driveId ? if (isItemRemote(driveItem)) { - item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; - item.remoteId = driveItem["remoteItem"]["id"].str; + // Check and assign remoteDriveId + if ("parentReference" in driveItem["remoteItem"] && "driveId" in driveItem["remoteItem"]["parentReference"]) { + item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; + } + + // Check and assign remoteParentId + if ("parentReference" in driveItem["remoteItem"] && "id" in driveItem["remoteItem"]["parentReference"]) { + item.remoteParentId = driveItem["remoteItem"]["parentReference"]["id"].str; + } + + // Check and assign remoteId + if ("id" in driveItem["remoteItem"]) { + item.remoteId = driveItem["remoteItem"]["id"].str; + } + + // Check and assign remoteType + if ("file" in driveItem["remoteItem"].object) { + item.remoteType = ItemType.file; + } else { + item.remoteType = ItemType.dir; + } } // We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not: @@ -165,7 +187,7 @@ Item makeDatabaseItem(JSONValue driveItem) { final class ItemDatabase { // increment this 
for every change in the db schema - immutable int itemDatabaseVersion = 12; + immutable int itemDatabaseVersion = 13; Database db; string insertItemStmt; @@ -236,12 +258,12 @@ final class ItemDatabase { db.exec("PRAGMA locking_mode = EXCLUSIVE"); insertItemStmt = " - INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus, size) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15) + INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteParentId, remoteId, remoteType, syncStatus, size) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17) "; updateItemStmt = " UPDATE item - SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteId = ?13, syncStatus = ?14, size = ?15 + SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteParentId = ?13, remoteId = ?14, remoteType = ?15, syncStatus = ?16, size = ?17 WHERE driveId = ?1 AND id = ?2 "; selectItemByIdStmt = " @@ -279,7 +301,9 @@ final class ItemDatabase { quickXorHash TEXT, sha256Hash TEXT, remoteDriveId TEXT, + remoteParentId TEXT, remoteId TEXT, + remoteType TEXT, deltaLink TEXT, syncStatus TEXT, size TEXT, @@ -447,12 +471,14 @@ final class ItemDatabase { bind(2, id); bind(3, name); bind(4, remoteName); + // type handling string typeStr = null; final switch (type) with (ItemType) { case file: typeStr = "file"; break; case dir: typeStr = "dir"; break; case remote: typeStr = "remote"; break; case unknown: typeStr = "unknown"; break; + case none: typeStr = null; break; } bind(5, typeStr); bind(6, eTag); @@ -462,15 +488,26 @@ final class ItemDatabase { bind(10, quickXorHash); 
bind(11, sha256Hash); bind(12, remoteDriveId); - bind(13, remoteId); - bind(14, syncStatus); - bind(15, size); + bind(13, remoteParentId); + bind(14, remoteId); + // remoteType handling + string remoteTypeStr = null; + final switch (remoteType) with (ItemType) { + case file: remoteTypeStr = "file"; break; + case dir: remoteTypeStr = "dir"; break; + case remote: remoteTypeStr = "remote"; break; + case unknown: remoteTypeStr = "unknown"; break; + case none: remoteTypeStr = null; break; + } + bind(15, remoteTypeStr); + bind(16, syncStatus); + bind(17, size); } } private Item buildItem(Statement.Result result) { assert(!result.empty, "The result must not be empty"); - assert(result.front.length == 16, "The result must have 16 columns"); + assert(result.front.length == 18, "The result must have 18 columns"); Item item = { // column 0: driveId @@ -485,10 +522,12 @@ final class ItemDatabase { // column 9: quickXorHash // column 10: sha256Hash // column 11: remoteDriveId - // column 12: remoteId - // column 13: deltaLink - // column 14: syncStatus - // column 15: size + // column 12: remoteParentId + // column 13: remoteId + // column 14: remoteType + // column 15: deltaLink + // column 16: syncStatus + // column 17: size driveId: result.front[0].dup, id: result.front[1].dup, @@ -502,17 +541,30 @@ final class ItemDatabase { quickXorHash: result.front[9].dup, sha256Hash: result.front[10].dup, remoteDriveId: result.front[11].dup, - remoteId: result.front[12].dup, - // Column 13 is deltaLink - not set here - syncStatus: result.front[14].dup, - size: result.front[15].dup + remoteParentId: result.front[12].dup, + remoteId: result.front[13].dup, + // Column 14 is remoteType - not set here + // Column 15 is deltaLink - not set here + syncStatus: result.front[16].dup, + size: result.front[17].dup }; + // Configure item.type switch (result.front[4]) { case "file": item.type = ItemType.file; break; case "dir": item.type = ItemType.dir; break; case "remote": item.type = 
ItemType.remote; break; default: assert(0, "Invalid item type"); } + + // Configure item.remoteType + switch (result.front[14]) { + // We only care about 'dir' and 'file' for 'remote' items + case "file": item.remoteType = ItemType.file; break; + case "dir": item.remoteType = ItemType.dir; break; + default: item.remoteType = ItemType.none; break; // Default to ItemType.none + } + + // Return item return item; } diff --git a/src/main.d b/src/main.d index 3b2d8408..f70ec631 100644 --- a/src/main.d +++ b/src/main.d @@ -32,6 +32,7 @@ import syncEngine; import itemdb; import clientSideFiltering; import monitor; +import webhook; // What other constant variables do we require? @@ -39,7 +40,7 @@ const int EXIT_RESYNC_REQUIRED = 126; // Class objects ApplicationConfig appConfig; -OneDriveApi oneDriveApiInstance; +OneDriveWebhook oneDriveWebhook; SyncEngine syncEngineInstance; ItemDatabase itemDB; ClientSideFiltering selectiveSync; @@ -341,19 +342,25 @@ int main(string[] cliArgs) { processResyncDatabaseRemoval(runtimeDatabaseFile); } } else { - // Has any of our application configuration that would require a --resync been changed? - if (appConfig.applicationChangeWhereResyncRequired()) { - // Application configuration has changed however --resync not issued, fail fast - addLogEntry(); - addLogEntry("An application configuration change has been detected where a --resync is required"); - addLogEntry(); - return EXIT_RESYNC_REQUIRED; - } else { - // No configuration change that requires a --resync to be issued - // Make a backup of the applicable configuration file - appConfig.createBackupConfigFile(); - // Update hash files and generate a new config backup - appConfig.updateHashContentsForConfigFiles(); + // Is the application currently authenticated? 
If not, it is pointless checking if a --resync is required until the application is authenticated + if (exists(appConfig.refreshTokenFilePath)) { + // Has any of our application configuration that would require a --resync been changed? + if (appConfig.applicationChangeWhereResyncRequired()) { + // Application configuration has changed however --resync not issued, fail fast + addLogEntry(); + addLogEntry("An application configuration change has been detected where a --resync is required"); + addLogEntry(); + return EXIT_RESYNC_REQUIRED; + } else { + // No configuration change that requires a --resync to be issued + // Special cases need to be checked - if these options were enabled, it creates a false 'Resync Required' flag, so do not create a backup + if ((!appConfig.getValueBool("list_business_shared_items"))) { + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); + } + } } } @@ -416,13 +423,16 @@ int main(string[] cliArgs) { // Initialise the OneDrive API addLogEntry("Attempting to initialise the OneDrive API ...", ["verbose"]); - oneDriveApiInstance = new OneDriveApi(appConfig); + OneDriveApi oneDriveApiInstance = new OneDriveApi(appConfig); appConfig.apiWasInitialised = oneDriveApiInstance.initialise(); if (appConfig.apiWasInitialised) { addLogEntry("The OneDrive API was initialised successfully", ["verbose"]); // Flag that we were able to initalise the API in the application config oneDriveApiInstance.debugOutputConfiguredAPIItems(); + + oneDriveApiInstance.shutdown(); + object.destroy(oneDriveApiInstance); // Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations addLogEntry("Opening the item database ...", ["verbose"]); @@ -447,8 +457,9 @@ int main(string[] cliArgs) { // Are we performing some sort of 'no-sync' task? 
// - Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library? // - Are we displaying the sync satus? - // - Are we getting the URL for a file online - // - Are we listing who modified a file last online + // - Are we getting the URL for a file online? + // - Are we listing who modified a file last online? + // - Are we listing OneDrive Business Shared Items? // - Are we createing a shareable link for an existing file on OneDrive? // - Are we just creating a directory online, without any sync being performed? // - Are we just deleting a directory online, without any sync being performed? @@ -500,6 +511,20 @@ int main(string[] cliArgs) { return EXIT_SUCCESS; } + // --list-shared-items - Are we listing OneDrive Business Shared Items + if (appConfig.getValueBool("list_business_shared_items")) { + // Is this a business account type? + if (appConfig.accountType == "business") { + // List OneDrive Business Shared Items + syncEngineInstance.listBusinessSharedObjects(); + } else { + addLogEntry("ERROR: Unsupported account type for listing OneDrive Business Shared Items"); + } + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + // --create-share-link - Are we createing a shareable link for an existing file on OneDrive? if (appConfig.getValueString("create_share_link") != "") { // Query OneDrive for the file, and if valid, create a shareable link for the file @@ -851,15 +876,16 @@ int main(string[] cliArgs) { addLogEntry("ERROR: The following inotify error was generated: " ~ e.msg); } } - - // Webhook Notification reset to false for this loop - notificationReceived = false; // Check for notifications pushed from Microsoft to the webhook if (webhookEnabled) { // Create a subscription on the first run, or renew the subscription // on subsequent runs when it is about to expire. 
- oneDriveApiInstance.createOrRenewSubscription(); + if (oneDriveWebhook is null) { + oneDriveWebhook = new OneDriveWebhook(thisTid, appConfig); + oneDriveWebhook.serve(); + } else + oneDriveWebhook.createOrRenewSubscription(); } // Get the current time this loop is starting @@ -1004,19 +1030,21 @@ int main(string[] cliArgs) { if(filesystemMonitor.initialised || webhookEnabled) { if(filesystemMonitor.initialised) { - // If local monitor is on + // If local monitor is on and is waiting (previous event was not from webhook) // start the worker and wait for event - filesystemMonitor.send(true); + if (!notificationReceived) + filesystemMonitor.send(true); } if(webhookEnabled) { // if onedrive webhook is enabled // update sleep time based on renew interval - Duration nextWebhookCheckDuration = oneDriveApiInstance.getNextExpirationCheckDuration(); + Duration nextWebhookCheckDuration = oneDriveWebhook.getNextExpirationCheckDuration(); if (nextWebhookCheckDuration < sleepTime) { sleepTime = nextWebhookCheckDuration; addLogEntry("Update sleeping time to " ~ to!string(sleepTime), ["debug"]); } + // Webhook Notification reset to false for this loop notificationReceived = false; } @@ -1042,17 +1070,17 @@ int main(string[] cliArgs) { // do not contain any actual changes, and we will always rely do the // delta endpoint to sync to latest. Therefore, only one sync run is // good enough to catch up for multiple notifications. - int signalCount = notificationReceived ? 
1 : 0; - for (;; signalCount++) { - signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); - if (signalExists) { - notificationReceived = true; - } else { - if (notificationReceived) { + if (notificationReceived) { + int signalCount = 1; + while (true) { + signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); + if (signalExists) { + signalCount++; + } else { addLogEntry("Received " ~ to!string(signalCount) ~ " refresh signals from the webhook"); oneDriveWebhookCallback(); + break; } - break; } } @@ -1091,11 +1119,10 @@ void performStandardExitProcess(string scopeCaller = null) { addLogEntry("Running performStandardExitProcess due to: " ~ scopeCaller, ["debug"]); } - // Shutdown the OneDrive API instance - if (oneDriveApiInstance !is null) { - addLogEntry("Shutdown OneDrive API instance", ["debug"]); - oneDriveApiInstance.shutdown(); - object.destroy(oneDriveApiInstance); + // Shutdown the OneDrive Webhook instance + if (oneDriveWebhook !is null) { + oneDriveWebhook.stop(); + object.destroy(oneDriveWebhook); } // Shutdown the sync engine @@ -1145,7 +1172,7 @@ void performStandardExitProcess(string scopeCaller = null) { addLogEntry("Setting ALL Class Objects to null due to failure scope", ["debug"]); itemDB = null; appConfig = null; - oneDriveApiInstance = null; + oneDriveWebhook = null; selectiveSync = null; syncEngineInstance = null; } else { diff --git a/src/monitor.d b/src/monitor.d index 1368df7e..3d67cc2b 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -488,7 +488,9 @@ final class Monitor { while (true) { bool hasNotification = false; - while (true) { + int sleep_counter = 0; + // Batch events up to 5 seconds + while (sleep_counter < 5) { int ret = poll(&fds, 1, 0); if (ret == -1) throw new MonitorException("poll failed"); else if (ret == 0) break; // no events available @@ -621,7 +623,12 @@ final class Monitor { skip: i += inotify_event.sizeof + event.len; } - Thread.sleep(dur!"seconds"(1)); + + // Sleep for one second to prevent missing 
fast-changing events. + if (poll(&fds, 1, 0) == 0) { + sleep_counter += 1; + Thread.sleep(dur!"seconds"(1)); + } } if (!hasNotification) break; processChanges(); diff --git a/src/onedrive.d b/src/onedrive.d index 745b5511..f38ddd6f 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -22,9 +22,6 @@ import std.uri; import std.array; // Required for webhooks -import arsd.cgi; -import std.concurrency; -import core.atomic : atomicOp; import std.uuid; // What other modules that we have created do we need to import? @@ -56,116 +53,10 @@ class OneDriveException: Exception { } } -class OneDriveWebhook { - // We need OneDriveWebhook.serve to be a static function, otherwise we would hit the member function - // "requires a dual-context, which is deprecated" warning. The root cause is described here: - // - https://issues.dlang.org/show_bug.cgi?id=5710 - // - https://forum.dlang.org/post/fkyppfxzegenniyzztos@forum.dlang.org - // The problem is deemed a bug and should be fixed in the compilers eventually. The singleton stuff - // could be undone when it is fixed. 
- // - // Following the singleton pattern described here: https://wiki.dlang.org/Low-Lock_Singleton_Pattern - // Cache instantiation flag in thread-local bool - // Thread local - private static bool instantiated_; - private RequestServer server; - - // Thread global - private __gshared OneDriveWebhook instance_; - - private string host; - private ushort port; - private Tid parentTid; - private shared uint count; - private bool started; - - static OneDriveWebhook getOrCreate(string host, ushort port, Tid parentTid) { - if (!instantiated_) { - synchronized(OneDriveWebhook.classinfo) { - if (!instance_) { - instance_ = new OneDriveWebhook(host, port, parentTid); - } - - instantiated_ = true; - } - } - - return instance_; - } - - private this(string host, ushort port, Tid parentTid) { - this.host = host; - this.port = port; - this.parentTid = parentTid; - this.count = 0; - } - - void serve() { - spawn(&serveStatic); - this.started = true; - addLogEntry("Started webhook server"); - } - - void stop() { - if (this.started) { - server.stop(); - this.started = false; - } - addLogEntry("Stopped webhook server"); - object.destroy(server); - } - - // The static serve() is necessary because spawn() does not like instance methods - private static void serveStatic() { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.serveImpl(); - } - - // The static handle() is necessary to work around the dual-context warning mentioned above - private static void handle(Cgi cgi) { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.handleImpl(cgi); - } - - private void serveImpl() { - server = RequestServer(host, port); - server.serveEmbeddedHttp!handle(); - } - - private void handleImpl(Cgi cgi) { - if (debugHTTPResponseOutput) { - addLogEntry("Webhook request: " ~ 
to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri)); - if (!cgi.postBody.empty) { - addLogEntry("Webhook post body: " ~ to!string(cgi.postBody)); - } - } - - cgi.setResponseContentType("text/plain"); - - if ("validationToken" in cgi.get) { - // For validation requests, respond with the validation token passed in the query string - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request - cgi.write(cgi.get["validationToken"]); - addLogEntry("Webhook: handled validation request"); - } else { - // Notifications don't include any information about the changes that triggered them. - // Put a refresh signal in the queue and let the main monitor loop process it. - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks - count.atomicOp!"+="(1); - send(parentTid, to!ulong(count)); - cgi.write("OK"); - addLogEntry("Webhook: sent refresh signal #" ~ to!string(count)); - } - } -} - class OneDriveApi { // Class variables ApplicationConfig appConfig; CurlEngine curlEngine; - OneDriveWebhook webhook; string clientId = ""; string companyName = ""; @@ -179,20 +70,14 @@ class OneDriveApi { string itemByPathUrl = ""; string siteSearchUrl = ""; string siteDriveUrl = ""; + string subscriptionUrl = ""; string tenantId = ""; string authScope = ""; const(char)[] refreshToken = ""; bool dryRun = false; bool debugResponse = false; ulong retryAfterValue = 0; - - // Webhook Subscriptions - string subscriptionUrl = ""; - string subscriptionId = ""; - SysTime subscriptionExpiration, subscriptionLastErrorAt; - Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval; - string notificationUrl = ""; - + this(ApplicationConfig appConfig) { // Configure the class varaible to consume the application configuration this.appConfig = appConfig; @@ -214,14 +99,9 @@ class OneDriveApi { siteSearchUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites?search"; siteDriveUrl = 
appConfig.globalGraphEndpoint ~ "/v1.0/sites/"; + // Subscriptions subscriptionUrl = appConfig.globalGraphEndpoint ~ "/v1.0/subscriptions"; - subscriptionExpiration = Clock.currTime(UTC()); - subscriptionLastErrorAt = SysTime.fromUnixTime(0); - subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval")); - subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval")); - subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval")); - notificationUrl = appConfig.getValueString("webhook_public_url"); } // Initialise the OneDrive API class @@ -475,20 +355,6 @@ class OneDriveApi { // Shutdown OneDrive API Curl Engine void shutdown() { - - // Delete subscription if there exists any - try { - deleteSubscription(); - } catch (OneDriveException e) { - logSubscriptionError(e); - } - - // Shutdown webhook server if it is running - if (webhook !is null) { - webhook.stop(); - object.destroy(webhook); - } - // Release curl instance if (curlEngine !is null) { curlEngine.release(); @@ -646,6 +512,13 @@ class OneDriveApi { return get(url); } + // Return all the items that are shared with the user + // https://docs.microsoft.com/en-us/graph/api/drive-sharedwithme + JSONValue getSharedWithMe() { + checkAccessTokenExpired(); + return get(sharedWithMeUrl); + } + // Create a shareable link for an existing file on OneDrive based on the accessScope JSON permissions // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink JSONValue createShareableLink(string driveId, string id, JSONValue accessScope) { @@ -834,6 +707,47 @@ class OneDriveApi { url = siteDriveUrl ~ site_id ~ "/drives"; return get(url); } + + JSONValue createSubscription(string notificationUrl, SysTime expirationDateTime) { + checkAccessTokenExpired(); + string driveId = appConfig.getValueString("drive_id"); + string url = subscriptionUrl; + + // Create a resource item based on if we have 
a driveId + string resourceItem; + if (driveId.length) { + resourceItem = "/drives/" ~ driveId ~ "/root"; + } else { + resourceItem = "/me/drive/root"; + } + + // create JSON request to create webhook subscription + const JSONValue request = [ + "changeType": "updated", + "notificationUrl": notificationUrl, + "resource": resourceItem, + "expirationDateTime": expirationDateTime.toISOExtString(), + "clientState": randomUUID().toString() + ]; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, request.toString()); + } + + JSONValue renewSubscription(string subscriptionId, SysTime expirationDateTime) { + string url; + url = subscriptionUrl ~ "/" ~ subscriptionId; + const JSONValue request = [ + "expirationDateTime": expirationDateTime.toISOExtString() + ]; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, request.toString()); + } + + void deleteSubscription(string subscriptionId) { + string url; + url = subscriptionUrl ~ "/" ~ subscriptionId; + performDelete(url); + } // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) { @@ -893,277 +807,7 @@ class OneDriveApi { retryAfterValue = 0; } - // Create a new subscription or renew the existing subscription - void createOrRenewSubscription() { - checkAccessTokenExpired(); - - // Kick off the webhook server first - if (webhook is null) { - webhook = OneDriveWebhook.getOrCreate( - appConfig.getValueString("webhook_listening_host"), - to!ushort(appConfig.getValueLong("webhook_listening_port")), - thisTid - ); - webhook.serve(); - } - - auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; - if (elapsed < subscriptionRetryInterval) { - return; - } - - try { - if (!hasValidSubscription()) { - createSubscription(); - } else if (isSubscriptionUpForRenewal()) { - renewSubscription(); - } - } catch (OneDriveException 
e) { - logSubscriptionError(e); - subscriptionLastErrorAt = Clock.currTime(UTC()); - addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); - } catch (JSONException e) { - addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg); - subscriptionLastErrorAt = Clock.currTime(UTC()); - addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); - } - } - - // Return the duration to next subscriptionExpiration check - Duration getNextExpirationCheckDuration() { - SysTime now = Clock.currTime(UTC()); - if (hasValidSubscription()) { - Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; - // Check if we are waiting for the next retry - if (elapsed < subscriptionRetryInterval) - return subscriptionRetryInterval - elapsed; - else - return subscriptionExpiration - now - subscriptionRenewalInterval; - } - else - return subscriptionRetryInterval; - } - // Private functions - private bool hasValidSubscription() { - return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC()); - } - - private bool isSubscriptionUpForRenewal() { - return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval; - } - - private void createSubscription() { - addLogEntry("Initializing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - string driveId = appConfig.getValueString("drive_id"); - string url = subscriptionUrl; - - // Create a resource item based on if we have a driveId - string resourceItem; - if (driveId.length) { - resourceItem = "/drives/" ~ driveId ~ "/root"; - } else { - resourceItem = "/me/drive/root"; - } - - // create JSON request to create webhook subscription - const JSONValue request = [ - "changeType": "updated", - "notificationUrl": notificationUrl, - "resource": resourceItem, - "expirationDateTime": 
expirationDateTime.toISOExtString(), - "clientState": randomUUID().toString() - ]; - curlEngine.http.addRequestHeader("Content-Type", "application/json"); - - try { - JSONValue response = post(url, request.toString()); - - // Save important subscription metadata including id and expiration - subscriptionId = response["id"].str; - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); - addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); - } catch (OneDriveException e) { - if (e.httpStatusCode == 409) { - // Take over an existing subscription on HTTP 409. - // - // Sample 409 error: - // { - // "error": { - // "code": "ObjectIdentifierInUse", - // "innerError": { - // "client-request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d", - // "date": "2023-09-26T09:27:45", - // "request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d" - // }, - // "message": "Subscription Id c0bba80e-57a3-43a7-bac2-e6f525a76e7c already exists for the requested combination" - // } - // } - - // Make sure the error code is "ObjectIdentifierInUse" - try { - if (e.error["error"]["code"].str != "ObjectIdentifierInUse") { - throw e; - } - } catch (JSONException jsonEx) { - throw e; - } - - // Extract the existing subscription id from the error message - import std.regex; - auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i"); - auto m = matchFirst(e.error["error"]["message"].str, idReg); - if (!m) { - throw e; - } - - // Save the subscription id and renew it immediately since we don't know the expiration timestamp - subscriptionId = m[0]; - addLogEntry("Found existing subscription " ~ subscriptionId); - renewSubscription(); - } else { - throw e; - } - } - } - - private void renewSubscription() { - addLogEntry("Renewing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - string url; - 
url = subscriptionUrl ~ "/" ~ subscriptionId; - const JSONValue request = [ - "expirationDateTime": expirationDateTime.toISOExtString() - ]; - curlEngine.http.addRequestHeader("Content-Type", "application/json"); - - try { - JSONValue response = patch(url, request.toString()); - - // Update subscription expiration from the response - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); - addLogEntry("Renewed subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - addLogEntry("The subscription is not found on the server. Recreating subscription ..."); - subscriptionId = null; - subscriptionExpiration = Clock.currTime(UTC()); - createSubscription(); - } else { - throw e; - } - } - } - - private void deleteSubscription() { - if (!hasValidSubscription()) { - addLogEntry("No valid Microsoft OneDrive webhook subscription to delete", ["debug"]); - return; - } - - string url; - url = subscriptionUrl ~ "/" ~ subscriptionId; - performDelete(url); - addLogEntry("Deleted Microsoft OneDrive webhook subscription", ["debug"]); - } - - private void logSubscriptionError(OneDriveException e) { - if (e.httpStatusCode == 400) { - // Log known 400 error where Microsoft cannot get a 200 OK from the webhook endpoint - // - // Sample 400 error: - // { - // "error": { - // "code": "InvalidRequest", - // "innerError": { - // "client-request-id": "", - // "date": "", - // "request-id": "" - // }, - // "message": "Subscription validation request failed. Notification endpoint must respond with 200 OK to validation request." 
- // } - // } - - try { - if (e.error["error"]["code"].str == "InvalidRequest") { - import std.regex; - auto msgReg = ctRegex!(r"Subscription validation request failed", "i"); - auto m = matchFirst(e.error["error"]["message"].str, msgReg); - if (m) { - addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint."); - return; - } - } - } catch (JSONException) { - // fallthrough - } - } else if (e.httpStatusCode == 401) { - // Log known 401 error where authentication failed - // - // Sample 401 error: - // { - // "error": { - // "code": "ExtensionError", - // "innerError": { - // "client-request-id": "", - // "date": "", - // "request-id": "" - // }, - // "message": "Operation: Create; Exception: [Status Code: Unauthorized; Reason: Authentication failed]" - // } - // } - - try { - if (e.error["error"]["code"].str == "ExtensionError") { - import std.regex; - auto msgReg = ctRegex!(r"Authentication failed", "i"); - auto m = matchFirst(e.error["error"]["message"].str, msgReg); - if (m) { - addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed."); - return; - } - } - } catch (JSONException) { - // fallthrough - } - } else if (e.httpStatusCode == 403) { - // Log known 403 error where the number of subscriptions on item has exceeded limit - // - // Sample 403 error: - // { - // "error": { - // "code": "ExtensionError", - // "innerError": { - // "client-request-id": "", - // "date": "", - // "request-id": "" - // }, - // "message": "Operation: Create; Exception: [Status Code: Forbidden; Reason: Number of subscriptions on item has exceeded limit]" - // } - // } - try { - if (e.error["error"]["code"].str == "ExtensionError") { - import std.regex; - auto msgReg = ctRegex!(r"Number of subscriptions on item has exceeded limit", "i"); - auto m = matchFirst(e.error["error"]["message"].str, msgReg); - if (m) { - addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded 
limit."); - return; - } - } - } catch (JSONException) { - // fallthrough - } - } - - // Log detailed message for unknown errors - addLogEntry("ERROR: Cannot create or renew subscription."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - private void addAccessTokenHeader() { curlEngine.http.addRequestHeader("Authorization", appConfig.accessToken); } @@ -1947,7 +1591,7 @@ class OneDriveApi { case 403: // OneDrive responded that the user is forbidden addLogEntry("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error", ["verbose"]); - break; + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason); // 404 - Item not found case 404: diff --git a/src/sync.d b/src/sync.d index 15462f66..95284751 100644 --- a/src/sync.d +++ b/src/sync.d @@ -581,38 +581,90 @@ class SyncEngine { // Business Account Shared Items Handling // - OneDrive Business Shared Folder - // - OneDrive Business Shared Files ?? + // - OneDrive Business Shared Files // - SharePoint Links // Get the Remote Items from the Database Item[] remoteItems = itemDB.selectRemoteItems(); foreach (remoteItem; remoteItems) { - // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty - if (appConfig.getValueString("skip_dir") != "") { - // The path that needs to be checked needs to include the '/' - // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(remoteItem.name)) { - // This directory name is excluded - addLogEntry("Skipping item - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); - continue; + // As all remote items are returned, including files, we only want to process directories here + if (remoteItem.remoteType == ItemType.dir) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The 
path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + addLogEntry("Skipping item - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + continue; + } + } + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Syncing this OneDrive Business Shared Folder: " ~ remoteItem.name); + } + + // Debug log output + addLogEntry("Fetching /delta API response for:", ["debug"]); + addLogEntry(" remoteItem.remoteDriveId: " ~ remoteItem.remoteDriveId, ["debug"]); + addLogEntry(" remoteItem.remoteId: " ~ remoteItem.remoteId, ["debug"]); + + // Check this OneDrive Business Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + + // Process any download activities or cleanup actions for this OneDrive Business Shared Folder + processDownloadActivities(); + } + } + + // OneDrive Business Shared File Handling - but only if this option is enabled + if (appConfig.getValueBool("sync_business_shared_files")) { + // We need to create a 'new' local folder in the 'sync_dir' where these shared files & associated folder structure will reside + // Whilst these files are synced locally, the entire folder structure will need to be excluded from syncing back to OneDrive + // But file changes , *if any* , will need to be synced back to the original shared file location + // . 
+ // ├── Files Shared With Me -> Directory should not be created online | Not Synced + // │   └── Display Name (email address) (of Account who shared file) -> Directory should not be created online | Not Synced + // │   │ └── shared file.ext -> File synced with original shared file location on remote drive + // │   │ └── shared file.ext -> File synced with original shared file location on remote drive + // │   │ └── ...... -> File synced with original shared file location on remote drive + // │   └── Display Name (email address) ... + // │ └── shared file.ext .... -> File synced with original shared file location on remote drive + + // Does the Local Folder to store the OneDrive Business Shared Files exist? + if (!exists(appConfig.configuredBusinessSharedFilesDirectoryName)) { + // Folder does not exist locally and needs to be created + addLogEntry("Creating the OneDrive Business Shared Files Local Directory: " ~ appConfig.configuredBusinessSharedFilesDirectoryName); + + // Local folder does not exist, thus needs to be created + mkdirRecurse(appConfig.configuredBusinessSharedFilesDirectoryName); + // As this will not be created online, generate a response so it can be saved to the database + Item sharedFilesPath = makeItem(createFakeResponse(baseName(appConfig.configuredBusinessSharedFilesDirectoryName))); + + // Add DB record to the local database + addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + itemDB.upsert(sharedFilesPath); + } else { + // Folder exists locally, is the folder in the database? 
+ // Query DB for this path + Item dbRecord; + if (!itemDB.selectByPath(baseName(appConfig.configuredBusinessSharedFilesDirectoryName), appConfig.defaultDriveId, dbRecord)) { + // As this will not be created online, generate a response so it can be saved to the database + Item sharedFilesPath = makeItem(createFakeResponse(baseName(appConfig.configuredBusinessSharedFilesDirectoryName))); + + // Add DB record to the local database + addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + itemDB.upsert(sharedFilesPath); } } - // Directory name is not excluded or skip_dir is not populated - if (!appConfig.surpressLoggingOutput) { - addLogEntry("Syncing this OneDrive Business Shared Folder: " ~ remoteItem.name); - } + // Query for OneDrive Business Shared Files + addLogEntry("Checking for any applicable OneDrive Business Shared Files which need to be synced locally", ["verbose"]); + queryBusinessSharedObjects(); - // Debug log output - addLogEntry("Fetching /delta API response for:", ["debug"]); - addLogEntry(" remoteItem.remoteDriveId: " ~ remoteItem.remoteDriveId, ["debug"]); - addLogEntry(" remoteItem.remoteId: " ~ remoteItem.remoteId, ["debug"]); - - // Check this OneDrive Personal Shared Folder for changes - fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); - - // Process any download activities or cleanup actions for this OneDrive Personal Shared Folder + // Download any OneDrive Business Shared Files processDownloadActivities(); } } @@ -1099,14 +1151,15 @@ class SyncEngine { // Compute this deleted items path based on the database entries string localPathToDelete = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; - if (isItemSynced(existingDatabaseItem, localPathToDelete, itemSource)) { // Flag to delete addLogEntry("Flagging to delete item locally: " ~ 
to!string(onedriveJSONItem), ["debug"]); idsToDelete ~= [thisItemDriveId, thisItemId]; } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(localPathToDelete, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(localPathToDelete, dryRun, renamedPath); } } else { // Flag to ignore @@ -1512,6 +1565,7 @@ class SyncEngine { if (fileSizeLimit != 0) { if (onedriveJSONItem["size"].integer >= fileSizeLimit) { addLogEntry("Skipping item - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]); + unwanted = true; } } } @@ -1717,7 +1771,9 @@ class SyncEngine { addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } } } else { @@ -1734,7 +1790,9 @@ class SyncEngine { addLogEntry("WARNING: Local Data Protection has been disabled. 
You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } } @@ -1758,42 +1816,58 @@ class SyncEngine { fileJSONItemsToDownload ~= onedriveJSONItem; break; case ItemType.dir: + handleLocalDirectoryCreation(newDatabaseItem, newItemPath, onedriveJSONItem); + break; case ItemType.remote: - addLogEntry("Creating local directory: " ~ newItemPath); - if (!dryRun) { - try { - // Create the new directory - addLogEntry("Requested path does not exist, creating directory structure: " ~ newItemPath, ["debug"]); - mkdirRecurse(newItemPath); - // Configure the applicable permissions for the folder - addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]); - newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); - // Update the time of the folder to match the last modified time as is provided by OneDrive - // If there are any files then downloaded into this folder, the last modified time will get - // updated by the local Operating System with the latest timestamp - as this is normal operation - // as the directory has been modified - addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ " to " ~ to!string(newDatabaseItem.mtime), ["debug"]); - addLogEntry("Calling setTimes() for this directory: " ~ newItemPath, ["debug"]); - setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); - // Save the item to the database - saveItem(onedriveJSONItem); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } + // Handle remote directory and files differently + if (newDatabaseItem.remoteType == ItemType.dir) { + handleLocalDirectoryCreation(newDatabaseItem, 
newItemPath, onedriveJSONItem); } else { - // we dont create the directory, but we need to track that we 'faked it' - idsFaked ~= [newDatabaseItem.driveId, newDatabaseItem.id]; - // Save the item to the dry-run database - saveItem(onedriveJSONItem); + // Add to the items to download array for processing + fileJSONItemsToDownload ~= onedriveJSONItem; } break; case ItemType.unknown: + case ItemType.none: // Unknown type - we dont action or sync these items break; } } + // Handle create local directory + void handleLocalDirectoryCreation(Item newDatabaseItem, string newItemPath, JSONValue onedriveJSONItem) { + + // Update the logging output to be consistent + addLogEntry("Creating local directory: " ~ "./" ~ buildNormalizedPath(newItemPath)); + if (!dryRun) { + try { + // Create the new directory + addLogEntry("Requested path does not exist, creating directory structure: " ~ newItemPath, ["debug"]); + mkdirRecurse(newItemPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]); + newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + // Update the time of the folder to match the last modified time as is provided by OneDrive + // If there are any files then downloaded into this folder, the last modified time will get + // updated by the local Operating System with the latest timestamp - as this is normal operation + // as the directory has been modified + addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ " to " ~ to!string(newDatabaseItem.mtime), ["debug"]); + addLogEntry("Calling setTimes() for this directory: " ~ newItemPath, ["debug"]); + setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); + // Save the item to the database + saveItem(onedriveJSONItem); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } else { + // we dont create the 
directory, but we need to track that we 'faked it' + idsFaked ~= [newDatabaseItem.driveId, newDatabaseItem.id]; + // Save the item to the dry-run database + saveItem(onedriveJSONItem); + } + } + // If the JSON item IS in the database, this will be an update to an existing in-sync item void applyPotentiallyChangedItem(Item existingDatabaseItem, string existingItemPath, Item changedOneDriveItem, string changedItemPath, JSONValue onedriveJSONItem) { @@ -1807,6 +1881,7 @@ class SyncEngine { SysTime changedOneDriveItemModifiedTime = changedOneDriveItem.mtime; changedOneDriveItemModifiedTime.fracSecs = Duration.zero; + // Did the eTag change? if (existingDatabaseItem.eTag != changedOneDriveItem.eTag) { // The eTag has changed to what we previously cached if (existingItemPath != changedItemPath) { @@ -1827,13 +1902,17 @@ class SyncEngine { // The destination item is different addLogEntry("The destination is occupied with a different item, renaming the conflicting file...", ["verbose"]); // Backup this item, passing in if we are performing a --dry-run or not - safeBackup(changedItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(changedItemPath, dryRun, renamedPath); } } else { // The to be overwritten item is not already in the itemdb, so it should saved to avoid data loss addLogEntry("The destination is occupied by an existing un-synced file, renaming the conflicting file...", ["verbose"]); // Backup this item, passing in if we are performing a --dry-run or not - safeBackup(changedItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(changedItemPath, dryRun, renamedPath); } } @@ -1869,8 +1948,8 @@ class SyncEngine { } // What sort of changed item is this? - // Is it a file, and we did not move it .. - if ((changedOneDriveItem.type == ItemType.file) && (!itemWasMoved)) { + // Is it a file or remote file, and we did not move it .. 
+ if (((changedOneDriveItem.type == ItemType.file) && (!itemWasMoved)) || (((changedOneDriveItem.type == ItemType.remote) && (changedOneDriveItem.remoteType == ItemType.file)) && (!itemWasMoved))) { // The eTag is notorious for being 'changed' online by some backend Microsoft process if (existingDatabaseItem.quickXorHash != changedOneDriveItem.quickXorHash) { // Add to the items to download array for processing - the file hash we previously recorded is not the same as online @@ -1952,11 +2031,11 @@ class SyncEngine { ulong jsonFileSize = 0; // Download item specifics + string downloadItemId = onedriveJSONItem["id"].str; + string downloadItemName = onedriveJSONItem["name"].str; string downloadDriveId = onedriveJSONItem["parentReference"]["driveId"].str; string downloadParentId = onedriveJSONItem["parentReference"]["id"].str; - string downloadItemName = onedriveJSONItem["name"].str; - string downloadItemId = onedriveJSONItem["id"].str; - + // Calculate this items path string newItemPath = computeItemPath(downloadDriveId, downloadParentId) ~ "/" ~ downloadItemName; addLogEntry("JSON Item calculated full path for download is: " ~ newItemPath, ["debug"]); @@ -2020,7 +2099,9 @@ class SyncEngine { addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. Renaming it to avoid potential local data loss."); // Perform the local safeBackup of the existing local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } } @@ -2039,7 +2120,7 @@ class SyncEngine { if ((localActualFreeSpace < freeSpaceReservation) || (jsonFileSize > localActualFreeSpace)) { // localActualFreeSpace is less than freeSpaceReservation .. insufficient free space // jsonFileSize is greater than localActualFreeSpace .. insufficient free space - addLogEntry("Downloading file " ~ newItemPath ~ " ... 
failed!"); + addLogEntry("Downloading file: " ~ newItemPath ~ " ... failed!"); addLogEntry("Insufficient local disk space to download file"); downloadFailed = true; } else { @@ -2049,46 +2130,63 @@ class SyncEngine { OneDriveApi downloadFileOneDriveApiInstance; downloadFileOneDriveApiInstance = new OneDriveApi(appConfig); try { + // Initialise API instance downloadFileOneDriveApiInstance.initialise(); + + // OneDrive Business Shared Files - update the driveId where to get the file from + if (isItemRemote(onedriveJSONItem)) { + downloadDriveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; + } + + // Perform the download downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); downloadFileOneDriveApiInstance.shutdown(); + // Free object and memory object.destroy(downloadFileOneDriveApiInstance); + } catch (OneDriveException exception) { addLogEntry("downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); generated a OneDriveException", ["debug"]); string thisFunctionName = getFunctionName!({}); - // HTTP request returned status code 408,429,503,504 - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // Handle the 429 - if (exception.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(downloadFileOneDriveApiInstance); - addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); - } - // re-try the specific changes queries - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // 408 - Request Time Out - // 503 - Service Unavailable - // 504 - Gateway Timeout - // Transient error - try again in 30 seconds - auto errorArray = splitLines(exception.msg); - addLogEntry(to!string(errorArray[0]) ~ " when attempting to download an item from OneDrive - retrying applicable request in 30 seconds"); - addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); - - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429, 503, 504 - but loop back calling this function - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - downloadFileItem(onedriveJSONItem); + // HTTP request returned status code 403 + if ((exception.httpStatusCode == 403) && (appConfig.getValueBool("sync_business_shared_files"))) { + // We attempted to download a file, that was shared with us, but this was shared with us as read-only and no download permission + addLogEntry("Unable to download this file as this was shared as read-only without download permission: " ~ newItemPath); + downloadFailed = true; } else { - // Default operation if not 408,429,503,504 errors - // display what the error is - displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); - } + // HTTP 
request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(downloadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to download an item from OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + downloadFileItem(onedriveJSONItem); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + } } catch (FileException e) { // There was a file system error // display the error message @@ -2189,12 +2287,16 @@ class SyncEngine { // other account types addLogEntry("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); } - // We do not want this local file to remain on the local file system as it failed the integrity checks - addLogEntry("Removing file " ~ newItemPath ~ " due to failed integrity checks"); - if (!dryRun) { - safeRemove(newItemPath); + + // If the computed hash does not equal provided online hash, consider this a failed download + if (downloadedFileHash != onlineFileHash) { + // We do not want this local file to remain on the local file system as it failed the integrity checks + addLogEntry("Removing file " ~ newItemPath ~ " due to failed integrity checks"); + if (!dryRun) { + safeRemove(newItemPath); + } + downloadFailed = true; } - downloadFailed = true; } } else { // Download validation checks were disabled @@ -2211,7 +2313,7 @@ class SyncEngine { // File should have been downloaded if (!downloadFailed) { // Download did not fail - addLogEntry("Downloading file " ~ newItemPath ~ " ... done"); + addLogEntry("Downloading file: " ~ newItemPath ~ " ... 
done"); // Save this item into the database saveItem(onedriveJSONItem); @@ -2222,102 +2324,92 @@ class SyncEngine { } } else { // Output download failed - addLogEntry("Downloading file " ~ newItemPath ~ " ... failed!"); + addLogEntry("Downloading file: " ~ newItemPath ~ " ... failed!"); // Add the path to a list of items that failed to download - fileDownloadFailures ~= newItemPath; + if (!canFind(fileDownloadFailures, newItemPath)) { + fileDownloadFailures ~= newItemPath; // Add newItemPath if it's not already present + } } } } // Test if the given item is in-sync. Returns true if the given item corresponds to the local one bool isItemSynced(Item item, string path, string itemSource) { - - // This function is typically called when we are processing JSON objects from 'online' - // This function is not used in an --upload-only scenario - if (!exists(path)) return false; - final switch (item.type) { - case ItemType.file: - if (isFile(path)) { - // can we actually read the local file? - if (readLocalFile(path)){ - // local file is readable - SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // Reduce time resolution to seconds before comparing - localModifiedTime.fracSecs = Duration.zero; - itemModifiedTime.fracSecs = Duration.zero; - if (localModifiedTime == itemModifiedTime) { - return true; - } else { - // The file has a different timestamp ... is the hash the same meaning no file modification? - addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); - addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["verbose"]); - - // The file has a different timestamp ... is the hash the same meaning no file modification? 
- // Test the file hash as the date / time stamp is different - // Generating a hash is computationally expensive - we only generate the hash if timestamp was different - if (testFileHash(path, item)) { - // The hash is the same .. so we need to fix-up the timestamp depending on where it is wrong - addLogEntry("Local item has the same hash value as the item online - correcting the applicable file timestamp", ["verbose"]); - // Test if the local timestamp is newer - if (localModifiedTime > itemModifiedTime) { - // Local file is newer .. are we in a --download-only situation? - if (!appConfig.getValueBool("download_only")) { - // --download-only not being used - // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different - addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); - if (!dryRun) { - // Attempt to update the online date time stamp - uploadLastModifiedTime(item.driveId, item.id, localModifiedTime, item.eTag); - return false; - } - } else { - // --download-only is being used ... local file needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? 
- addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]); - if (!dryRun) { - addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); - setTimes(path, item.mtime, item.mtime); - return false; - } - } - } else { - // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different - addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally", ["verbose"]); - if (!dryRun) { - addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); - setTimes(path, item.mtime, item.mtime); - return false; - } - } - } else { - // The hash is different so the content of the file has to be different as to what is stored online - addLogEntry("The local file has a different hash when compared to " ~ itemSource ~ " file hash", ["verbose"]); - return false; - } - } - } else { - // Unable to read local file - addLogEntry("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): " ~ path); - return false; - } - } else { - addLogEntry("The local item is a directory but should be a file", ["verbose"]); + + // Combine common logic for readability and file check into a single block + if (item.type == ItemType.file || ((item.type == ItemType.remote) && (item.remoteType == ItemType.file))) { + // Can we actually read the local file? 
+ if (!readLocalFile(path)) { + // Unable to read local file + addLogEntry("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): " ~ path); + return false; } - break; - case ItemType.dir: - case ItemType.remote: - if (isDir(path)) { + + // Get time values + SysTime localModifiedTime = timeLastModified(path).toUTC(); + SysTime itemModifiedTime = item.mtime; + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + itemModifiedTime.fracSecs = Duration.zero; + + if (localModifiedTime == itemModifiedTime) { return true; } else { - addLogEntry("The local item is a file but should be a directory", ["verbose"]); + // The file has a different timestamp ... is the hash the same meaning no file modification? + addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["verbose"]); + + // The file has a different timestamp ... is the hash the same meaning no file modification? + // Test the file hash as the date / time stamp is different + // Generating a hash is computationally expensive - we only generate the hash if timestamp was different + if (testFileHash(path, item)) { + // The hash is the same .. so we need to fix-up the timestamp depending on where it is wrong + addLogEntry("Local item has the same hash value as the item online - correcting the applicable file timestamp", ["verbose"]); + // Correction logic based on the configuration and the comparison of timestamps + if (localModifiedTime > itemModifiedTime) { + // Local file is newer .. are we in a --download-only situation? 
+ if (!appConfig.getValueBool("download_only") && !dryRun) { + // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); + // Attempt to update the online date time stamp + // We need to use the correct driveId and itemId, especially if we are updating a OneDrive Business Shared File timestamp + if (item.type == ItemType.file) { + // Not a remote file + uploadLastModifiedTime(item, item.driveId, item.id, localModifiedTime, item.eTag); + } else { + // Remote file, remote values need to be used + uploadLastModifiedTime(item, item.remoteDriveId, item.remoteId, localModifiedTime, item.eTag); + } + } else if (!dryRun) { + // --download-only is being used ... local file needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? + addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]); + // Fix the local file timestamp + addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + setTimes(path, item.mtime, item.mtime); + } + } else if (!dryRun) { + // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally", ["verbose"]); + // Fix the local file timestamp + addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + setTimes(path, item.mtime, item.mtime); + } + return false; + } else { + // The hash is different so the content of the file has to be different as to what is stored online + addLogEntry("The local file has a different hash when compared to " ~ itemSource ~ " file hash", ["verbose"]); + return false; + 
} } - break; - case ItemType.unknown: - // Unknown type - return true but we dont action or sync these items + } else if (item.type == ItemType.dir || ((item.type == ItemType.remote) && (item.remoteType == ItemType.dir))) { + // item is a directory + return true; + } else { + // ItemType.unknown or ItemType.none + // Logically, we might not want to sync these items, but a more nuanced approach may be needed based on application context return true; } - return false; } // Get the /delta data using the provided details @@ -2585,7 +2677,7 @@ class SyncEngine { } // Update the timestamp of an object online - void uploadLastModifiedTime(string driveId, string id, SysTime mtime, string eTag) { + void uploadLastModifiedTime(Item originItem, string driveId, string id, SysTime mtime, string eTag) { string itemModifiedTime; itemModifiedTime = mtime.toISOExtString(); @@ -2617,8 +2709,22 @@ class SyncEngine { uploadLastModifiedTimeApiInstance.shutdown(); // Free object and memory object.destroy(uploadLastModifiedTimeApiInstance); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); + + // Do we actually save the response? + // Special case here .. if the DB record item (originItem) is a remote object, thus, if we same the 'response' we will have a DB FOREIGN KEY constraint failed problem + // Update 'originItem.mtime' with the correct timestamp + // Update 'originItem.size' with the correct size from the response + // Update 'originItem.eTag' with the correct eTag from the response + // Update 'originItem.cTag' with the correct cTag from the response + // Update 'originItem.quickXorHash' with the correct quickXorHash from the response + // Everything else should remain the same .. and then save this DB record to the DB .. + // However, we did this, for the local modified file right before calling this function to update the online timestamp ... so .. 
do we need to do this again, effectivly performing a double DB write for the same data? + if ((originItem.type != ItemType.remote) && (originItem.remoteType != ItemType.file)) { + // Save the response JSON + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); + } + } catch (OneDriveException exception) { string thisFunctionName = getFunctionName!({}); @@ -2646,14 +2752,14 @@ class SyncEngine { } // re-try original request - retried for 429, 503, 504 - but loop back calling this function addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - uploadLastModifiedTime(driveId, id, mtime, eTag); + uploadLastModifiedTime(originItem, driveId, id, mtime, eTag); return; } else { // Default operation if not 408,429,503,504 errors if (exception.httpStatusCode == 409) { // ETag does not match current item's value - use a null eTag addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - uploadLastModifiedTime(driveId, id, mtime, null); + uploadLastModifiedTime(originItem, driveId, id, mtime, null); } else { // display what the error is displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); @@ -2786,6 +2892,12 @@ class SyncEngine { // Do we want to onward process this item? bool unwanted = false; + // Remote directory items we can 'skip' + if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.dir)) { + // return .. 
nothing to check here, no logging needed + return; + } + // Compute this dbItem path early as we we use this path often localFilePath = buildNormalizedPath(computeItemPath(dbItem.driveId, dbItem.id)); @@ -2800,22 +2912,25 @@ class SyncEngine { } // Log what we are doing - addLogEntry("Processing " ~ logOutputPath, ["verbose"]); + addLogEntry("Processing: " ~ logOutputPath, ["verbose"]); // Determine which action to take final switch (dbItem.type) { case ItemType.file: - // Logging output + // Logging output result is handled by checkFileDatabaseItemForConsistency checkFileDatabaseItemForConsistency(dbItem, localFilePath); break; case ItemType.dir: - // Logging output + // Logging output result is handled by checkDirectoryDatabaseItemForConsistency checkDirectoryDatabaseItemForConsistency(dbItem, localFilePath, progress); break; case ItemType.remote: - // checkRemoteDirectoryDatabaseItemForConsistency(dbItem, localFilePath); + // DB items that match: dbItem.remoteType == ItemType.dir - these should have been skipped above + // This means that anything that hits here should be: dbItem.remoteType == ItemType.file + checkFileDatabaseItemForConsistency(dbItem, localFilePath); break; case ItemType.unknown: + case ItemType.none: // Unknown type - we dont action these items break; } @@ -2875,10 +2990,21 @@ class SyncEngine { // Local file is newer .. are we in a --download-only situation? 
if (!appConfig.getValueBool("download_only")) { // Not a --download-only scenario - addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); if (!dryRun) { // Attempt to update the online date time stamp - uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); + // We need to use the correct driveId and itemId, especially if we are updating a OneDrive Business Shared File timestamp + if (dbItem.type == ItemType.file) { + // Not a remote file + // Log what is being done + addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); + // Correct timestamp + uploadLastModifiedTime(dbItem, dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); + } else { + // Remote file, remote values need to be used, we may not even have permission to change timestamp, update local file + addLogEntry("The local item has the same hash value as the item online, however file is a OneDrive Business Shared File - correcting local timestamp", ["verbose"]); + addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]); + setTimes(localFilePath, dbItem.mtime, dbItem.mtime); + } } } else { // --download-only being used @@ -3212,7 +3338,7 @@ class SyncEngine { } // Does this JSON item (as received from OneDrive API) get excluded from any operation based on any client side filtering rules? 
- // This function is only used when we are fetching objects from the OneDrive API using a /children query to help speed up what object we query + // This function is used when we are fetching objects from the OneDrive API using a /children query to help speed up what object we query or when checking OneDrive Business Shared Files bool checkJSONAgainstClientSideFiltering(JSONValue onedriveJSONItem) { bool clientSideRuleExcludesPath = false; @@ -3224,7 +3350,7 @@ class SyncEngine { // - skip_file // - skip_dir // - sync_list - // - skip_size (MISSING) + // - skip_size // Return a true|false response // Use the JSON elements rather can computing a DB struct via makeItem() @@ -3440,6 +3566,19 @@ class SyncEngine { } } + // Check if this is excluded by a user set maximum filesize to download + if (!clientSideRuleExcludesPath) { + if (isItemFile(onedriveJSONItem)) { + if (fileSizeLimit != 0) { + if (onedriveJSONItem["size"].integer >= fileSizeLimit) { + addLogEntry("Skipping item - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]); + clientSideRuleExcludesPath = true; + } + } + } + } + + // return if path is excluded return clientSideRuleExcludesPath; } @@ -3467,6 +3606,7 @@ class SyncEngine { string changedItemId = localItemDetails[1]; string localFilePath = localItemDetails[2]; + // Log the path that was modified addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]); // How much space is remaining on OneDrive @@ -3477,18 +3617,37 @@ class SyncEngine { bool skippedMaxSize = false; // Did we skip to an exception error? bool skippedExceptionError = false; + // Flag for if space is available online + bool spaceAvailableOnline = false; + + // When we are uploading OneDrive Business Shared Files, we need to be targetting the right driveId and itemId + string targetDriveId; + string targetItemId; // Unfortunatly, we cant store an array of Item's ... 
so we have to re-query the DB again - unavoidable extra processing here // This is because the Item[] has no other functions to allow is to parallel process those elements, so we have to use a string array as input to this function Item dbItem; itemDB.selectById(changedItemParentId, changedItemId, dbItem); - + + // Is this a remote target? + if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.file)) { + // This is a remote file + targetDriveId = dbItem.remoteDriveId; + targetItemId = dbItem.remoteId; + // we are going to make the assumption here that as this is a OneDrive Business Shared File, that there is space available + spaceAvailableOnline = true; + } else { + // This is not a remote file + targetDriveId = dbItem.driveId; + targetItemId = dbItem.id; + } + // Fetch the details from cachedOnlineDriveData // - cachedOnlineDriveData.quotaRestricted; // - cachedOnlineDriveData.quotaAvailable; // - cachedOnlineDriveData.quotaRemaining; driveDetailsCache cachedOnlineDriveData; - cachedOnlineDriveData = getDriveDetails(dbItem.driveId); + cachedOnlineDriveData = getDriveDetails(targetDriveId); remainingFreeSpace = cachedOnlineDriveData.quotaRemaining; // Get the file size from the actual file @@ -3510,13 +3669,11 @@ class SyncEngine { addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); JSONValue uploadResponse; - bool spaceAvailableOnline = false; - // If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused - // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true - // If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused - // If 'business' accounts, if driveId != 
defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true - // Is there quota available for the given drive where we are uploading to? + // If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused + // If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true if (cachedOnlineDriveData.quotaAvailable) { // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? if (calculatedSpaceOnlinePostUpload > 0) { @@ -3561,7 +3718,7 @@ class SyncEngine { // Upload failed .. why? // No space available online if (!spaceAvailableOnline) { - addLogEntry("Skipping uploading modified file " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]); + addLogEntry("Skipping uploading modified file: " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]); } // File exceeds max allowed size if (skippedMaxSize) { @@ -3570,17 +3727,34 @@ class SyncEngine { // Generic message if (skippedExceptionError) { // normal failure message if API or exception error generated - addLogEntry("Uploading modified file " ~ localFilePath ~ " ... 
failed!", ["info", "notify"]); + // If Issue #2626 | Case 2-1 is triggered, the file we tried to upload was renamed, then uploaded as a new name + if (exists(localFilePath)) { + // Issue #2626 | Case 2-1 was not triggered, file still exists on local filesystem + addLogEntry("Uploading modified file: " ~ localFilePath ~ " ... failed!", ["info", "notify"]); + } } } else { // Upload was successful - addLogEntry("Uploading modified file " ~ localFilePath ~ " ... done.", ["info", "notify"]); + addLogEntry("Uploading modified file: " ~ localFilePath ~ " ... done.", ["info", "notify"]); - // Save JSON item in database - saveItem(uploadResponse); + // What do we save to the DB? Is this a OneDrive Business Shared File? + if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.file)) { + // We need to 'massage' the old DB record, with data from online, as the DB record was specifically crafted for OneDrive Business Shared Files + Item tempItem = makeItem(uploadResponse); + dbItem.eTag = tempItem.eTag; + dbItem.cTag = tempItem.cTag; + dbItem.mtime = tempItem.mtime; + dbItem.quickXorHash = tempItem.quickXorHash; + dbItem.sha256Hash = tempItem.sha256Hash; + dbItem.size = tempItem.size; + itemDB.upsert(dbItem); + } else { + // Save the response JSON item in database as is + saveItem(uploadResponse); + } - // Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accuratly as possible for other threads - updateDriveDetailsCache(dbItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal); + // Update the 'cachedOnlineDriveData' record for this 'targetDriveId' so that this is tracked as accuratly as possible for other threads + updateDriveDetailsCache(targetDriveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal); // Check the integrity of the uploaded modified file if not in a --dry-run scenario if (!dryRun) { @@ -3594,70 
+3768,191 @@ class SyncEngine { // Get the latest eTag, and use that string etagFromUploadResponse = uploadResponse["eTag"].str; // Attempt to update the online date time stamp based on our local data - uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse); + uploadLastModifiedTime(dbItem, targetDriveId, targetItemId, localModifiedTime, etagFromUploadResponse); } } } - // Perform the upload of a locally modified file to OneDrive + // Perform the upload of a locally modified file to OneDrive JSONValue performModifiedFileUpload(Item dbItem, string localFilePath, ulong thisFileSizeLocal) { + // Function variables JSONValue uploadResponse; OneDriveApi uploadFileOneDriveApiInstance; uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); uploadFileOneDriveApiInstance.initialise(); + // Configure JSONValue variables we use for a session upload + JSONValue currentOnlineData; + JSONValue uploadSessionData; + string currentETag; + + // When we are uploading OneDrive Business Shared Files, we need to be targetting the right driveId and itemId + string targetDriveId; + string targetParentId; + string targetItemId; + + // Is this a remote target? + if ((dbItem.type == ItemType.remote) && (dbItem.remoteType == ItemType.file)) { + // This is a remote file + targetDriveId = dbItem.remoteDriveId; + targetParentId = dbItem.remoteParentId; + targetItemId = dbItem.remoteId; + } else { + // This is not a remote file + targetDriveId = dbItem.driveId; + targetParentId = dbItem.parentId; + targetItemId = dbItem.id; + } + // Is this a dry-run scenario? if (!dryRun) { // Do we use simpleUpload or create an upload session? 
bool useSimpleUpload = false; - //if ((appConfig.accountType == "personal") && (thisFileSizeLocal <= sessionThresholdFileSize)) { + // Try and get the absolute latest object details from online + try { + currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsById(targetDriveId, targetItemId); + } catch (OneDriveException exception) { + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to obtain latest file details from OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + } + // Was a valid JSON response provided? + if (currentOnlineData.type() == JSONType.object) { + // Does the response contain an eTag? + if (hasETag(currentOnlineData)) { + // Use the value returned from online + currentETag = currentOnlineData["eTag"].str; + } else { + // Use the database value + currentETag = dbItem.eTag; + } + } else { + // no valid JSON response + currentETag = dbItem.eTag; + } + + // What upload method should be used? if (thisFileSizeLocal <= sessionThresholdFileSize) { useSimpleUpload = true; } + // If the filesize is greater than zero , and we have valid 'latest' online data is the online file matching what we think is in the database? + if ((thisFileSizeLocal > 0) && (currentOnlineData.type() == JSONType.object)) { + // Issue #2626 | Case 2-1 + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - potentially consituting online data loss + Item onlineFile = makeItem(currentOnlineData); + + // Which file is technically newer? The local file or the remote file? + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + SysTime onlineModifiedTime = onlineFile.mtime; + + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + onlineModifiedTime.fracSecs = Duration.zero; + + // Which file is newer? 
If local is newer, it will be uploaded as a modified file in the correct manner + if (localModifiedTime < onlineModifiedTime) { + // Online File is actually newer than the locally modified file + addLogEntry("currentOnlineData: " ~ to!string(currentOnlineData), ["debug"]); + addLogEntry("onlineFile: " ~ to!string(onlineFile), ["debug"]); + addLogEntry("database item: " ~ to!string(dbItem), ["debug"]); + addLogEntry("Skipping uploading this item as a locally modified file, will upload as a new file (online file already exists and is newer): " ~ localFilePath); + + // Online is newer, rename local, then upload the renamed file + // We need to know the renamed path so we can upload it + string renamedPath; + // Rename the local path + safeBackup(localFilePath, dryRun, renamedPath); + // Upload renamed local file as a new file + uploadNewFile(renamedPath); + + // Process the database entry removal for the original file. In a --dry-run scenario, this is being done against a DB copy. + // This is done so we can download the newer online file + itemDB.deleteById(targetDriveId, targetItemId); + + // This file is now uploaded, return from here, but this will trigger a response that the upload failed (technically for the original filename it did, but we renamed it, then uploaded it + return uploadResponse; + } + } + // We can only upload zero size files via simpleFileUpload regardless of account type // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 // Additionally, all files where file size is < 4MB should be uploaded by simpleUploadReplace - everything else should use a session to upload the modified file - if ((thisFileSizeLocal == 0) || (useSimpleUpload)) { // Must use Simple Upload to replace the file online try { - uploadResponse = uploadFileOneDriveApiInstance.simpleUploadReplace(localFilePath, dbItem.driveId, dbItem.id); + uploadResponse = uploadFileOneDriveApiInstance.simpleUploadReplace(localFilePath, targetDriveId, targetItemId); } catch 
(OneDriveException exception) { - + // Function name string thisFunctionName = getFunctionName!({}); - // HTTP request returned status code 408,429,503,504 - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // Handle the 429 - if (exception.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); - addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); - } - // re-try the specific changes queries - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // 408 - Request Time Out - // 503 - Service Unavailable - // 504 - Gateway Timeout - // Transient error - try again in 30 seconds - auto errorArray = splitLines(exception.msg); - addLogEntry(to!string(errorArray[0]) ~ " when attempting to upload a modified file to OneDrive - retrying applicable request in 30 seconds"); - addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); - - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429, 503, 504 - but loop back calling this function - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + + // HTTP request returned status code 403 + if ((exception.httpStatusCode == 403) && (appConfig.getValueBool("sync_business_shared_files"))) { + // We attempted to upload a file, that was shared with us, but this was shared with us as read-only + addLogEntry("Unable to upload this modified file as this was shared as read-only: " ~ localFilePath); } else { - // Default operation if not 408,429,503,504 errors - // display what the error is - displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // Handle all other HTTP status codes + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to upload a modified file to OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } } } catch (FileException e) { @@ -3665,75 +3960,24 @@ class SyncEngine { displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } else { - // Configure JSONValue variables we use for a session upload - JSONValue currentOnlineData; - JSONValue uploadSessionData; - string currentETag; - // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique // The best 
way to do this is generate a 10 digit alphanumeric string, and use this as the file extention string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ generateAlphanumericString(); - // Get the absolute latest object details from online + // Create the upload session try { - currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsByDriveId(dbItem.driveId, localFilePath); - } catch (OneDriveException exception) { - - string thisFunctionName = getFunctionName!({}); - // HTTP request returned status code 408,429,503,504 - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // Handle the 429 - if (exception.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); - addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); - } - // re-try the specific changes queries - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // 408 - Request Time Out - // 503 - Service Unavailable - // 504 - Gateway Timeout - // Transient error - try again in 30 seconds - auto errorArray = splitLines(exception.msg); - addLogEntry(to!string(errorArray[0]) ~ " when attempting to obtain latest file details from OneDrive - retrying applicable request in 30 seconds"); - addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); - - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429, 503, 504 - but loop back calling this function - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); - } else { - // Default operation if not 408,429,503,504 errors - // display what the error is - displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); - } - - } - - // Was a valid JSON response provided? - if (currentOnlineData.type() == JSONType.object) { - // Does the response contain an eTag? - if (hasETag(currentOnlineData)) { - // Use the value returned from online - currentETag = currentOnlineData["eTag"].str; - } else { - // Use the database value - currentETag = dbItem.eTag; - } - } else { - // no valid JSON response - currentETag = dbItem.eTag; - } - - // Create the Upload Session - try { - uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, dbItem.driveId, dbItem.parentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath); + uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, targetDriveId, targetParentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath); } catch (OneDriveException exception) { string thisFunctionName = getFunctionName!({}); + + // HTTP request returned status code 403 + if ((exception.httpStatusCode == 403) && (appConfig.getValueBool("sync_business_shared_files"))) { + // We attempted to upload a file, that was shared with us, but this was shared with us as read-only + addLogEntry("Unable to upload this modified file as this was shared as read-only: " ~ localFilePath); + return uploadResponse; + } + // HTTP request returned status code 408,429,503,504 if 
((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { // Handle the 429 @@ -3770,11 +4014,11 @@ class SyncEngine { displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } - // Perform the Upload using the session + // Perform the upload using the session that has been created try { uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, uploadSessionData, threadUploadSessionFilePath); } catch (OneDriveException exception) { - + // Function name string thisFunctionName = getFunctionName!({}); // HTTP request returned status code 408,429,503,504 if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { @@ -3805,13 +4049,11 @@ class SyncEngine { // display what the error is displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } - } catch (FileException e) { writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Perform the Upload using the session)"); displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } - } else { // We are in a --dry-run scenario uploadResponse = createFakeResponse(localFilePath); @@ -4039,6 +4281,18 @@ class SyncEngine { maxPathLength = 400; } + // OneDrive Business Shared Files Handling - if we make a 'backup' locally of a file shared with us (because we modified it, and then maybe did a --resync), it will be treated as a new file to upload ... 
+ // The issue here is - the 'source' was a shared file - we may not even have permission to upload a 'renamed' file to the shared file's parent folder + // In this case, we need to skip adding this new local file - we do not upload it (we cant , and we should not) + if (appConfig.accountType == "business") { + // Check appConfig.configuredBusinessSharedFilesDirectoryName against 'path' + if (canFind(path, baseName(appConfig.configuredBusinessSharedFilesDirectoryName))) { + // Log why this path is being skipped + addLogEntry("Skipping scanning path for new files as this is reserved for OneDrive Business Shared Files: " ~ path, ["info"]); + return; + } + } + // A short lived item that has already disappeared will cause an error - is the path still valid? if (!exists(path)) { addLogEntry("Skipping item - path has disappeared: " ~ path); @@ -4300,9 +4554,19 @@ class SyncEngine { // Log what we are doing addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate, ["verbose"]); + // Function variables Item parentItem; JSONValue onlinePathData; + // Special Folder Handling: Do NOT create the folder online if it is being used for OneDrive Business Shared Files + // These are local copy files, in a self created directory structure which is not to be replicated online + // Check appConfig.configuredBusinessSharedFilesDirectoryName against 'thisNewPathToCreate' + if (canFind(thisNewPathToCreate, baseName(appConfig.configuredBusinessSharedFilesDirectoryName))) { + // Log why this is being skipped + addLogEntry("Skipping creating '" ~ thisNewPathToCreate ~ "' as this path is used for handling OneDrive Business Shared Files", ["info", "notify"]); + return; + } + // Create a new API Instance for this thread and initialise it OneDriveApi createDirectoryOnlineOneDriveApiInstance; createDirectoryOnlineOneDriveApiInstance = new OneDriveApi(appConfig); @@ -4762,7 +5026,6 @@ class SyncEngine { parentPathFoundInDB = true; } } - } // If the parent 
path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty @@ -4914,15 +5177,42 @@ class SyncEngine { // The local file we are attempting to upload as a new file is different to the existing file online addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); - // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - consituting online data loss - // The file 'version history' online will have to be used to 'recover' the prior online file - string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; - string changedItemId = fileDetailsFromOneDrive["id"].str; - addLogEntry("Skipping uploading this file as moving it to upload as a modified file (online item already exists): " ~ fileToUpload); + // Issue #2626 | Case 2-2 (resync) - // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - potentially consituting online data loss + // The file 'version history' online will have to be used to 'recover' the prior online file + string changedItemParentDriveId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string changedItemId = fileDetailsFromOneDrive["id"].str; + addLogEntry("Skipping uploading this item as a new file, will upload as a modified file (online file already exists): " ~ fileToUpload); + + // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data of the existing online file to the local DB saveItem(fileDetailsFromOneDrive); - uploadChangedLocalFileToOneDrive([changedItemParentId, changedItemId, fileToUpload]); + + // Which file is technically newer? The local file or the remote file? 
+ Item onlineFile = makeItem(fileDetailsFromOneDrive); + SysTime localModifiedTime = timeLastModified(fileToUpload).toUTC(); + SysTime onlineModifiedTime = onlineFile.mtime; + + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + onlineModifiedTime.fracSecs = Duration.zero; + + // Which file is newer? + if (localModifiedTime >= onlineModifiedTime) { + // Upload the locally modified file as-is, as it is newer + uploadChangedLocalFileToOneDrive([changedItemParentDriveId, changedItemId, fileToUpload]); + } else { + // Online is newer, rename local, then upload the renamed file + // We need to know the renamed path so we can upload it + string renamedPath; + // Rename the local path + safeBackup(fileToUpload, dryRun, renamedPath); + // Upload renamed local file as a new file + uploadNewFile(renamedPath); + // Process the database entry removal for the original file. In a --dry-run scenario, this is being done against a DB copy. + // This is done so we can download the newer online file + itemDB.deleteById(changedItemParentDriveId, changedItemId); + } } } catch (OneDriveException exception) { // If we get a 404 .. the file is not online .. this is what we want .. file does not exist online @@ -5268,7 +5558,7 @@ class SyncEngine { string newFileId = uploadResponse["id"].str; string newFileETag = uploadResponse["eTag"].str; // Attempt to update the online date time stamp based on our local data - uploadLastModifiedTime(parentItem.driveId, newFileId, mtime, newFileETag); + uploadLastModifiedTime(parentItem, parentItem.driveId, newFileId, mtime, newFileETag); } } else { // will be removed in different event! 
@@ -5654,9 +5944,10 @@ class SyncEngine { } // Create a fake OneDrive response suitable for use with saveItem - JSONValue createFakeResponse(const(string) path) { - + // Create a fake OneDrive response suitable for use with saveItem + JSONValue createFakeResponse(string path) { import std.digest.sha; + // Generate a simulated JSON response which can be used // At a minimum we need: // 1. eTag @@ -5669,86 +5960,57 @@ class SyncEngine { string fakeDriveId = appConfig.defaultDriveId; string fakeRootId = appConfig.defaultRootId; - SysTime mtime = timeLastModified(path).toUTC(); - - // Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database - // Otherwise some calls to validate objects will fail as the actual driveId being used is invalid - string parentPath = dirName(path); - Item databaseItem; - - if (parentPath != ".") { - // Not a 'root' parent - foreach (searchDriveId; onlineDriveDetails.keys) { - addLogEntry("FakeResponse: searching database for: " ~ searchDriveId ~ " " ~ parentPath, ["debug"]); - - if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) { - addLogEntry("FakeResponse: Found Database Item: " ~ to!string(databaseItem), ["debug"]); - fakeDriveId = databaseItem.driveId; - fakeRootId = databaseItem.id; - } - } - - - - - } - - // real id / eTag / cTag are different format for personal / business account + SysTime mtime = exists(path) ? 
timeLastModified(path).toUTC() : Clock.currTime(UTC()); auto sha1 = new SHA1Digest(); ubyte[] fakedOneDriveItemValues = sha1.digest(path); - JSONValue fakeResponse; - - if (isDir(path)) { - // path is a directory - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(appConfig.accountType), - "id": JSONValue(fakeRootId) - ]), - "folder": JSONValue("") - ]; - } else { - // path is a file - // compute file hash - both business and personal responses use quickXorHash - string quickXorHash = computeQuickXorHash(path); - - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(appConfig.accountType), - "id": JSONValue(fakeRootId) - ]), - "file": JSONValue([ - "hashes":JSONValue([ - "quickXorHash": JSONValue(quickXorHash) - ]) - - ]) - ]; + + string parentPath = dirName(path); + if (parentPath != "." 
&& exists(path)) { + foreach (searchDriveId; onlineDriveDetails.keys) { + Item databaseItem; + if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) { + fakeDriveId = databaseItem.driveId; + fakeRootId = databaseItem.id; + break; // Exit loop after finding the first match + } + } } - + + fakeResponse = [ + "id": JSONValue(toHexString(fakedOneDriveItemValues)), + "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(fakeDriveId), + "driveType": JSONValue(appConfig.accountType), + "id": JSONValue(fakeRootId) + ]) + ]; + + if (exists(path)) { + if (isDir(path)) { + fakeResponse["folder"] = JSONValue(""); + } else { + string quickXorHash = computeQuickXorHash(path); + fakeResponse["file"] = JSONValue([ + "hashes": JSONValue(["quickXorHash": JSONValue(quickXorHash)]) + ]); + } + } else { + // Assume directory if path does not exist + fakeResponse["folder"] = JSONValue(""); + } + addLogEntry("Generated Fake OneDrive Response: " ~ to!string(fakeResponse), ["debug"]); return fakeResponse; } - + // Save JSON item details into the item database void saveItem(JSONValue jsonItem) { // jsonItem has to be a valid object @@ -5788,7 +6050,6 @@ class SyncEngine { } // Add to the local database - addLogEntry("Adding to database: " ~ to!string(item), ["debug"]); itemDB.upsert(item); // If we have a remote drive ID, add this to our list of known drive id's @@ -8017,4 +8278,316 @@ class SyncEngine { addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]); itemDB.upsert(tieDBItem); } + + // List all the OneDrive Business Shared Items for the user to see + void listBusinessSharedObjects() { + + JSONValue sharedWithMeItems; + + // Create a new API 
Instance for this thread and initialise it + OneDriveApi sharedWithMeOneDriveApiInstance; + sharedWithMeOneDriveApiInstance = new OneDriveApi(appConfig); + sharedWithMeOneDriveApiInstance.initialise(); + + try { + sharedWithMeItems = sharedWithMeOneDriveApiInstance.getSharedWithMe(); + } catch (OneDriveException e) { + + // Display error message + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // Must exit here + sharedWithMeOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(sharedWithMeOneDriveApiInstance); + } + + if (sharedWithMeItems.type() == JSONType.object) { + + if (count(sharedWithMeItems["value"].array) > 0) { + // No shared items + addLogEntry(); + addLogEntry("Listing available OneDrive Business Shared Items:"); + addLogEntry(); + + // Iterate through the array + foreach (searchResult; sharedWithMeItems["value"].array) { + + // loop variables for each item + string sharedByName; + string sharedByEmail; + + // Debug response output + addLogEntry("shared folder entry: " ~ to!string(searchResult), ["debug"]); + + // Configure 'who' this was shared by + if ("sharedBy" in searchResult["remoteItem"]["shared"]) { + // we have shared by details we can use + if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { + sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; + } + if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { + sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; + } + } + + // Output query result + addLogEntry("-----------------------------------------------------------------------------------"); + if (isItemFile(searchResult)) { + addLogEntry("Shared File: " ~ to!string(searchResult["name"].str)); + } else { + addLogEntry("Shared Folder: " ~ to!string(searchResult["name"].str)); + } + + // Detail 'who' shared this + if ((sharedByName != "") && (sharedByEmail != "")) { + 
addLogEntry("Shared By: " ~ sharedByName ~ " (" ~ sharedByEmail ~ ")"); + } else { + if (sharedByName != "") { + addLogEntry("Shared By: " ~ sharedByName); + } + } + + // More detail if --verbose is being used + addLogEntry("Item Id: " ~ searchResult["remoteItem"]["id"].str, ["verbose"]); + addLogEntry("Parent Drive Id: " ~ searchResult["remoteItem"]["parentReference"]["driveId"].str, ["verbose"]); + if ("id" in searchResult["remoteItem"]["parentReference"]) { + addLogEntry("Parent Item Id: " ~ searchResult["remoteItem"]["parentReference"]["id"].str, ["verbose"]); + } + } + + // Close out the loop + addLogEntry("-----------------------------------------------------------------------------------"); + addLogEntry(); + + } else { + // No shared items + addLogEntry(); + addLogEntry("No OneDrive Business Shared Folders were returned"); + addLogEntry(); + } + } + + // Shutdown API access + sharedWithMeOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(sharedWithMeOneDriveApiInstance); + } + + // Query all the OneDrive Business Shared Objects to sync only Shared Files + void queryBusinessSharedObjects() { + + JSONValue sharedWithMeItems; + Item sharedFilesRootDirectoryDatabaseRecord; + + // Create a new API Instance for this thread and initialise it + OneDriveApi sharedWithMeOneDriveApiInstance; + sharedWithMeOneDriveApiInstance = new OneDriveApi(appConfig); + sharedWithMeOneDriveApiInstance.initialise(); + + try { + sharedWithMeItems = sharedWithMeOneDriveApiInstance.getSharedWithMe(); + } catch (OneDriveException e) { + + // Add eventual API error handling here + + } + + // Valid JSON response + if (sharedWithMeItems.type() == JSONType.object) { + + // Get the configuredBusinessSharedFilesDirectoryName DB item + // We need this as we need to 'fake' create all the folders for the shared files + // Then fake create the file entries for the database with the correct parent folder that is the local folder + 
itemDB.selectByPath(baseName(appConfig.configuredBusinessSharedFilesDirectoryName), appConfig.defaultDriveId, sharedFilesRootDirectoryDatabaseRecord); + + // For each item returned, if a file, process it + foreach (searchResult; sharedWithMeItems["value"].array) { + + // Shared Business Folders are added to the account using 'Add shortcut to My files' + // We only care here about any remaining 'files' that are shared with the user + + if (isItemFile(searchResult)) { + // Debug response output + addLogEntry("getSharedWithMe Response Shared File JSON: " ~ to!string(searchResult), ["debug"]); + + // Make a DB item from this JSON + Item sharedFileOriginalData = makeItem(searchResult); + + // Variables for each item + string sharedByName; + string sharedByEmail; + string sharedByFolderName; + string newLocalSharedFilePath; + string newItemPath; + Item sharedFilesPath; + JSONValue fileToDownload; + JSONValue detailsToUpdate; + JSONValue latestOnlineDetails; + + // Configure 'who' this was shared by + if ("sharedBy" in searchResult["remoteItem"]["shared"]) { + // we have shared by details we can use + if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { + sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; + } + if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { + sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; + } + } + + // Configure 'who' shared this, so that we can create the directory for that users shared files with us + if ((sharedByName != "") && (sharedByEmail != "")) { + sharedByFolderName = sharedByName ~ " (" ~ sharedByEmail ~ ")"; + + } else { + if (sharedByName != "") { + sharedByFolderName = sharedByName; + } + } + + // Create the local path to store this users shared files with us + newLocalSharedFilePath = buildNormalizedPath(buildPath(appConfig.configuredBusinessSharedFilesDirectoryName, sharedByFolderName)); + + // Does the 
Shared File Users Local Directory to store the shared file(s) exist? + if (!exists(newLocalSharedFilePath)) { + // Folder does not exist locally and needs to be created + addLogEntry("Creating the OneDrive Business Shared File Users Local Directory: " ~ newLocalSharedFilePath); + + // Local folder does not exist, thus needs to be created + mkdirRecurse(newLocalSharedFilePath); + + // As this will not be created online, generate a response so it can be saved to the database + sharedFilesPath = makeItem(createFakeResponse(baseName(newLocalSharedFilePath))); + + // Update sharedFilesPath parent items to that of sharedFilesRootDirectoryDatabaseRecord + sharedFilesPath.parentId = sharedFilesRootDirectoryDatabaseRecord.id; + + // Add DB record to the local database + addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + itemDB.upsert(sharedFilesPath); + } else { + // Folder exists locally, is the folder in the database? 
+ // Query DB for this path + Item dbRecord; + if (!itemDB.selectByPath(baseName(newLocalSharedFilePath), appConfig.defaultDriveId, dbRecord)) { + // As this will not be created online, generate a response so it can be saved to the database + sharedFilesPath = makeItem(createFakeResponse(baseName(newLocalSharedFilePath))); + + // Update sharedFilesPath parent items to that of sharedFilesRootDirectoryDatabaseRecord + sharedFilesPath.parentId = sharedFilesRootDirectoryDatabaseRecord.id; + + // Add DB record to the local database + addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + itemDB.upsert(sharedFilesPath); + } + } + + // The file to download JSON details + fileToDownload = searchResult; + + // Get the latest online details + latestOnlineDetails = sharedWithMeOneDriveApiInstance.getPathDetailsById(sharedFileOriginalData.remoteDriveId, sharedFileOriginalData.remoteId); + Item tempOnlineRecord = makeItem(latestOnlineDetails); + + // With the local folders created, now update 'fileToDownload' to download the file to our location: + // "parentReference": { + // "driveId": "", + // "driveType": "business", + // "id": "", + // }, + + // The getSharedWithMe() JSON response also contains an API bug where the 'hash' of the file is not provided + // Use the 'latestOnlineDetails' response to obtain the hash + // "file": { + // "hashes": { + // "quickXorHash": "" + // } + // }, + // + + // The getSharedWithMe() JSON response also contains an API bug where the 'size' of the file is not the actual size of the file + // The getSharedWithMe() JSON response also contains an API bug where the 'eTag' of the file is not present + // The getSharedWithMe() JSON response also contains an API bug where the 'lastModifiedDateTime' of the file is date when the file was shared, not the actual date last modified + + detailsToUpdate = [ + "parentReference": JSONValue([ + "driveId": 
JSONValue(appConfig.defaultDriveId), + "driveType": JSONValue("business"), + "id": JSONValue(sharedFilesPath.id) + ]), + "file": JSONValue([ + "hashes":JSONValue([ + "quickXorHash": JSONValue(tempOnlineRecord.quickXorHash) + ]) + ]), + "eTag": JSONValue(tempOnlineRecord.eTag) + ]; + + foreach (string key, JSONValue value; detailsToUpdate.object) { + fileToDownload[key] = value; + } + + // Update specific items + // Update 'size' + fileToDownload["size"] = to!int(tempOnlineRecord.size); + fileToDownload["remoteItem"]["size"] = to!int(tempOnlineRecord.size); + // Update 'lastModifiedDateTime' + fileToDownload["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str; + fileToDownload["fileSystemInfo"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str; + fileToDownload["remoteItem"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str; + fileToDownload["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str; + + // Final JSON that will be used to download the file + addLogEntry("Final fileToDownload: " ~ to!string(fileToDownload), ["debug"]); + + // Make the new DB item from the consolidated JSON item + Item downloadSharedFileDbItem = makeItem(fileToDownload); + + // Calculate the full local path for this shared file + newItemPath = computeItemPath(downloadSharedFileDbItem.driveId, downloadSharedFileDbItem.parentId) ~ "/" ~ downloadSharedFileDbItem.name; + + // Does this potential file exists on disk? + if (!exists(newItemPath)) { + // The shared file does not exists locally + // Is this something we actually want? 
Check the JSON against Client Side Filtering Rules + bool unwanted = checkJSONAgainstClientSideFiltering(fileToDownload); + if (!unwanted) { + // File has not been excluded via Client Side Filtering + // Submit this shared file to be processed further for downloading + applyPotentiallyNewLocalItem(downloadSharedFileDbItem, fileToDownload, newItemPath); + } + } else { + // A file, in the desired local location already exists with the same name + // Is this local file in sync? + string itemSource = "remote"; + if (!isItemSynced(downloadSharedFileDbItem, newItemPath, itemSource)) { + // Not in sync .... + Item existingDatabaseItem; + bool existingDBEntry = itemDB.selectById(downloadSharedFileDbItem.driveId, downloadSharedFileDbItem.id, existingDatabaseItem); + + // Is there a DB entry? + if (existingDBEntry) { + // Existing DB entry + // Need to be consistent here with how 'newItemPath' was calculated + string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; + // Attempt to apply this changed item + applyPotentiallyChangedItem(existingDatabaseItem, existingItemPath, downloadSharedFileDbItem, newItemPath, fileToDownload); + } else { + // File exists locally, it is not in sync, there is no record in the DB of this file + // In case the renamed path is needed + string renamedPath; + // Rename the local file + safeBackup(newItemPath, dryRun, renamedPath); + // Submit this shared file to be processed further for downloading + applyPotentiallyNewLocalItem(downloadSharedFileDbItem, fileToDownload, newItemPath); + } + } else { + // Item is in sync, ensure the DB record is the same + itemDB.upsert(downloadSharedFileDbItem); + } + } + } + } + } + } } \ No newline at end of file diff --git a/src/util.d b/src/util.d index 415146b5..8c62ce21 100644 --- a/src/util.d +++ b/src/util.d @@ -49,7 +49,7 @@ static this() { } // Creates a safe backup of the given item, and only performs the function if not in 
a --dry-run scenario -void safeBackup(const(char)[] path, bool dryRun) { +void safeBackup(const(char)[] path, bool dryRun, out string renamedPath) { auto ext = extension(path); auto newPath = path.chomp(ext) ~ "-" ~ deviceName; int n = 2; @@ -88,7 +88,8 @@ void safeBackup(const(char)[] path, bool dryRun) { // // Use rename() as Linux is POSIX compliant, we have an atomic operation where at no point in time the 'to' is missing. try { - rename(path, newPath); + rename(path, newPath); + renamedPath = to!string(newPath); } catch (Exception e) { // Handle exceptions, e.g., log error addLogEntry("Renaming of local file failed for " ~ to!string(path) ~ ": " ~ e.msg, ["error"]); diff --git a/src/webhook.d b/src/webhook.d new file mode 100644 index 00000000..065e6fb6 --- /dev/null +++ b/src/webhook.d @@ -0,0 +1,339 @@ +module webhook; + +// What does this module require to function? +import core.atomic : atomicOp; +import std.datetime; +import std.concurrency; +import std.json; + +// What other modules that we have created do we need to import? 
import arsd.cgi;
import config;
import onedrive;
import log;
import util;

// Embedded HTTP server that receives Microsoft Graph change notifications for
// OneDrive and relays them as refresh signals to the main monitor loop, while
// also managing the lifecycle (create / renew / delete) of the underlying
// Graph subscription.
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks
class OneDriveWebhook {
	// Embedded arsd.cgi HTTP server that receives the notifications
	private RequestServer server;
	// Listening address and port, from 'webhook_listening_host' / 'webhook_listening_port'
	private string host;
	private ushort port;
	// Thread id of the main monitor loop; refresh signals are sent here
	private Tid parentTid;
	// Guards against double start/stop of the embedded server
	private bool started;

	private ApplicationConfig appConfig;
	private OneDriveApi oneDriveApiInstance;
	// Microsoft Graph subscription metadata; an empty id means "no subscription yet"
	string subscriptionId = "";
	SysTime subscriptionExpiration, subscriptionLastErrorAt;
	Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval;
	string notificationUrl = "";

	// Number of notifications received so far; also used as the signal payload
	private uint count;

	this(Tid parentTid, ApplicationConfig appConfig) {
		this.host = appConfig.getValueString("webhook_listening_host");
		this.port = to!ushort(appConfig.getValueLong("webhook_listening_port"));
		this.parentTid = parentTid;
		this.appConfig = appConfig;

		// Start with an already-expired subscription so the first
		// createOrRenewSubscription() call creates a fresh one immediately
		subscriptionExpiration = Clock.currTime(UTC());
		subscriptionLastErrorAt = SysTime.fromUnixTime(0);
		subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval"));
		subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval"));
		subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval"));
		notificationUrl = appConfig.getValueString("webhook_public_url");
	}

	// Start the webhook server on a worker thread and establish the Graph
	// subscription. Safe to call repeatedly; calls while already started are no-ops.
	void serve() {
		if (this.started)
			return;
		this.started = true;
		this.count = 0;

		server.listeningHost = this.host;
		server.listeningPort = this.port;

		// serveImpl() (not this method) is static because spawn() does not accept
		// instance methods; the instance is passed through as a shared argument
		spawn(&serveImpl, cast(shared) this);
		addLogEntry("Started webhook server");

		// Subscriptions
		oneDriveApiInstance = new OneDriveApi(this.appConfig);
		oneDriveApiInstance.initialise();

		createOrRenewSubscription();
	}

	// Stop the webhook server, delete any active Graph subscription and release
	// the OneDrive API instance. Safe to call when the server is not running.
	void stop() {
		if (!this.started)
			return;
		server.stop();
		this.started = false;

		addLogEntry("Stopped webhook server");
		object.destroy(server);

		// Delete subscription if there exists any
		try {
			deleteSubscription();
		} catch (OneDriveException e) {
			logSubscriptionError(e);
		}
		oneDriveApiInstance.shutdown();
		object.destroy(oneDriveApiInstance);
	}

	// CGI request handler: answers Microsoft's validation handshake, and turns
	// change notifications into refresh signals for the parent monitor loop.
	private static void handle(shared OneDriveWebhook _this, Cgi cgi) {
		if (debugHTTPResponseOutput) {
			addLogEntry("Webhook request: " ~ to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri));
			if (!cgi.postBody.empty) {
				addLogEntry("Webhook post body: " ~ to!string(cgi.postBody));
			}
		}

		cgi.setResponseContentType("text/plain");

		if ("validationToken" in cgi.get) {
			// For validation requests, respond with the validation token passed in the query string
			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request
			cgi.write(cgi.get["validationToken"]);
			addLogEntry("Webhook: handled validation request");
		} else {
			// Notifications don't include any information about the changes that triggered them.
			// Put a refresh signal in the queue and let the main monitor loop process it.
			// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks
			_this.count.atomicOp!"+="(1);
			send(cast()_this.parentTid, to!ulong(_this.count));
			cgi.write("OK");
			addLogEntry("Webhook: sent refresh signal #" ~ to!string(_this.count));
		}
	}

	// Thread entry point: runs the embedded HTTP server with handle() as the
	// request callback. Static because spawn() cannot take instance methods.
	private static void serveImpl(shared OneDriveWebhook _this) {
		_this.server.serveEmbeddedHttp!(handle, OneDriveWebhook)(_this);
	}

	// Create a new subscription or renew the existing subscription.
	// Does nothing while still inside the retry back-off window after an error.
	void createOrRenewSubscription() {
		auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
		if (elapsed < subscriptionRetryInterval) {
			return;
		}

		try {
			if (!hasValidSubscription()) {
				createSubscription();
			} else if (isSubscriptionUpForRenewal()) {
				renewSubscription();
			}
		} catch (OneDriveException e) {
			logSubscriptionError(e);
			subscriptionLastErrorAt = Clock.currTime(UTC());
			addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval));
		} catch (JSONException e) {
			addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg);
			subscriptionLastErrorAt = Clock.currTime(UTC());
			addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval));
		}
	}

	// Return the duration until the next subscription expiration check.
	// NOTE(review): when the renewal deadline has already passed this can return
	// a negative duration, signalling the caller to check again immediately.
	Duration getNextExpirationCheckDuration() {
		SysTime now = Clock.currTime(UTC());
		if (hasValidSubscription()) {
			Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
			// Check if we are waiting for the next retry
			if (elapsed < subscriptionRetryInterval)
				return subscriptionRetryInterval - elapsed;
			else
				return subscriptionExpiration - now - subscriptionRenewalInterval;
		}
		else
			return subscriptionRetryInterval;
	}

	// A subscription is valid when we hold an id and it has not yet expired
	private bool hasValidSubscription() {
		return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC());
	}

	// True when the subscription will expire within the renewal window
	private bool isSubscriptionUpForRenewal() {
		return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval;
	}

	// Create a brand-new Graph subscription pointing at notificationUrl.
	// On HTTP 409 ("ObjectIdentifierInUse") an existing subscription already
	// covers this combination, so we adopt its id and renew it instead.
	private void createSubscription() {
		addLogEntry("Initializing subscription for updates ...");

		auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval;
		try {
			JSONValue response = oneDriveApiInstance.createSubscription(notificationUrl, expirationDateTime);
			// Save important subscription metadata including id and expiration
			subscriptionId = response["id"].str;
			subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str);
			addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString()));
		} catch (OneDriveException e) {
			if (e.httpStatusCode == 409) {
				// Take over an existing subscription on HTTP 409.
				//
				// Sample 409 error body:
				// { "error": { "code": "ObjectIdentifierInUse",
				//              "message": "Subscription Id c0bba80e-... already exists for the requested combination" } }

				// Make sure the error code is "ObjectIdentifierInUse"
				try {
					if (e.error["error"]["code"].str != "ObjectIdentifierInUse") {
						throw e;
					}
				} catch (JSONException jsonEx) {
					throw e;
				}

				// Extract the existing subscription id (a GUID) from the error message
				import std.regex;
				auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i");
				auto m = matchFirst(e.error["error"]["message"].str, idReg);
				if (!m) {
					throw e;
				}

				// Save the subscription id and renew it immediately since we don't know the expiration timestamp
				subscriptionId = m[0];
				addLogEntry("Found existing subscription " ~ subscriptionId);
				renewSubscription();
			} else {
				throw e;
			}
		}
	}

	// Push the existing subscription's expiration forward. If the server no
	// longer knows the subscription (HTTP 404) a fresh one is created instead.
	private void renewSubscription() {
		addLogEntry("Renewing subscription for updates ...");

		auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval;
		try {
			JSONValue response = oneDriveApiInstance.renewSubscription(subscriptionId, expirationDateTime);

			// Update subscription expiration from the response
			subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str);
			// Bug fix: this previously logged "Created new subscription" after a renewal
			addLogEntry("Renewed subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString()));
		} catch (OneDriveException e) {
			if (e.httpStatusCode == 404) {
				addLogEntry("The subscription is not found on the server. Recreating subscription ...");
				subscriptionId = null;
				subscriptionExpiration = Clock.currTime(UTC());
				createSubscription();
			} else {
				throw e;
			}
		}
	}

	// Delete the current subscription on the server, if one is still valid
	private void deleteSubscription() {
		if (!hasValidSubscription()) {
			return;
		}
		oneDriveApiInstance.deleteSubscription(subscriptionId);
		addLogEntry("Deleted subscription");
	}

	// Translate well-known Microsoft Graph subscription errors into concise log
	// messages; anything unrecognised is logged with the full error details.
	private void logSubscriptionError(OneDriveException e) {
		if (e.httpStatusCode == 400) {
			// Known 400 (code "InvalidRequest"): Microsoft could not get a
			// 200 OK from the webhook endpoint during validation
			if (errorMatches(e, "InvalidRequest", r"Subscription validation request failed")) {
				addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint.");
				return;
			}
		} else if (e.httpStatusCode == 401) {
			// Known 401 (code "ExtensionError"): authentication failed
			if (errorMatches(e, "ExtensionError", r"Authentication failed")) {
				addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed.");
				return;
			}
		} else if (e.httpStatusCode == 403) {
			// Known 403 (code "ExtensionError"): subscription count limit reached on the item
			if (errorMatches(e, "ExtensionError", r"Number of subscriptions on item has exceeded limit")) {
				addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded limit.");
				return;
			}
		}

		// Log detailed message for unknown errors
		addLogEntry("ERROR: Cannot create or renew subscription.");
		displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
	}

	// Returns true when the error payload carries the given "error.code" and its
	// "error.message" matches msgPattern (case-insensitive). Malformed or
	// unexpected JSON is treated as "no match" rather than raising, preserving
	// the original fall-through-to-generic-logging behaviour.
	private static bool errorMatches(OneDriveException e, string expectedCode, string msgPattern) {
		import std.regex;
		try {
			if (e.error["error"]["code"].str != expectedCode) {
				return false;
			}
			return !matchFirst(e.error["error"]["message"].str, regex(msgPattern, "i")).empty;
		} catch (JSONException) {
			return false;
		}
	}
}