commit b2879f4669
abraunegg, 2024-04-14 20:02:01 +00:00 (committed by GitHub)
No known key found for this signature in database; GPG key ID: B5690EEEBB952194
95 changed files with 19967 additions and 14328 deletions


@@ -34,13 +34,18 @@ DEBUG = @DEBUG@
DC = @DC@
DC_TYPE = @DC_TYPE@
DCFLAGS = @DCFLAGS@
DCFLAGS += -w -g -O -J.
DCFLAGS += -w -J.
ifeq ($(DEBUG),yes)
ifeq ($(DC_TYPE),dmd)
DCFLAGS += -debug -gs
# Add DMD Debugging Flags
DCFLAGS += -g -debug -gs
else
DCFLAGS += -d-debug -gc
# Add LDC Debugging Flags
DCFLAGS += -g -d-debug -gc
endif
else
# Only add optimisation flags if debugging is not enabled
DCFLAGS += -O
endif
ifeq ($(NOTIFICATIONS),yes)
@@ -55,7 +60,7 @@ endif
system_unit_files = contrib/systemd/onedrive@.service
user_unit_files = contrib/systemd/onedrive.service
DOCFILES = README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md docs/application-security.md
DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-items.md docs/client-architecture.md docs/contributing.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md docs/known-issues.md
ifneq ("$(wildcard /etc/redhat-release)","")
RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l)
@@ -66,19 +71,19 @@ RHEL_VERSION = 0
endif
SOURCES = \
src/config.d \
src/itemdb.d \
src/log.d \
src/main.d \
src/monitor.d \
src/onedrive.d \
src/qxor.d \
src/selective.d \
src/sqlite.d \
src/sync.d \
src/upload.d \
src/config.d \
src/log.d \
src/util.d \
src/progress.d \
src/qxor.d \
src/curlEngine.d \
src/onedrive.d \
src/webhook.d \
src/sync.d \
src/itemdb.d \
src/sqlite.d \
src/clientSideFiltering.d \
src/monitor.d \
src/arsd/cgi.d
ifeq ($(NOTIFICATIONS),yes)
@@ -92,10 +97,9 @@ clean:
rm -rf autom4te.cache
rm -f config.log config.status
# also remove files generated via ./configure
# Remove files generated via ./configure
distclean: clean
rm -f Makefile contrib/pacman/PKGBUILD contrib/spec/onedrive.spec onedrive.1 \
$(system_unit_files) $(user_unit_files)
rm -f Makefile contrib/pacman/PKGBUILD contrib/spec/onedrive.spec onedrive.1 $(system_unit_files) $(user_unit_files)
onedrive: $(SOURCES)
if [ -f .git/HEAD ] ; then \
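With this change, optimisation (`-O`) applies only to non-debug builds, and `-g` is always added when debugging is enabled. A minimal sketch of producing a debug build, assuming the project's existing `--enable-debug` configure switch is what sets `DEBUG=yes`:

```text
./configure --enable-debug
make clean
make
```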


@@ -2,6 +2,13 @@
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## 2.5.0 - TBA
### Changed
* Renamed various documentation files to align with document content
## 2.4.25 - 2023-06-21
### Fixed
* Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue)

config

@@ -3,7 +3,7 @@
# with their default values.
# All values need to be enclosed in quotes
# When changing a config option below, remove the '#' from the start of the line
# For explanations of all config options below see docs/USAGE.md or the man page.
# For explanations of all config options below see docs/usage.md or the man page.
#
# sync_dir = "~/OneDrive"
# skip_file = "~*|.~*|*.tmp"
@@ -40,22 +40,19 @@
# bypass_data_preservation = "false"
# azure_ad_endpoint = ""
# azure_tenant_id = "common"
# sync_business_shared_folders = "false"
# sync_business_shared_items = "false"
# sync_dir_permissions = "700"
# sync_file_permissions = "600"
# rate_limit = "131072"
# operation_timeout = "3600"
# webhook_enabled = "false"
# webhook_public_url = ""
# webhook_listening_host = ""
# webhook_listening_port = "8888"
# webhook_expiration_interval = "86400"
# webhook_renewal_interval = "43200"
# webhook_expiration_interval = "600"
# webhook_renewal_interval = "300"
# webhook_retry_interval = "60"
# space_reservation = "50"
# display_running_config = "false"
# read_only_auth_scope = "false"
# cleanup_local_files = "false"
# operation_timeout = "3600"
# dns_timeout = "60"
# connect_timeout = "10"
# data_timeout = "600"
# ip_protocol_version = "0"
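The webhook defaults above were tightened considerably (expiration 86400 -> 600 seconds, renewal 43200 -> 300 seconds). A minimal sketch of enabling the webhook under the new defaults; the public URL is illustrative:

```text
webhook_enabled = "true"
webhook_public_url = "https://example.com/onedrive/webhook"
# The new defaults below only need to be uncommented if they are unsuitable
# webhook_expiration_interval = "600"
# webhook_renewal_interval = "300"
# webhook_retry_interval = "60"
```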

configure

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for onedrive v2.4.25.
# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-rc1.
#
# Report bugs to <https://github.com/abraunegg/onedrive>.
#
@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='onedrive'
PACKAGE_TARNAME='onedrive'
PACKAGE_VERSION='v2.4.25'
PACKAGE_STRING='onedrive v2.4.25'
PACKAGE_VERSION='v2.5.0-rc1'
PACKAGE_STRING='onedrive v2.5.0-rc1'
PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
PACKAGE_URL=''
@@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems.
\`configure' configures onedrive v2.5.0-rc1 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1280,7 +1280,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of onedrive v2.4.25:";;
short | recursive ) echo "Configuration of onedrive v2.5.0-rc1:";;
esac
cat <<\_ACEOF
@@ -1393,7 +1393,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
onedrive configure v2.4.25
onedrive configure v2.5.0-rc1
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by onedrive $as_me v2.4.25, which was
It was created by onedrive $as_me v2.5.0-rc1, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2162,7 +2162,7 @@ fi
PACKAGE_DATE="June 2023"
PACKAGE_DATE="March 2024"
@@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by onedrive $as_me v2.4.25, which was
This file was extended by onedrive $as_me v2.5.0-rc1, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -3212,7 +3212,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
onedrive config.status v2.4.25
onedrive config.status v2.5.0-rc1
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"


@@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
dnl - tag the release
AC_PREREQ([2.69])
AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive])
AC_INIT([onedrive],[v2.5.0-rc1], [https://github.com/abraunegg/onedrive], [onedrive])
AC_CONFIG_SRCDIR([src/main.d])


@@ -11,7 +11,7 @@ _onedrive()
prev=${COMP_WORDS[COMP_CWORD-1]}
options='--check-for-nomount --check-for-nosync --debug-https --disable-notifications --display-config --display-sync-status --download-only --disable-upload-validation --dry-run --enable-logging --force-http-1.1 --force-http-2 --get-file-link --local-first --logout -m --monitor --no-remote-delete --print-token --reauth --resync --skip-dot-files --skip-symlinks --synchronize --upload-only -v --verbose --version -h --help'
argopts='--create-directory --get-O365-drive-id --operation-timeout --remove-directory --single-directory --source-directory'
argopts='--create-directory --get-O365-drive-id --remove-directory --single-directory --source-directory'
# Loop on the arguments to manage conflicting options
for (( i=0; i < ${#COMP_WORDS[@]}-1; i++ )); do
@@ -34,7 +34,7 @@ fi
fi
return 0
;;
--create-directory|--get-O365-drive-id|--operation-timeout|--remove-directory|--single-directory|--source-directory)
--create-directory|--get-O365-drive-id|--remove-directory|--single-directory|--source-directory)
return 0
;;
*)


@@ -23,7 +23,6 @@ complete -c onedrive -l local-first -d 'Synchronize from the local directory sou
complete -c onedrive -l logout -d 'Logout the current user.'
complete -c onedrive -n "not __fish_seen_subcommand_from --synchronize" -a "-m --monitor" -d 'Keep monitoring for local and remote changes.'
complete -c onedrive -l no-remote-delete -d 'Do not delete local file deletes from OneDrive when using --upload-only.'
complete -c onedrive -l operation-timeout -d 'Specify the maximum amount of time (in seconds) an operation is allowed to take.'
complete -c onedrive -l print-token -d 'Print the access token, useful for debugging.'
complete -c onedrive -l remote-directory -d 'Remove a directory on OneDrive - no sync will be performed.'
complete -c onedrive -l reauth -d 'Reauthenticate the client with OneDrive.'


@@ -27,7 +27,6 @@ all_opts=(
'--logout[Logout the current user]'
'(-m --monitor)'{-m,--monitor}'[Keep monitoring for local and remote changes]'
'--no-remote-delete[Do not delete local file deletes from OneDrive when using --upload-only]'
'--operation-timeout[Specify the maximum amount of time (in seconds) an operation is allowed to take.]:seconds:'
'--print-token[Print the access token, useful for debugging]'
'--reauth[Reauthenticate the client with OneDrive]'
'--resync[Forget the last saved state, perform a full sync]'


@@ -12,7 +12,7 @@ RUN apt-get clean \
COPY . /usr/src/onedrive
WORKDIR /usr/src/onedrive
RUN ./configure DC=/usr/bin/ldmd2 \
RUN ./configure DC=/usr/bin/ldc2 \
&& make clean \
&& make \
&& make install


@@ -118,6 +118,34 @@ if [ -n "${ONEDRIVE_SINGLE_DIRECTORY:=""}" ]; then
ARGS=(--single-directory \"${ONEDRIVE_SINGLE_DIRECTORY}\" ${ARGS[@]})
fi
# Tell client to run in dry-run mode
if [ "${ONEDRIVE_DRYRUN:=0}" == "1" ]; then
echo "# We are running in dry-run mode"
echo "# Adding --dry-run"
ARGS=(--dry-run ${ARGS[@]})
fi
# Tell client to disable download validation
if [ "${ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION:=0}" == "1" ]; then
echo "# We are disabling the download integrity checks performed by this client"
echo "# Adding --disable-download-validation"
ARGS=(--disable-download-validation ${ARGS[@]})
fi
# Tell client to disable upload validation
if [ "${ONEDRIVE_DISABLE_UPLOAD_VALIDATION:=0}" == "1" ]; then
echo "# We are disabling the upload integrity checks performed by this client"
echo "# Adding --disable-upload-validation"
ARGS=(--disable-upload-validation ${ARGS[@]})
fi
# Tell client to download OneDrive Business Shared Files if 'sync_business_shared_items' option has been enabled in the configuration files
if [ "${ONEDRIVE_SYNC_SHARED_FILES:=0}" == "1" ]; then
echo "# We are attempting to sync OneDrive Business Shared Files if 'sync_business_shared_items' has been enabled in the config file"
echo "# Adding --sync-shared-files"
ARGS=(--sync-shared-files ${ARGS[@]})
fi
if [ ${#} -gt 0 ]; then
ARGS=("${@}")
fi
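Each of the new environment toggles maps to a client flag when set to "1". A usage sketch, assuming the `driveone/onedrive` image referenced elsewhere in this commit; volume names are illustrative:

```text
docker run -it --name onedrive \
    -v onedrive_conf:/onedrive/conf \
    -v onedrive_data:/onedrive/data \
    -e ONEDRIVE_DRYRUN=1 \
    -e ONEDRIVE_SYNC_SHARED_FILES=1 \
    driveone/onedrive:latest
```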


@@ -12,7 +12,7 @@
%endif
Name: onedrive
Version: 2.4.25
Version: 2.5.0
Release: 1%{?dist}
Summary: Microsoft OneDrive Client
Group: System Environment/Network


@@ -1,192 +0,0 @@
# How to configure OneDrive Business Shared Folder Sync
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Process Overview
Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
1. List available shared folders to determine which folder you wish to sync & to validate that you have access to that folder
2. Create a new file called 'business_shared_folders' in your config directory which contains a list of the shared folders you wish to sync
3. Test the configuration using '--dry-run'
4. Sync the OneDrive Business Shared folders as required
## Listing available OneDrive Business Shared Folders
List the available OneDrive Business Shared folders with the following command:
```text
onedrive --list-shared-folders
```
This will return a listing of all OneDrive Business Shared folders which have been shared with you and by whom. This is important for conflict resolution:
```text
Initializing the Synchronization Engine ...
Listing available OneDrive Business Shared Folders:
---------------------------------------
Shared Folder: SharedFolder0
Shared By: Firstname Lastname
---------------------------------------
Shared Folder: SharedFolder1
Shared By: Firstname Lastname
---------------------------------------
Shared Folder: SharedFolder2
Shared By: Firstname Lastname
---------------------------------------
Shared Folder: SharedFolder0
Shared By: Firstname Lastname (user@domain)
---------------------------------------
Shared Folder: SharedFolder1
Shared By: Firstname Lastname (user@domain)
---------------------------------------
Shared Folder: SharedFolder2
Shared By: Firstname Lastname (user@domain)
...
```
## Configuring OneDrive Business Shared Folders
1. Create a new file called 'business_shared_folders' in your config directory
2. On each new line, list the OneDrive Business Shared Folder you wish to sync
```text
[alex@centos7full onedrive]$ cat ~/.config/onedrive/business_shared_folders
# comment
Child Shared Folder
# Another comment
Top Level to Share
[alex@centos7full onedrive]$
```
3. Validate your configuration with `onedrive --display-config`:
```text
Configuration file successfully loaded
onedrive version = v2.4.3
Config path = /home/alex/.config/onedrive-business/
Config file found in config path = true
Config option 'check_nosync' = false
Config option 'sync_dir' = /home/alex/OneDriveBusiness
Config option 'skip_dir' =
Config option 'skip_file' = ~*|.~*|*.tmp
Config option 'skip_dotfiles' = false
Config option 'skip_symlinks' = false
Config option 'monitor_interval' = 300
Config option 'min_notify_changes' = 5
Config option 'log_dir' = /var/log/onedrive/
Config option 'classify_as_big_delete' = 1000
Config option 'sync_root_files' = false
Selective sync 'sync_list' configured = false
Business Shared Folders configured = true
business_shared_folders contents:
# comment
Child Shared Folder
# Another comment
Top Level to Share
```
## Performing a sync of OneDrive Business Shared Folders
Perform a standalone sync using the following command: `onedrive --synchronize --sync-shared-folders --verbose`:
```text
onedrive --synchronize --sync-shared-folders --verbose
Using 'user' Config Dir: /home/alex/.config/onedrive-business/
Using 'system' Config Dir:
Configuration file successfully loaded
Initializing the OneDrive API ...
Configuring Global Azure AD Endpoints
Opening the item database ...
All operations will be performed in: /home/alex/OneDriveBusiness
Application version: v2.4.3
Account Type: business
Default Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Default Root ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
Remaining Free Space: 1098316220277
Fetching details for OneDrive Root
OneDrive Root exists in the database
Initializing the Synchronization Engine ...
Syncing changes from OneDrive ...
Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
Number of items from OneDrive to process: 0
Attempting to sync OneDrive Business Shared Folders
Syncing this OneDrive Business Shared Folder: Child Shared Folder
OneDrive Business Shared Folder - Shared By: test user
Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared
Processing 11 OneDrive items to ensure consistent local state
Syncing this OneDrive Business Shared Folder: Top Level to Share
OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com)
Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 3 OneDrive items for processing from /Top Level to Share/10-Files
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP
Processing 31 OneDrive items to ensure consistent local state
Uploading differences of ~/OneDriveBusiness
Processing root
The directory has not changed
Processing SMPP_Local
The directory has not changed
Processing SMPP-IF-SPEC_v3_3-24858.pdf
The file has not changed
Processing SMPP_v3_4_Issue1_2-24857.pdf
The file has not changed
Processing new_local_file.txt
The file has not changed
Processing root
The directory has not changed
...
The directory has not changed
Processing week02-03-Combinational_Logic-v1.pptx
The file has not changed
Uploading new items of ~/OneDriveBusiness
Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
Number of items from OneDrive to process: 0
Attempting to sync OneDrive Business Shared Folders
Syncing this OneDrive Business Shared Folder: Child Shared Folder
OneDrive Business Shared Folder - Shared By: test user
Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared
Processing 11 OneDrive items to ensure consistent local state
Syncing this OneDrive Business Shared Folder: Top Level to Share
OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com)
Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 3 OneDrive items for processing from /Top Level to Share/10-Files
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP
Processing 31 OneDrive items to ensure consistent local state
```
**Note:** Whenever you modify the `business_shared_folders` file you must perform a `--resync` of your database to clean up stale entries due to changes in your configuration.
## Enable / Disable syncing of OneDrive Business Shared Folders
Performing a sync of the configured OneDrive Business Shared Folders can be enabled / disabled via adding the following to your configuration file.
### Enable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_folders = "true"
```
### Disable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_folders = "false"
```
## Known Issues
Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders.
Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below:
![shared_with_me](./images/shared_with_me.JPG)
This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)

File diff suppressed because it is too large.


@@ -124,7 +124,9 @@ Example:
ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/my-new-config"
```
**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded.
> [!IMPORTANT]
> When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be manually expanded when editing your systemd file.
### Step 3: Enable the new systemd service
Once the file is correctly edited, you can enable the new systemd service using the following commands.
@@ -227,10 +229,10 @@ docker run -it --name onedrive -v onedrive_conf_sharepoint_site3:/onedrive/conf
docker run -it --name onedrive -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite50:/onedrive/data" driveone/onedrive:latest
```
#### TIP
To avoid 're-authenticating' and 'authorising' each individual Docker container, if all the Docker containers are using the 'same' OneDrive credentials, you can re-use the 'refresh_token' from one Docker container to another by copying this file to the configuration Docker volume of each Docker container.
If the account credentials are different .. you will need to re-authenticate each Docker container individually.
> [!TIP]
> To avoid 're-authenticating' and 'authorising' each individual Docker container, if all the Docker containers are using the 'same' OneDrive credentials, you can re-use the 'refresh_token' from one Docker container to another by copying this file to the configuration Docker volume of each Docker container.
>
> If the account credentials are different, you will need to re-authenticate each Docker container individually.
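A sketch of re-using the 'refresh_token' between two configuration volumes, assuming the default Docker local volume driver stores volume data under `/var/lib/docker/volumes/<name>/_data`:

```text
sudo cp /var/lib/docker/volumes/onedrive_conf_sharepoint_site1/_data/refresh_token \
        /var/lib/docker/volumes/onedrive_conf_sharepoint_site2/_data/refresh_token
```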
## Configuring the client for use in dual-boot (Windows / Linux) situations
When dual booting Windows and Linux, depending on the Windows OneDrive account configuration, the 'Files On-Demand' option may be enabled when running OneDrive within your Windows environment.
@@ -262,7 +264,8 @@ There are a few options here which you can configure in your 'config' file to as
2. check_nomount
3. check_nosync
**Note:** Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running.
> [!NOTE]
> Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running.
### classify_as_big_delete
By default, this uses a value of 1000 files|folders. If an undesirable unmount occurs and would result in more than 1000 files being deleted online, this default threshold prevents the client from executing the online delete. Modify this value up or down as desired.

File diff suppressed because it is too large.


@@ -63,6 +63,18 @@ When these delegated API permissions are combined, these provide the effective a
These 'default' permissions will allow the OneDrive Client for Linux to read, write and delete data associated with your OneDrive Account.
## How are the Authentication Scopes used?
When using the OneDrive Client for Linux, the above authentication scopes will be presented to the Microsoft Authentication Service (login.microsoftonline.com), where the service will validate the request and provide an applicable token to access Microsoft OneDrive with. This can be illustrated as the following:
![Linux Authentication to Microsoft OneDrive](./puml/onedrive_linux_authentication.png)
This is similar to the Microsoft Windows OneDrive Client:
![Windows Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png)
In a business setting, IT staff who need to authorise the use of the OneDrive Client for Linux in their environment can be assured of its safety. The primary concern for IT staff should be securing the device running the OneDrive Client for Linux. Whereas Windows devices in a corporate environment are secured through Active Directory and Group Policy Objects (GPOs) to protect corporate data on the device, managing security on Linux devices is beyond the responsibility of this client.
## Configuring read-only access to your OneDrive data
In some situations, it may be desirable to configure the OneDrive Client for Linux to operate entirely in read-only mode.
@@ -72,7 +84,8 @@ read_only_auth_scope = "true"
```
This will change the user authentication scope request to use read-only access.
**Note:** When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes.
> [!IMPORTANT]
> When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes.
When using read-only authentication scopes, the uploading of any data or local change to OneDrive will fail with the following error:
```
@@ -88,7 +101,8 @@ As such, it is also advisable for you to add the following to your configuration
download_only = "true"
```
**Important:** Additionally when using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data. See below on how to remove your prior application consent.
> [!IMPORTANT]
> Additionally, when using 'read_only_auth_scope' you will also need to remove your existing application access consent, otherwise the old authentication consent will remain valid and will be used. This would mean the application still technically has consent to upload data. See below on how to remove your prior application consent.
## Reviewing your existing application access consent


@@ -0,0 +1,251 @@
# How to sync OneDrive Business Shared Items
> [!CAUTION]
> Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
> [!CAUTION]
> This feature has been 100% re-written from v2.5.0 onwards and is not backwards compatible with v2.4.x client versions. If enabling this feature, you must upgrade to v2.5.0 or above on all systems that are running this client.
>
> An additional prerequisite before using this capability in v2.5.0 and above is for you to revert any v2.4.x Shared Business Folder configuration you may currently be using, including, but not limited to:
> * Removing `sync_business_shared_folders = "true|false"` from your 'config' file
> * Removing the 'business_shared_folders' file
> * Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues.
> * Removing any configuration online that might be related to using this feature prior to v2.5.0
## Process Overview
Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you.
2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder
3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement.
4. Test the configuration using '--dry-run'
5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required
### Enable syncing of OneDrive Business Shared Items via config file
```text
sync_business_shared_items = "true"
```
### Disable syncing of OneDrive Business Shared Items via config file
```text
sync_business_shared_items = "false"
```
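Covering steps 3 to 5 of the process overview, a minimal sketch of validating and then applying the new configuration (a `--resync` is required after enabling the option):

```text
# test the configuration first, without changing any local or online data
onedrive --sync --verbose --dry-run --resync
# then perform the real sync
onedrive --sync --verbose --resync
```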
## Syncing OneDrive Business Shared Folders
Use the following steps to add a OneDrive Business Shared Folder to your account:
1. Login to Microsoft OneDrive online, and navigate to 'Shared' from the left hand side pane
![objects_shared_with_me](./images/objects_shared_with_me.png)
2. Select the respective folder you wish to sync, and click the 'Add shortcut to My files' at the top of the page
![add_shared_folder](./images/add_shared_folder.png)
3. The final result online will look like this:
![shared_folder_added](./images/shared_folder_added.png)
When using Microsoft Windows, this shared folder will appear as the following:
![windows_view_shared_folders](./images/windows_view_shared_folders.png)
4. Sync your data using `onedrive --sync --verbose`. If you have just enabled the `sync_business_shared_items = "true"` configuration option, you will be required to perform a resync. During the sync, the selected shared folder will be downloaded:
```
...
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 4
Finished processing /delta JSON response from the OneDrive API
Processing 3 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Creating local directory: ./my_shared_folder
Quota information is restricted or not available for this drive.
Syncing this OneDrive Business Shared Folder: my_shared_folder
Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 6
Finished processing /delta JSON response from the OneDrive API
Processing 6 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Creating local directory: ./my_shared_folder/asdf
Creating local directory: ./my_shared_folder/original_data
Number of items to download from OneDrive: 3
Downloading file: my_shared_folder/asdf/asdfasdfhashdkfasdf.txt ... done
Downloading file: my_shared_folder/asdf/asdfasdf.txt ... done
Downloading file: my_shared_folder/original_data/file1.data ... done
Performing a database consistency and integrity check on locally stored data
...
```
Viewed locally on Linux, this shared folder appears as follows:
![linux_shared_folder_view](./images/linux_shared_folder_view.png)
Any shared folder you add can utilise any 'client side filtering' rules that you have created.
## Syncing OneDrive Business Shared Files
There are two methods to support syncing OneDrive Business Shared Files with the OneDrive application:
1. Add a 'shortcut' to your 'My Files' for the file. This creates a URL shortcut to the file which can be followed when using a Linux Window Manager (GNOME, KDE etc.), and the link will open in a browser. This is the only option supported by Microsoft Windows.
2. Use `--sync-shared-files` option to sync all files shared with you to your local disk. If you use this method, you can utilise any 'client side filtering' rules that you have created to filter out files you do not want locally. This option will create a new folder locally, with sub-folders named after the person who shared the data with you.
### Syncing OneDrive Business Shared Files using Option 1
1. As per the above method for adding folders, select the shared file, then select 'Add shortcut' for the file
![add_shared_file_shortcut](./images/add_shared_file_shortcut.png)
2. The final result online will look like this:
![add_shared_file_shortcut_added](./images/online_shared_file_link.png)
When using Microsoft Windows, this shared file will appear as the following:
![windows_view_shared_file_link](./images/windows_view_shared_file_link.png)
3. Sync your data using `onedrive --sync --verbose`. If you have just enabled the `sync_business_shared_items = "true"` configuration option, you will be required to perform a resync.
```
...
All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive
Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
Finished processing /delta JSON response from the OneDrive API
Processing 1 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Number of items to download from OneDrive: 1
Downloading file: ./file to share.docx.url ... done
Syncing this OneDrive Business Shared Folder: my_shared_folder
Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 0
Finished processing /delta JSON response from the OneDrive API
No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive
Quota information is restricted or not available for this drive.
Performing a database consistency and integrity check on locally stored data
Processing DB entries for this Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
Quota information is restricted or not available for this drive.
...
```
Viewed locally on Linux, this shared file link appears as follows:
![linux_view_shared_file_link](./images/linux_view_shared_file_link.png)
Any shared file link you add can utilise any 'client side filtering' rules that you have created.
### Syncing OneDrive Business Shared Files using Option 2
> [!IMPORTANT]
> When using option 2, all files that have been shared with you will be downloaded by default. To reduce this, first use `--list-shared-items` to list all items shared with your account, then use 'client side filtering' rules such as a 'sync_list' configuration to selectively sync only the required files to your local system.
1. Review all items that have been shared with you by using `onedrive --list-shared-items`. This should display output similar to the following:
```
...
Listing available OneDrive Business Shared Items:
-----------------------------------------------------------------------------------
Shared File: large_document_shared.docx
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared File: no_download_access.docx
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared File: online_access_only.txt
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared File: read_only.txt
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared File: qewrqwerwqer.txt
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared File: dummy_file_to_share.docx
Shared By: testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared Folder: Sub Folder 2
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared File: file to share.docx
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared Folder: Top Folder
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared Folder: my_shared_folder
Shared By: testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
Shared Folder: Jenkins
Shared By: test user (testuser@mynasau3.onmicrosoft.com)
-----------------------------------------------------------------------------------
...
```
2. If applicable, add entries to a 'sync_list' file to sync only the shared files that are of importance to you (a minimal sketch is shown after the output below).
3. Run the command `onedrive --sync --verbose --sync-shared-files` to sync the shared files to your local file system. This will create a new local folder called 'Files Shared With Me', and will contain sub-directories named after the entity account that has shared the file with you. In that folder will reside the shared file:
```
...
Finished processing /delta JSON response from the OneDrive API
No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive
Syncing this OneDrive Business Shared Folder: my_shared_folder
Fetching /delta response from the OneDrive API for Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 0
Finished processing /delta JSON response from the OneDrive API
No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive
Quota information is restricted or not available for this drive.
Creating the OneDrive Business Shared Files Local Directory: /home/alex/OneDrive/Files Shared With Me
Checking for any applicable OneDrive Business Shared Files which need to be synced locally
Creating the OneDrive Business Shared File Users Local Directory: /home/alex/OneDrive/Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)
Creating the OneDrive Business Shared File Users Local Directory: /home/alex/OneDrive/Files Shared With Me/testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)
Number of items to download from OneDrive: 7
Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/file to share.docx ... done
OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error
Unable to download this file as this was shared as read-only without download permission: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/no_download_access.docx
ERROR: File failed to download. Increase logging verbosity to determine why.
Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/no_download_access.docx ... failed!
Downloading file: Files Shared With Me/testuser2 testuser2 (testuser2@mynasau3.onmicrosoft.com)/dummy_file_to_share.docx ... done
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 0% | ETA --:--:--
Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/online_access_only.txt ... done
Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/read_only.txt ... done
Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/qewrqwerwqer.txt ... done
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 5% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 10% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 15% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 20% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 25% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 30% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 35% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 40% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 45% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 50% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 55% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 60% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 65% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 70% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 75% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 80% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 85% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 90% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 95% | ETA 00:00:00
Downloading: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... 100% | DONE in 00:00:00
Quota information is restricted or not available for this drive.
Downloading file: Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx ... done
Quota information is restricted or not available for this drive.
Quota information is restricted or not available for this drive.
Performing a database consistency and integrity check on locally stored data
Processing DB entries for this Drive ID: b!BhWyqa7K_kqXqHtSIlsqjR5iJogxpWxDradnpVGTU2VxBOJh82Y6S4he4rdnGPBT
Quota information is restricted or not available for this drive.
...
```
Viewed locally on Linux, the 'Files Shared With Me' folder and its content appear as follows:
![files_shared_with_me_folder](./images/files_shared_with_me_folder.png)
Unfortunately, there is no Microsoft Windows equivalent for this capability.
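Relating to step 2 above, a minimal 'sync_list' sketch that would restrict the download to a single shared file; the path is illustrative and mirrors the 'Files Shared With Me' layout shown in the output above:

```text
/Files Shared With Me/test user (testuser@mynasau3.onmicrosoft.com)/large_document_shared.docx
```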
## Known Issues
Shared folders shared with you by people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders.
Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below:
![shared_with_me](./images/shared_with_me.JPG)
This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)

docs/client-architecture.md (new file)

@@ -0,0 +1,333 @@
# OneDrive Client for Linux Application Architecture
## How does the client work at a high level?
The client utilises the 'libcurl' library to communicate with Microsoft OneDrive via the Microsoft Graph API. The diagram below shows this high level interaction with the Microsoft and GitHub API services online:
![client_use_of_libcurl](./puml/client_use_of_libcurl.png)
Depending on your operational environment, it is possible to 'tweak' the following options, which will modify how libcurl interacts with Microsoft OneDrive services:
* Downgrade all HTTPS operations to use HTTP1.1 (Config Option: `force_http_11`)
* Control how long a specific transfer should take before it is considered too slow and aborted (Config Option: `operation_timeout`)
* Control libcurl handling of DNS Cache Timeout (Config Option: `dns_timeout`)
* Control the maximum time allowed for the connection to be established (Config Option: `connect_timeout`)
* Control the timeout for activity on an established HTTPS connection (Config Option: `data_timeout`)
* Control what IP protocol version should be used when communicating with OneDrive (Config Option: `ip_protocol_version`)
* Control what User Agent is presented to Microsoft services (Config Option: `user_agent`)
> [!IMPORTANT]
> The default 'user_agent' value conforms to specific Microsoft requirements to identify as an ISV that complies with OneDrive traffic decoration requirements. Changing this value may impact how Microsoft sees your client, and your traffic may be throttled as a result. For further information please read: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
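These tunables all live in the application 'config' file. A sketch showing them with the default values that appear in the config template updated by this commit (the `force_http_11` default is an assumption, as it is not shown in the visible diff):

```text
# force_http_11 = "false"
# operation_timeout = "3600"
# dns_timeout = "60"
# connect_timeout = "10"
# data_timeout = "600"
# ip_protocol_version = "0"
```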
Diving a little deeper into how the client operates, the diagram below outlines at a high level the operational workflow of the OneDrive Client for Linux, demonstrating how it interacts with the OneDrive API to maintain synchronisation, manage local and cloud data integrity, and ensure that user data is accurately mirrored between the local filesystem and OneDrive cloud storage.
![High Level Application Sequence](./puml/high_level_operational_process.png)
The application operational processes have several high level key stages:
1. **Access Token Validation:** Initially, the client validates its access and the existing access token, refreshing it if necessary. This step ensures that the client has the required permissions to interact with the OneDrive API.
2. **Query Microsoft OneDrive API:** The client queries the /delta API endpoint of Microsoft OneDrive, which returns JSON responses. The /delta endpoint is particularly used for syncing changes, helping the client to identify any updates in the OneDrive storage.
3. **Process JSON Responses:** The client processes each JSON response to determine if it represents a 'root' or 'deleted' item. Items not marked as 'root' or 'deleted' are temporarily stored for further processing. For 'root' or 'deleted' items, the client processes them immediately, otherwise, the client evaluates the items against client-side filtering rules to decide whether to discard them or to process and save them in the local database cache for actions like creating directories or downloading files.
4. **Local Cache Database Processing for Data Integrity:** The client processes its local cache database to check for data integrity and differences compared to the OneDrive storage. If differences are found, such as a file or folder change including deletions, the client uploads these changes to OneDrive. Responses from the API, including item metadata, are saved to the local cache database.
5. **Local Filesystem Scanning:** The client scans the local filesystem for new files or folders. Each new item is checked against client-side filtering rules. If an item passes the filtering, it is uploaded to OneDrive. Otherwise, it is discarded if it doesn't meet the filtering criteria.
6. **Final Data True-Up:** Lastly, the client queries the /delta link for a final true-up, processing any further online JSON changes if required. This ensures that the local and OneDrive storages are fully synchronised.
## What are the operational modes of the client?
There are 2 main operational modes that the client can utilise:
1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. This method is used when you utilise `--sync`.
2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive and utilises 'inotify' to watch for local system changes. This method is used when you utilise `--monitor`.
By default, both modes consider all data stored online within Microsoft OneDrive as the 'source-of-truth' - that is, what is online, is the correct data (file version, file content, file timestamp, folder structure and so on). This consideration also matches how the Microsoft OneDrive Client for Windows operates.
However, in standalone mode (`--sync`), you can *change* what reference the client will use as the 'source-of-truth' for your data by using the `--local-first` option so that the application will look at your local files *first* and consider your local files as your 'source-of-truth' to replicate that directory structure to Microsoft OneDrive.
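A quick reference for invoking the two modes described above:

```text
onedrive --sync       # standalone: perform a single sync and exit
onedrive --monitor    # ongoing: keep running, using inotify for local changes
```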
> [!IMPORTANT]
> Please be aware that if you designate a network mount point (such as NFS, Windows Network Share, or Samba Network Share) as your `sync_dir`, this setup inherently lacks 'inotify' support. Support for 'inotify' is essential for real-time tracking of file changes, which means that the client's 'Monitor Mode' cannot immediately detect changes in files located on these network shares. Instead, synchronisation between your local filesystem and Microsoft OneDrive will occur at intervals specified by the `monitor_interval` setting. This limitation regarding 'inotify' support on network mount points like NFS or Samba is beyond the control of this client.
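For such network-mounted setups, the effective sync cadence is the `monitor_interval` config value; a sketch using the 300-second value shown as the default elsewhere in this repository's documentation:

```text
monitor_interval = "300"
```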
## OneDrive Client for Linux High Level Activity Flows
The diagrams below show the high level process flow and decision making when running the application
### Main functional activity flows
![Main Activity](./puml/main_activity_flows.png)
### Processing a potentially new local item
![applyPotentiallyNewLocalItem](./puml/applyPotentiallyNewLocalItem.png)
### Processing a potentially changed local item
![applyPotentiallyChangedItem](./puml/applyPotentiallyChangedItem.png)
### Download a file from Microsoft OneDrive
![downloadFile](./puml/downloadFile.png)
### Upload a modified file to Microsoft OneDrive
![uploadModifiedFile](./puml/uploadModifiedFile.png)
### Upload a new local file to Microsoft OneDrive
![uploadFile](./puml/uploadFile.png)
### Determining if an 'item' is synchronised between Microsoft OneDrive and the local file system
![Item Sync Determination](./puml/is_item_in_sync.png)
### Determining if an 'item' is excluded due to 'Client Side Filtering' rules
By default, the OneDrive Client for Linux will sync all files and folders between Microsoft OneDrive and the local filesystem.
Client Side Filtering in the context of this client refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this:
* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process.
* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local.
* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage.
* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync.
This exclusion process can be illustrated by the following activity diagram. A 'true' return value means that the path being evaluated needs to be excluded:
![Client Side Filtering Determination](./puml/client_side_filtering_rules.png)
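A minimal 'config' sketch combining these Client Side Filtering options; the `skip_file` value is the shipped default, the other values are illustrative:

```text
skip_dir = "Temp|Downloads"
skip_dotfiles = "true"
skip_file = "~*|.~*|*.tmp"
skip_symlinks = "true"
```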
## File conflict handling - default operational modes
When using the default operational modes (`--sync` or `--monitor`) the client application conforms to how the Microsoft Windows OneDrive client resolves file conflicts.
Additionally, when using `--resync` this conflict resolution can differ slightly, as, when using `--resync` you are *deleting* the known application state, thus, the application has zero reference as to what was previously in sync with the local file system.
Due to this factor, when using `--resync` the online source is always going to be considered accurate and the source-of-truth, regardless of the local file state, file timestamp or file hash.
### Default Operational Modes - Conflict Handling
#### Scenario
1. Create a local file
2. Perform a sync with Microsoft OneDrive using `onedrive --sync`
3. Modify file online
4. Modify file locally with different data|contents
5. Perform a sync with Microsoft OneDrive using `onedrive --sync`
![conflict_handling_default](./puml/conflict_handling_default.png)
#### Evidence of Conflict Handling
```
...
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
Finished processing /delta JSON response from the OneDrive API
Processing 1 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Number of items to download from OneDrive: 1
The local file to replace (./1.txt) has been modified locally since the last download. Renaming it to avoid potential local data loss.
The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt
Downloading file ./1.txt ... done
Performing a database consistency and integrity check on locally stored data
Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing ~/OneDrive
The directory has not changed
Processing α
...
The file has not changed
Processing เอกสาร
The directory has not changed
Processing 1.txt
The file has not changed
Scanning the local file system '~/OneDrive' for new data to upload
...
New items to upload to OneDrive: 1
Total New Data to Upload: 52 Bytes
Uploading new file ./1-onedrive-client-dev.txt ... done.
Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process
Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
Finished processing /delta JSON response from the OneDrive API
Processing 1 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Sync with Microsoft OneDrive is complete
Waiting for all internal threads to complete before exiting application
```
### Default Operational Modes - Conflict Handling with --resync
#### Scenario
1. Create a local file
2. Perform a sync with Microsoft OneDrive using `onedrive --sync`
3. Modify file online
4. Modify file locally with different data|contents
5. Perform a sync with Microsoft OneDrive using `onedrive --sync --resync`
![conflict_handling_default_resync](./puml/conflict_handling_default_resync.png)
#### Evidence of Conflict Handling
```
...
Deleting the saved application sync status ...
Using IPv4 and IPv6 (if configured) for all network operations
Checking Application Version ...
...
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 14
Finished processing /delta JSON response from the OneDrive API
Processing 13 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Local file time discrepancy detected: ./1.txt
This local file has a different modified time 2024-Feb-19 19:32:55Z (UTC) when compared to remote modified time 2024-Feb-19 19:32:36Z (UTC)
The local file has a different hash when compared to remote file hash
Local item does not exist in local database - replacing with file from OneDrive - failed download?
The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt
Number of items to download from OneDrive: 1
Downloading file ./1.txt ... done
Performing a database consistency and integrity check on locally stored data
Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing ~/OneDrive
The directory has not changed
Processing α
...
Processing เอกสาร
The directory has not changed
Processing 1.txt
The file has not changed
Scanning the local file system '~/OneDrive' for new data to upload
...
New items to upload to OneDrive: 1
Total New Data to Upload: 52 Bytes
Uploading new file ./1-onedrive-client-dev.txt ... done.
Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process
Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
Finished processing /delta JSON response from the OneDrive API
Processing 1 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Sync with Microsoft OneDrive is complete
Waiting for all internal threads to complete before exiting application
```
## File conflict handling - local-first operational mode
When using `--local-first` as your operational parameter, the client treats your local filesystem data as the 'source-of-truth' for what should be stored online.
However, Microsoft OneDrive itself has *zero* acknowledgement of this concept; conflict handling therefore needs to align with how Microsoft OneDrive clients on other platforms operate, that is, rename the offending local file.
Additionally, when using `--resync` you are *deleting* the known application state, so the application has zero reference as to what was previously in sync with the local file system.
Due to this factor, when using `--resync` the online source is always considered accurate and the source-of-truth, regardless of local file state, file timestamp or file hash, or the use of `--local-first`.
### Local First Operational Modes - Conflict Handling
#### Scenario
1. Create a local file
2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
3. Modify file locally with different data|contents
4. Modify file online with different data|contents
5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
![conflict_handling_local-first_default](./puml/conflict_handling_local-first_default.png)
#### Evidence of Conflict Handling
```
Reading configuration file: /home/alex/.config/onedrive/config
...
Using IPv4 and IPv6 (if configured) for all network operations
Checking Application Version ...
...
Sync Engine Initialised with new Onedrive API instance
All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive
Performing a database consistency and integrity check on locally stored data
Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing ~/OneDrive
The directory has not changed
Processing α
The directory has not changed
...
The file has not changed
Processing เอกสาร
The directory has not changed
Processing 1.txt
Local file time discrepancy detected: 1.txt
The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive
Changed local items to upload to OneDrive: 1
The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: 1.txt -> 1-onedrive-client-dev.txt
Uploading new file 1-onedrive-client-dev.txt ... done.
Scanning the local file system '~/OneDrive' for new data to upload
...
Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 3
Finished processing /delta JSON response from the OneDrive API
Processing 2 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Number of items to download from OneDrive: 1
Downloading file ./1.txt ... done
Sync with Microsoft OneDrive is complete
Waiting for all internal threads to complete before exiting application
```
### Local First Operational Modes - Conflict Handling with --resync
#### Scenario
1. Create a local file
2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
3. Modify file locally with different data|contents
4. Modify file online with different data|contents
5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first --resync`
![conflict_handling_local-first_resync](./puml/conflict_handling_local-first_resync.png)
#### Evidence of Conflict Handling
```
...
The usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist.
This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss.
If in doubt, backup your local data before using --resync
Are you sure you wish to proceed with --resync? [Y/N] y
Deleting the saved application sync status ...
Using IPv4 and IPv6 (if configured) for all network operations
...
Sync Engine Initialised with new Onedrive API instance
All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive
Performing a database consistency and integrity check on locally stored data
Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing ~/OneDrive
The directory has not changed
Scanning the local file system '~/OneDrive' for new data to upload
Skipping item - excluded by sync_list config: ./random_25k_files
OneDrive Client requested to create this directory online: ./α
The requested directory to create was found on OneDrive - skipping creating the directory: ./α
...
New items to upload to OneDrive: 9
Total New Data to Upload: 49 KB
...
The file we are attemtping to upload as a new file already exists on Microsoft OneDrive: ./1.txt
Skipping uploading this item as a new file, will upload as a modified file (online file already exists): ./1.txt
The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt
Uploading new file ./1-onedrive-client-dev.txt ... done.
Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 15
Finished processing /delta JSON response from the OneDrive API
Processing 14 applicable changes and items received from Microsoft OneDrive
Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Number of items to download from OneDrive: 1
Downloading file ./1.txt ... done
Sync with Microsoft OneDrive is complete
Waiting for all internal threads to complete before exiting application
```
## Client Functional Component Architecture Relationships
The diagram below shows the main functional relationships between application code components, and how these relate to each relevant code module within this application:
![Functional Code Components](./puml/code_functional_component_relationships.png)
## Database Schema
The diagram below shows the database schema that is used within the application:
![Database Schema](./puml/database_schema.png)
175
docs/contributing.md Normal file
View file

@ -0,0 +1,175 @@
# OneDrive Client for Linux: Coding Style Guidelines
## Introduction
This document outlines the coding style guidelines for code contributions to the OneDrive Client for Linux.
These guidelines are intended to ensure the codebase remains clean, well-organised, and accessible to all contributors, new and experienced alike.
## Code Layout
> [!NOTE]
> When developing any code contribution, please utilise either Microsoft Visual Studio Code or Notepad++.
### Indentation
Most of the codebase utilises tabs for indentation, displayed at 4 spaces per tab. Please keep to this convention.
### Line Length
Try to keep lines to a reasonable length, but do not constrain yourself to short limits such as 80 characters. This means that when the code is displayed in an editor, lines render correctly at screen resolutions of 1920x1080 and above.
If you do wish to use shorter line lengths (80 characters, for example), please do not follow this sort of example (a preferred form is shown after it):
```code
...
void functionName(
string somevar,
bool someOtherVar,
const(char)[] anotherVar=null
){
....
```
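By contrast, the preferred layout keeps the signature on one line. A minimal sketch of this (the signature is hypothetical, and `const(char)[]` is assumed as the intended parameter type):
```code
// Preferred: keep the full signature on one line, even if it is long
void functionName(string somevar, bool someOtherVar, const(char)[] anotherVar = null) {
	// Function body
	....
}
```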
### Coding Style | Braces
Please use 1TBS (One True Brace Style), which is a variation of the K&R (Kernighan & Ritchie) style. This approach is intended to improve readability and maintain consistency throughout the code.
When using this coding style, braces are used to enclose the body even when the `if`, `else`, `for`, or function definition contains only one statement.
```code
// What this if statement is doing
if (condition) {
// The condition was true
.....
} else {
// The condition was false
.....
}
// Loop 10 times to do something
for (int i = 0; i < 10; i++) {
// Loop body
}
// This function is to do this
void functionExample() {
// Function body
}
```
## Naming Conventions
### Variables and Functions
Please use `camelCase` for variable and function names.
### Classes and Interfaces
Please use `PascalCase` for classes, interfaces, and structs.
### Constants
Use uppercase with underscores between words.
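As a brief illustrative sketch of these conventions (all names here are hypothetical):
```code
// Constant: uppercase with underscores between words
immutable int MAX_UPLOAD_RETRIES = 5;

// Class: PascalCase
class TransferQueue {
	// Variable: camelCase
	bool retryEnabled;

	// Function: camelCase
	void processNextItem() {
		// Function body
	}
}
```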
## Documentation
### Language and Spelling
To maintain consistency across the project's documentation, comments, and code, all written text must adhere to British English spelling conventions, not American English. This requirement applies to all aspects of the codebase, including variable names, comments, and documentation.
For example, use "specialise" instead of "specialize", "colour" instead of "color", and "organise" instead of "organize". This standard ensures that the project maintains a cohesive and consistent linguistic style.
### Code Comments
Please comment code at all levels. Use `//` for all line comments. Detail why a statement is needed, or what is expected to happen, so future readers or contributors can understand the intent of the code with clarity.
If fixing a 'bug', please add a link to the GitHub issue being addressed as a comment, for example:
```code
...
// Before discarding change - does this ID still exist on OneDrive - as in IS this
// potentially a --single-directory sync and the user 'moved' the file out of the 'sync-dir' to another OneDrive folder
// This is a corner edge case - https://github.com/skilion/onedrive/issues/341
// What is the original local path for this ID in the database? Does it match 'syncFolderChildPath'
if (itemdb.idInLocalDatabase(driveId, item["id"].str)){
// item is in the database
string originalLocalPath = computeItemPath(driveId, item["id"].str);
...
```
All code should be clearly commented.
### Application Logging Output
If making changes to any application logging output, please first discuss this either via direct communication or email.
For reference, below are the available application logging output functions and examples:
```code
// most used
addLogEntry("Basic 'info' message", ["info"]); .... or just use addLogEntry("Basic 'info' message");
addLogEntry("Basic 'verbose' message", ["verbose"]);
addLogEntry("Basic 'debug' message", ["debug"]);
// GUI notify only
addLogEntry("Basic 'notify' ONLY message and displayed in GUI if notifications are enabled", ["notify"]);
// info and notify
addLogEntry("Basic 'info and notify' message and displayed in GUI if notifications are enabled", ["info", "notify"]);
// log file only
addLogEntry("Information sent to the log file only, and only if logging to a file is enabled", ["logFileOnly"]);
// Console only (session based upload|download)
addLogEntry("Basic 'Console only with new line' message", ["consoleOnly"]);
// Console only with no new line
addLogEntry("Basic 'Console only with no new line' message", ["consoleOnlyNoNewLine"]);
```
### Documentation Updates
If the code changes any of the functionality that is documented, it is expected that any PR submission will also include updating the respective section of user documentation and/or man page as part of the code submission.
## Development Testing
Whilst there are more modern DMD and LDC compilers available, ensuring client build compatibility with older platforms is a key requirement.
The issue stems from Debian and Ubuntu LTS versions, such as Ubuntu 20.04: its [ldc package](https://packages.ubuntu.com/focal/ldc) is only v1.20.1, thus this is the minimum version that all compilation needs to be tested against.
The reason LDC v1.20.1 must be used is that this is the version used to compile the packages presented at the [OpenSuSE Build Service](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive), which is where most Debian and Ubuntu users will install the client from.
It is assumed here that you know how to download and install the correct LDC compiler for your platform.
## Submitting a PR
When submitting a PR, please provide testing evidence in the PR submission of what has been fixed, in the following format:
### Without PR
```
Application output that is doing whatever | or illustration of issue | illustration of bug
```
### With PR
```
Application output that is doing whatever | or illustration of issue being fixed | illustration of bug being fixed
```
Please also include validation of compilation using the minimum LDC package version.
To assist with validating against the minimum LDC compiler version, a script such as the one below may help:
```bash
#!/bin/bash
PR=<Your_PR_Number>
rm -rf ./onedrive-pr${PR}
git clone https://github.com/abraunegg/onedrive.git onedrive-pr${PR}
cd onedrive-pr${PR}
git fetch origin pull/${PR}/head:pr${PR}
git checkout pr${PR}
# MIN LDC Version to compile
# MIN Version for ARM / Compiling with LDC
source ~/dlang/ldc-1.20.1/activate
# Compile code with specific LDC version
./configure --enable-debug --enable-notifications; make clean; make;
deactivate
./onedrive --version
```
## References
* D Language Official Style Guide: https://dlang.org/dstyle.html
* British English spelling conventions: https://www.collinsdictionary.com/
View file
@ -23,7 +23,8 @@ The 'edge' Docker Container will align closer to all documentation and features,
Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in.
**Note:** The below instructions for docker have been tested and validated when logging into the system as an unprivileged user (non 'root' user).
> [!NOTE]
> The below instructions for docker have been tested and validated when logging into the system as an unprivileged user (non 'root' user).
## High Level Configuration Steps
1. Install 'docker' as per your distribution platform's instructions if not already installed.
@ -37,7 +38,10 @@ Additionally there are specific version release tags for each release. Refer to
## Configuration Steps
### 1. Install 'docker' on your platform
Install 'docker' as per your distribution platform's instructions if not already installed.
If not already installed, install 'docker' as per your distribution platform's instructions, or as per the instructions on https://docs.docker.com/engine/install/
> [!CAUTION]
> If you are using Ubuntu, do not install Docker from your distribution platform's repositories as these contain obsolete and outdated versions. You *must* install Docker from Docker provided packages.
### 2. Configure 'docker' to allow non-privileged users to run Docker commands
Read https://docs.docker.com/engine/install/linux-postinstall/ to configure the 'docker' user group with your user account to allow your non 'root' user to run 'docker' commands.
@ -131,17 +135,19 @@ This will create a docker volume labeled `onedrive_data` and will map to a path
* The owner of this specified folder must have permissions for its parent directory
* Docker will attempt to change the permissions of the volume to the user the container is configured to run as
**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message:
```bash
ROOT level privileges prohibited!
```
> [!IMPORTANT]
> Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message:
> ```bash
> ROOT level privileges prohibited!
> ```
### 6. First run of Docker container under docker and performing authorisation
The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running docker in interactive mode.
Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the docker container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the docker volume mapping to occur.
> [!IMPORTANT]
> The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the docker container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the docker volume mapping to occur.
It is also a requirement that the container be run using a non-root UID and GID; you must insert a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment to use the correct values.
```bash
@ -228,7 +234,7 @@ docker volume inspect onedrive_conf
Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first.
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration)
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)
### Syncing multiple accounts
There are many ways to do this; the easiest is probably the following:
@ -270,10 +276,14 @@ docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/
| <B>ONEDRIVE_NOREMOTEDELETE</B> | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_LOGOUT</B> | Controls "--logout" switch. Default is 0 | 1 |
| <B>ONEDRIVE_REAUTH</B> | Controls "--reauth" switch. Default is 0 | 1 |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | Please read [CLI Option: --auth-files](./application-config-options.md#cli-option---auth-files) |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | Please read [CLI Option: --auth-response](./application-config-options.md#cli-option---auth-response) |
| <B>ONEDRIVE_DISPLAY_CONFIG</B> | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_SINGLE_DIRECTORY</B> | Controls "--single-directory" option. Default = "" | "mydir" |
| <B>ONEDRIVE_DRYRUN</B> | Controls "--dry-run" option. Default is 0 | 1 |
| <B>ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION</B> | Controls "--disable-download-validation" option. Default is 0 | 1 |
| <B>ONEDRIVE_DISABLE_UPLOAD_VALIDATION</B> | Controls "--disable-upload-validation" option. Default is 0 | 1 |
| <B>ONEDRIVE_SYNC_SHARED_FILES</B> | Controls "--sync-shared-files" option. Default is 0 | 1 |
### Environment Variables Usage Examples
**Verbose Output:**
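As a minimal sketch (the volume names, container paths and image tag assume the examples earlier in this document):
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
export ONEDRIVE_UID=$(id -u)
export ONEDRIVE_GID=$(id -g)
# Run the container with verbose output enabled
docker run -it --restart unless-stopped --name onedrive \
    --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
    -v onedrive_conf:/onedrive/conf \
    -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" \
    -e ONEDRIVE_VERBOSE=1 \
    driveone/onedrive:edge
```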
@ -334,7 +344,8 @@ If you are running a Raspberry Pi, you will need to edit your system configurati
* Modify the file `/etc/dphys-swapfile` and edit the `CONF_SWAPSIZE`, for example: `CONF_SWAPSIZE=2048`.
A reboot of your Raspberry Pi is required to make this change effective.
> [!IMPORTANT]
> A reboot of your Raspberry Pi is required to make this change effective.
### Building and running a custom Docker image
You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive):

(Binary image files not shown: ten PNG diagrams added in this commit, referenced by the documentation above; sizes range from 31 KiB to 226 KiB.)
View file
@ -5,51 +5,41 @@ This project has been packaged for the following Linux distributions as per belo
Only the current release version or greater is supported. Earlier versions are not supported and should not be installed or used.
#### Important Note:
Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution.
> [!CAUTION]
> Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution.
| Distribution | Package Name & Package Link | &nbsp;&nbsp;PKG_Version&nbsp;&nbsp; | &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 | Extra Details |
|---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |<a href="https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge"><img src="https://repology.org/badge/version-for-repo/alpine_edge/onedrive.svg?header=" alt="Alpine Linux Edge package" width="46" height="20"></a>|❌|✔|❌|✔ | |
| Arch Linux<br><br>Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |<a href="https://aur.archlinux.org/packages/onedrive-abraunegg"><img src="https://repology.org/badge/version-for-repo/aur/onedrive-abraunegg.svg?header=" alt="AUR package" width="46" height="20"></a>|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)<br><br>**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'<br><br>**Note:** System must have at least 1GB of memory & 1GB swap space
| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |<a href="https://packages.debian.org/bullseye/source/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_11/onedrive.svg?header=" alt="Debian 11 package" width="46" height="20"></a>|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |<a href="https://packages.debian.org/bookworm/source/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_12/onedrive.svg?header=" alt="Debian 12 package" width="46" height="20"></a>|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Arch Linux<br><br>Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |<a href="https://aur.archlinux.org/packages/onedrive-abraunegg"><img src="https://repology.org/badge/version-for-repo/aur/onedrive-abraunegg.svg?header=" alt="AUR package" width="46" height="20"></a>|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)<br><br>**Note:** You must first install 'base-devel' as this is a pre-requisite for using the AUR<br><br>**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'<br><br>**Note:** System must have at least 1GB of memory & 1GB swap space
| CentOS 8 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |<a href="https://koji.fedoraproject.org/koji/packageinfo?packageID=26044"><img src="https://repology.org/badge/version-for-repo/epel_8/onedrive.svg?header=" alt="CentOS 8 package" width="46" height="20"></a>|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first |
| CentOS 9 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |<a href="https://koji.fedoraproject.org/koji/packageinfo?packageID=26044"><img src="https://repology.org/badge/version-for-repo/epel_9/onedrive.svg?header=" alt="CentOS 9 package" width="46" height="20"></a>|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first |
| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |<a href="https://packages.debian.org/bullseye/source/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_11/onedrive.svg?header=" alt="Debian 11 package" width="46" height="20"></a>|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |<a href="https://packages.debian.org/bookworm/source/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_12/onedrive.svg?header=" alt="Debian 12 package" width="46" height="20"></a>|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |<a href="https://packages.debian.org/sid/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_unstable/onedrive.svg?header=" alt="Debian Sid package" width="46" height="20"></a>|✔|✔|✔|✔| |
| Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |<a href="https://koji.fedoraproject.org/koji/packageinfo?packageID=26044"><img src="https://repology.org/badge/version-for-repo/fedora_rawhide/onedrive.svg?header=" alt="Fedora Rawhide package" width="46" height="20"></a>|✔|✔|✔|✔| |
| Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| |
| Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | <a href="https://formulae.brew.sh/formula/onedrive"><img src="https://repology.org/badge/version-for-repo/homebrew/onedrive.svg?header=" alt="Homebrew package" width="46" height="20"></a> |❌|✔|❌|❌| |
| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |<a href="https://community.linuxmint.com/software/view/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories<br><br>It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |<a href="https://community.linuxmint.com/software/view/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories<br><br>It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |<a href="https://community.linuxmint.com/software/view/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |<a href="https://community.linuxmint.com/software/view/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| NixOS | [onedrive](https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive)|<a href="https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive"><img src="https://repology.org/badge/version-for-repo/nix_unstable/onedrive.svg?header=" alt="nixpkgs unstable package" width="46" height="20"></a>|❌|✔|❌|❌| Use package `onedrive` either by adding it to `configuration.nix` or by using the command `nix-env -iA <channel name>.onedrive`. This does not install a service. To install a service, use unstable channel (will stabilize in 20.09) and add `services.onedrive.enable=true` in `configuration.nix`. You can also add a custom package using the `services.onedrive.package` option (recommended since package lags upstream). Enabling the service installs a default package too (based on the channel). You can also add multiple onedrive accounts trivially, see [documentation](https://github.com/NixOS/nixpkgs/pull/77734#issuecomment-575874225). |
| OpenSuSE | [onedrive](https://software.opensuse.org/package/onedrive) |<a href="https://software.opensuse.org/package/onedrive"><img src="https://repology.org/badge/version-for-repo/opensuse_network_tumbleweed/onedrive.svg?header=" alt="openSUSE Tumbleweed package" width="46" height="20"></a>|✔|✔|❌|❌| |
| OpenSuSE Build Service | [onedrive](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive) | No API Available |✔|✔|✔|✔| Package Build Service for Debian and Ubuntu |
| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |<a href="https://archive.raspbian.org/raspbian/pool/main/o/onedrive/"><img src="https://repology.org/badge/version-for-repo/raspbian_stable/onedrive.svg?header=" alt="Raspbian Stable package" width="46" height="20"></a> |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories<br><br>It is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |<a href="https://archive.raspbian.org/raspbian/pool/main/o/onedrive/"><img src="https://repology.org/badge/version-for-repo/raspbian_stable/onedrive.svg?header=" alt="Raspbian Stable package" width="46" height="20"></a> |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Slackware | [onedrive](https://slackbuilds.org/result/?search=onedrive&sv=) |<a href="https://slackbuilds.org/result/?search=onedrive&sv="><img src="https://repology.org/badge/version-for-repo/slackbuilds/onedrive.svg?header=" alt="SlackBuilds package" width="46" height="20"></a>|✔|✔|❌|❌| |
| Solus | [onedrive](https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R) |<a href="https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R"><img src="https://repology.org/badge/version-for-repo/solus/onedrive.svg?header=" alt="Solus package" width="46" height="20"></a>|✔|✔|❌|❌| |
| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |<a href="https://packages.ubuntu.com/focal/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |<a href="https://packages.ubuntu.com/jammy/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |<a href="https://packages.ubuntu.com/lunar/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_23_04/onedrive.svg?header=" alt="Ubuntu 23.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |<a href="https://packages.ubuntu.com/focal/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |<a href="https://packages.ubuntu.com/jammy/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |<a href="https://packages.ubuntu.com/lunar/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_23_04/onedrive.svg?header=" alt="Ubuntu 23.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |<a href="https://voidlinux.org/packages/?arch=x86_64&q=onedrive"><img src="https://repology.org/badge/version-for-repo/void_x86_64/onedrive.svg?header=" alt="Void Linux x86_64 package" width="46" height="20"></a>|✔|✔|❌|❌| |
#### Important information for all Ubuntu and Ubuntu based distribution users:
This information is specifically for the following platforms and distributions:
* Ubuntu
* Lubuntu
* Linux Mint
* POP OS
* Peppermint OS
Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Universe packages are out-of-date, are not supported, and should not be used. If you wish to use a package, it is highly recommended that you utilise the [OpenSuSE Build Service](ubuntu-package-install.md) to install packages for these platforms. If the OpenSuSE Build Service does not cater for your version, your only option is to build from source.
If you wish to change this situation so that you can just use the Universe packages via 'apt install onedrive', consider becoming the Ubuntu package maintainer and contributing back to your community.
## Building from Source - High Level Requirements
* Build environment must have at least 1GB of memory & 1GB swap space
* Install the required distribution package dependencies
* [libcurl](http://curl.haxx.se/libcurl/)
* [SQLite 3](https://www.sqlite.org/) >= 3.7.15
* [Digital Mars D Compiler (DMD)](http://dlang.org/download.html) or [LDC the LLVM-based D Compiler](https://github.com/ldc-developers/ldc)
* For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space.
* Install the required distribution package dependencies, covering the required development tools and the development libraries for curl and sqlite
* Install the [Digital Mars D Compiler (DMD)](http://dlang.org/download.html) or [LDC the LLVM-based D Compiler](https://github.com/ldc-developers/ldc)
**Note:** DMD version >= 2.088.0 or LDC version >= 1.18.0 is required to compile this application
> [!IMPORTANT]
> To compile this application successfully, it is essential to use either DMD version **2.088.0** or higher, or LDC version **1.18.0** or higher. Ensuring compatibility and optimal performance necessitates the use of these specific versions or their more recent updates.
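To confirm which compiler version your build environment provides, you can check it directly (assuming `dmd` or `ldc2` is on your PATH):
```text
dmd --version
# or, when using LDC
ldc2 --version
```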
### Example for installing DMD Compiler
```text
@ -101,7 +91,7 @@ For notifications the following is also necessary:
sudo yum install libnotify-devel
```
### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x
### Dependencies: Fedora > Version 18 / CentOS 8.x / CentOS 9.x / RHEL 8.x / RHEL 9.x
```text
sudo dnf groupinstall 'Development Tools'
sudo dnf install libcurl-devel sqlite-devel
@ -122,14 +112,16 @@ sudo pacman -S libnotify
```
### Dependencies: Raspbian (ARMHF) and Ubuntu 22.x / Debian 11 / Debian 12 / Raspbian (ARM64)
**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
> [!CAUTION]
> The minimum LDC compiler version required to compile this application is 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
These instructions were validated using:
* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-armhf-lite) using Raspberry Pi 3B (revision 1.2)
* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-arm64-lite) using Raspberry Pi 3B (revision 1.2)
* `Linux ubuntu 5.15.0-1005-raspi #5-Ubuntu SMP PREEMPT Mon Apr 4 12:21:48 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux` (ubuntu-22.04-preinstalled-server-arm64+raspi) using Raspberry Pi 3B (revision 1.2)
**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`.
> [!IMPORTANT]
> For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space. To verify your system's swap space availability, you can use the `swapon` command. Ensuring these requirements are met is vital for the application's compilation process.
```text
sudo apt install build-essential
@ -200,7 +192,8 @@ Run `deactivate` later on to restore your environment.
```
Without performing this step, the compilation process will fail.
**Note:** Depending on your DMD version, substitute `2.088.0` above with your DMD version that is installed.
> [!NOTE]
> Depending on your DMD version, substitute `2.088.0` above with your DMD version that is installed.
```text
git clone https://github.com/abraunegg/onedrive.git
@ -211,8 +204,10 @@ sudo make install
```
### Build options
Notifications can be enabled using the `configure` switch `--enable-notifications`.
#### GUI Notification Support
GUI notification support can be enabled using the `configure` switch `--enable-notifications`.
#### systemd service directory customisation support
Systemd service files are installed in the appropriate directories on the system,
as provided by `pkg-config systemd` settings. If overriding the
deduced paths is necessary, the two options `--with-systemdsystemunitdir` (for
@ -220,9 +215,11 @@ the Systemd system unit location), and `--with-systemduserunitdir` (for the
Systemd user unit location) can be specified. Passing in `no` to one of these
options disables service file installation.
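For example, a hypothetical invocation overriding both locations (the paths shown are illustrative only):
```text
./configure --with-systemdsystemunitdir=/usr/lib/systemd/system \
            --with-systemduserunitdir=/usr/lib/systemd/user
```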
#### Additional Compiler Debug
By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug
information, useful (for example) to get `perf`-issued figures.
#### Shell Completion Support
By passing `--enable-completions` to the `configure` call, shell completion functions are
installed for `bash`, `zsh` and `fish`. The installation directories are determined
as far as possible automatically, but can be overridden by passing
@ -231,9 +228,12 @@ as far as possible automatically, but can be overridden by passing
### Building using a different compiler (for example [LDC](https://wiki.dlang.org/LDC))
#### ARMHF Architecture (Raspbian) and ARM64 Architecture (Ubuntu 22.x / Debian 11 / Raspbian)
**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
> [!CAUTION]
> The minimum LDC compiler version required to compile this application is 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
> [!IMPORTANT]
> For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space. To verify your system's swap space availability, you can use the `swapon` command. Ensuring these requirements are met is vital for the application's compilation process.
**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`.
```text
git clone https://github.com/abraunegg/onedrive.git
cd onedrive
@ -247,11 +247,13 @@ If you have installed the client from a distribution package, the client will be
If you have built the client from source, to upgrade your client, it is recommended that you first uninstall your existing 'onedrive' binary (see below), then re-install the client by re-cloning, re-compiling and re-installing the client again to install the new version.
**Note:** Following the uninstall process will remove all client components including *all* systemd files, including any custom files created for specific access such as SharePoint Libraries.
> [!NOTE]
> Following the uninstall process will remove all client components including *all* systemd files, including any custom files created for specific access such as SharePoint Libraries.
You can optionally choose not to perform this uninstallation step, and simply re-install the client by re-cloning, re-compiling and re-installing it again; however, the risk here is that you end up with two onedrive client binaries on your system, and your system search path preferences will determine which binary is used.
**Important:** Before performing any upgrade, it is highly recommended for you to stop any running systemd service if applicable to ensure that these services are restarted using the updated client version.
> [!CAUTION]
> Before performing any upgrade, it is highly recommended for you to stop any running systemd service if applicable to ensure that these services are restarted using the updated client version.
Post re-install, to confirm that you have the new version of the client installed, use `onedrive --version` to determine the client version that is now installed.
@ -269,7 +271,8 @@ If you are not upgrading your client, to remove your application state and confi
```
rm -rf ~/.config/onedrive
```
**Note:** If you are using the `--confdir` option, substitute `~/.config/onedrive` for the correct directory storing your client configuration.
> [!IMPORTANT]
> If you are using the `--confdir` option, substitute `~/.config/onedrive` for the correct directory storing your client configuration.
If you want to just delete the application key, but keep the items database:
```text
View file
@ -1,54 +1,60 @@
# Known Issues
The below are known issues with this client:
# List of Identified Known Issues
The following points detail known issues associated with this client:
## Moving files into different folders should not cause data to delete and be re-uploaded
**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876)
## Renaming or Moving Files in Standalone Mode causes online deletion and re-upload to occur
**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876), [#2579](https://github.com/abraunegg/onedrive/issues/2579)
**Description:**
**Summary:**
When running the client in standalone mode (`--synchronize`) moving folders that are successfully synced around between subsequent standalone syncs causes a deletion & re-upload of data to occur.
Renaming or moving files and/or folders while using the standalone sync option `--sync` results in unnecessary online data deletion and subsequent re-upload.
**Explanation:**
**Detailed Description:**
Technically, the client is 'working' correctly, as, when moving files, you are 'deleting' them from the current location, but copying them to the 'new location'. As the client is running in standalone sync mode, there is no way to track what OS operations have been done when the client is not running - thus, this is why the 'delete and upload' is occurring.
In standalone mode (`--sync`), the renaming or moving folders locally that have already been synchronized leads to the data being deleted online and then re-uploaded in the next synchronization process.
**Workaround:**
**Technical Explanation:**
If the tracking of moving data to new local directories is required, it is better to run the client in service mode (`--monitor`) rather than in standalone mode, as the 'move' of files can then be handled at the point when it occurs, so that the data is moved to the new location on OneDrive without the need to be deleted and re-uploaded.
This behaviour is expected from the client under these specific conditions. Renaming or moving files is interpreted as deleting them from their original location and creating them in a new location. In standalone sync mode, the client lacks the capability to track file system changes (including renames and moves) that occur when it is not running. This limitation is the root cause of the observed 'deletion and re-upload' cycle.
**Recommended Workaround:**
For effective tracking of file and folder renames or moves to new local directories, it is recommended to run the client in service mode (`--monitor`) rather than in standalone mode. This approach allows the client to immediately process these changes, enabling the data to be updated (renamed or moved) in the new location on OneDrive without undergoing deletion and re-upload.
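For example, a minimal sketch of running the client in service mode directly from a terminal (typically this would instead be run via the supplied systemd services):
```
onedrive --monitor
```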
## Application 'stops' running without any visible reason
**Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526)
**Description:**
**Summary:**
When running the client and performing an upload or download operation, the application just stops working without any reason or explanation. If `echo $?` is used after the application has exited without visible reason, an error level of 141 may be provided.
Users experience sudden shutdowns of the client application during file transfers with Microsoft's Europe Data Centres, likely due to unstable internet connectivity or HTTPS inspection issues. This problem, often signalled by an error code of 141, is related to the application's reliance on Curl and OpenSSL. Resolution steps include system updates, seeking support from OS vendors, ISPs and the OpenSSL/Curl teams, and providing detailed debug logs to Microsoft for analysis.
Additionally, this issue has mainly been seen when the client is operating against Microsoft's Europe Data Centre's.
**Detailed Description:**
**Explanation:**
The application unexpectedly stops functioning during upload or download operations when using the client. This issue occurs without any apparent reason. Running `echo $?` after the unexpected exit may return an error code of 141.
The client is heavily dependant on Curl and OpenSSL to perform the activities with the Microsoft OneDrive service. Generally, when this issue occurs, the following is found in the HTTPS Debug Log:
This problem predominantly arises when the client interacts with Microsoft's Europe Data Centers.
**Technical Explanation:**
The client heavily relies on Curl and OpenSSL for operations with the Microsoft OneDrive service. A common observation during this error is an entry in the HTTPS Debug Log stating:
```
OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 104
```
The only way to determine that this is the cause of the application ceasing to work is to generate a HTTPS debug log using the following additional flags:
To confirm this as the root cause, a detailed HTTPS debug log can be generated with these commands:
```
--verbose --verbose --debug-https
```
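For example, a sketch combining these flags with a standalone sync run (you may wish to redirect the output to a file for later analysis):
```
onedrive --sync --verbose --verbose --debug-https > debug_output.log 2>&1
```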
This is indicative of the following:
* Some sort of flaky Internet connection somewhere between you and the OneDrive service
* Some sort of 'broken' HTTPS transparent inspection service inspecting your traffic somewhere between you and the OneDrive service
This error typically suggests one of the following issues:
* An unstable internet connection between the user and the OneDrive service.
* An issue with HTTPS transparent inspection services that monitor the traffic en route to the OneDrive service.
**How to resolve:**
**Recommended Resolution:**
The best avenue of action here are:
* Ensure your OS is as up-to-date as possible
* Get support from your OS vendor
* Speak to your ISP or Help Desk for assistance
* Open a ticket with OpenSSL and/or Curl teams to better handle this sort of connection failure
* Generate a HTTPS Debug Log for this application and open a new support request with Microsoft and provide the debug log file for their analysis.
Recommended steps to address this issue include:
* Updating your operating system to the latest version.
* Seeking assistance from your OS vendor.
* Contacting your Internet Service Provider (ISP) or your IT Help Desk.
* Reporting the issue to the OpenSSL and/or Curl teams for improved handling of such connection failures.
* Creating a HTTPS Debug Log during the issue and submitting a support request to Microsoft with the log for their analysis.
If you wish to diagnose this issue further, refer to the following:
https://maulwuff.de/research/ssl-debugging.html
For more in-depth SSL troubleshooting, please read: https://maulwuff.de/research/ssl-debugging.html
View file
@ -1,6 +1,6 @@
# How to configure access to specific Microsoft Azure deployments
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
> [!CAUTION]
> Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Process Overview
In some cases it is a requirement to utilise specific Microsoft Azure cloud deployments to conform with data and security requirements that require data to reside within the geographic borders of that country.
@ -37,7 +37,8 @@ In order to successfully use these specific Microsoft Azure deployments, the fol
![application_registration_done](./images/application_registration_done.jpg)
**Note:** The Application (client) ID UUID, as displayed after client registration, is what is required as the 'application_id' for Step 4 below.
> [!NOTE]
> The Application (client) ID UUID, as displayed after client registration, is what is required as the 'application_id' for Step 4 below.
## Step 2: Configure application authentication scopes
Configure the API permissions as per the following:
View file
@ -23,7 +23,8 @@ The 'edge' Docker Container will align closer to all documentation and features,
Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in.
> [!NOTE]
> The below instructions for podman have been tested and validated when logging into the system as an unprivileged user (non 'root' user).
## High Level Configuration Steps
1. Install 'podman' as per your distribution platform's instructions if not already installed.
@ -103,17 +104,19 @@ This will create a podman volume labeled `onedrive_data` and will map to a path
* The owner of this specified folder must not be root
* Podman will attempt to change the permissions of the volume to the user the container is configured to run as
> [!IMPORTANT]
> Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Podman container will fail to start with the following error message:
> ```bash
> ROOT level privileges prohibited!
> ```
### 5. First run of Docker container under podman and performing authorisation
The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running podman in interactive mode.
Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
> [!IMPORTANT]
> The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the podman container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the podman volume mapping to occur.
It is also a requirement that the container be run using a non-root UID and GID; you must insert a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment to use the correct values.
```bash
@ -127,7 +130,8 @@ podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
driveone/onedrive:edge
```
> [!IMPORTANT]
> In some scenarios, 'podman' sets the configuration and data directories to a different UID & GID as specified. To resolve this situation, you must run 'podman' with the `--userns=keep-id` flag to ensure 'podman' uses the UID and GID as specified. The updated script example when using `--userns=keep-id` is below:
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
@ -142,7 +146,8 @@ podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
```
> [!IMPORTANT]
> If you plan to use the 'podman' built-in auto-updating of container images described in 'Systemd Service & Auto Updating' below, you must pass an additional argument to set a label during the first run. The updated script example to support auto-updating of container images is below:
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
@ -195,7 +200,6 @@ podman start onedrive
podman rm -f onedrive
```
## Advanced Usage
### Systemd Service & Auto Updating
@ -255,7 +259,7 @@ podman volume inspect onedrive_conf
```
Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first.
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)
### Syncing multiple accounts
There are many ways to do this, the easiest is probably to do the following:
@ -290,10 +294,14 @@ podman run -it --name onedrive_work --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
| <B>ONEDRIVE_NOREMOTEDELETE</B> | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_LOGOUT</B> | Controls "--logout" switch. Default is 0 | 1 |
| <B>ONEDRIVE_REAUTH</B> | Controls "--reauth" switch. Default is 0 | 1 |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | Please read [CLI Option: --auth-files](./application-config-options.md#cli-option---auth-files) |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | Please read [CLI Option: --auth-response](./application-config-options.md#cli-option---auth-response) |
| <B>ONEDRIVE_DISPLAY_CONFIG</B> | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_SINGLE_DIRECTORY</B> | Controls "--single-directory" option. Default = "" | "mydir" |
| <B>ONEDRIVE_DRYRUN</B> | Controls "--dry-run" option. Default is 0 | 1 |
| <B>ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION</B> | Controls "--disable-download-validation" option. Default is 0 | 1 |
| <B>ONEDRIVE_DISABLE_UPLOAD_VALIDATION</B> | Controls "--disable-upload-validation" option. Default is 0 | 1 |
| <B>ONEDRIVE_SYNC_SHARED_FILES</B> | Controls "--sync-shared-files" option. Default is 0 | 1 |
### Environment Variables Usage Examples
**Verbose Output:**
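As an illustrative sketch (assuming the volume layout from the earlier run scripts, and that `ONEDRIVE_VERBOSE` maps to the client's `--verbose` switch), enabling verbose output looks like this:
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
export ONEDRIVE_UID=$(id -u)
export ONEDRIVE_GID=$(id -g)
podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
    -v onedrive_conf:/onedrive/conf \
    -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" \
    -e ONEDRIVE_VERBOSE=1 \
    driveone/onedrive:edge
```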

Binary file not shown.


View file

@ -0,0 +1,48 @@
@startuml
start
partition "applyPotentiallyChangedItem" {
:Check if existing item path differs from changed item path;
if (itemWasMoved) then (yes)
:Log moving item;
if (destination exists) then (yes)
if (item in database) then (yes)
:Check if item is synced;
if (item is synced) then (yes)
:Log destination is in sync;
else (no)
:Log destination occupied with a different item;
:Backup conflicting file;
note right: Local data loss prevention
endif
else (no)
:Log destination occupied by an un-synced file;
:Backup conflicting file;
note right: Local data loss prevention
endif
endif
:Try to rename path;
if (dry run) then (yes)
:Track as faked id item;
:Track path not renamed;
else (no)
:Rename item;
:Flag item as moved;
if (item is a file) then (yes)
:Set local timestamp to match online;
endif
endif
else (no)
endif
:Check if eTag changed;
if (eTag changed) then (yes)
if (item is a file and not moved) then (yes)
:Decide if to download based on hash;
else (no)
:Update database;
endif
else (no)
:Update database if timestamp differs or in specific operational mode;
endif
}
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,90 @@
@startuml
start
partition "applyPotentiallyNewLocalItem" {
:Check if path exists;
if (Path exists?) then (yes)
:Log "Path on local disk already exists";
if (Is symbolic link?) then (yes)
:Log "Path is a symbolic link";
if (Can read symbolic link?) then (no)
:Log "Reading symbolic link failed";
:Log "Skipping item - invalid symbolic link";
stop
endif
endif
:Determine if item is in-sync;
note right: Execute 'isItemSynced()' function
if (Is item in-sync?) then (yes)
:Log "Item in-sync";
:Update/Insert item in DB;
stop
else (no)
:Log "Item not in-sync";
:Compare local & remote modification times;
if (Local time > Remote time?) then (yes)
if (ID in database?) then (yes)
:Log "Local file is newer & ID in DB";
:Fetch latest DB record;
if (Times equal?) then (yes)
:Log "Times match, keeping local file";
else (no)
:Log "Local time newer, keeping file";
note right: Online item has an 'older' modified timestamp than the local file\nIt is assumed that the local file is the file to keep
endif
stop
else (no)
:Log "Local item not in DB";
if (Bypass data protection?) then (yes)
:Log "WARNING: Data protection disabled";
else (no)
:Safe backup local file;
note right: Local data loss prevention
endif
stop
endif
else (no)
if (Remote time > Local time?) then (yes)
:Log "Remote item is newer";
if (Bypass data protection?) then (yes)
:Log "WARNING: Data protection disabled";
else (no)
:Safe backup local file;
note right: Local data loss prevention
endif
endif
if (Times equal?) then (yes)
note left: Specific handling if timestamp was\nadjusted by isItemSynced()
:Log "Times equal, no action required";
:Update/Insert item in DB;
stop
endif
endif
endif
else (no)
:Handle as potentially new item;
switch (Item type)
case (File)
:Add to download queue;
case (Directory)
:Log "Creating local directory";
if (Dry run?) then (no)
:Create directory & set attributes;
:Save item to DB;
else
:Log "Dry run, faking directory creation";
:Save item to dry-run DB;
endif
case (Unknown)
:Log "Unknown type, no action";
endswitch
endif
}
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,71 @@
@startuml
start
:Start;
partition "checkPathAgainstClientSideFiltering" {
:Get localFilePath;
if (Does path exist?) then (no)
:Return false;
stop
endif
if (Check .nosync?) then (yes)
:Check for .nosync file;
if (.nosync found) then (yes)
:Log and return true;
stop
endif
endif
if (Skip dotfiles?) then (yes)
:Check if dotfile;
if (Is dotfile) then (yes)
:Log and return true;
stop
endif
endif
if (Skip symlinks?) then (yes)
:Check if symlink;
if (Is symlink) then (yes)
if (Config says skip?) then (yes)
:Log and return true;
stop
elseif (Dangling symlink?) then (yes)
:Check if relative link works;
if (Relative link ok) then (no)
:Log and return true;
stop
endif
endif
endif
endif
if (Skip dir or file?) then (yes)
:Check dir or file exclusion;
if (Excluded by config?) then (yes)
:Log and return true;
stop
endif
endif
if (Use sync_list?) then (yes)
:Check sync_list exclusions;
if (Excluded by sync_list?) then (yes)
:Log and return true;
stop
endif
endif
if (Check file size?) then (yes)
:Check for file size limit;
if (File size exceeds limit?) then (yes)
:Log and return true;
stop
endif
endif
:Return false;
}
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,41 @@
@startuml
participant "OneDrive Client\nfor Linux" as od
participant "libcurl" as lc
participant "Client Web Browser" as browser
participant "Microsoft Authentication Service\n(OAuth 2.0 Endpoint)" as oauth
participant "GitHub API" as github
participant "Microsoft Graph API" as graph
activate od
activate lc
od->od: Generate Authentication\nService URL
activate browser
od->browser: Navigate to Authentication\nService URL via Client Web Browser
browser->oauth: Request access token
activate oauth
oauth-->browser: Access token
browser-->od: Access token
deactivate oauth
deactivate browser
od->lc: Check application version\nvia api.github.com
activate github
lc->github: Query release status
github-->lc: Release information
deactivate github
lc-->od: Process release information
deactivate lc
loop API Communication
od->lc: Construct HTTPS request (with token)
activate lc
lc->graph: API Request
activate graph
graph-->lc: API Response
deactivate graph
lc-->od: Process response
deactivate lc
end
@enduml

Binary file not shown.


View file

@ -0,0 +1,78 @@
@startuml
!define DATABASE_ENTITY(x) entity x
component main {
}
component config {
}
component log {
}
component curlEngine {
}
component util {
}
component onedrive {
}
component syncEngine {
}
component itemdb {
}
component clientSideFiltering {
}
component monitor {
}
component sqlite {
}
component qxor {
}
DATABASE_ENTITY("Database")
main --> config
main --> log
main --> curlEngine
main --> util
main --> onedrive
main --> syncEngine
main --> itemdb
main --> clientSideFiltering
main --> monitor
config --> log
config --> util
clientSideFiltering --> config
clientSideFiltering --> util
clientSideFiltering --> log
syncEngine --> config
syncEngine --> log
syncEngine --> util
syncEngine --> onedrive
syncEngine --> itemdb
syncEngine --> clientSideFiltering
util --> log
util --> config
util --> qxor
util --> curlEngine
sqlite --> log
sqlite -> "Database" : uses
onedrive --> config
onedrive --> log
onedrive --> util
onedrive --> curlEngine
monitor --> config
monitor --> util
monitor --> log
monitor --> clientSideFiltering
monitor .> syncEngine : inotify event
itemdb --> sqlite
itemdb --> util
itemdb --> log
curlEngine --> log
@enduml

Binary file not shown.


View file

@ -0,0 +1,31 @@
@startuml
start
note left: Operational Mode 'onedrive --sync'
:Query OneDrive /delta API for online changes;
note left: This data is considered the 'source-of-truth'\nLocal data should be a 'replica' of this data
:Process received JSON data;
if (JSON item is a file) then (yes)
if (Does the file exist locally) then (yes)
:Compute relevant file hashes;
:Check DB for file record;
if (DB record found) then (yes)
:Compare file hash with DB hash;
if (Is the hash different) then (yes)
:Log that the local file was modified locally since last sync;
:Renaming local file to avoid potential local data loss;
note left: Local data loss prevention\nRenamed file will be uploaded as new file
else (no)
endif
else (no)
endif
else (no)
endif
:Download file (as per online JSON item) as required;
else (no)
:Other handling for directories | root objects | deleted items;
endif
:Performing a database consistency and\nintegrity check on locally stored data;
:Scan file system for any new data to upload;
note left: The file that was renamed will be uploaded here
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,35 @@
@startuml
start
note left: Operational Mode 'onedrive -sync --resync'
:Query OneDrive /delta API for online changes;
note left: This data is considered the 'source-of-truth'\nLocal data should be a 'replica' of this data
:Process received JSON data;
if (JSON item is a file) then (yes)
if (Does the file exist locally) then (yes)
note left: In a --resync scenario there are no DB\nrecords that can be used or referenced\nuntil the JSON item is processed and\nadded to the local database cache
if (Can the file be read) then (yes)
:Compute UTC timestamp data from local file and JSON data;
if (timestamps are equal) then (yes)
else (no)
:Log that a local file time discrepancy was detected;
if (Do file hashes match) then (yes)
:Correct the offending timestamp as hashes match;
else (no)
:Local file is technically different;
:Renaming local file to avoid potential local data loss;
note left: Local data loss prevention\nRenamed file will be uploaded as new file
endif
endif
else (no)
endif
else (no)
endif
:Download file (as per online JSON item) as required;
else (no)
:Other handling for directories | root objects | deleted items;
endif
:Performing a database consistency and\nintegrity check on locally stored data;
:Scan file system for any new data to upload;
note left: The file that was renamed will be uploaded here
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,62 @@
@startuml
start
note left: Operational Mode 'onedrive -sync -local-first'
:Performing a database consistency and\nintegrity check on locally stored data;
note left: This data is considered the 'source-of-truth'\nOnline data should be a 'replica' of this data
repeat
:Process each DB record;
if (Is the DB record in sync with the local file) then (yes)
else (no)
:Log reason for discrepancy;
:Flag item to be processed as a modified local file;
endif
repeat while (more DB records to process?) is (yes)
:Process modified items to upload;
if (Does local file DB record match current latest online JSON data) then (yes)
else (no)
:Log that the local file was modified locally since last sync;
:Renaming local file to avoid potential local data loss;
note left: Local data loss prevention\nRenamed file will be uploaded as new file
:Upload renamed local file as new file;
endif
:Upload modified file;
:Scan file system for any new data to upload;
:Query OneDrive /delta API for online changes;
:Process received JSON data;
if (JSON item is a file) then (yes)
if (Does the file exist locally) then (yes)
:Compute relevant file hashes;
:Check DB for file record;
if (DB record found) then (yes)
:Compare file hash with DB hash;
if (Is the hash different) then (yes)
:Log that the local file was modified locally since last sync;
:Renaming local file to avoid potential local data loss;
note left: Local data loss prevention\nRenamed file will be uploaded as new file
else (no)
endif
else (no)
endif
else (no)
endif
:Download file (as per online JSON item) as required;
else (no)
:Other handling for directories | root objects | deleted items;
endif
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,70 @@
@startuml
start
note left: Operational Mode 'onedrive -sync -local-first -resync'
:Query OneDrive API and create new database with default root account objects;
:Performing a database consistency and\nintegrity check on locally stored data;
note left: This data is considered the 'source-of-truth'\nOnline data should be a 'replica' of this data\nHowever the database has only 1 record currently
:Scan file system for any new data to upload;
note left: This is where, in this specific mode, all local\ncontent is assessed for applicability for\nupload to Microsoft OneDrive
repeat
:For each new local item;
if (Is the item a directory) then (yes)
if (Is Directory found online) then (yes)
:Save directory details from online in local database;
else (no)
:Create directory online;
:Save details in local database;
endif
else (no)
:Flag file as a potentially new item to upload;
endif
repeat while (more new local items?) is (yes)
:Process potential new items to upload;
repeat
:For each potential file to upload;
if (Is File found online) then (yes)
if (Does the online JSON data match local file) then (yes)
:Save details in local database;
else (no)
:Log that the local file was modified locally since last sync;
:Renaming local file to avoid potential local data loss;
note left: Local data loss prevention\nRenamed file will be uploaded as new file
:Upload renamed local file as new file;
endif
else (no)
:Upload new file;
endif
repeat while (more files to upload?) is (yes)
:Query OneDrive /delta API for online changes;
:Process received JSON data;
if (JSON item is a file) then (yes)
if (Does the file exist locally) then (yes)
:Compute relevant file hashes;
:Check DB for file record;
if (DB record found) then (yes)
:Compare file hash with DB hash;
if (Is the hash different) then (yes)
:Log that the local file was modified locally since last sync;
:Renaming local file to avoid potential local data loss;
note left: Local data loss prevention\nRenamed file will be uploaded as new file
else (no)
endif
else (no)
endif
else (no)
endif
:Download file (as per online JSON item) as required;
else (no)
:Other handling for directories | root objects | deleted items;
endif
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,39 @@
@startuml
class item {
driveId: TEXT
id: TEXT
name: TEXT
remoteName: TEXT
type: TEXT
eTag: TEXT
cTag: TEXT
mtime: TEXT
parentId: TEXT
quickXorHash: TEXT
sha256Hash: TEXT
remoteDriveId: TEXT
remoteParentId: TEXT
remoteId: TEXT
remoteType: TEXT
deltaLink: TEXT
syncStatus: TEXT
size: TEXT
}
note right of item::driveId
PRIMARY KEY (driveId, id)
FOREIGN KEY (driveId, parentId) REFERENCES item
end note
item --|> item : parentId
note "Indexes" as N1
note left of N1
name_idx ON item (name)
remote_idx ON item (remoteDriveId, remoteId)
item_children_idx ON item (driveId, parentId)
selectByPath_idx ON item (name, driveId, parentId)
end note
@enduml

BIN
docs/puml/downloadFile.png Normal file

Binary file not shown.


View file

@ -0,0 +1,63 @@
@startuml
start
partition "Download File" {
:Get item specifics from JSON;
:Calculate item's path;
if (Is item malware?) then (yes)
:Log malware detected;
stop
else (no)
:Check for file size in JSON;
if (File size missing) then (yes)
:Log error;
stop
endif
:Configure hashes for comparison;
if (Hashes missing) then (yes)
:Log error;
stop
endif
if (Does file exist locally?) then (yes)
:Check DB for item;
if (DB hash match?) then (no)
:Log modification; Perform safe backup;
note left: Local data loss prevention
endif
endif
:Check local disk space;
if (Insufficient space?) then (yes)
:Log insufficient space;
stop
else (no)
if (Dry run?) then (yes)
:Fake download process;
else (no)
:Attempt to download file;
if (Download exception occurs?) then (yes)
:Handle exceptions; Retry download or log error;
endif
if (File downloaded successfully?) then (yes)
:Validate download;
if (Validation passes?) then (yes)
:Log success; Update DB;
else (no)
:Log validation failure; Remove file;
endif
else (no)
:Log download failed;
endif
endif
endif
endif
}
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,55 @@
@startuml
participant "OneDrive Client\nfor Linux" as Client
participant "Microsoft OneDrive\nAPI" as API
== Access Token Validation ==
Client -> Client: Validate access and\nexisting access token\nRefresh if needed
== Query Microsoft OneDrive /delta API ==
Client -> API: Query /delta API
API -> Client: JSON responses
== Process JSON Responses ==
loop for each JSON response
Client -> Client: Determine if JSON is 'root'\nor 'deleted' item\nElse, push into temporary array for further processing
alt if 'root' or 'deleted'
Client -> Client: Process 'root' or 'deleted' items
else
Client -> Client: Evaluate against 'Client Side Filtering' rules
alt if unwanted
Client -> Client: Discard JSON
else
Client -> Client: Process JSON (create dir/download file)
Client -> Client: Save in local database cache
end
end
end
== Local Cache Database Processing for Data Integrity ==
Client -> Client: Process local cache database\nto check local data integrity and for differences
alt if difference found
Client -> API: Upload file/folder change including deletion
API -> Client: Response with item metadata
Client -> Client: Save response to local cache database
end
== Local Filesystem Scanning ==
Client -> Client: Scan local filesystem\nfor new files/folders
loop for each new item
Client -> Client: Check item against 'Client Side Filtering' rules
alt if item passes filtering
Client -> API: Upload new file/folder change including deletion
API -> Client: Response with item metadata
Client -> Client: Save response in local\ncache database
else
Client -> Client: Discard item\n(Does not meet filtering criteria)
end
end
== Final Data True-Up ==
Client -> API: Query /delta link for true-up
API -> Client: Process further online JSON changes if required
@enduml

Binary file not shown.


View file

@ -0,0 +1,79 @@
@startuml
start
partition "Is item in sync" {
:Check if path exists;
if (path does not exist) then (no)
:Return false;
stop
else (yes)
endif
:Identify item type;
switch (item type)
case (file)
:Check if path is a file;
if (path is not a file) then (no)
:Log "item is a directory but should be a file";
:Return false;
stop
else (yes)
endif
:Attempt to read local file;
if (file is unreadable) then (no)
:Log "file cannot be read";
:Return false;
stop
else (yes)
endif
:Get local and input item modified time;
note right: The 'input item' could be a database reference object, or the online JSON object\nas provided by the Microsoft OneDrive API
:Reduce time resolution to seconds;
if (localModifiedTime == itemModifiedTime) then (yes)
:Return true;
stop
else (no)
:Log time discrepancy;
endif
:Check if file hash is the same;
if (hash is the same) then (yes)
:Log "hash match, correcting timestamp";
if (local time > item time) then (yes)
if (download only mode) then (no)
:Correct timestamp online if not dryRun;
else (yes)
:Correct local timestamp if not dryRun;
endif
else (no)
:Correct local timestamp if not dryRun;
endif
:Return false;
note right: Specifically return false here as we performed a time correction\nApplication logic will then perform additional handling based on this very specific response.
stop
else (no)
:Log "different hash";
:Return false;
stop
endif
case (dir or remote)
:Check if path is a directory;
if (path is a directory) then (yes)
:Return true;
stop
else (no)
:Log "item is a file but should be a directory";
:Return false;
stop
endif
case (unknown)
:Return true but do not sync;
stop
endswitch
}
@enduml

Binary file not shown.


View file

@ -0,0 +1,81 @@
@startuml
start
:Validate access and existing access token\nRefresh if needed;
:Query /delta API;
note right: Query Microsoft OneDrive /delta API
:Receive JSON responses;
:Process JSON Responses;
partition "Process /delta JSON Responses" {
while (for each JSON response) is (yes)
:Determine if JSON is 'root'\nor 'deleted' item;
if ('root' or 'deleted') then (yes)
:Process 'root' or 'deleted' items;
if ('root' object) then (yes)
:Process 'root' JSON;
else (no)
if (Is 'deleted' object in sync) then (yes)
:Process deletion of local item;
else (no)
:Rename local file as it is not in sync;
note right: Deletion event conflict handling\nLocal data loss prevention
endif
endif
else (no)
:Evaluate against 'Client Side Filtering' rules;
if (unwanted) then (yes)
:Discard JSON;
else (no)
:Process JSON (create dir/download file);
if (Is the 'JSON' item in the local cache) then (yes)
:Process JSON as a potentially changed local item;
note left: Run 'applyPotentiallyChangedItem' function
else (no)
:Process JSON as potentially new local item;
note right: Run 'applyPotentiallyNewLocalItem' function
endif
:Process objects in download queue;
:Download File;
note left: Download file from Microsoft OneDrive (Multi Threaded Download)
:Save in local database cache;
endif
endif
endwhile
}
partition "Perform data integrity check based on local cache database" {
:Process local cache database\nto check local data integrity and for differences;
if (difference found) then (yes)
:Upload file/folder change including deletion;
note right: Upload local change to Microsoft OneDrive
:Receive response with item metadata;
:Save response to local cache database;
else (no)
endif
}
partition "Local Filesystem Scanning" {
:Scan local filesystem\nfor new files/folders;
while (for each new item) is (yes)
:Check item against 'Client Side Filtering' rules;
if (item passes filtering) then (yes)
:Upload new file/folder change including deletion;
note right: Upload to Microsoft OneDrive
:Receive response with item metadata;
:Save response in local\ncache database;
else (no)
:Discard item\n(Does not meet filtering criteria);
endif
endwhile
}
partition "Final True-Up" {
:Query /delta link for true-up;
note right: Final Data True-Up
:Process further online JSON changes if required;
}
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,47 @@
@startuml
participant "OneDrive Client for Linux"
participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer
participant "User's Device (for MFA)" as UserDevice
participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI
participant "Microsoft OneDrive"
"OneDrive Client for Linux" -> AuthServer: Request Authorization\n(Client Credentials, Scopes)
AuthServer -> "OneDrive Client for Linux": Provide Authorization Code
"OneDrive Client for Linux" -> AuthServer: Request Access Token\n(Authorization Code, Client Credentials)
alt MFA Enabled
AuthServer -> UserDevice: Trigger MFA Challenge
UserDevice -> AuthServer: Provide MFA Verification
AuthServer -> "OneDrive Client for Linux": Return Access Token\n(and Refresh Token)
"OneDrive Client for Linux" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token)
loop Token Expiry Check
"OneDrive Client for Linux" -> AuthServer: Is Access Token Expired?
alt Token Expired
"OneDrive Client for Linux" -> AuthServer: Request New Access Token\n(Refresh Token)
AuthServer -> "OneDrive Client for Linux": Return New Access Token
else Token Valid
GraphAPI -> "Microsoft OneDrive": Retrieve Data
"Microsoft OneDrive" -> GraphAPI: Return Data
GraphAPI -> "OneDrive Client for Linux": Provide Data
end
end
else MFA Not Required
AuthServer -> "OneDrive Client for Linux": Return Access Token\n(and Refresh Token)
"OneDrive Client for Linux" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token)
loop Token Expiry Check
"OneDrive Client for Linux" -> AuthServer: Is Access Token Expired?
alt Token Expired
"OneDrive Client for Linux" -> AuthServer: Request New Access Token\n(Refresh Token)
AuthServer -> "OneDrive Client for Linux": Return New Access Token
else Token Valid
GraphAPI -> "Microsoft OneDrive": Retrieve Data
"Microsoft OneDrive" -> GraphAPI: Return Data
GraphAPI -> "OneDrive Client for Linux": Provide Data
end
end
else MFA Failed or Other Auth Error
AuthServer -> "OneDrive Client for Linux": Error Message (e.g., Invalid Credentials, MFA Failure)
end
@enduml

View file

@ -0,0 +1,59 @@
@startuml
participant "Microsoft Windows OneDrive Client"
participant "Azure Active Directory\n(Active Directory)\n(login.microsoftonline.com)" as AzureAD
participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer
participant "User's Device (for MFA)" as UserDevice
participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI
participant "Microsoft OneDrive"
"Microsoft Windows OneDrive Client" -> AzureAD: Request Authorization\n(Client Credentials, Scopes)
AzureAD -> AuthServer: Validate Credentials\n(Forward Request)
AuthServer -> AzureAD: Provide Authorization Code
AzureAD -> "Microsoft Windows OneDrive Client": Provide Authorization Code (via AzureAD)
"Microsoft Windows OneDrive Client" -> AzureAD: Request Access Token\n(Authorization Code, Client Credentials)
AzureAD -> AuthServer: Request Access Token\n(Authorization Code, Forwarded Credentials)
AuthServer -> AzureAD: Return Access Token\n(and Refresh Token)
AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (via AzureAD)
alt MFA Enabled
AzureAD -> UserDevice: Trigger MFA Challenge
UserDevice -> AzureAD: Provide MFA Verification
AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (Post MFA)
"Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token)
loop Token Expiry Check
"Microsoft Windows OneDrive Client" -> AzureAD: Is Access Token Expired?
AzureAD -> AuthServer: Validate Token Expiry
alt Token Expired
"Microsoft Windows OneDrive Client" -> AzureAD: Request New Access Token\n(Refresh Token)
AzureAD -> AuthServer: Request New Access Token\n(Refresh Token)
AuthServer -> AzureAD: Return New Access Token
AzureAD -> "Microsoft Windows OneDrive Client": Return New Access Token (via AzureAD)
else Token Valid
GraphAPI -> "Microsoft OneDrive": Retrieve Data
"Microsoft OneDrive" -> GraphAPI: Return Data
GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data
end
end
else MFA Not Required
AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (Direct)
"Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token)
loop Token Expiry Check
"Microsoft Windows OneDrive Client" -> AzureAD: Is Access Token Expired?
AzureAD -> AuthServer: Validate Token Expiry
alt Token Expired
"Microsoft Windows OneDrive Client" -> AzureAD: Request New Access Token\n(Refresh Token)
AzureAD -> AuthServer: Request New Access Token\n(Refresh Token)
AuthServer -> AzureAD: Return New Access Token
AzureAD -> "Microsoft Windows OneDrive Client": Return New Access Token (via AzureAD)
else Token Valid
GraphAPI -> "Microsoft OneDrive": Retrieve Data
"Microsoft OneDrive" -> GraphAPI: Return Data
GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data
end
end
else MFA Failed or Other Auth Error
AzureAD -> "Microsoft Windows OneDrive Client": Error Message (e.g., Invalid Credentials, MFA Failure)
end
@enduml

Binary file not shown.


View file

@ -0,0 +1,47 @@
@startuml
participant "Microsoft Windows OneDrive Client"
participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer
participant "User's Device (for MFA)" as UserDevice
participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI
participant "Microsoft OneDrive"
"Microsoft Windows OneDrive Client" -> AuthServer: Request Authorization\n(Client Credentials, Scopes)
AuthServer -> "Microsoft Windows OneDrive Client": Provide Authorization Code
"Microsoft Windows OneDrive Client" -> AuthServer: Request Access Token\n(Authorization Code, Client Credentials)
alt MFA Enabled
AuthServer -> UserDevice: Trigger MFA Challenge
UserDevice -> AuthServer: Provide MFA Verification
AuthServer -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token)
"Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token)
loop Token Expiry Check
"Microsoft Windows OneDrive Client" -> AuthServer: Is Access Token Expired?
alt Token Expired
"Microsoft Windows OneDrive Client" -> AuthServer: Request New Access Token\n(Refresh Token)
AuthServer -> "Microsoft Windows OneDrive Client": Return New Access Token
else Token Valid
GraphAPI -> "Microsoft OneDrive": Retrieve Data
"Microsoft OneDrive" -> GraphAPI: Return Data
GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data
end
end
else MFA Not Required
AuthServer -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token)
"Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token)
loop Token Expiry Check
"Microsoft Windows OneDrive Client" -> AuthServer: Is Access Token Expired?
alt Token Expired
"Microsoft Windows OneDrive Client" -> AuthServer: Request New Access Token\n(Refresh Token)
AuthServer -> "Microsoft Windows OneDrive Client": Return New Access Token
else Token Valid
GraphAPI -> "Microsoft OneDrive": Retrieve Data
"Microsoft OneDrive" -> GraphAPI: Return Data
GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data
end
end
else MFA Failed or Other Auth Error
AuthServer -> "Microsoft Windows OneDrive Client": Error Message (e.g., Invalid Credentials, MFA Failure)
end
@enduml

BIN
docs/puml/uploadFile.png Normal file

Binary file not shown.


62
docs/puml/uploadFile.puml Normal file
View file

@ -0,0 +1,62 @@
@startuml
start
partition "Upload File" {
:Log "fileToUpload";
:Check database for parent path;
if (parent path found?) then (yes)
if (drive ID not empty?) then (yes)
:Proceed;
else (no)
:Use defaultDriveId;
endif
else (no)
stop
endif
:Check if file exists locally;
if (file exists?) then (yes)
:Read local file;
if (can read file?) then (yes)
if (parent path in DB?) then (yes)
:Get file size;
if (file size <= max?) then (yes)
:Check available space on OneDrive;
if (space available?) then (yes)
:Check if file exists on OneDrive;
if (file exists online?) then (yes)
:Save online metadata only;
if (local file newer?) then (yes)
:Local file is newer;
:Upload file as changed local file;
else (no)
:Remote file is newer;
:Perform safe backup;
note right: Local data loss prevention
:Upload renamed file as new file;
endif
else (no)
:Attempt upload;
endif
else (no)
:Log "Insufficient space";
endif
else (no)
:Log "File too large";
endif
else (no)
:Log "Parent path issue";
endif
else (no)
:Log "Cannot read file";
endif
else (no)
:Log "File disappeared locally";
endif
:Upload success or failure;
if (upload failed?) then (yes)
:Log failure;
else (no)
:Update cache;
endif
}
stop
@enduml

Binary file not shown.


View file

@ -0,0 +1,56 @@
@startuml
start
partition "Upload Modified File" {
:Initialize API Instance;
:Check for Dry Run;
if (Is Dry Run?) then (yes)
:Create Fake Response;
else (no)
:Get Current Online Data;
if (Error Fetching Data) then (yes)
:Handle Errors;
if (Retryable Error?) then (yes)
:Retry Fetching Data;
detach
else (no)
:Log and Display Error;
endif
endif
if (filesize > 0 and valid latest online data) then (yes)
if (is online file newer) then (yes)
:Log that online is newer;
:Perform safe backup;
note left: Local data loss prevention
:Upload renamed local file as new file;
endif
endif
:Determine Upload Method;
if (Use Simple Upload?) then (yes)
:Perform Simple Upload;
if (Upload Error) then (yes)
:Handle Upload Errors and Retries;
if (Retryable Upload Error?) then (yes)
:Retry Upload;
detach
else (no)
:Log and Display Upload Error;
endif
endif
else (no)
:Create Upload Session;
:Perform Upload via Session;
if (Session Upload Error) then (yes)
:Handle Session Upload Errors and Retries;
if (Retryable Session Error?) then (yes)
:Retry Session Upload;
detach
else (no)
:Log and Display Session Error;
endif
endif
endif
endif
:Finalize;
}
stop
@enduml

View file

@ -1,21 +1,23 @@
# How to configure OneDrive SharePoint Shared Library sync
> [!CAUTION]
> Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
> [!CAUTION]
> Several users have reported files being overwritten causing data loss as a result of using this client with SharePoint Libraries when running as a systemd service.
>
> When this has been investigated, the following has been noted as potential root causes:
> * File indexing application such as Baloo File Indexer or Tracker3 constantly indexing your OneDrive data
> * The use of WPS Office and how it 'saves' files by deleting the existing item and replaces it with the saved data. Do not use WPS Office.
>
> Additionally there could be a yet unknown bug with the client, however all debugging and data provided previously shows that an 'external' process to the 'onedrive' application modifies the files triggering the undesirable upload to occur.
>
> **Possible Preventative Actions:**
> * Disable all File Indexing for your SharePoint Library data. It is out of scope to detail on how you should do this.
> * Disable using a systemd service for syncing your SharePoint Library data.
> * Do not use WPS Office to edit your documents. Use OpenOffice or LibreOffice as these do not exhibit the same 'delete to save' action that WPS Office has.
>
> Additionally, the client has been 100% re-written from v2.5.0 onwards, thus the mechanism for saving data to SharePoint has been critically overhauled to negate the impacts where SharePoint will *modify* your file post upload, breaking file integrity, as the file you have locally is not the file that is stored online. Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for relevant details.
## Process Overview
Syncing a OneDrive SharePoint library requires additional configuration for your 'onedrive' client:
@ -26,7 +28,8 @@ Syncing a OneDrive SharePoint library requires additional configuration for your
5. Test the configuration using '--dry-run'
6. Sync the SharePoint Library as required
> [!IMPORTANT]
> The `--get-sharepoint-drive-id` process below requires a fully configured 'onedrive' configuration so that the applicable Drive ID for the given SharePoint Shared Library can be determined. It is highly recommended that you do not use the application 'default' configuration directory for any SharePoint Site, and configure separate items for each site you wish to use.
## 1. Listing available OneDrive SharePoint Libraries
Login to the OneDrive web interface and determine which shared library you wish to configure the client for:
@ -35,7 +38,7 @@ Login to the OneDrive web interface and determine which shared library you wish
## 2. Query OneDrive API to obtain required configuration details
Run the following command using the 'onedrive' client to query the OneDrive API to obtain the required 'drive_id' of the SharePoint Library that you wish to sync:
```text
onedrive --get-sharepoint-drive-id '<your site name to search>'
```
This will return something similar to the following:
```text
@ -78,7 +81,8 @@ Create a new local folder to store the SharePoint Library data in:
mkdir ~/SharePoint_My_Library_Name
```
> [!TIP]
> Do not use spaces in the directory name; use '_' as a replacement.
## 4. Configure SharePoint Library config file with the required 'drive_id' & 'sync_dir' options
Download a copy of the default configuration file by downloading this file from GitHub and saving this file in the directory created above:
@ -97,7 +101,8 @@ drive_id = "insert the drive_id value from above here"
```
The OneDrive client will now be configured to sync this SharePoint shared library to your local system and the location you have configured.
> [!IMPORTANT]
> After changing `drive_id`, you must perform a full re-synchronization by adding `--resync` to your existing command line.
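For example, using the configuration directory created earlier in this guide:
```text
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --resync
```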
## 5. Validate and Test the configuration
Validate your new configuration using the `--display-config` option to validate you have configured the application correctly:
@ -110,7 +115,8 @@ Test your new configuration using the `--dry-run` option to validate the applica
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose --dry-run
```
> [!IMPORTANT]
> As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration.
## 6. Sync the SharePoint Library as required
Sync the SharePoint Library to your system with either `--synchronize` or `--monitor` operations:
@ -122,7 +128,8 @@ onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbos
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --monitor --verbose
```
> [!IMPORTANT]
> As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration.
## 7. Enable custom systemd service for SharePoint Library
Systemd can be used to automatically run this configuration in the background, however, a unique systemd service will need to be setup for this SharePoint Library instance
@ -163,7 +170,8 @@ Example:
ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/SharePoint_My_Library_Name"
```
> [!IMPORTANT]
> When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be manually expanded when editing your systemd file.
### Step 3: Enable the new systemd service
Once the file is correctly edited, you can enable the new systemd service using the following commands.

View file

@ -1,24 +1,30 @@
# Installation of 'onedrive' package on Debian and Ubuntu
This document outlines the steps for installing the 'onedrive' client on Debian, Ubuntu, and their derivatives using the OpenSuSE Build Service Packages.
> [!CAUTION]
> This information is specifically for the following platforms and distributions:
> * Debian
> * Deepin
> * Elementary OS
> * Kali Linux
> * Lubuntu
> * Linux Mint
> * Pop!_OS
> * Peppermint OS
> * Raspbian | Raspberry Pi OS
> * Ubuntu | Kubuntu | Xubuntu | Ubuntu Mate
> * Zorin OS
>
> Although packages for the 'onedrive' client are available through distribution repositories, it is strongly advised against installing them. These distribution-provided packages are outdated, unsupported, and contain bugs and issues that have already been resolved in newer versions. They should not be used.
## Determine which instructions to use
Ubuntu and its clones are based on various different releases; thus, you must use the correct instructions below, otherwise you may run into package dependency issues and will be unable to install the client.
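You can confirm which release you are running with `lsb_release`, for example:
```text
lsb_release -a
```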
### Step 1: Remove any configured PPA and associated 'onedrive' package and systemd service files
#### Step 1a: Remove PPA if configured
Many Internet 'help' pages provide inconsistent details on how to install the OneDrive Client for Linux. A number of these websites continue to point users to install the client via the yann1ck PPA repository however this PPA no longer exists and should not be used. If you have previously configured, or attempted to add this PPA, this needs to be removed.
To remove the PPA repository and the older client, perform the following actions:
```text
@ -26,10 +32,22 @@ sudo apt remove onedrive
sudo add-apt-repository --remove ppa:yann1ck/onedrive
```
#### Step 1b: Remove errant systemd service file installed by PPA or distribution package
Additionally, the distribution packages have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package, so that the client will automatically run once authenticated:
```
Created symlink /etc/systemd/user/default.target.wants/onedrive.service → /usr/lib/systemd/user/onedrive.service.
```
This systemd entry is erroneous and needs to be removed. If this erroneous systemd link is not removed, you increase your risk of encountering the following error message:
```
Opening the item database ...
ERROR: onedrive application is already running - check system process list for active application instances
- Use 'sudo ps aufxw | grep onedrive' to potentially determine acive running process
Waiting for all internal threads to complete before exiting application
```
To remove this symbolic link, run the following command:
```
sudo rm /etc/systemd/user/default.target.wants/onedrive.service
@ -141,6 +159,7 @@ If required, review the table below based on your 'lsb_release' information to p
| Debian 10 | You must build from source or upgrade your Operating System to Debian 12 |
| Debian 11 | Use [Debian 11](#distribution-debian-11) instructions below |
| Debian 12 | Use [Debian 12](#distribution-debian-12) instructions below |
| Debian Sid | Refer to https://packages.debian.org/sid/onedrive for assistance |
| Raspbian GNU/Linux 10 | You must build from source or upgrade your Operating System to Raspbian GNU/Linux 12 |
| Raspbian GNU/Linux 11 | Use [Debian 11](#distribution-debian-11) instructions below |
| Raspbian GNU/Linux 12 | Use [Debian 12](#distribution-debian-12) instructions below |
@ -153,6 +172,12 @@ If required, review the table below based on your 'lsb_release' information to p
| Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below |
| Ubuntu 23.10 / Mantic | Use [Ubuntu 23.10](#distribution-ubuntu-2310) instructions below |
> [!IMPORTANT]
> If your Linux distribution and release is not in the table above, you have 2 options:
>
> 1. Compile the application from source. Refer to install.md (Compilation & Installation) for assistance.
> 2. Raise a support case with your Linux Distribution to provide you with an applicable package you can use.
## Distribution Package Install Instructions
### Distribution: Debian 11

1027
docs/usage.md Normal file

File diff suppressed because it is too large.

View file

@ -1,303 +1,64 @@
.TH ONEDRIVE "1" "@PACKAGE_DATE@" "@PACKAGE_VERSION@" "User Commands"
.SH NAME
onedrive \- A client for the Microsoft OneDrive Cloud Service
.SH SYNOPSIS
.B onedrive
[\fI\,OPTION\/\fR] --sync
.br
.B onedrive
[\fI\,OPTION\/\fR] --monitor
.br
.B onedrive
[\fI\,OPTION\/\fR] --display-config
.br
.B onedrive
[\fI\,OPTION\/\fR] --display-sync-status
.br
.B onedrive
[\fI\,OPTION\/\fR] -h | --help
.br
.B onedrive
--version
.SH DESCRIPTION
A complete tool to interact with OneDrive on Linux.
.SH OPTIONS
Without any option given, no sync is done and the program exits.
.TP
\fB\-\-auth\-files\fP ARG
Perform authorization via two files passed in as \fBARG\fP in the format \fBauthUrl:responseUrl\fP.
The authorization URL is written to the \fBauthUrl\fP file, then \fBonedrive\fP waits for
the file \fBresponseUrl\fP to be present, and reads the response from that file.
.TP
\fB\-\-auth\-response\fP ARG
Perform authentication not via interactive dialog but via providing the response url directly.
.TP
\fB\-\-check\-for\-nomount\fP
Check for the presence of .nosync in the syncdir root. If found, do not perform sync.
.br
Configuration file key: \fBcheck_nomount\fP (default: \fBfalse\fP)
.TP
\fB\-\-check\-for\-nosync\fP
Check for the presence of .nosync in each directory. If found, skip directory from sync.
.br
Configuration file key: \fBcheck_nosync\fP (default: \fBfalse\fP)
.TP
\fB\-\-classify\-as\-big\-delete\fP
Number of children in a path that is locally removed which will be classified as a 'big data delete'
.br
Configuration file key: \fBclassify_as_big_delete\fP (default: \fB1000\fP)
.TP
\fB\-\-cleanup\-local\-files\fP
Cleanup additional local files when using \-\-download-only. This will remove local data.
.br
Configuration file key: \fBcleanup_local_files\fP (default: \fBfalse\fP)
.TP
\fB\-\-confdir\fP ARG
Set the directory used to store the configuration files
.TP
\fB\-\-create\-directory\fP ARG
Create a directory on OneDrive \- no sync will be performed.
.TP
\fB\-\-create\-share\-link\fP ARG
Create a shareable link for an existing file on OneDrive
.TP
\fB\-\-debug\-https\fP
Debug OneDrive HTTPS communication.
.br
Configuration file key: \fBdebug_https\fP (default: \fBfalse\fP)
.TP
\fB\-\-destination\-directory\fP ARG
Destination directory for rename or move operations on OneDrive \- no sync will be performed.
.TP
\fB\-\-disable\-download\-validation\fP
Disable download validation when downloading from OneDrive
.br
Configuration file key: \fBdisable_download_validation\fP (default: \fBfalse\fP)
.TP
\fB\-\-disable\-notifications\fP
Do not use desktop notifications in monitor mode
.br
Configuration file key: \fBdisable_notifications\fP (default: \fBfalse\fP)
.TP
\fB\-\-disable\-upload\-validation\fP
Disable upload validation when uploading to OneDrive
.br
Configuration file key: \fBdisable_upload_validation\fP (default: \fBfalse\fP)
.TP
\fB\-\-display\-config\fP
Display what options the client will use as currently configured \- no sync will be performed.
.TP
\fB\-\-display\-running\-config\fP
Display what options the client has been configured to use on application startup.
.TP
\fB\-\-display\-sync\-status\fP
Display the sync status of the client \- no sync will be performed.
.TP
\fB\-\-download\-only\fP
Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive.
.br
Configuration file key: \fBdownload_only\fP (default: \fBfalse\fP)
.TP
\fB\-\-dry\-run\fP
Perform a trial sync with no changes made. Can ONLY be used with --synchronize. Will be ignored for --monitor
.br
Configuration file key: \fBdry_run\fP (default: \fBfalse\fP)
.TP
\fB\-\-enable\-logging\fP
Enable client activity to a separate log file
.br
Configuration file key: \fBenable_logging\fP (default: \fBfalse\fP)
.TP
\fB\-\-force\fP
Force the deletion of data when a 'big delete' is detected
.TP
\fB\-\-force\-http\-11\fP
Force the use of HTTP 1.1 for all operations
.br
Configuration file key: \fBforce_http_11\fP (default: \fBfalse\fP)
.TP
\fB\-\-force\-sync\fP
Force a synchronization of a specific folder, only when using --synchronize --single-directory, and ignore
.br
all non-default skip_dir and skip_file rules
.TP
\fB\-\-get\-O365\-drive\-id\fP ARG
Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library
.TP
\fB\-\-get\-file\-link\fP ARG
Display the file link of a synced file
.TP
\fB\-\-list\-shared\-folders\fP
List OneDrive Business Shared Folders
.TP
\fB\-\-local\-first\fP
Synchronize from the local directory source first, before downloading changes from OneDrive.
.br
Configuration file key: \fBlocal_first\fP (default: \fBfalse\fP)
.TP
\fB\-\-logout\fP
Logout the current user
.TP
\fB\-\-log\-dir\fP ARG
Defines the directory where logging output is saved to; needs to end with a slash
.br
Configuration file key: \fBlog_dir\fP (default: \fB/var/log/onedrive/\fP)
.TP
\fB\-\-min\-notify\-changes\fP
the minimum number of pending incoming changes necessary to trigger
a desktop notification
.br
Configuration file key: \fBmin_notify_changes\fP (default: \fB5\fP)
.TP
\fB\-\-modified\-by\fP ARG
Display the last modified by details of a given path
.TP
\fB\-m \-\-monitor\fP
Keep monitoring for local and remote changes
.TP
\fB\-\-monitor\-interval\fP ARG
The number of seconds between each sync operation when idle
under monitor mode
.br
Configuration file key: \fBmonitor_interval\fP (default: \fB300\fP)
.TP
\fB\-\-monitor\-fullscan-frequency\fP ARG
Number of sync runs before performing a full local scan of the synced directory
.br
Configuration file key: \fBmonitor_fullscan_frequency\fP (default: \fB10\fP)
.TP
\fB\-\-monitor\-log\-frequency\fP ARG
Frequency of logging in monitor mode
.br
Configuration file key: \fBmonitor_log_frequency\fP (default: \fB5\fP)
.TP
\fB\-\-no\-remote\-delete\fP
Do not delete local file 'deletes' from OneDrive when using \fB\-\-upload\-only\fR
.br
Configuration file key: \fBno_remote_delete\fP (default: \fBfalse\fP)
.TP
\fB\-\-operation\-timeout\fP ARG
Set the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc.
.br
Configuration file key: \fBoperation_timeout\fP (default: \fB3600\fP)
.TP
\fB\-\-print\-token\fP
Print the access token, useful for debugging
.TP
\fB\-\-reauth\fP
Reauthenticate the client with OneDrive
.TP
\fB\-\-remove\-directory\fP ARG
Remove a directory on OneDrive \- no sync will be performed.
.TP
\fB\-\-remove\-source\-files\fP
Remove source file after successful transfer to OneDrive when using \-\-upload-only
.br
Configuration file key: \fBremove_source_files\fP (default: \fBfalse\fP)
.TP
\fB\-\-resync\fP
Forget the last saved state, perform a full sync
.TP
\fB\-\-resync\-auth\fP
Approve the use of a --resync action without requiring interactive confirmation
.TP
\fB\-\-single\-directory\fP ARG
Specify a single local directory within the OneDrive root to sync.
.TP
\fB\-\-skip\-dir\fP ARG
Skip any directories that match this pattern from syncing
.TP
\fB\-\-skip\-dir\-strict\-match\fP
When matching skip_dir directories, only match explicit matches
.br
Configuration file key: \fBskip_dir_strict_match\fP (default: \fBfalse\fP)
.TP
\fB\-\-skip\-dot\-files\fP
Skip dot files and folders from syncing
.br
Configuration file key: \fBskip_dotfiles\fP (default: \fBfalse\fP)
.TP
\fB\-\-skip\-file\fP
Skip any files that match this pattern from syncing
.br
Configuration file key: \fBskip_file\fP (default: \fB~*|.~*|*.tmp\fP)
.TP
\fB\-\-skip\-size\fP ARG
Skip new files larger than this size (in MB)
.TP
\fB\-\-skip\-symlinks\fP
Skip syncing of symlinks
.br
Configuration file key: \fBskip_symlinks\fP (default: \fBfalse\fP)
.TP
\fB\-\-source\-directory\fP ARG
Source directory to rename or move on OneDrive \- no sync will be performed.
.TP
\fB\-\-space\-reservation\fP ARG
The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation
.TP
\fB\-\-sync\-root\-files\fP
Sync all files in sync_dir root when using sync_list.
.TP
\fB\-\-sync\-shared\-folders\fP
Sync OneDrive Business Shared Folders
.br
Configuration file key: \fBsync_business_shared_folders\fP (default: \fBfalse\fP)
.TP
\fB\-\-syncdir\fP ARG
Set the local directory used to store the files that are synced
.br
Configuration file key: \fBsync_dir\fP (default: \fB~/OneDrive\fP)
.TP
\fB\-\-synchronize\fP
Perform a synchronization
.TP
\fB\-\-upload\-only\fP
Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive.
.br
Configuration file key: \fBupload_only\fP (default: \fBfalse\fP)
.TP
\fB\-\-user\-agent\fP ARG
Set the User Agent identifier to use
.br
Configuration file key: \fBuser_agent\fP (default: don't change)
.TP
\fB\-v \-\-verbose\fP
Print more details, useful for debugging. Specify twice (or more)
to enable even more verbose debug statements.
.TP
\fB\-\-version\fP
Print the version and exit
.TP
\fB\-\-with\-editing\-perms\fP
Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link <file>
.TP
\fB\-h \-\-help\fP
This help information.
.PP
This is a free Microsoft OneDrive Client designed to work with OneDrive Personal, OneDrive for Business, Office365 OneDrive, and SharePoint Libraries. It's fully compatible with most major Linux distributions and FreeBSD, and can be containerised using Docker or Podman. The client offers secure one-way and two-way synchronisation capabilities, making it easy to connect to Microsoft OneDrive services across various platforms.
.SH FEATURES
State caching
.br
Real-Time file monitoring with Inotify
.br
File upload / download validation to ensure data integrity
.br
Resumable uploads
.br
Support for OneDrive for Business (part of Office 365)
.br
Shared Folder support for OneDrive Personal and OneDrive Business accounts
.br
SharePoint / Office365 Shared Libraries
.br
Desktop notifications via libnotify
.br
Dry-run capability to test configuration changes
.br
Prevent major OneDrive accidental data deletion after configuration change
.br
Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China)
.br
* Compatible with OneDrive Personal, OneDrive for Business including accessing Microsoft SharePoint Libraries
.br
* Provides rules for client-side filtering to select data for syncing with Microsoft OneDrive accounts
.br
* Caches sync state for efficiency
.br
* Supports a dry-run option for safe configuration testing
.br
* Validates file transfers to ensure data integrity
.br
* Monitors local files in real-time using inotify
.br
* Supports interrupted uploads for completion at a later time
.br
* Capability to sync remote updates immediately via webhooks
.br
* Enhanced synchronisation speed with multi-threaded file transfers
.br
* Manages traffic bandwidth use with rate limiting
.br
* Supports seamless access to shared folders and files across both OneDrive Personal and OneDrive for Business accounts
.br
* Supports national cloud deployments including Microsoft Cloud for US Government, Microsoft Cloud Germany, and Azure and Office 365 operated by 21Vianet in China
.br
* Supports sending desktop alerts using libnotify
.br
* Protects against significant data loss on OneDrive after configuration changes
.br
* Works with both single and multi-tenant applications
.SH CONFIGURATION
By default, the client will use a sensible set of default values to interact with the Microsoft OneDrive service.
.TP
Should you wish to change these defaults, you should copy the default config file into your home directory before making any applicable changes:
You should copy the default config file into your home directory before making changes:
.nf
\fB
mkdir\ \-p\ ~/.config/onedrive
@ -305,87 +66,299 @@ cp\ @DOCDIR@/config\ ~/.config/onedrive/config
\fP
.fi
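.PP
Then uncomment and edit the options you wish to change; for example (the values shown here are the shipped defaults):
.nf
\fB
sync_dir = "~/OneDrive"
skip_file = "~*|.~*|*.tmp"
\fP
.fi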
For the supported options, see the list of command line options above
for the availability of a corresponding configuration file key.
.PP
Patterns are case insensitive.
\fB*\fP and \fB?\fP wildcard characters are supported.
Use \fB|\fP to separate multiple patterns.
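.PP
For example, an illustrative \fBskip_file\fP value combining several patterns (the entries beyond the shipped defaults are examples only):
.nf
\fB
skip_file = "~*|.~*|*.tmp|*.swp|*.partial"
\fP
.fi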
.TP
Please refer to the online documentation file application-config-options.md for details on all configuration file options.
After changing the filters (\fBskip_file\fP or \fBskip_dir\fP in your configs) you must
execute \fBonedrive --synchronize --resync\fP.
.SH CLIENT SIDE FILTERING
Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this:
.TP
.B skip_dir
Specifies directories that should not be synchronised with OneDrive. Useful for omitting large or irrelevant directories from the sync process.
.TP
.B skip_dotfiles
Excludes dotfiles, usually configuration files or scripts, from the sync. Ideal for users who prefer to keep these files local.
.TP
.B skip_file
Allows specifying specific files to exclude from synchronisation. Offers flexibility in selecting essential files for cloud storage.
.TP
.B skip_symlinks
Prevents symlinks, which often point to files outside the OneDrive directory or to irrelevant locations, from being included in the sync.
.PP
Additionally, the OneDrive Client for Linux allows the implementation of Client Side Filtering rules through a 'sync_list' file. This file explicitly states which directories or files should be included in the synchronisation. By default, any item not listed in the 'sync_list' file is excluded. This approach offers granular control over synchronisation, ensuring that only necessary data is transferred to and from Microsoft OneDrive.
.PP
These configurable options and the 'sync_list' file provide users with the flexibility to tailor the synchronisation process to their specific needs, conserving bandwidth and storage space while ensuring that important files are always backed up and accessible.
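.PP
An illustrative \fBsync_list\fP file (the directory names are examples only). Lines beginning with \fB#\fP or \fB;\fP are comments, and entries prefixed with \fB!\fP or \fB\-\fP are excluded:
.nf
\fB
# Include these paths
Documents
Projects
# Exclude a sub-path of an included directory
!Projects/Archive
\fP
.fi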
.TP
.B NOTE:
After changing any Client Side Filtering rule, a full re-synchronisation must be performed using --resync
.SH FIRST RUN
After installing the application you must run it at least once from the terminal
to authorize it.
You will be asked to open a specific link using your web browser where you
will have to login into your Microsoft Account and give the application the
permission to access your files. After giving the permission, you will be
redirected to a blank page. Copy the URI of the blank page into the application.
.SH SYSTEMD INTEGRATION
Service files are installed into user and system directories.
Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches.
.TP
OneDrive service running as root user
To enable this mode, run as root user
.nf
\fB
systemctl enable onedrive
systemctl start onedrive
\fP
.fi
Please be aware that some companies may require you to explicitly add this app to the Microsoft MyApps portal. To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department.
.TP
When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application.
.TP
This process authenticates your application with your account information, and it is now ready to use to sync your data between your local system and Microsoft OneDrive.
.SH GUI NOTIFICATIONS
If the client has been compiled with support for notifications, the client will send notifications about client activity via libnotify over DBus when running in --monitor mode.
.SH APPLICATION LOGGING
When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the \fB--enable-logging\fP flag. By default, log files will be written to \fB/var/log/onedrive\fP. All logfiles will be in the format of \fB%username%.onedrive.log\fP, where \fB%username%\fP represents the user who ran the client.
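For example, to perform a one-time sync with logging enabled:
.nf
\fB
onedrive \-\-sync \-\-enable\-logging
\fP
.fi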
.SH ALL CLI OPTIONS
The options below allow you to control the behaviour of the onedrive client from the CLI. If no option is specified and the client is already authenticated, the client will exit without taking any further action.
.TP
OneDrive service running as root user for a non-root user
This mode allows starting the OneDrive service automatically with
system start for multiple users. For each \fB<username>\fP run:
.nf
\fB
systemctl enable onedrive@<username>
systemctl start onedrive@<username>
\fP
.fi
\fB\-\-sync\fR
Do a one-time synchronisation with OneDrive.
.TP
OneDrive service running as non-root user
In this mode the service will be started when the user logs in.
Run as user
.nf
\fB
systemctl --user enable onedrive
systemctl --user start onedrive
\fP
.fi
\fB\-\-monitor\fR
Monitor filesystem for changes and sync regularly.
.SH LOGGING OUTPUT
.TP
\fB\-\-display-config\fR
Display the currently used configuration for the onedrive client.
When running onedrive, all actions can be logged to a separate log file.
This can be enabled by using the \fB--enable-logging\fP flag.
By default, log files will be written to \fB/var/log/onedrive\fP.
.TP
\fB\-\-display-sync-status\fR
Query OneDrive service and report on pending changes.
All logfiles will be in the format of \fB%username%.onedrive.log\fP,
where \fB%username%\fP represents the user who ran the client.
.TP
\fB\-\-auth-files\fR \fIARG\fR
Perform authentication not via interactive dialog but via files that are read/written when using this option. The two files are passed in as \fBARG\fP in the format \fBauthUrl:responseUrl\fP.
The authorisation URL is written to the \fBauthUrl\fP file, then \fBonedrive\fP waits for the file \fBresponseUrl\fP to be present, and reads the response from that file.
.br
Always specify the full path when using this option, otherwise the application will fall back to the default configuration path for these files (~/.config/onedrive/).
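.br
For example (the paths shown are illustrative only):
.nf
\fB
onedrive \-\-auth\-files /tmp/authUrl:/tmp/responseUrl
\fP
.fi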
.TP
\fB\-\-auth-response\fR \fIARG\fR
Perform authentication not via interactive dialog but via providing the response URL directly.
.SH NOTIFICATIONS
.TP
\fB\-\-check-for-nomount\fR
Check for the presence of .nosync in the syncdir root. If found, do not perform sync.
If OneDrive has been compiled with support for notifications, a running
\fBonedrive\fP in monitor mode will send notifications about
initialization and errors via libnotify to the dbus.
.TP
\fB\-\-check-for-nosync\fR
Check for the presence of .nosync in each directory. If found, skip directory from sync.
.TP
\fB\-\-classify-as-big-delete\fR \fIARG\fR
Number of children in a locally removed path that will cause the deletion to be classified as a 'big data delete'.
.TP
\fB\-\-cleanup-local-files\fR
Clean up additional local files when using --download-only. This will remove local data.
.TP
\fB\-\-confdir\fR \fIARG\fR
Set the directory used to store the configuration files.
.TP
\fB\-\-create-directory\fR \fIARG\fR
Create a directory on OneDrive - no sync will be performed.
.TP
\fB\-\-create-share-link\fR \fIARG\fR
Create a shareable link for an existing file on OneDrive.
.TP
\fB\-\-debug-https\fR
Debug OneDrive HTTPS communication.
.TP
\fB\-\-destination-directory\fR \fIARG\fR
Destination directory for renamed or moved items on OneDrive - no sync will be performed.
.TP
\fB\-\-disable-download-validation\fR
Disable download validation when downloading from OneDrive.
.TP
\fB\-\-disable-notifications\fR
Do not use desktop notifications in monitor mode.
.TP
\fB\-\-disable-upload-validation\fR
Disable upload validation when uploading to OneDrive.
.TP
\fB\-\-display-quota\fR
Display the quota status of the client - no sync will be performed.
.TP
\fB\-\-display-running-config\fR
Display what options the client has been configured to use on application startup.
.TP
\fB\-\-download-only\fR
Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive.
.TP
\fB\-\-dry-run\fR
Perform a trial sync with no changes made.
.TP
\fB\-\-enable-logging\fR
Enable logging of client activity to a separate log file.
.TP
\fB\-\-force\fR
Force the deletion of data when a 'big delete' is detected.
.TP
\fB\-\-force-http-11\fR
Force the use of HTTP 1.1 for all operations.
.TP
\fB\-\-force-sync\fR
Force a synchronisation of a specific folder, only when using --sync --single-directory and ignore all non-default skip_dir and skip_file rules.
.TP
\fB\-\-get-O365-drive-id\fR \fIARG\fR
Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library (DEPRECATED).
.TP
\fB\-\-get-file-link\fR \fIARG\fR
Display the file link of a synced file.
.TP
\fB\-\-get-sharepoint-drive-id\fR \fIARG\fR
Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library.
.TP
\fB\-\-help\fR, \fB\-h\fR
Display application help.
.TP
\fB\-\-list-shared-items\fR
List OneDrive Business Shared Items.
.TP
\fB\-\-local-first\fR
Synchronise from the local directory source first, before downloading changes from OneDrive.
.TP
\fB\-\-log-dir\fR \fIARG\fR
Directory where logging output is saved to; must end with a slash.
.TP
\fB\-\-logout\fR
Logout the current user.
.TP
\fB\-\-modified-by\fR \fIARG\fR
Display the last modified by details of a given path.
.TP
\fB\-\-monitor-interval\fR \fIARG\fR
Number of seconds between each sync operation when idle under monitor mode.
.TP
\fB\-\-monitor-log-frequency\fR \fIARG\fR
Frequency of logging in monitor mode.
.TP
\fB\-\-no-remote-delete\fR
Do not delete local file 'deletes' from OneDrive when using --upload-only.
.TP
\fB\-\-print-access-token\fR
Print the access token, useful for debugging.
.TP
\fB\-\-reauth\fR
Reauthenticate the client with OneDrive.
.TP
\fB\-\-remove-directory\fR \fIARG\fR
Remove a directory on OneDrive - no sync will be performed.
.TP
\fB\-\-remove-source-files\fR
Remove source file after successful transfer to OneDrive when using --upload-only.
.TP
\fB\-\-resync\fR
Forget the last saved state, perform a full sync.
.TP
\fB\-\-resync-auth\fR
Approve the use of a --resync action without requiring interactive confirmation.
.TP
\fB\-\-single-directory\fR \fIARG\fR
Specify a single local directory within the OneDrive root to sync.
.TP
\fB\-\-skip-dir\fR \fIARG\fR
Skip any directories that match this pattern from syncing.
.TP
\fB\-\-skip-dir-strict-match\fR
When matching skip_dir directories, only match explicit matches.
.TP
\fB\-\-skip-dot-files\fR
Skip dot files and folders from syncing.
.TP
\fB\-\-skip-file\fR \fIARG\fR
Skip any files that match this pattern from syncing.
.TP
\fB\-\-skip-size\fR \fIARG\fR
Skip new files larger than this size (in MB).
.TP
\fB\-\-skip-symlinks\fR
Skip syncing of symlinks.
.TP
\fB\-\-source-directory\fR \fIARG\fR
Source directory to rename or move on OneDrive - no sync will be performed.
.TP
\fB\-\-space-reservation\fR \fIARG\fR
The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation.
.TP
\fB\-\-sync-root-files\fR
Sync all files in sync_dir root when using sync_list.
.TP
\fB\-\-sync-shared-files\fR
Sync OneDrive Business Shared Files to the local filesystem.
.TP
\fB\-\-syncdir\fR \fIARG\fR
Specify the local directory used for synchronisation to OneDrive.
.TP
\fB\-\-synchronize\fR
Perform a synchronisation with Microsoft OneDrive (DEPRECATED).
.TP
\fB\-\-upload-only\fR
Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive.
.TP
\fB\-\-verbose\fR, \fB\-v+\fR
Print more details, useful for debugging (repeat for extra debugging).
.TP
\fB\-\-version\fR
Print the version and exit.
.TP
\fB\-\-with-editing-perms\fR
Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link <file>.
.SH DOCUMENTATION
All documentation is available on GitHub: https://github.com/abraunegg/onedrive/tree/master/docs/
Note that this does not work if \fBonedrive\fP is started as root
for a user via the \fBonedrive@<username>\fP service.
.SH SEE ALSO
Further examples and documentation are available in
\f[C]README.md\f[]
\f[C]docs/USAGE.md\f[]
\f[C]docs/advanced-usage.md\f[]
\f[C]docs/BusinessSharedFolders.md\f[]
\f[C]docs/SharePoint-Shared-Libraries.md\f[]
\f[C]docs/national-cloud-deployments.md\f[]
.BR curl(1),

View file

@ -5,27 +5,30 @@
[![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml)
[![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive)
A free Microsoft OneDrive Client which supports OneDrive Personal, OneDrive for Business, OneDrive for Office365 and SharePoint.
Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries.
This powerful and highly configurable client can run on all major Linux distributions, FreeBSD, or as a Docker container. It supports one-way and two-way sync capabilities and securely connects to Microsoft OneDrive services.
This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services.
This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) client, which the developer has confirmed he has no desire to maintain or support the client ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). This fork has been in active development since mid 2018.
Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)).
This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018.
## Features
* State caching
* Real-Time local file monitoring with inotify
* Real-Time syncing of remote updates via webhooks
* File upload / download validation to ensure data integrity
* Resumable uploads
* Support OneDrive for Business (part of Office 365)
* Shared Folder support for OneDrive Personal and OneDrive Business accounts
* SharePoint / Office365 Shared Libraries
* Desktop notifications via libnotify
* Dry-run capability to test configuration changes
* Prevent major OneDrive accidental data deletion after configuration change
* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China)
* Supports single & multi-tenanted applications
* Supports rate limiting of traffic
* Compatible with OneDrive Personal, OneDrive for Business including accessing Microsoft SharePoint Libraries
* Provides rules for client-side filtering to select data for syncing with Microsoft OneDrive accounts
* Caches sync state for efficiency
* Supports a dry-run option for safe configuration testing
* Validates file transfers to ensure data integrity
* Monitors local files in real-time using inotify
* Supports interrupted uploads for completion at a later time
* Capability to sync remote updates immediately via webhooks
* Enhanced synchronisation speed with multi-threaded file transfers
* Manages traffic bandwidth use with rate limiting
* Supports seamless access to shared folders and files across both OneDrive Personal and OneDrive for Business accounts
* Supports national cloud deployments including Microsoft Cloud for US Government, Microsoft Cloud Germany and Azure and Office 365 operated by 21Vianet in China
* Supports sending desktop alerts using libnotify
* Protects against significant data loss on OneDrive after configuration changes
* Works with both single and multi-tenant applications
## What's missing
* Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive
@ -36,28 +39,17 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl
* Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log)
* System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray)
## Supported Application Version
Only the current application release version or greater is supported.
The current application release version is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases)
Check the version of the application you are using with `onedrive --version` and ensure that you are running either the current release, or compile the application yourself from master to get the latest version.
If you are not using the above application version or greater, you must upgrade your application to obtain support.
## Have a Question
If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
Be sure to review the Frequently Asked Questions as well before raising a new discussion post.
## Frequently Asked Questions
Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions)
## Reporting an Issue or Bug
If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
## Have a question
If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
1. Check the version of the application you are using with `onedrive --version` and ensure that you are running a supported application version. If you are not using a supported application version, you must first upgrade your application to a supported version and then re-test for your issue.
2. If you are using a supported application version, fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
## Reporting an Issue or Bug
If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
1. Check the version of the application you are using with `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master.
2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support)
* If you are in *any* way concerned regarding the sensitivity of the data contained with in the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue
* If you are still concerned, provide an NDA or confidentiality document to sign
@ -70,23 +62,23 @@ Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/maste
## Documentation and Configuration Assistance
### Installing from Distribution Packages or Building the OneDrive Client for Linux from source
Refer to [docs/INSTALL.md](https://github.com/abraunegg/onedrive/blob/master/docs/INSTALL.md)
Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md)
### Configuration and Usage
Refer to [docs/USAGE.md](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md)
Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md)
### Configure OneDrive Business Shared Folders
Refer to [docs/BusinessSharedFolders.md](https://github.com/abraunegg/onedrive/blob/master/docs/BusinessSharedFolders.md)
### Configure OneDrive Business Shared Items
Refer to [docs/business-shared-items.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-items.md)
### Configure SharePoint / Office 365 Shared Libraries (Business or Education)
Refer to [docs/SharePoint-Shared-Libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/SharePoint-Shared-Libraries.md)
Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md)
### Configure National Cloud support
Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md)
### Docker support
Refer to [docs/Docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/Docker.md)
Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md)
### Podman support
Refer to [docs/Podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/Podman.md)
Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md)

View file

@ -683,6 +683,7 @@ enum long defaultMaxContentLength = 5_000_000;
public import std.string;
public import std.stdio;
public import std.conv;
import std.concurrency;
import std.uri;
import std.uni;
import std.algorithm.comparison;
@ -3910,14 +3911,16 @@ struct RequestServer {
If you want the forking worker process server, you do need to compile with the embedded_httpd_processes config though.
+/
void serveEmbeddedHttp(alias fun, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(ThisFor!fun _this) {
shared void serveEmbeddedHttp(alias fun, T, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(shared T _this) {
globalStopFlag = false;
static if(__traits(isStaticFunction, fun))
alias funToUse = fun;
void funToUse(CustomCgi cgi) {
fun(_this, cgi);
}
else
void funToUse(CustomCgi cgi) {
static if(__VERSION__ > 2097)
__traits(child, _this, fun)(cgi);
__traits(child, _inst_this, fun)(_inst_this, cgi);
else static assert(0, "Not implemented in your compiler version!");
}
auto manager = new ListeningConnectionManager(listeningHost, listeningPort, &doThreadHttpConnection!(CustomCgi, funToUse), null, useFork, numberOfThreads);

380
src/clientSideFiltering.d Normal file
View file

@ -0,0 +1,380 @@
// What is this module called?
module clientSideFiltering;
// What does this module require to function?
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
import std.conv;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
class ClientSideFiltering {
// Class variables
ApplicationConfig appConfig;
string[] paths;
Regex!char fileMask;
Regex!char directoryMask;
bool skipDirStrictMatch = false;
bool skipDotfiles = false;
this(ApplicationConfig appConfig) {
// Configure the class variable to consume the application configuration
this.appConfig = appConfig;
}
// Initialise the required items
bool initialise() {
// Log what is being done
addLogEntry("Configuring Client Side Filtering (Selective Sync)", ["debug"]);
// Load the sync_list file if it exists
if (exists(appConfig.syncListFilePath)){
loadSyncList(appConfig.syncListFilePath);
}
// Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries
// Handle skip_dir configuration in config file
addLogEntry("Configuring skip_dir ...", ["debug"]);
addLogEntry("skip_dir: " ~ to!string(appConfig.getValueString("skip_dir")), ["debug"]);
setDirMask(appConfig.getValueString("skip_dir"));
// Was --skip-dir-strict-match configured?
addLogEntry("Configuring skip_dir_strict_match ...", ["debug"]);
addLogEntry("skip_dir_strict_match: " ~ to!string(appConfig.getValueBool("skip_dir_strict_match")), ["debug"]);
if (appConfig.getValueBool("skip_dir_strict_match")) {
setSkipDirStrictMatch();
}
// Was --skip-dot-files configured?
addLogEntry("Configuring skip_dotfiles ...", ["debug"]);
addLogEntry("skip_dotfiles: " ~ to!string(appConfig.getValueBool("skip_dotfiles")), ["debug"]);
if (appConfig.getValueBool("skip_dotfiles")) {
setSkipDotfiles();
}
// Handle skip_file configuration in config file
addLogEntry("Configuring skip_file ...", ["debug"]);
// Validate skip_file to ensure that this does not contain an invalid configuration
// Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process.
foreach(entry; appConfig.getValueString("skip_file").split("|")){
if (entry == ".*") {
// invalid entry element detected
addLogEntry("ERROR: Invalid skip_file entry '.*' detected");
return false;
}
}
// All skip_file entries are valid
addLogEntry("skip_file: " ~ appConfig.getValueString("skip_file"), ["debug"]);
setFileMask(appConfig.getValueString("skip_file"));
// All configured OK
return true;
}
// Shutdown components
void shutdown() {
object.destroy(appConfig);
object.destroy(paths);
object.destroy(fileMask);
object.destroy(directoryMask);
}
// Load sync_list file if it exists
void loadSyncList(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
// Configure the regex that will be used for 'skip_file'
void setFileMask(const(char)[] mask) {
fileMask = wild2regex(mask);
addLogEntry("Selective Sync File Mask: " ~ to!string(fileMask), ["debug"]);
}
// Configure the regex that will be used for 'skip_dir'
void setDirMask(const(char)[] dirmask) {
directoryMask = wild2regex(dirmask);
addLogEntry("Selective Sync Directory Mask: " ~ to!string(directoryMask), ["debug"]);
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch() {
skipDirStrictMatch = true;
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles() {
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles() {
return skipDotfiles;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path) {
// Evaluate the given path against the loaded 'sync_list' inclusion / exclusion rules
return isPathExcluded(path, paths);
}
// config file skip_dir parameter
bool isDirNameExcluded(string name) {
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
addLogEntry("skip_dir evaluation for: " ~ name, ["debug"]);
// Try full path match first
if (!name.matchFirst(directoryMask).empty) {
addLogEntry("'!name.matchFirst(directoryMask).empty' returned true = matched", ["debug"]);
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
addLogEntry("No Strict Matching Enforced", ["debug"]);
// Test the entire path working backwards from child
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(directoryMask).empty) {
addLogEntry("'!checkPath.matchFirst(directoryMask).empty' returned true = matched", ["debug"]);
return true;
}
}
}
} else {
// No match
addLogEntry("Strict Matching Enforced - No Match", ["debug"]);
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name) {
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
addLogEntry("skip_file evaluation for: " ~ name, ["debug"]);
// Try full path match first
if (!name.matchFirst(fileMask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(fileMask).empty) {
return true;
}
}
// no match
return false;
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
private bool isPathExcluded(string path, string[] allowedPaths) {
// function variables
bool exclude = false;
bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
addLogEntry("Evaluation against 'sync_list' for this path: " ~ path, ["debug"]);
addLogEntry("[S]exclude = " ~ to!string(exclude), ["debug"]);
addLogEntry("[S]exludeDirectMatch = " ~ to!string(exludeDirectMatch), ["debug"]);
addLogEntry("[S]excludeMatched = " ~ to!string(excludeMatched), ["debug"]);
// unless path is an exact match, entire sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
addLogEntry("Evaluation against 'sync_list' entry: " ~ allowedPath, ["debug"]);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is the path an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
addLogEntry("Exact path match with 'sync_list' entry", ["debug"]);
if (!exclude) {
addLogEntry("Evaluation against 'sync_list' result: direct match", ["debug"]);
finalResult = false;
// direct match, break and go sync
break;
} else {
addLogEntry("Evaluation against 'sync_list' result: direct match - path to be excluded", ["debug"]);
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag exludeDirectMatch so that a 'wildcard match' will not override this exclude
exludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
addLogEntry("Something 'common' matches the 'sync_list' input path", ["debug"]);
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
addLogEntry("Evaluation against 'sync_list' result: direct match for parental path item", ["debug"]);
finalResult = false;
// direct match, break and go sync
break;
} else {
addLogEntry("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded", ["debug"]);
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem/sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
addLogEntry("Evaluation against 'sync_list' result: parental path match", ["debug"]);
finalResult = false;
// parental path matches, break and go sync
break;
} else {
addLogEntry("Evaluation against 'sync_list' result: parental path match but must be excluded", ["debug"]);
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' for '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !exludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
addLogEntry("Evaluation against 'sync_list' result: wildcard pattern match", ["debug"]);
finalResult = false;
} else {
addLogEntry("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded", ["debug"]);
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
addLogEntry("[F]exclude = " ~ to!string(exclude), ["debug"]);
addLogEntry("[F]exludeDirectMatch = " ~ to!string(exludeDirectMatch), ["debug"]);
addLogEntry("[F]excludeMatched = " ~ to!string(excludeMatched), ["debug"]);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (exludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
addLogEntry("Evaluation against 'sync_list' final result: EXCLUDED", ["debug"]);
} else {
addLogEntry("Evaluation against 'sync_list' final result: included for sync", ["debug"]);
}
return finalResult;
}
}
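// Illustrative usage sketch only, not part of this module; assumes an
// initialised ApplicationConfig instance named 'cfg':
//
//   auto selectiveSync = new ClientSideFiltering(cfg);
//   if (selectiveSync.initialise()) {
//       // evaluate a file name against the configured 'skip_file' mask
//       bool skipFile = selectiveSync.isFileNameExcluded("notes.tmp");
//       // evaluate a relative path against the loaded 'sync_list' rules
//       bool excluded = selectiveSync.isPathExcludedViaSyncList("Projects/Archive");
//   }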

File diff suppressed because it is too large

394
src/curlEngine.d Normal file
View file

@ -0,0 +1,394 @@
// What is this module called?
module curlEngine;
// What does this module require to function?
import std.net.curl;
import etc.c.curl: CurlOption;
import std.datetime;
import std.conv;
import std.file;
import std.json;
import std.stdio;
import std.range;
// What other modules that we have created do we need to import?
import log;
class CurlResponse {
HTTP.Method method;
const(char)[] url;
const(char)[][const(char)[]] requestHeaders;
const(char)[] postBody;
string[string] responseHeaders;
HTTP.StatusLine statusLine;
char[] content;
void reset() {
method = HTTP.Method.undefined;
url = null;
requestHeaders = null;
postBody = null;
responseHeaders = null;
object.destroy(statusLine);
content = null;
}
void addRequestHeader(const(char)[] name, const(char)[] value) {
requestHeaders[name] = value;
}
void connect(HTTP.Method method, const(char)[] url) {
this.method = method;
this.url = url;
}
const JSONValue json() {
JSONValue json;
try {
json = content.parseJSON();
} catch (JSONException e) {
// Log that a JSON Exception was caught; don't output the HTML response from OneDrive
addLogEntry("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further", ["debug"]);
}
return json;
};
void update(HTTP *http) {
this.responseHeaders = http.responseHeaders();
this.statusLine = http.statusLine;
}
@safe pure HTTP.StatusLine getStatus() {
return this.statusLine;
}
// Return the current value of retryAfterValue
ulong getRetryAfterValue() {
ulong delayBeforeRetry;
// is retry-after in the response headers
if ("retry-after" in responseHeaders) {
// Set the retry-after value
addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(responseHeaders["retry-after"]), ["debug"]);
addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ responseHeaders["retry-after"], ["debug"]);
delayBeforeRetry = to!ulong(responseHeaders["retry-after"]);
} else {
// Use a 120 second delay as a default when no usable 'retry-after' header value was provided
// This value is based on log files and data when determining correct process for 429 response handling
delayBeforeRetry = 120;
// Update that we are over-riding the provided value with a default
addLogEntry("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]);
}
return delayBeforeRetry; // the header-provided value, or the 120 second default
}
const string parseHeaders(const(string[string]) headers) {
string responseHeadersStr = "";
foreach (const(char)[] header; headers.byKey()) {
responseHeadersStr ~= "> " ~ header ~ ": " ~ headers[header] ~ "\n";
}
return responseHeadersStr;
}
const string parseHeaders(const(const(char)[][const(char)[]]) headers) {
string responseHeadersStr = "";
foreach (string header; headers.byKey()) {
if (header == "Authorization")
continue;
responseHeadersStr ~= "< " ~ header ~ ": " ~ headers[header] ~ "\n";
}
return responseHeadersStr;
}
const string dumpDebug() {
import std.range;
import std.format : format;
string str = "";
str ~= format("< %s %s\n", method, url);
if (!requestHeaders.empty) {
str ~= parseHeaders(requestHeaders);
}
if (!postBody.empty) {
str ~= format("----\n%s\n----\n", postBody);
}
str ~= format("< %s\n", statusLine);
if (!responseHeaders.empty) {
str ~= parseHeaders(responseHeaders);
}
return str;
}
const string dumpResponse() {
import std.range;
import std.format : format;
string str = "";
if (!content.empty) {
str ~= format("----\n%s\n----\n", content);
}
return str;
}
override string toString() const {
string str = "Curl debugging: \n";
str ~= dumpDebug();
str ~= "Curl response: \n";
str ~= dumpResponse();
return str;
}
CurlResponse dup() {
CurlResponse copy = new CurlResponse();
copy.method = method;
copy.url = url;
copy.requestHeaders = requestHeaders;
copy.postBody = postBody;
copy.responseHeaders = responseHeaders;
copy.statusLine = statusLine;
copy.content = content;
return copy;
}
}
class CurlEngine {
__gshared CurlEngine[] curlEnginePool;
static CurlEngine get() {
synchronized(CurlEngine.classinfo) {
if (curlEnginePool.empty) {
return new CurlEngine;
} else {
CurlEngine curlEngine = curlEnginePool[$-1];
curlEnginePool.popBack();
return curlEngine;
}
}
}
static releaseAll() {
synchronized(CurlEngine.classinfo) {
foreach(curlEngine; curlEnginePool) {
curlEngine.shutdown();
object.destroy(curlEngine);
}
curlEnginePool = null;
}
}
void release() {
cleanUp();
synchronized(CurlEngine.classinfo) {
curlEnginePool ~= this;
}
}
HTTP http;
bool keepAlive;
ulong dnsTimeout;
CurlResponse response;
this() {
http = HTTP();
response = new CurlResponse();
}
~this() {
object.destroy(http);
object.destroy(response);
}
void initialise(ulong dnsTimeout, ulong connectTimeout, ulong dataTimeout, ulong operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, ulong userRateLimit, ulong protocolVersion, bool keepAlive=true) {
// Setting this to false ensures that when we close the curl instance, any open sockets are closed - which we need to do when running
// multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly
this.keepAlive = keepAlive;
this.dnsTimeout = dnsTimeout;
// Curl Timeout Handling
// libcurl dns_cache_timeout timeout
// https://curl.se/libcurl/c/CURLOPT_DNS_CACHE_TIMEOUT.html
// https://dlang.org/library/std/net/curl/http.dns_timeout.html
http.dnsTimeout = (dur!"seconds"(dnsTimeout));
// Timeout for HTTPS connections
// https://curl.se/libcurl/c/CURLOPT_CONNECTTIMEOUT.html
// https://dlang.org/library/std/net/curl/http.connect_timeout.html
http.connectTimeout = (dur!"seconds"(connectTimeout));
// Timeout for activity on connection
// This is a DMD | DLANG specific item, not a libcurl item
// https://dlang.org/library/std/net/curl/http.data_timeout.html
// https://raw.githubusercontent.com/dlang/phobos/master/std/net/curl.d - private enum _defaultDataTimeout = dur!"minutes"(2);
http.dataTimeout = (dur!"seconds"(dataTimeout));
// Maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
// https://curl.se/libcurl/c/CURLOPT_TIMEOUT_MS.html
// https://dlang.org/library/std/net/curl/http.operation_timeout.html
http.operationTimeout = (dur!"seconds"(operationTimeout));
// Specify how many redirects should be allowed
http.maxRedirects(maxRedirects);
// Debug HTTPS
http.verbose = httpsDebug;
// Use the configured 'user_agent' value
http.setUserAgent = userAgent;
// What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6
http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// What version of HTTP protocol do we use?
// Curl >= 7.62.0 defaults to http2 for a significant number of operations
if (httpProtocol) {
// Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1
http.handle.set(CurlOption.http_version,2);
}
// Configure upload / download rate limits if configured
// 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts
// A 0 value means rate is unlimited, and is the curl default
if (userRateLimit > 0) {
// set rate limit
http.handle.set(CurlOption.max_send_speed_large,userRateLimit);
http.handle.set(CurlOption.max_recv_speed_large,userRateLimit);
}
// Explicitly set these libcurl options
// https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html
// Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals
http.handle.set(CurlOption.nosignal,0);
// https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html
// Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled
http.handle.set(CurlOption.tcp_nodelay,0);
// https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html
// CURLOPT_FORBID_REUSE - make connection get closed at once after use
// Setting this to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable
// Setting this to 1 ensures that when we close the curl instance, any open sockets are forced closed when the API curl instance is destroyed
// The libcurl default is 0 as per the documentation (to REUSE connections) - ensure we are configuring to reuse sockets
http.handle.set(CurlOption.forbid_reuse,0);
if (httpsDebug) {
// Output what options we are using so that in the debug log this can be tracked
addLogEntry("http.dnsTimeout = " ~ to!string(dnsTimeout), ["debug"]);
addLogEntry("http.connectTimeout = " ~ to!string(connectTimeout), ["debug"]);
addLogEntry("http.dataTimeout = " ~ to!string(dataTimeout), ["debug"]);
addLogEntry("http.operationTimeout = " ~ to!string(operationTimeout), ["debug"]);
addLogEntry("http.maxRedirects = " ~ to!string(maxRedirects), ["debug"]);
addLogEntry("http.CurlOption.ipresolve = " ~ to!string(protocolVersion), ["debug"]);
addLogEntry("http.header.Connection.keepAlive = " ~ to!string(keepAlive), ["debug"]);
}
}
void addRequestHeader(const(char)[] name, const(char)[] value) {
http.addRequestHeader(name, value);
response.addRequestHeader(name, value);
}
void connect(HTTP.Method method, const(char)[] url) {
if (!keepAlive)
addRequestHeader("Connection", "close");
http.method = method;
http.url = url;
response.connect(method, url);
}
void setContent(const(char)[] contentType, const(char)[] sendData) {
addRequestHeader("Content-Type", contentType);
if (sendData) {
http.contentLength = sendData.length;
http.onSend = (void[] buf) {
import std.algorithm: min;
size_t minLen = min(buf.length, sendData.length);
if (minLen == 0) return 0;
buf[0 .. minLen] = cast(void[]) sendData[0 .. minLen];
sendData = sendData[minLen .. $];
return minLen;
};
response.postBody = sendData;
}
}
void setFile(File* file, ulong offsetSize) {
addRequestHeader("Content-Type", "application/octet-stream");
http.onSend = data => file.rawRead(data).length;
http.contentLength = offsetSize;
}
CurlResponse execute() {
scope(exit) {
cleanUp();
}
http.onReceive = (ubyte[] data) {
response.content ~= data;
// HTTP Server Response Code Debugging if --https-debug is being used
return data.length;
};
http.perform();
response.update(&http);
return response.dup;
}
CurlResponse download(string originalFilename, string downloadFilename) {
// Threshold for displaying download bar
long thresholdFileSize = 4 * 2^^20; // 4 MiB
CurlResponse response = new CurlResponse();
// open downloadFilename as write in binary mode
auto file = File(downloadFilename, "wb");
// function scopes
scope(exit) {
cleanUp();
if (file.isOpen()){
// close open file
file.close();
}
}
http.onReceive = (ubyte[] data) {
file.rawWrite(data);
return data.length;
};
http.perform();
// Rename downloaded file
rename(downloadFilename, originalFilename);
response.update(&http);
return response;
}
void cleanUp() {
// Reset any values to defaults, freeing any set objects
http.clearRequestHeaders();
http.onSend = null;
http.onReceive = null;
http.onReceiveHeader = null;
http.onReceiveStatusLine = null;
http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) {
return 0;
};
http.contentLength = 0;
response.reset();
}
void shutdown() {
// Shut down the curl instance & close any open sockets
http.shutdown();
}
void setDisableSSLVerifyPeer() {
addLogEntry("CAUTION: Switching off CurlOption.ssl_verifypeer ... this makes the application insecure.", ["debug"]);
http.handle.set(CurlOption.ssl_verifypeer, 0);
}
}
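// Illustrative usage sketch only, not part of this module; the timeout,
// redirect, user-agent and rate-limit values below are example settings,
// not project defaults:
//
//   CurlEngine engine = CurlEngine.get();
//   engine.initialise(60, 10, 240, 3600, 5, false, "onedrive-client", false, 0, 0);
//   engine.connect(HTTP.Method.get, "https://graph.microsoft.com/v1.0/me/drive");
//   CurlResponse response = engine.execute();
//   engine.release(); // return the engine to the shared pool for reuse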

View file

@ -1,3 +1,7 @@
// What is this module called?
module itemdb;
// What does this module require to function?
import std.datetime;
import std.exception;
import std.path;
@ -5,19 +9,27 @@ import std.string;
import std.stdio;
import std.algorithm.searching;
import core.stdc.stdlib;
import std.json;
import std.conv;
// What other modules that we have created do we need to import?
import sqlite;
static import log;
import util;
import log;
enum ItemType {
none,
file,
dir,
remote
remote,
unknown
}
struct Item {
string driveId;
string id;
string name;
string remoteName;
ItemType type;
string eTag;
string cTag;
@ -26,25 +38,167 @@ struct Item {
string quickXorHash;
string sha256Hash;
string remoteDriveId;
string remoteParentId;
string remoteId;
ItemType remoteType;
string syncStatus;
string size;
}
final class ItemDatabase
{
// Construct an Item struct from a JSON driveItem
Item makeDatabaseItem(JSONValue driveItem) {
Item item = {
id: driveItem["id"].str,
name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business
eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business
cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business)
remoteName: "actualOnlineName" in driveItem ? driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders
};
// OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834
// OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive
if(isItemDeleted(driveItem)) {
// Set mtime to SysTime(0)
item.mtime = SysTime(0);
} else {
// Item is not in a deleted state
// Resolve 'Key not found: fileSystemInfo' when the item is a remote item
// https://github.com/abraunegg/onedrive/issues/11
if (isItemRemote(driveItem)) {
// remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default
// Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI
// to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to an application crash
// See: https://github.com/abraunegg/onedrive/issues/1533
if ("fileSystemInfo" in driveItem["remoteItem"]) {
// 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases
item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
} else {
// is a remote item, but 'fileSystemInfo' is missing from 'remoteItem'
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
} else {
// Does fileSystemInfo exist at all?
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
}
// Set this item object type
bool typeSet = false;
if (isItemFile(driveItem)) {
// 'file' object exists in the JSON
addLogEntry("Flagging object as a file", ["debug"]);
typeSet = true;
item.type = ItemType.file;
}
if (isItemFolder(driveItem)) {
// 'folder' object exists in the JSON
addLogEntry("Flagging object as a directory", ["debug"]);
typeSet = true;
item.type = ItemType.dir;
}
if (isItemRemote(driveItem)) {
// 'remote' object exists in the JSON
addLogEntry("Flagging object as a remote", ["debug"]);
typeSet = true;
item.type = ItemType.remote;
}
// root and remote items do not have parentReference
if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) {
item.driveId = driveItem["parentReference"]["driveId"].str;
if (hasParentReferenceId(driveItem)) {
item.parentId = driveItem["parentReference"]["id"].str;
}
}
// extract the file hash and file size
if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) {
// Get file size
if (hasFileSize(driveItem)) {
item.size = to!string(driveItem["size"].integer);
// Get quickXorHash as default
if ("quickXorHash" in driveItem["file"]["hashes"]) {
item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str;
} else {
addLogEntry("quickXorHash is missing from " ~ driveItem["id"].str, ["debug"]);
}
// If quickXorHash is empty ..
if (item.quickXorHash.empty) {
// Is there a sha256Hash?
if ("sha256Hash" in driveItem["file"]["hashes"]) {
item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str;
} else {
addLogEntry("sha256Hash is missing from " ~ driveItem["id"].str, ["debug"]);
}
}
} else {
// So that we have at least a zero value here as the API provided no 'size' data for this file item
item.size = "0";
}
}
// Is the object a remote drive item - living on another driveId?
if (isItemRemote(driveItem)) {
// Check and assign remoteDriveId
if ("parentReference" in driveItem["remoteItem"] && "driveId" in driveItem["remoteItem"]["parentReference"]) {
item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str;
}
// Check and assign remoteParentId
if ("parentReference" in driveItem["remoteItem"] && "id" in driveItem["remoteItem"]["parentReference"]) {
item.remoteParentId = driveItem["remoteItem"]["parentReference"]["id"].str;
}
// Check and assign remoteId
if ("id" in driveItem["remoteItem"]) {
item.remoteId = driveItem["remoteItem"]["id"].str;
}
// Check and assign remoteType
if ("file" in driveItem["remoteItem"].object) {
item.remoteType = ItemType.file;
} else {
item.remoteType = ItemType.dir;
}
}
// We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not:
// - National Cloud Deployments do not support /delta as a query
// - When using --single-directory
// - When using --download-only --cleanup-local-files
//
// Thus we need to track in the database that this item is in sync
// As we are making an item, set the syncStatus to Y
// ONLY when one of the three modes above is being used, all the existing DB entries will get set to N
// so when processing /children, it can be identified what the 'deleted' difference is
item.syncStatus = "Y";
// Return the created item
return item;
}
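// A hedged usage sketch of makeDatabaseItem(); the JSON below is a hand-crafted,
// minimal driveItem and every value in it is illustrative:
//
//   JSONValue driveItem = parseJSON(`{
//       "id": "01ABCDEF",
//       "name": "report.docx",
//       "size": 1024,
//       "file": { "hashes": { "quickXorHash": "aGFzaA==" } },
//       "fileSystemInfo": { "lastModifiedDateTime": "2024-04-14T20:02:01Z" },
//       "parentReference": { "driveId": "b!exampleDriveId", "id": "01ROOT" }
//   }`);
//   Item item = makeDatabaseItem(driveItem);
//   // item.type == ItemType.file, item.size == "1024", item.syncStatus == "Y"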
final class ItemDatabase {
// increment this for every change in the db schema
immutable int itemDatabaseVersion = 11;
immutable int itemDatabaseVersion = 13;
Database db;
string insertItemStmt;
string updateItemStmt;
string selectItemByIdStmt;
string selectItemByRemoteIdStmt;
string selectItemByParentIdStmt;
string deleteItemByIdStmt;
bool databaseInitialised = false;
this(const(char)[] filename)
{
this(const(char)[] filename) {
db = Database(filename);
int dbVersion;
try {
@ -52,14 +206,40 @@ final class ItemDatabase
} catch (SqliteException e) {
// An error was generated - what was the error?
if (e.msg == "database is locked") {
writeln();
log.error("ERROR: onedrive application is already running - check system process list for active application instances");
log.vlog("  - Use 'sudo ps aufxw | grep onedrive' to potentially determine active running process");
writeln();
addLogEntry();
addLogEntry("ERROR: The 'onedrive' application is already running - please check system process list for active application instances");
addLogEntry("  - Use 'sudo ps aufxw | grep onedrive' to potentially determine active running process");
addLogEntry();
} else {
writeln();
log.error("ERROR: An internal database error occurred: " ~ e.msg);
writeln();
// A different error .. detail the message, detail the actual SQLite Error Code to assist with troubleshooting
addLogEntry();
addLogEntry("ERROR: An internal database error occurred: " ~ e.msg ~ " (SQLite Error Code: " ~ to!string(e.errorCode) ~ ")");
addLogEntry();
// Give the user some additional information and pointers on this error
// The below list is based on user issue / discussion reports since 2018
switch (e.errorCode) {
case 7: // SQLITE_NOMEM
addLogEntry("The operation could not be completed due to insufficient memory. Please close unnecessary applications to free up memory and try again.");
break;
case 10: // SQLITE_IOERR
addLogEntry("A disk I/O error occurred. This could be due to issues with the storage medium (e.g., disk full, hardware failure, filesystem corruption). Please check your disk's health using a disk utility tool, ensure there is enough free space, and check the filesystem for errors.");
break;
case 11: // SQLITE_CORRUPT
addLogEntry("The database file appears to be corrupt. This could be due to incomplete or failed writes, hardware issues, or unexpected interruptions during database operations. Please perform a --resync operation.");
break;
case 14: // SQLITE_CANTOPEN
addLogEntry("The database file could not be opened. Please check that the database file exists, has the correct permissions, and is not being blocked by another process or security software.");
break;
case 26: // SQLITE_NOTADB
addLogEntry("The file attempted to be opened does not appear to be a valid SQLite database, or it may have been corrupted to a point where it's no longer recognizable. Please check your application configuration directory and/or perform a --resync operation.");
break;
default:
addLogEntry("An unexpected error occurred. Please consult the application documentation or support to resolve this issue.");
break;
}
// Blank line before exit
addLogEntry();
}
return;
}
@ -67,10 +247,15 @@ final class ItemDatabase
if (dbVersion == 0) {
createTable();
} else if (db.getVersion() != itemDatabaseVersion) {
log.log("The item database is incompatible, re-creating database table structures");
addLogEntry("The item database is incompatible, re-creating database table structures");
db.exec("DROP TABLE item");
createTable();
}
// What is the threadsafe value
auto threadsafeValue = db.getThreadsafeValue();
addLogEntry("Threadsafe database value: " ~ to!string(threadsafeValue), ["debug"]);
// Set the enforcement of foreign key constraints.
// https://www.sqlite.org/pragma.html#pragma_foreign_keys
// PRAGMA foreign_keys = boolean;
@ -99,12 +284,12 @@ final class ItemDatabase
db.exec("PRAGMA locking_mode = EXCLUSIVE");
insertItemStmt = "
INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)
INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteParentId, remoteId, remoteType, syncStatus, size)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)
";
updateItemStmt = "
UPDATE item
SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13
SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteParentId = ?13, remoteId = ?14, remoteType = ?15, syncStatus = ?16, size = ?17
WHERE driveId = ?1 AND id = ?2
";
selectItemByIdStmt = "
@ -112,6 +297,11 @@ final class ItemDatabase
FROM item
WHERE driveId = ?1 AND id = ?2
";
selectItemByRemoteIdStmt = "
SELECT *
FROM item
WHERE remoteDriveId = ?1 AND remoteId = ?2
";
selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?";
deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? AND id = ?";
@ -119,17 +309,16 @@ final class ItemDatabase
databaseInitialised = true;
}
bool isDatabaseInitialised()
{
bool isDatabaseInitialised() {
return databaseInitialised;
}
void createTable()
{
void createTable() {
db.exec("CREATE TABLE item (
driveId TEXT NOT NULL,
id TEXT NOT NULL,
name TEXT NOT NULL,
remoteName TEXT,
type TEXT NOT NULL,
eTag TEXT,
cTag TEXT,
@ -138,9 +327,12 @@ final class ItemDatabase
quickXorHash TEXT,
sha256Hash TEXT,
remoteDriveId TEXT,
remoteParentId TEXT,
remoteId TEXT,
remoteType TEXT,
deltaLink TEXT,
syncStatus TEXT,
size TEXT,
PRIMARY KEY (driveId, id),
FOREIGN KEY (driveId, parentId)
REFERENCES item (driveId, id)
@ -154,32 +346,27 @@ final class ItemDatabase
db.setVersion(itemDatabaseVersion);
}
void insert(const ref Item item)
{
void insert(const ref Item item) {
auto p = db.prepare(insertItemStmt);
bindItem(item, p);
p.exec();
}
void update(const ref Item item)
{
void update(const ref Item item) {
auto p = db.prepare(updateItemStmt);
bindItem(item, p);
p.exec();
}
void dump_open_statements()
{
void dump_open_statements() {
db.dump_open_statements();
}
int db_checkpoint()
{
int db_checkpoint() {
return db.db_checkpoint();
}
void upsert(const ref Item item)
{
void upsert(const ref Item item) {
auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?");
s.bind(1, item.driveId);
s.bind(2, item.id);
@ -191,8 +378,7 @@ final class ItemDatabase
stmt.exec();
}
Item[] selectChildren(const(char)[] driveId, const(char)[] id)
{
Item[] selectChildren(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(selectItemByParentIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -205,8 +391,7 @@ final class ItemDatabase
return items;
}
bool selectById(const(char)[] driveId, const(char)[] id, out Item item)
{
bool selectById(const(char)[] driveId, const(char)[] id, out Item item) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -218,9 +403,20 @@ final class ItemDatabase
return false;
}
bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) {
auto p = db.prepare(selectItemByRemoteIdStmt);
p.bind(1, remoteDriveId);
p.bind(2, remoteId);
auto r = p.exec();
if (!r.empty) {
item = buildItem(r);
return true;
}
return false;
}
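// Illustrative lookup sketch (the identifiers and the 'itemDB' instance are hypothetical):
//
//   Item sharedFolder;
//   if (itemDB.selectByRemoteId("b!remoteDriveId", "01REMOTEITEMID", sharedFolder)) {
//       // 'sharedFolder' is the local record that points at the remote drive item
//   }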
// returns true if an item id is in the database
bool idInLocalDatabase(const(string) driveId, const(string)id)
{
bool idInLocalDatabase(const(string) driveId, const(string)id) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -233,18 +429,11 @@ final class ItemDatabase
// returns the item with the given path
// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
bool selectByPath(const(char)[] path, string rootDriveId, out Item item)
{
bool selectByPath(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
if (startsWith(path, "./") || path == ".") {
// Need to remove the . from the path prefix
path = "root/" ~ path.chompPrefix(".");
} else {
// Leave path as it is
path = "root/" ~ path;
}
path = "root/" ~ (startsWith(path, "./") || path == "." ? path.chompPrefix(".") : path);
auto s = db.prepare("SELECT * FROM item WHERE name = ?1 AND driveId IS ?2 AND parentId IS ?3");
foreach (name; pathSplitter(path)) {
@ -254,12 +443,15 @@ final class ItemDatabase
auto r = s.exec();
if (r.empty) return false;
currItem = buildItem(r);
// if the item is of type remote substitute it with the child
// If the item is of type remote, substitute it with the child
if (currItem.type == ItemType.remote) {
addLogEntry("Record is a Remote Object: " ~ to!string(currItem), ["debug"]);
Item child;
if (selectById(currItem.remoteDriveId, currItem.remoteId, child)) {
assert(child.type != ItemType.remote, "The type of the child cannot be remote");
currItem = child;
addLogEntry("Selecting Record that is NOT Remote Object: " ~ to!string(currItem), ["debug"]);
}
}
}
@ -267,19 +459,12 @@ final class ItemDatabase
return true;
}
// same as selectByPath() but it does not traverse remote folders
bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item)
{
// same as selectByPath(), but it does not traverse remote folders; it returns the remote element itself if that is what the path resolves to
bool selectByPathIncludingRemoteItems(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
if (startsWith(path, "./") || path == ".") {
// Need to remove the . from the path prefix
path = "root/" ~ path.chompPrefix(".");
} else {
// Leave path as it is
path = "root/" ~ path;
}
path = "root/" ~ (startsWith(path, "./") || path == "." ? path.chompPrefix(".") : path);
auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND driveId IS ?2 AND parentId IS ?3");
foreach (name; pathSplitter(path)) {
@ -290,75 +475,129 @@ final class ItemDatabase
if (r.empty) return false;
currItem = buildItem(r);
}
if (currItem.type == ItemType.remote) {
addLogEntry("Record selected is a Remote Object: " ~ to!string(currItem), ["debug"]);
}
item = currItem;
return true;
}
void deleteById(const(char)[] driveId, const(char)[] id)
{
void deleteById(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(deleteItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
p.exec();
}
private void bindItem(const ref Item item, ref Statement stmt)
{
private void bindItem(const ref Item item, ref Statement stmt) {
with (stmt) with (item) {
bind(1, driveId);
bind(2, id);
bind(3, name);
bind(4, remoteName);
// type handling
string typeStr = null;
final switch (type) with (ItemType) {
case file: typeStr = "file"; break;
case dir: typeStr = "dir"; break;
case remote: typeStr = "remote"; break;
case unknown: typeStr = "unknown"; break;
case none: typeStr = null; break;
}
bind(4, typeStr);
bind(5, eTag);
bind(6, cTag);
bind(7, mtime.toISOExtString());
bind(8, parentId);
bind(9, quickXorHash);
bind(10, sha256Hash);
bind(11, remoteDriveId);
bind(12, remoteId);
bind(13, syncStatus);
bind(5, typeStr);
bind(6, eTag);
bind(7, cTag);
bind(8, mtime.toISOExtString());
bind(9, parentId);
bind(10, quickXorHash);
bind(11, sha256Hash);
bind(12, remoteDriveId);
bind(13, remoteParentId);
bind(14, remoteId);
// remoteType handling
string remoteTypeStr = null;
final switch (remoteType) with (ItemType) {
case file: remoteTypeStr = "file"; break;
case dir: remoteTypeStr = "dir"; break;
case remote: remoteTypeStr = "remote"; break;
case unknown: remoteTypeStr = "unknown"; break;
case none: remoteTypeStr = null; break;
}
bind(15, remoteTypeStr);
bind(16, syncStatus);
bind(17, size);
}
}
private Item buildItem(Statement.Result result)
{
private Item buildItem(Statement.Result result) {
assert(!result.empty, "The result must not be empty");
assert(result.front.length == 14, "The result must have 14 columns");
assert(result.front.length == 18, "The result must have 18 columns");
Item item = {
// column 0: driveId
// column 1: id
// column 2: name
// column 3: remoteName - only used when there is a difference in the local name & remote shared folder name
// column 4: type
// column 5: eTag
// column 6: cTag
// column 7: mtime
// column 8: parentId
// column 9: quickXorHash
// column 10: sha256Hash
// column 11: remoteDriveId
// column 12: remoteParentId
// column 13: remoteId
// column 14: remoteType
// column 15: deltaLink
// column 16: syncStatus
// column 17: size
driveId: result.front[0].dup,
id: result.front[1].dup,
name: result.front[2].dup,
eTag: result.front[4].dup,
cTag: result.front[5].dup,
mtime: SysTime.fromISOExtString(result.front[6]),
parentId: result.front[7].dup,
quickXorHash: result.front[8].dup,
sha256Hash: result.front[9].dup,
remoteDriveId: result.front[10].dup,
remoteId: result.front[11].dup,
syncStatus: result.front[12].dup
remoteName: result.front[3].dup,
// Column 4 is type - not set here
eTag: result.front[5].dup,
cTag: result.front[6].dup,
mtime: SysTime.fromISOExtString(result.front[7]),
parentId: result.front[8].dup,
quickXorHash: result.front[9].dup,
sha256Hash: result.front[10].dup,
remoteDriveId: result.front[11].dup,
remoteParentId: result.front[12].dup,
remoteId: result.front[13].dup,
// Column 14 is remoteType - not set here
// Column 15 is deltaLink - not set here
syncStatus: result.front[16].dup,
size: result.front[17].dup
};
switch (result.front[3]) {
// Configure item.type
switch (result.front[4]) {
case "file": item.type = ItemType.file; break;
case "dir": item.type = ItemType.dir; break;
case "remote": item.type = ItemType.remote; break;
default: assert(0, "Invalid item type");
}
// Configure item.remoteType
switch (result.front[14]) {
// We only care about 'dir' and 'file' for 'remote' items
case "file": item.remoteType = ItemType.file; break;
case "dir": item.remoteType = ItemType.dir; break;
default: item.remoteType = ItemType.none; break; // Default to ItemType.none
}
// Return item
return item;
}
// computes the path of the given item id
// the path is relative to the sync directory ex: "Music/Turbo Killer.mp3"
// the trailing slash is not added even if the item is a directory
string computePath(const(char)[] driveId, const(char)[] id)
{
string computePath(const(char)[] driveId, const(char)[] id) {
assert(driveId && id);
string path;
Item item;
@ -406,9 +645,9 @@ final class ItemDatabase
}
} else {
// broken tree
log.vdebug("The following generated a broken tree query:");
log.vdebug("Drive ID: ", driveId);
log.vdebug("Item ID: ", id);
addLogEntry("The following generated a broken tree query:", ["debug"]);
addLogEntry("Drive ID: " ~ to!string(driveId), ["debug"]);
addLogEntry("Item ID: " ~ to!string(id), ["debug"]);
assert(0);
}
}
@ -416,8 +655,7 @@ final class ItemDatabase
return path;
}
Item[] selectRemoteItems()
{
Item[] selectRemoteItems() {
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL");
auto res = stmt.exec();
@ -428,8 +666,11 @@ final class ItemDatabase
return items;
}
string getDeltaLink(const(char)[] driveId, const(char)[] id)
{
string getDeltaLink(const(char)[] driveId, const(char)[] id) {
// Log what we received
addLogEntry("DeltaLink Query (driveId): " ~ to!string(driveId), ["debug"]);
addLogEntry("DeltaLink Query (id): " ~ to!string(id), ["debug"]);
assert(driveId && id);
auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
@ -439,8 +680,7 @@ final class ItemDatabase
return res.front[0].dup;
}
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink)
{
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) {
assert(driveId && id);
assert(deltaLink);
auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2");
@ -455,8 +695,7 @@ final class ItemDatabase
// As we query /children to get all children from OneDrive, update anything in the database
// to be flagged as not-in-sync, thus we can use that flag to determine what was previously
// in-sync, but now deleted on OneDrive
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id)
{
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) {
assert(driveId);
auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
@ -466,8 +705,7 @@ final class ItemDatabase
// National Cloud Deployments (US and DE) do not support /delta as a query
// Select items that have a out-of-sync flag set
Item[] selectOutOfSyncItems(const(char)[] driveId)
{
Item[] selectOutOfSyncItems(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1");
@ -482,8 +720,7 @@ final class ItemDatabase
// OneDrive Business Folders are stored in the database potentially without a root | parentRoot link
// Select items associated with the provided driveId
Item[] selectByDriveId(const(char)[] driveId)
{
Item[] selectByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL");
@ -496,22 +733,37 @@ final class ItemDatabase
return items;
}
// Select all items associated with the provided driveId
Item[] selectAllItemsByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1");
stmt.bind(1, driveId);
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
// Perform a vacuum on the database, commit WAL / SHM to file
void performVacuum()
{
void performVacuum() {
addLogEntry("Attempting to perform a database vacuum to merge any temporary data", ["debug"]);
try {
auto stmt = db.prepare("VACUUM;");
stmt.exec();
addLogEntry("Database vacuum is complete", ["debug"]);
} catch (SqliteException e) {
writeln();
log.error("ERROR: Unable to perform a database vacuum: " ~ e.msg);
writeln();
addLogEntry();
addLogEntry("ERROR: Unable to perform a database vacuum: " ~ e.msg);
addLogEntry();
}
}
// Select distinct driveId items from database
string[] selectDistinctDriveIds()
{
string[] selectDistinctDriveIds() {
string[] driveIdArray;
auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;");
auto res = stmt.exec();
@ -522,4 +774,4 @@ final class ItemDatabase
}
return driveIdArray;
}
}
}
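// Illustrative sketch of walking the database per drive ('databaseFilePath' is a
// hypothetical stand-in for the configured database location):
//
//   auto itemDB = new ItemDatabase(databaseFilePath);
//   if (itemDB.isDatabaseInitialised()) {
//       foreach (driveId; itemDB.selectDistinctDriveIds()) {
//           Item[] driveItems = itemDB.selectAllItemsByDriveId(driveId);
//           addLogEntry("Items for " ~ driveId ~ ": " ~ to!string(driveItems.length), ["debug"]);
//       }
//   }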

382
src/log.d
View file

@ -1,239 +1,195 @@
// What is this module called?
module log;
// What does this module require to function?
import std.stdio;
import std.file;
import std.datetime;
import std.process;
import std.conv;
import core.memory;
import core.sys.posix.pwd, core.sys.posix.unistd, core.stdc.string : strlen;
import std.algorithm : splitter;
import std.concurrency;
import std.typecons;
import core.sync.condition;
import core.sync.mutex;
import core.thread;
import std.format;
import std.string;
version(Notifications) {
import dnotify;
}
// enable verbose logging
long verbose;
bool writeLogFile = false;
bool logFileWriteFailFlag = false;
// Shared module object
shared LogBuffer logBuffer;
private bool doNotifications;
// Timer for logging
shared MonoTime lastInsertedTime;
// shared string variable for username
string username;
string logFilePath;
class LogBuffer {
private:
string[3][] buffer;
Mutex bufferLock;
Condition condReady;
string logFilePath;
bool writeToFile;
bool verboseLogging;
bool debugLogging;
Thread flushThread;
bool isRunning;
bool sendGUINotification;
void init(string logDir)
{
writeLogFile = true;
username = getUserName();
logFilePath = logDir;
if (!exists(logFilePath)){
// logfile path does not exist
try {
mkdirRecurse(logFilePath);
}
catch (std.file.FileException e) {
// we got an error ..
writeln("\nUnable to access ", logFilePath);
writeln("Please manually create '",logFilePath, "' and set appropriate permissions to allow write access");
writeln("The requested client activity log will instead be located in your users home directory");
}
}
}
public:
this(bool verboseLogging, bool debugLogging) {
// Initialise the mutex
bufferLock = new Mutex();
condReady = new Condition(bufferLock);
// Initialise other items
// File output is off by default; enableLogFileOutput() configures the path later
this.logFilePath = "";
this.writeToFile = false;
this.verboseLogging = verboseLogging;
this.debugLogging = debugLogging;
this.isRunning = true;
this.sendGUINotification = true;
this.flushThread = new Thread(&flushBuffer);
flushThread.isDaemon(true);
flushThread.start();
}
void setNotifications(bool value)
{
version(Notifications) {
// if we try to enable notifications, check for server availability
// and disable in case dbus server is not reachable
if (value) {
auto serverAvailable = dnotify.check_availability();
if (!serverAvailable) {
log("Notification (dbus) server not available, disabling");
value = false;
void shutdown() {
synchronized(bufferLock) {
if (!isRunning) return; // Prevent multiple shutdowns
isRunning = false;
condReady.notifyAll(); // Wake up all waiting threads
}
flushThread.join(); // Wait for the flush thread to finish
flush(); // Perform a final flush to ensure all data is processed
}
}
doNotifications = value;
}
void log(T...)(T args)
{
writeln(args);
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void logAndNotify(T...)(T args)
{
notify(args);
log(args);
}
void fileOnly(T...)(T args)
{
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void vlog(T...)(T args)
{
if (verbose >= 1) {
writeln(args);
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
}
void vdebug(T...)(T args)
{
if (verbose >= 2) {
writeln("[DEBUG] ", args);
if(writeLogFile){
// Write to log file
logfileWriteLine("[DEBUG] ", args);
}
}
}
void vdebugNewLine(T...)(T args)
{
if (verbose >= 2) {
writeln("\n[DEBUG] ", args);
if(writeLogFile){
// Write to log file
logfileWriteLine("\n[DEBUG] ", args);
}
}
}
void error(T...)(T args)
{
stderr.writeln(args);
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void errorAndNotify(T...)(T args)
{
notify(args);
error(args);
}
void notify(T...)(T args)
{
version(Notifications) {
if (doNotifications) {
string result;
foreach (index, arg; args) {
result ~= to!string(arg);
if (index != args.length - 1)
result ~= " ";
}
auto n = new Notification("OneDrive", result, "IGNORED");
try {
n.show();
// Sent message to notification daemon
if (verbose >= 2) {
writeln("[DEBUG] Sent notification to notification service. If notification is not displayed, check dbus or notification-daemon for errors");
shared void logThisMessage(string message, string[] levels = ["info"]) {
// Generate the timestamp for this log entry
auto timeStamp = leftJustify(Clock.currTime().toString(), 28, '0');
synchronized(bufferLock) {
foreach (level; levels) {
// Normal application output
if (!debugLogging) {
if ((level == "info") || ((verboseLogging) && (level == "verbose")) || (level == "logFileOnly") || (level == "consoleOnly") || (level == "consoleOnlyNoNewLine")) {
// Add this message to the buffer, with this format
buffer ~= [timeStamp, level, format("%s", message)];
}
} else {
// Debug Logging (--verbose --verbose | -v -v | -vv) output
// Add this message, regardless of 'level' to the buffer, with this format
buffer ~= [timeStamp, level, format("DEBUG: %s", message)];
// If there are multiple 'levels' configured, ignore this and break as we are doing debug logging
break;
}
// Submit the message to the dbus / notification daemon for display within the GUI being used
// Will not send GUI notifications when running in debug mode
if ((!debugLogging) && (level == "notify")) {
version(Notifications) {
if (sendGUINotification) {
notify(message);
}
}
}
}
(cast()condReady).notify();
}
}
shared void notify(string message) {
// Use dnotify's functionality for GUI notifications, if GUI notifications is enabled
version(Notifications) {
try {
auto n = new Notification("Log Notification", message, "IGNORED");
n.show();
} catch (NotificationError e) {
sendGUINotification = false;
addLogEntry("Unable to send notification; disabled in the following: " ~ e.message);
}
} catch (Throwable e) {
vlog("Got exception from showing notification: ", e);
}
}
}
}
private void flushBuffer() {
while (isRunning) {
flush();
}
stdout.flush();
}
private void flush() {
string[3][] messages;
synchronized(bufferLock) {
while (buffer.empty && isRunning) {
condReady.wait();
}
messages = buffer;
buffer.length = 0;
}
foreach (msg; messages) {
// timestamp, logLevel, message
// Always write the log line to the console, if level != logFileOnly
if (msg[1] != "logFileOnly") {
// Console output .. what sort of output
if (msg[1] == "consoleOnlyNoNewLine") {
// This is used to write out a message to the console only, without a new line
// This is used in non-verbose mode to indicate something is happening when downloading JSON data from OneDrive or when we need user input from --resync
write(msg[2]);
} else {
// write this to the console with a new line
writeln(msg[2]);
}
}
// Was this just console only output?
if ((msg[1] != "consoleOnlyNoNewLine") && (msg[1] != "consoleOnly")) {
// Write to the logfile only if configured to do so - console only items should not be written out
if (writeToFile) {
string logFileLine = format("[%s] %s", msg[0], msg[2]);
std.file.append(logFilePath, logFileLine ~ "\n");
}
}
}
}
}
private void logfileWriteLine(T...)(T args)
{
static import std.exception;
// Write to log file
string logFileName = .logFilePath ~ .username ~ ".onedrive.log";
auto currentTime = Clock.currTime();
auto timeString = currentTime.toString();
File logFile;
// Resolve: std.exception.ErrnoException@std/stdio.d(423): Cannot open file `/var/log/onedrive/xxxxx.onedrive.log' in mode `a' (Permission denied)
try {
logFile = File(logFileName, "a");
}
catch (std.exception.ErrnoException e) {
// We cannot open the log file in logFilePath location for writing
// The user is not part of the standard 'users' group (GID 100)
// Change logfile to ~/onedrive.log putting the log file in the users home directory
if (!logFileWriteFailFlag) {
// write out error message that we cant log to the requested file
writeln("\nUnable to write activity log to ", logFileName);
writeln("Please set appropriate permissions to allow write access to the logging directory for your user account");
writeln("The requested client activity log will instead be located in your users home directory\n");
// set the flag so we dont keep printing this error message
logFileWriteFailFlag = true;
}
string homePath = environment.get("HOME");
string logFileNameAlternate = homePath ~ "/onedrive.log";
logFile = File(logFileNameAlternate, "a");
}
// Write to the log file
logFile.writeln(timeString, "\t", args);
logFile.close();
// Function to initialise the logging system
void initialiseLogging(bool verboseLogging = false, bool debugLogging = false) {
logBuffer = cast(shared) new LogBuffer(verboseLogging, debugLogging);
lastInsertedTime = MonoTime.currTime();
}
private string getUserName()
{
auto pw = getpwuid(getuid);
// get required details
auto runtime_pw_name = pw.pw_name[0 .. strlen(pw.pw_name)].splitter(',');
auto runtime_pw_uid = pw.pw_uid;
auto runtime_pw_gid = pw.pw_gid;
// user identifiers from process
vdebug("Process ID: ", pw);
vdebug("User UID: ", runtime_pw_uid);
vdebug("User GID: ", runtime_pw_gid);
// What should be returned as username?
if (!runtime_pw_name.empty && runtime_pw_name.front.length){
// user resolved
vdebug("User Name: ", runtime_pw_name.front.idup);
return runtime_pw_name.front.idup;
// Function to add a log entry with multiple levels
void addLogEntry(string message = "", string[] levels = ["info"]) {
logBuffer.logThisMessage(message, levels);
}
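// Illustrative usage, assuming initialiseLogging() has already been called (the
// 'response' variable is a hypothetical stand-in):
//
//   addLogEntry("Sync with Microsoft OneDrive is complete");        // normal info output
//   addLogEntry("Processing changes", ["verbose"]);                 // only shown with --verbose
//   addLogEntry("Raw response: " ~ to!string(response), ["debug"]); // only shown with -v -v
//   addLogEntry("Local file deleted", ["info", "notify"]);          // console + GUI notification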
void addProcessingLogHeaderEntry(string message, long verbosityCount) {
if (verbosityCount == 0) {
addLogEntry(message, ["logFileOnly"]);
// Use the dots to show the application is 'doing something' if verbosityCount == 0
addLogEntry(message ~ " .", ["consoleOnlyNoNewLine"]);
} else {
// Unknown user?
vdebug("User Name: unknown");
return "unknown";
// Fallback to normal logging if in verbose or above level
addLogEntry(message);
}
}
void displayMemoryUsagePreGC()
{
// Display memory usage
writeln("\nMemory Usage pre GC (bytes)");
writeln("--------------------");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
// uncomment this if required, if not using LDC 1.16 as this does not exist in that version
//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
void addProcessingDotEntry() {
if (MonoTime.currTime() - lastInsertedTime < dur!"seconds"(1)) {
// Don't flood the log buffer
return;
}
lastInsertedTime = MonoTime.currTime();
addLogEntry(".", ["consoleOnlyNoNewLine"]);
}
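// Together these two helpers render activity on a single console line in non-verbose
// mode, e.g. "Scanning the local file system ....". A hedged sketch ('verbosityCount'
// and 'paths' are illustrative):
//
//   addProcessingLogHeaderEntry("Scanning the local file system", verbosityCount);
//   foreach (path; paths) {
//       addProcessingDotEntry(); // rate-limited to at most one dot per second
//   }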
void displayMemoryUsagePostGC()
{
// Display memory usage
writeln("\nMemory Usage post GC (bytes)");
writeln("--------------------");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
// uncomment this if required, if not using LDC 1.16 as this does not exist in that version
//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
// Function to set logFilePath and enable logging to a file
void enableLogFileOutput(string configuredLogFilePath) {
logBuffer.logFilePath = configuredLogFilePath;
logBuffer.writeToFile = true;
}
void disableGUINotifications(bool userConfigDisableNotifications) {
// A 'true' user configuration value means GUI notifications must not be sent
logBuffer.sendGUINotification = !userConfigDisableNotifications;
}
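// Typical start-up wiring, as a hedged sketch ('cfg' stands in for the application's
// ApplicationConfig instance):
//
//   initialiseLogging(verboseLogging, debugLogging);
//   if (cfg.getValueBool("enable_logging")) {
//       enableLogFileOutput(cfg.getValueString("log_dir"));
//   }
//   disableGUINotifications(cfg.getValueBool("disable_notifications"));
//   addLogEntry("Application started");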

3114
src/main.d

File diff suppressed because it is too large

View file

@ -1,87 +1,341 @@
import core.sys.linux.sys.inotify;
import core.stdc.errno;
import core.sys.posix.poll, core.sys.posix.unistd;
import std.exception, std.file, std.path, std.regex, std.stdio, std.string, std.algorithm;
import core.stdc.stdlib;
import config;
import selective;
import util;
static import log;
// What is this module called?
module monitor;
// relevant inotify events
// What does this module require to function?
import core.stdc.errno;
import core.stdc.stdlib;
import core.sys.linux.sys.inotify;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import core.sys.posix.sys.select;
import core.thread;
import core.time;
import std.algorithm;
import std.concurrency;
import std.exception;
import std.file;
import std.path;
import std.process;
import std.regex;
import std.stdio;
import std.string;
import std.conv;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
import clientSideFiltering;
// Relevant inotify events
private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW;
class MonitorException: ErrnoException
{
@safe this(string msg, string file = __FILE__, size_t line = __LINE__)
{
class MonitorException: ErrnoException {
@safe this(string msg, string file = __FILE__, size_t line = __LINE__) {
super(msg, file, line);
}
}
final class Monitor
{
bool verbose;
class MonitorBackgroundWorker {
// inotify file descriptor
private int fd;
int fd;
Pipe p;
bool isAlive;
this() {
isAlive = true;
p = pipe();
}
shared void initialise() {
fd = inotify_init();
if (fd < 0) throw new MonitorException("inotify_init failed");
}
// Add this path to be monitored
shared int addInotifyWatch(string pathname) {
int wd = inotify_add_watch(fd, toStringz(pathname), mask);
if (wd < 0) {
if (errno() == ENOSPC) {
// Get the current value
ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/fs/inotify/max_user_watches")));
addLogEntry("The user limit on the total number of inotify watches has been reached.");
addLogEntry("Your current limit of inotify watches is: " ~ to!string(maxInotifyWatches));
addLogEntry("It is recommended that you change the max number of inotify watches to at least double your existing value.");
addLogEntry("To change the current max number of watches to " ~ to!string((maxInotifyWatches * 2)) ~ " run:");
addLogEntry("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=" ~ to!string((maxInotifyWatches * 2)));
}
if (errno() == 13) {
addLogEntry("WARNING: inotify_add_watch failed - permission denied: " ~ pathname, ["verbose"]);
}
// Flag any other errors
addLogEntry("ERROR: inotify_add_watch failed: " ~ pathname);
return wd;
}
// Add path to inotify watch - required regardless of whether it is a '.folder' or 'folder'
addLogEntry("inotify_add_watch successfully added for: " ~ pathname, ["debug"]);
// Do we log that we are monitoring this directory?
if (isDir(pathname)) {
// Log that this directory is being monitored
addLogEntry("Monitoring directory: " ~ pathname, ["verbose"]);
}
return wd;
}
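// Note: a value set via 'sudo sysctl fs.inotify.max_user_watches=N' lasts only until
// reboot; to persist it, the same key can be placed in /etc/sysctl.conf or in a
// drop-in file under /etc/sysctl.d/, for example: fs.inotify.max_user_watches=524288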
shared int removeInotifyWatch(int wd) {
return inotify_rm_watch(fd, wd);
}
shared void watch(Tid callerTid) {
// On failure, send -1 to caller
int res;
// wait for the caller to be ready
receiveOnly!int();
while (isAlive) {
fd_set fds;
FD_ZERO (&fds);
FD_SET(fd, &fds);
// Listen for messages from the caller
FD_SET((cast()p).readEnd.fileno, &fds);
res = select(FD_SETSIZE, &fds, null, null, null);
if(res == -1) {
if(errno() == EINTR) {
// Received an interrupt signal but no events are available - re-enter select() and keep watching
} else {
// Error occurred, tell caller to terminate.
callerTid.send(-1);
break;
}
} else {
// Wake up caller
callerTid.send(1);
// wait for the caller to be ready
if (isAlive)
isAlive = receiveOnly!bool();
}
}
}
shared void interrupt() {
isAlive = false;
(cast()p).writeEnd.writeln("done");
(cast()p).writeEnd.flush();
}
shared void shutdown() {
isAlive = false;
if (fd > 0) {
close(fd);
fd = 0;
(cast()p).close();
}
}
}
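// Design note: watch() and interrupt() together implement the classic self-pipe
// wake-up. select() blocks on both the inotify descriptor and the read end of the
// internal pipe, so interrupt() only has to write a line to the pipe to make a
// blocked select() return and let the loop re-check isAlive.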
void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
{
try {
worker.watch(callerTid);
} catch (OwnerTerminated error) {
// caller is terminated
worker.shutdown();
}
}
enum ActionType {
moved,
deleted,
changed,
createDir
}
struct Action {
ActionType type;
bool skipped;
string src;
string dst;
}
struct ActionHolder {
Action[] actions;
size_t[string] srcMap;
void append(ActionType type, string src, string dst=null) {
size_t[] pendingTargets;
switch (type) {
case ActionType.changed:
if (src in srcMap && actions[srcMap[src]].type == ActionType.changed) {
// skip duplicate operations
return;
}
break;
case ActionType.createDir:
break;
case ActionType.deleted:
if (src in srcMap) {
size_t pendingTarget = srcMap[src];
// Skip operations require reading local file that is gone
switch (actions[pendingTarget].type) {
case ActionType.changed:
case ActionType.createDir:
actions[srcMap[src]].skipped = true;
srcMap.remove(src);
break;
default:
break;
}
}
break;
case ActionType.moved:
for(int i = 0; i < actions.length; i++) {
// Only match for latest operation
if (actions[i].src in srcMap) {
switch (actions[i].type) {
case ActionType.changed:
case ActionType.createDir:
// check if the source is the prefix of the target
string prefix = src ~ "/";
string target = actions[i].src;
if (prefix[0] != '.')
prefix = "./" ~ prefix;
if (target[0] != '.')
target = "./" ~ target;
string comm = commonPrefix(prefix, target);
if (src == actions[i].src || comm.length == prefix.length) {
// Hold operations require reading local file that is moved after the target is moved online
pendingTargets ~= i;
actions[i].skipped = true;
srcMap.remove(actions[i].src);
if (comm.length == target.length)
actions[i].src = dst;
else
actions[i].src = dst ~ target[comm.length - 1 .. target.length];
}
break;
default:
break;
}
}
}
break;
default:
break;
}
actions ~= Action(type, false, src, dst);
srcMap[src] = actions.length - 1;
foreach (pendingTarget; pendingTargets) {
actions ~= actions[pendingTarget];
actions[$-1].skipped = false;
srcMap[actions[$-1].src] = actions.length - 1;
}
}
}
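// A hedged walk-through of the coalescing above (the paths are illustrative):
//
//   ActionHolder holder;
//   holder.append(ActionType.createDir, "./docs");
//   holder.append(ActionType.changed, "./docs/a.txt");
//   holder.append(ActionType.moved, "./docs", "./archive");
//
// The original createDir/changed entries are marked skipped, rewritten against the
// new location, and re-queued after the move as "./archive" and "./archive/a.txt",
// so no queued action tries to read a local path that no longer exists.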
final class Monitor {
// Class variables
ApplicationConfig appConfig;
ClientSideFiltering selectiveSync;
// Are we verbose in logging output
bool verbose = false;
// skip symbolic links
bool skip_symlinks = false;
// check for .nosync if enabled
bool check_nosync = false;
// check if initialised
bool initialised = false;
// Worker Tid
Tid workerTid;
// Configure Private Class Variables
shared(MonitorBackgroundWorker) worker;
// map every inotify watch descriptor to its directory
private string[int] wdToDirName;
// map the inotify cookies of move_from events to their path
private string[int] cookieToPath;
// buffer to receive the inotify events
private void[] buffer;
// skip symbolic links
bool skip_symlinks;
// check for .nosync if enabled
bool check_nosync;
private SelectiveSync selectiveSync;
// Configure function delegates
void delegate(string path) onDirCreated;
void delegate(string path) onFileChanged;
void delegate(string[] path) onFileChanged;
void delegate(string path) onDelete;
void delegate(string from, string to) onMove;
// List of paths that were moved, not deleted
bool[string] movedNotDeleted;
this(SelectiveSync selectiveSync)
{
assert(selectiveSync);
ActionHolder actionHolder;
// Configure the class variable to consume the application configuration, including selective sync
this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
this.appConfig = appConfig;
this.selectiveSync = selectiveSync;
}
void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync)
{
this.verbose = verbose;
this.skip_symlinks = skip_symlinks;
this.check_nosync = check_nosync;
// Initialise the monitor class
void initialise() {
// Configure the variables
skip_symlinks = appConfig.getValueBool("skip_symlinks");
check_nosync = appConfig.getValueBool("check_nosync");
if (appConfig.getValueLong("verbose") > 0) {
verbose = true;
}
assert(onDirCreated && onFileChanged && onDelete && onMove);
fd = inotify_init();
if (fd < 0) throw new MonitorException("inotify_init failed");
if (!buffer) buffer = new void[4096];
worker = cast(shared) new MonitorBackgroundWorker;
worker.initialise();
// from which point do we start watching for changes?
string monitorPath;
if (cfg.getValueString("single_directory") != ""){
// single directory in use, monitor only this
monitorPath = "./" ~ cfg.getValueString("single_directory");
if (appConfig.getValueString("single_directory") != ""){
// single directory in use, monitor only this path
monitorPath = "./" ~ appConfig.getValueString("single_directory");
} else {
// default
monitorPath = ".";
}
addRecursive(monitorPath);
// Start monitoring
workerTid = spawn(&startMonitorJob, worker, thisTid);
initialised = true;
}
void shutdown()
{
if (fd > 0) close(fd);
// Communication with worker
void send(bool isAlive) {
workerTid.send(isAlive);
}
// Shutdown the monitor class
void shutdown() {
if(!initialised)
return;
initialised = false;
// Release all resources
removeAll();
// Notify the worker that the monitor has been shutdown
worker.interrupt();
send(false);
wdToDirName = null;
}
private void addRecursive(string dirname)
{
// Recursively add this path to be monitored
private void addRecursive(string dirname) {
// skip non existing/disappeared items
if (!exists(dirname)) {
log.vlog("Not adding non-existing/disappeared directory: ", dirname);
addLogEntry("Not adding non-existing/disappeared directory: " ~ dirname, ["verbose"]);
return;
}
@ -93,7 +347,7 @@ final class Monitor
if (isDir(dirname)) {
if (selectiveSync.isDirNameExcluded(dirname.strip('.'))) {
// don't add a watch for this item
log.vdebug("Skipping monitoring due to skip_dir match: ", dirname);
addLogEntry("Skipping monitoring due to skip_dir match: " ~ dirname, ["debug"]);
return;
}
}
@ -103,14 +357,14 @@ final class Monitor
// This is because, if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched
if (selectiveSync.isFileNameExcluded(dirname.strip('.'))) {
// don't add a watch for this item
log.vdebug("Skipping monitoring due to skip_file match: ", dirname);
addLogEntry("Skipping monitoring due to skip_file match: " ~ dirname, ["debug"]);
return;
}
}
// is the path excluded by sync_list?
if (selectiveSync.isPathExcludedViaSyncList(buildNormalizedPath(dirname))) {
// don't add a watch for this item
log.vdebug("Skipping monitoring due to sync_list match: ", dirname);
addLogEntry("Skipping monitoring due to sync_list match: " ~ dirname, ["debug"]);
return;
}
}
@ -127,15 +381,27 @@ final class Monitor
// Do we need to check for .nosync? Only if check_nosync is true
if (check_nosync) {
if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) {
log.vlog("Skipping watching path - .nosync found & --check-for-nosync enabled: ", buildNormalizedPath(dirname));
addLogEntry("Skipping watching path - .nosync found & --check-for-nosync enabled: " ~ buildNormalizedPath(dirname), ["verbose"]);
return;
}
}
if (isDir(dirname)) {
// This is a directory
// is the path excluded if skip_dotfiles configured and path is a .folder?
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(dirname))) {
// don't add a watch for this directory
return;
}
}
// passed all potential exclusions
// add inotify watch for this path / directory / file
log.vdebug("Calling add() for this dirname: ", dirname);
add(dirname);
addLogEntry("Calling worker.addInotifyWatch() for this dirname: " ~ dirname, ["debug"]);
int wd = worker.addInotifyWatch(dirname);
if (wd > 0) {
wdToDirName[wd] = buildNormalizedPath(dirname) ~ "/";
}
// if this is a directory, recursively add this path
if (isDir(dirname)) {
@ -144,7 +410,7 @@ final class Monitor
auto pathList = dirEntries(dirname, SpanMode.shallow, false);
foreach(DirEntry entry; pathList) {
if (entry.isDir) {
log.vdebug("Calling addRecursive() for this directory: ", entry.name);
addLogEntry("Calling addRecursive() for this directory: " ~ entry.name, ["debug"]);
addRecursive(entry.name);
}
}
@ -158,10 +424,10 @@ final class Monitor
// Need to check for: Failed to stat file in error message
if (canFind(e.msg, "Failed to stat file")) {
// File system access issue
log.error("ERROR: The local file system returned an error with the following message:");
log.error(" Error Message: ", e.msg);
log.error("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file is preventing this application to read it");
log.error("\nFATAL: Exiting application to avoid deleting data due to local file system access issues\n");
addLogEntry("ERROR: The local file system returned an error with the following message:");
addLogEntry(" Error Message: " ~ e.msg);
addLogEntry("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file is preventing this application to read it");
addLogEntry("\nFATAL: Forcing exiting application to avoid deleting data due to local file system access issues\n");
// Must exit here
exit(-1);
} else {
@ -173,219 +439,238 @@ final class Monitor
}
}
private void add(string pathname)
{
int wd = inotify_add_watch(fd, toStringz(pathname), mask);
if (wd < 0) {
if (errno() == ENOSPC) {
log.log("The user limit on the total number of inotify watches has been reached.");
log.log("To see the current max number of watches run:");
log.log("sysctl fs.inotify.max_user_watches");
log.log("To change the current max number of watches to 524288 run:");
log.log("sudo sysctl fs.inotify.max_user_watches=524288");
}
if (errno() == 13) {
if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
// no misleading output that we could not add a watch due to permission denied
return;
} else {
log.vlog("WARNING: inotify_add_watch failed - permission denied: ", pathname);
return;
}
}
// Flag any other errors
log.error("ERROR: inotify_add_watch failed: ", pathname);
return;
}
// Add path to inotify watch - required regardless of whether it is a '.folder' or 'folder'
wdToDirName[wd] = buildNormalizedPath(pathname) ~ "/";
log.vdebug("inotify_add_watch successfully added for: ", pathname);
// Do we log that we are monitoring this directory?
if (isDir(pathname)) {
// This is a directory
// is the path excluded if skip_dotfiles configured and path is a .folder?
if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
// no misleading output that we are monitoring this directory
return;
}
// Log that this directory is being monitored
log.vlog("Monitor directory: ", pathname);
// Remove a watch descriptor
private void removeAll() {
string[int] copy = wdToDirName.dup;
foreach (wd, path; copy) {
remove(wd);
}
}
// remove a watch descriptor
private void remove(int wd)
{
private void remove(int wd) {
assert(wd in wdToDirName);
int ret = inotify_rm_watch(fd, wd);
int ret = worker.removeInotifyWatch(wd);
if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
log.vlog("Monitored directory removed: ", wdToDirName[wd]);
addLogEntry("Monitored directory removed: " ~ to!string(wdToDirName[wd]), ["verbose"]);
wdToDirName.remove(wd);
}
// remove the watch descriptors associated to the given path
private void remove(const(char)[] path)
{
// Remove the watch descriptors associated to the given path
private void remove(const(char)[] path) {
path ~= "/";
foreach (wd, dirname; wdToDirName) {
if (dirname.startsWith(path)) {
int ret = inotify_rm_watch(fd, wd);
int ret = worker.removeInotifyWatch(wd);
if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
wdToDirName.remove(wd);
log.vlog("Monitored directory removed: ", dirname);
addLogEntry("Monitored directory removed: " ~ dirname, ["verbose"]);
}
}
}
// return the file path from an inotify event
private string getPath(const(inotify_event)* event)
{
// Return the file path from an inotify event
private string getPath(const(inotify_event)* event) {
string path = wdToDirName[event.wd];
if (event.len > 0) path ~= fromStringz(event.name.ptr);
log.vdebug("inotify path event for: ", path);
addLogEntry("inotify path event for: " ~ path, ["debug"]);
return path;
}
void update(bool useCallbacks = true)
{
// Update
void update(bool useCallbacks = true) {
if(!initialised)
return;
pollfd fds = {
fd: fd,
fd: worker.fd,
events: POLLIN
};
while (true) {
int ret = poll(&fds, 1, 0);
if (ret == -1) throw new MonitorException("poll failed");
else if (ret == 0) break; // no events available
bool hasNotification = false;
int sleep_counter = 0;
// Batch events up to 5 seconds
while (sleep_counter < 5) {
int ret = poll(&fds, 1, 0);
if (ret == -1) throw new MonitorException("poll failed");
else if (ret == 0) break; // no events available
hasNotification = true;
size_t length = read(worker.fd, buffer.ptr, buffer.length);
if (length == -1) throw new MonitorException("read failed");
size_t length = read(fd, buffer.ptr, buffer.length);
if (length == -1) throw new MonitorException("read failed");
int i = 0;
while (i < length) {
inotify_event *event = cast(inotify_event*) &buffer[i];
string path;
string evalPath;
// inotify event debug
log.vdebug("inotify event wd: ", event.wd);
log.vdebug("inotify event mask: ", event.mask);
log.vdebug("inotify event cookie: ", event.cookie);
log.vdebug("inotify event len: ", event.len);
log.vdebug("inotify event name: ", event.name);
if (event.mask & IN_ACCESS) log.vdebug("inotify event flag: IN_ACCESS");
if (event.mask & IN_MODIFY) log.vdebug("inotify event flag: IN_MODIFY");
if (event.mask & IN_ATTRIB) log.vdebug("inotify event flag: IN_ATTRIB");
if (event.mask & IN_CLOSE_WRITE) log.vdebug("inotify event flag: IN_CLOSE_WRITE");
if (event.mask & IN_CLOSE_NOWRITE) log.vdebug("inotify event flag: IN_CLOSE_NOWRITE");
if (event.mask & IN_MOVED_FROM) log.vdebug("inotify event flag: IN_MOVED_FROM");
if (event.mask & IN_MOVED_TO) log.vdebug("inotify event flag: IN_MOVED_TO");
if (event.mask & IN_CREATE) log.vdebug("inotify event flag: IN_CREATE");
if (event.mask & IN_DELETE) log.vdebug("inotify event flag: IN_DELETE");
if (event.mask & IN_DELETE_SELF) log.vdebug("inotify event flag: IN_DELETE_SELF");
if (event.mask & IN_MOVE_SELF) log.vdebug("inotify event flag: IN_MOVE_SELF");
if (event.mask & IN_UNMOUNT) log.vdebug("inotify event flag: IN_UNMOUNT");
if (event.mask & IN_Q_OVERFLOW) log.vdebug("inotify event flag: IN_Q_OVERFLOW");
if (event.mask & IN_IGNORED) log.vdebug("inotify event flag: IN_IGNORED");
if (event.mask & IN_CLOSE) log.vdebug("inotify event flag: IN_CLOSE");
if (event.mask & IN_MOVE) log.vdebug("inotify event flag: IN_MOVE");
if (event.mask & IN_ONLYDIR) log.vdebug("inotify event flag: IN_ONLYDIR");
if (event.mask & IN_DONT_FOLLOW) log.vdebug("inotify event flag: IN_DONT_FOLLOW");
if (event.mask & IN_EXCL_UNLINK) log.vdebug("inotify event flag: IN_EXCL_UNLINK");
if (event.mask & IN_MASK_ADD) log.vdebug("inotify event flag: IN_MASK_ADD");
if (event.mask & IN_ISDIR) log.vdebug("inotify event flag: IN_ISDIR");
if (event.mask & IN_ONESHOT) log.vdebug("inotify event flag: IN_ONESHOT");
if (event.mask & IN_ALL_EVENTS) log.vdebug("inotify event flag: IN_ALL_EVENTS");
// skip events that need to be ignored
if (event.mask & IN_IGNORED) {
// forget the directory associated to the watch descriptor
wdToDirName.remove(event.wd);
goto skip;
} else if (event.mask & IN_Q_OVERFLOW) {
throw new MonitorException("Inotify overflow, events missing");
}
// if the event is not to be ignored, obtain path
path = getPath(event);
// configure the skip_dir & skip_file comparison item
evalPath = path.strip('.');
// Skip events that should be excluded based on application configuration
// We cant use isDir or isFile as this information is missing from the inotify event itself
// Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995
// Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions
// Directory events should only be compared against skip_dir and file events should only be compared against skip_file
if (event.mask & IN_ISDIR) {
// The event in question contains the IN_ISDIR event mask, thus this is highly likely an event on a directory
// This is because, if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched
if (selectiveSync.isDirNameExcluded(evalPath)) {
// The path to evaluate matches a path that the user has configured to skip
int i = 0;
while (i < length) {
inotify_event *event = cast(inotify_event*) &buffer[i];
string path;
string evalPath;
// inotify event debug
addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]);
addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]);
addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]);
addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]);
addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]);
// inotify event handling
if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]);
if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]);
if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]);
if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]);
if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]);
if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]);
if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]);
if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]);
if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]);
if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]);
if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]);
if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]);
if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]);
if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]);
if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]);
if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]);
if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]);
if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]);
if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]);
if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]);
if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]);
if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]);
if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]);
// skip events that need to be ignored
if (event.mask & IN_IGNORED) {
// forget the directory associated to the watch descriptor
wdToDirName.remove(event.wd);
goto skip;
} else if (event.mask & IN_Q_OVERFLOW) {
throw new MonitorException("inotify overflow, inotify events will be missing");
}
} else {
// The event in question is missing the IN_ISDIR event mask, so this is highly likely to be an event on a file
// This is because, if the user has specified an exclusive path in skip_file ('/path/file'), that exact path is what must be matched
if (selectiveSync.isFileNameExcluded(evalPath)) {
// The path to evaluate matches a file that the user has configured to skip
goto skip;
}
}
// Is the path excluded via sync_list?
if (selectiveSync.isPathExcludedViaSyncList(path)) {
// The path to evaluate matches a directory or file that the user has configured not to include in the sync
goto skip;
}
// handle the inotify events
if (event.mask & IN_MOVED_FROM) {
log.vdebug("event IN_MOVED_FROM: ", path);
cookieToPath[event.cookie] = path;
} else if (event.mask & IN_MOVED_TO) {
log.vdebug("event IN_MOVED_TO: ", path);
if (event.mask & IN_ISDIR) addRecursive(path);
auto from = event.cookie in cookieToPath;
if (from) {
cookieToPath.remove(event.cookie);
if (useCallbacks) onMove(*from, path);
// if the event is not to be ignored, obtain path
path = getPath(event);
// configure the skip_dir & skip_file comparison item
evalPath = path.strip('.');
// Skip events that should be excluded based on application configuration
// We can't use isDir or isFile as this information is missing from the inotify event itself
// Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995
// Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions
// Directory events should only be compared against skip_dir and file events should only be compared against skip_file
if (event.mask & IN_ISDIR) {
// The event in question contains the IN_ISDIR event mask, so this is highly likely to be an event on a directory
// This is because, if the user has specified an exclusive path in skip_dir ('/path'), that exact path is what must be matched
if (selectiveSync.isDirNameExcluded(evalPath)) {
// The path to evaluate matches a path that the user has configured to skip
goto skip;
}
} else {
// item moved from the outside
if (event.mask & IN_ISDIR) {
if (useCallbacks) onDirCreated(path);
} else {
if (useCallbacks) onFileChanged(path);
// The event in question is missing the IN_ISDIR event mask, so this is highly likely to be an event on a file
// This is because, if the user has specified an exclusive path in skip_file ('/path/file'), that exact path is what must be matched
if (selectiveSync.isFileNameExcluded(evalPath)) {
// The path to evaluate matches a file that the user has configured to skip
goto skip;
}
}
} else if (event.mask & IN_CREATE) {
log.vdebug("event IN_CREATE: ", path);
if (event.mask & IN_ISDIR) {
addRecursive(path);
if (useCallbacks) onDirCreated(path);
// Is the path excluded via sync_list?
if (selectiveSync.isPathExcludedViaSyncList(path)) {
// The path to evaluate matches a directory or file that the user has configured not to include in the sync
goto skip;
}
} else if (event.mask & IN_DELETE) {
log.vdebug("event IN_DELETE: ", path);
if (useCallbacks) onDelete(path);
} else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) {
log.vdebug("event IN_CLOSE_WRITE and ...: ", path);
if (useCallbacks) onFileChanged(path);
} else {
log.vdebug("event unhandled: ", path);
assert(0);
// handle the inotify events
if (event.mask & IN_MOVED_FROM) {
addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]);
cookieToPath[event.cookie] = path;
movedNotDeleted[path] = true; // Mark as moved, not deleted
} else if (event.mask & IN_MOVED_TO) {
addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) addRecursive(path);
auto from = event.cookie in cookieToPath;
if (from) {
cookieToPath.remove(event.cookie);
if (useCallbacks) actionHolder.append(ActionType.moved, *from, path);
movedNotDeleted.remove(*from); // Clear moved status
} else {
// Handle file moved in from outside
if (event.mask & IN_ISDIR) {
if (useCallbacks) actionHolder.append(ActionType.createDir, path);
} else {
if (useCallbacks) actionHolder.append(ActionType.changed, path);
}
}
} else if (event.mask & IN_CREATE) {
addLogEntry("event IN_CREATE: " ~ path, ["debug"]);
if (event.mask & IN_ISDIR) {
addRecursive(path);
if (useCallbacks) actionHolder.append(ActionType.createDir, path);
}
} else if (event.mask & IN_DELETE) {
if (path in movedNotDeleted) {
movedNotDeleted.remove(path); // Ignore delete for moved files
} else {
addLogEntry("event IN_DELETE: " ~ path, ["debug"]);
if (useCallbacks) actionHolder.append(ActionType.deleted, path);
}
} else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) {
addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]);
if (useCallbacks) actionHolder.append(ActionType.changed, path);
} else {
addLogEntry("event unhandled: " ~ path, ["debug"]);
assert(0);
}
skip:
i += inotify_event.sizeof + event.len;
}
skip:
i += inotify_event.sizeof + event.len;
// Sleep for one second to prevent missing fast-changing events.
if (poll(&fds, 1, 0) == 0) {
sleep_counter += 1;
Thread.sleep(dur!"seconds"(1));
}
}
// assume that the items moved outside the watched directory have been deleted
if (!hasNotification) break;
processChanges();
// Assume that the items moved outside the watched directory have been deleted
foreach (cookie, path; cookieToPath) {
log.vdebug("deleting (post loop): ", path);
addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]);
if (useCallbacks) onDelete(path);
remove(path);
cookieToPath.remove(cookie);
}
// Debug Log that all inotify events are flushed
addLogEntry("inotify events flushed", ["debug"]);
}
}
private void processChanges() {
string[] changes;
foreach(action; actionHolder.actions) {
if (action.skipped)
continue;
switch (action.type) {
case ActionType.changed:
changes ~= action.src;
break;
case ActionType.deleted:
onDelete(action.src);
break;
case ActionType.createDir:
onDirCreated(action.src);
break;
case ActionType.moved:
onMove(action.src, action.dst);
break;
default:
break;
}
}
if (!changes.empty)
onFileChanged(changes);
object.destroy(actionHolder);
}
}
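The refactored monitor loop no longer fires callbacks inline: events are queued through actionHolder and flushed once per pass by processChanges(), so bursts of inotify activity collapse into one batched onFileChanged() call. Below is a minimal self-contained sketch of that queue-then-flush pattern; the real ActionType and holder are defined elsewhere in monitor.d, so the names here are illustrative only.

import std.stdio;

enum ActionType { changed, deleted, createDir, moved }

struct Action {
	ActionType type;
	bool skipped;   // set when a later event supersedes this one
	string src;
	string dst;
}

struct ActionHolder {
	Action[] actions;
	void append(ActionType type, string src, string dst = null) {
		actions ~= Action(type, false, src, dst);
	}
}

void main() {
	ActionHolder holder;
	holder.append(ActionType.createDir, "docs");
	holder.append(ActionType.changed, "docs/readme.md");
	holder.append(ActionType.moved, "old.txt", "new.txt");
	string[] changes;
	foreach (action; holder.actions) {
		if (action.skipped) continue;
		final switch (action.type) {
			case ActionType.changed:   changes ~= action.src; break;
			case ActionType.deleted:   writeln("delete: ", action.src); break;
			case ActionType.createDir: writeln("mkdir: ", action.src); break;
			case ActionType.moved:     writeln("move: ", action.src, " -> ", action.dst); break;
		}
	}
	if (changes.length) writeln("changed batch: ", changes);
}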


@@ -79,11 +79,11 @@ void init(in char[] name) {
alias notify_is_initted is_initted;
alias notify_uninit uninit;
static this() {
shared static this() {
init(__FILE__);
}
static ~this() {
shared static ~this() {
uninit();
}
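The move from static this() to shared static this() is deliberate: D runs plain module constructors once per thread, whereas shared module constructors run exactly once per process, which is what a one-shot notify_init()/notify_uninit() pair needs now that the client is multi-threaded. A small illustration of the difference:

import std.stdio;
import core.thread;

shared static this() { writeln("process-level init: runs exactly once"); }
static this()        { writeln("thread-level init: runs in every thread"); }

void main() {
	auto t = new Thread({ /* starting a thread re-runs the plain constructor */ });
	t.start();
	t.join();
}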

File diff suppressed because it is too large.


@@ -1,156 +0,0 @@
module progress;
import std.stdio;
import std.range;
import std.format;
import std.datetime;
import core.sys.posix.unistd;
import core.sys.posix.sys.ioctl;
class Progress
{
private:
immutable static size_t default_width = 80;
size_t max_width = 40;
size_t width = default_width;
ulong start_time;
string caption = "Progress";
size_t iterations;
size_t counter;
size_t getTerminalWidth() {
size_t column = default_width;
version (CRuntime_Musl) {
} else version(Android) {
} else {
winsize ws;
if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0) {
column = ws.ws_col;
}
}
return column;
}
void clear() {
write("\r");
for(auto i = 0; i < width; i++) write(" ");
write("\r");
}
int calc_eta() {
immutable auto ratio = cast(double)counter / iterations;
auto current_time = Clock.currTime.toUnixTime();
auto duration = cast(int)(current_time - start_time);
int hours, minutes, seconds;
double elapsed = (current_time - start_time);
int eta_sec = cast(int)((elapsed / ratio) - elapsed);
// Return an ETA or Duration?
if (eta_sec != 0){
return eta_sec;
} else {
return duration;
}
}
string progressbarText(string header_text, string footer_text) {
immutable auto ratio = cast(double)counter / iterations;
string result = "";
double bar_length = width - header_text.length - footer_text.length;
if(bar_length > max_width && max_width > 0) {
bar_length = max_width;
}
size_t i = 0;
for(; i < ratio * bar_length; i++) result ~= "o";
for(; i < bar_length; i++) result ~= " ";
return header_text ~ result ~ footer_text;
}
void print() {
immutable auto ratio = cast(double)counter / iterations;
auto header = appender!string();
auto footer = appender!string();
header.formattedWrite("%s %3d%% |", caption, cast(int)(ratio * 100));
if(counter <= 0 || ratio == 0.0) {
footer.formattedWrite("| ETA --:--:--:");
} else {
int h, m, s;
dur!"seconds"(calc_eta())
.split!("hours", "minutes", "seconds")(h, m, s);
if (counter != iterations){
footer.formattedWrite("| ETA %02d:%02d:%02d ", h, m, s);
} else {
footer.formattedWrite("| DONE IN %02d:%02d:%02d ", h, m, s);
}
}
write(progressbarText(header.data, footer.data));
}
void update() {
width = getTerminalWidth();
clear();
print();
stdout.flush();
}
public:
this(size_t iterations) {
if(iterations <= 0) iterations = 1;
counter = -1;
this.iterations = iterations;
start_time = Clock.currTime.toUnixTime;
}
@property {
string title() { return caption; }
string title(string text) { return caption = text; }
}
@property {
size_t count() { return counter; }
size_t count(size_t val) {
if(val > iterations) val = iterations;
return counter = val;
}
}
@property {
size_t maxWidth() { return max_width; }
size_t maxWidth(size_t w) {
return max_width = w;
}
}
void reset() {
counter = -1;
start_time = Clock.currTime.toUnixTime;
}
void next() {
counter++;
if(counter > iterations) counter = iterations;
update();
}
}
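The deleted Progress class derives its ETA from the completion ratio: with elapsed time e and ratio r, the projected total runtime is e / r, so the remaining time is e / r - e. A standalone sketch of that calculation follows (illustrative names; the caller must guard against a zero counter, as print() did):

import std.stdio;

// ETA in whole seconds, given elapsed seconds and items completed out of total
int calcEta(double elapsed, size_t counter, size_t iterations) {
	immutable ratio = cast(double) counter / iterations;
	return cast(int)((elapsed / ratio) - elapsed);
}

void main() {
	// 30 seconds in with 25 of 100 fragments done projects 90 seconds remaining
	writeln(calcEta(30.0, 25, 100));
}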


@@ -1,7 +1,11 @@
// What is this module called?
module qxor;
// What does this module require to function?
import std.algorithm;
import std.digest;
// implementation of the QuickXorHash algorithm in D
// Implementation of the QuickXorHash algorithm in D
// https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md
struct QuickXor
{
@@ -71,18 +75,4 @@ struct QuickXor
}
return tmp;
}
}
unittest
{
assert(isDigest!QuickXor);
}
unittest
{
QuickXor qxor;
qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
}
alias QuickXorDigest = WrapperDigest!(QuickXor);
}
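The unit tests removed above remain the clearest statement of the qxor contract: QuickXor satisfies std.digest's isDigest, and OneDrive's published test vector hashes to a known value. A usage sketch, assuming the in-tree qxor module is on the import path:

import std.digest;
import qxor;

void main() {
	static assert(isDigest!QuickXor);
	QuickXor hash;
	hash.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
	assert(hash.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
}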


@@ -1,422 +0,0 @@
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
import util;
import log;
final class SelectiveSync
{
private string[] paths;
private string[] businessSharedFoldersList;
private Regex!char mask;
private Regex!char dirmask;
private bool skipDirStrictMatch = false;
private bool skipDotfiles = false;
// load sync_list file
void load(string filepath)
{
if (exists(filepath)) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch()
{
skipDirStrictMatch = true;
}
// load business_shared_folders file
void loadSharedFolders(string filepath)
{
if (exists(filepath)) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
businessSharedFoldersList ~= buildNormalizedPath(line);
}
file.close();
}
}
void setFileMask(const(char)[] mask)
{
this.mask = wild2regex(mask);
}
void setDirMask(const(char)[] dirmask)
{
this.dirmask = wild2regex(dirmask);
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles()
{
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles()
{
return skipDotfiles;
}
// config file skip_dir parameter
bool isDirNameExcluded(string name)
{
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
log.vdebug("skip_dir evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(dirmask).empty) {
log.vdebug("'!name.matchFirst(dirmask).empty' returned true = matched");
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
log.vdebug("No Strict Matching Enforced");
// Test the entire path working backwards from child
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(dirmask).empty) {
log.vdebug("'!checkPath.matchFirst(dirmask).empty' returned true = matched");
return true;
}
}
}
} else {
log.vdebug("Strict Matching Enforced - No Match");
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name)
{
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
log.vdebug("skip_file evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(mask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(mask).empty) {
return true;
}
}
// no match
return false;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path)
{
// Debug output that we are performing a 'sync_list' inclusion / exclusion test
return .isPathExcluded(path, paths);
}
// Match against skip_dir, skip_file & sync_list entries
bool isPathExcludedMatchAll(string path)
{
return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask);
}
// is the path a dotfile?
bool isDotFile(string path)
{
// always allow the root
if (path == ".") return false;
path = buildNormalizedPath(path);
auto paths = pathSplitter(path);
foreach(base; paths) {
if (startsWith(base, ".")){
return true;
}
}
return false;
}
// is business shared folder matched
bool isSharedFolderMatched(string name)
{
// if there are no shared folders, always return false
if (businessSharedFoldersList.empty) return false;
if (!name.matchFirst(businessSharedFoldersList).empty) {
return true;
} else {
// try a direct comparison just in case
foreach (userFolder; businessSharedFoldersList) {
if (userFolder == name) {
// direct match
log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name);
return true;
}
}
return false;
}
}
// is business shared folder included
bool isPathIncluded(string path, string[] allowedPaths)
{
// always allow the root
if (path == ".") return true;
// if there are no allowed paths always return true
if (allowedPaths.empty) return true;
path = buildNormalizedPath(path);
foreach (allowed; allowedPaths) {
auto comm = commonPrefix(path, allowed);
if (comm.length == path.length) {
// the given path is contained in an allowed path
return true;
}
if (comm.length == allowed.length && path[comm.length] == '/') {
// the given path is a subitem of an allowed path
return true;
}
}
return false;
}
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
private bool isPathExcluded(string path, string[] allowedPaths)
{
// function variables
bool exclude = false;
bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
log.vdebug("Evaluation against 'sync_list' for this path: ", path);
log.vdebug("[S]exclude = ", exclude);
log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[S]excludeMatched = ", excludeMatched);
// unless path is an exact match, entire sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is the path an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
log.vdebug("exact path match");
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded");
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag exludeDirectMatch so that a 'wildcard match' will not override this exclude
exludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
log.vdebug("something 'common' matches the input path");
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded");
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem / sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: parental path match");
finalResult = false;
// parental path matches, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' for '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !exludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match");
finalResult = false;
} else {
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
log.vdebug("[F]exclude = ", exclude);
log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[F]excludeMatched = ", excludeMatched);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (exludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED");
} else {
log.vdebug("Evaluation against 'sync_list' final result: included for sync");
}
return finalResult;
}
// test if the given path is matched by the regex expression.
// recursively test up the tree.
private bool isPathMatched(string path, Regex!char mask) {
path = buildNormalizedPath(path);
auto paths = pathSplitter(path);
string prefix = "";
foreach(base; paths) {
prefix ~= base;
if (!path.matchFirst(mask).empty) {
// the given path matches something which we should skip
return true;
}
prefix ~= dirSeparator;
}
return false;
}
// unit tests
unittest
{
assert(isPathExcluded("Documents2", ["Documents"]));
assert(!isPathExcluded("Documents", ["Documents"]));
assert(!isPathExcluded("Documents/a.txt", ["Documents"]));
assert(isPathExcluded("Hello/World", ["Hello/John"]));
assert(!isPathExcluded(".", ["Documents"]));
}
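Those final unit tests pin down the sync_list contract the rest of the file implements: an exact entry or anything beneath it is included, sibling names that merely share a prefix are not, and the root path is always kept. Restated with inline annotations (isPathExcluded is private to the removed module, so this only runs as a unittest inside it):

unittest {
	assert(isPathExcluded("Documents2", ["Documents"]));        // shared prefix only: excluded
	assert(!isPathExcluded("Documents", ["Documents"]));        // exact match: included
	assert(!isPathExcluded("Documents/a.txt", ["Documents"]));  // sub-item of a match: included
	assert(isPathExcluded("Hello/World", ["Hello/John"]));      // different branch: excluded
	assert(!isPathExcluded(".", ["Documents"]));                // the root is always included
}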


@@ -1,100 +1,104 @@
// What is this module called?
module sqlite;
// What does this module require to function?
import std.stdio;
import etc.c.sqlite3;
import std.string: fromStringz, toStringz;
import core.stdc.stdlib;
import std.conv;
static import log;
// What other modules that we have created do we need to import?
import log;
extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library
static this()
{
static this() {
if (sqlite3_libversion_number() < 3006019) {
throw new SqliteException("sqlite 3.6.19 or newer is required");
throw new SqliteException(-1,"sqlite 3.6.19 or newer is required");
}
}
private string ifromStringz(const(char)* cstr)
{
private string ifromStringz(const(char)* cstr) {
return fromStringz(cstr).dup;
}
class SqliteException: Exception
{
@safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null)
class SqliteException: Exception {
int errorCode; // Add an errorCode member to store the SQLite error code
@safe pure nothrow this(int errorCode, string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null)
{
super(msg, file, line, next);
this.errorCode = errorCode; // Set the errorCode
}
@safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__)
@safe pure nothrow this(int errorCode, string msg, Throwable next, string file = __FILE__, size_t line = __LINE__)
{
super(msg, file, line, next);
this.errorCode = errorCode; // Set the errorCode
}
}
struct Database
{
struct Database {
private sqlite3* pDb;
this(const(char)[] filename)
{
this(const(char)[] filename) {
open(filename);
}
~this()
{
~this() {
close();
}
int db_checkpoint()
{
int db_checkpoint() {
return sqlite3_wal_checkpoint(pDb, null);
}
void dump_open_statements()
{
log.log("Dumpint open statements: \n");
void dump_open_statements() {
addLogEntry("Dumping open statements:", ["debug"]);
auto p = sqlite3_next_stmt(pDb, null);
while (p != null) {
log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n");
addLogEntry(" - " ~ to!string(ifromStringz(sqlite3_sql(p))));
p = sqlite3_next_stmt(pDb, p);
}
}
void open(const(char)[] filename)
{
void open(const(char)[] filename) {
// https://www.sqlite.org/c3ref/open.html
int rc = sqlite3_open(toStringz(filename), &pDb);
if (rc == SQLITE_CANTOPEN) {
// Database cannot be opened
log.error("\nThe database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3\n");
addLogEntry();
addLogEntry("The database cannot be opened. Please check the permissions of " ~ to!string(filename));
addLogEntry();
close();
exit(-1);
}
if (rc != SQLITE_OK) {
log.error("\nA database access error occurred: " ~ getErrorMessage() ~ "\n");
addLogEntry();
addLogEntry("A database access error occurred: " ~ getErrorMessage());
addLogEntry();
close();
exit(-1);
}
sqlite3_extended_result_codes(pDb, 1); // always use extended result codes
}
void exec(const(char)[] sql)
{
void exec(const(char)[] sql) {
// https://www.sqlite.org/c3ref/exec.html
int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null);
if (rc != SQLITE_OK) {
log.error("\nA database execution error occurred: "~ getErrorMessage() ~ "\n");
log.error("Please retry your command with --resync to fix any local database corruption issues.\n");
addLogEntry();
addLogEntry("A database execution error occurred: "~ getErrorMessage());
addLogEntry();
addLogEntry("Please retry your command with --resync to fix any local database corruption issues.");
addLogEntry();
close();
exit(-1);
}
}
int getVersion()
{
int getVersion() {
int userVersion;
extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) {
import core.stdc.stdlib: atoi;
@@ -103,73 +107,69 @@ struct Database
}
int rc = sqlite3_exec(pDb, "PRAGMA user_version", &callback, &userVersion, null);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(pDb)));
throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(pDb)));
}
return userVersion;
}
int getThreadsafeValue() {
// Get the threadsafe value
auto threadsafeValue = sqlite3_threadsafe();
return threadsafeValue;
}
string getErrorMessage()
{
string getErrorMessage() {
return ifromStringz(sqlite3_errmsg(pDb));
}
void setVersion(int userVersion)
{
void setVersion(int userVersion) {
import std.conv: to;
exec("PRAGMA user_version=" ~ to!string(userVersion));
}
Statement prepare(const(char)[] zSql)
{
Statement prepare(const(char)[] zSql) {
Statement s;
// https://www.sqlite.org/c3ref/prepare.html
int rc = sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(pDb)));
throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(pDb)));
}
return s;
}
void close()
{
void close() {
// https://www.sqlite.org/c3ref/close.html
sqlite3_close_v2(pDb);
pDb = null;
}
}
struct Statement
{
struct Result
{
struct Statement {
struct Result {
private sqlite3_stmt* pStmt;
private const(char)[][] row;
private this(sqlite3_stmt* pStmt)
{
private this(sqlite3_stmt* pStmt) {
this.pStmt = pStmt;
step(); // initialize the range
}
@property bool empty()
{
@property bool empty() {
return row.length == 0;
}
@property auto front()
{
@property auto front() {
return row;
}
alias step popFront;
void step()
{
void step() {
// https://www.sqlite.org/c3ref/step.html
int rc = sqlite3_step(pStmt);
if (rc == SQLITE_BUSY) {
// Database is locked by another onedrive process
log.error("The database is currently locked by another process - cannot sync");
addLogEntry("The database is currently locked by another process - cannot sync");
return;
}
if (rc == SQLITE_DONE) {
@@ -185,8 +185,11 @@ struct Statement
}
} else {
string errorMessage = ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)));
log.error("\nA database statement execution error occurred: "~ errorMessage ~ "\n");
log.error("Please retry your command with --resync to fix any local database corruption issues.\n");
addLogEntry();
addLogEntry("A database statement execution error occurred: "~ errorMessage);
addLogEntry();
addLogEntry("Please retry your command with --resync to fix any local database corruption issues.");
addLogEntry();
exit(-1);
}
}
@@ -194,63 +197,30 @@ struct Statement
private sqlite3_stmt* pStmt;
~this()
{
~this() {
// https://www.sqlite.org/c3ref/finalize.html
sqlite3_finalize(pStmt);
}
void bind(int index, const(char)[] value)
{
void bind(int index, const(char)[] value) {
reset();
// https://www.sqlite.org/c3ref/bind_blob.html
int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
}
}
Result exec()
{
Result exec() {
reset();
return Result(pStmt);
}
private void reset()
{
private void reset() {
// https://www.sqlite.org/c3ref/reset.html
int rc = sqlite3_reset(pStmt);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
throw new SqliteException(rc, ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
}
}
}
unittest
{
auto db = Database(":memory:");
db.exec("CREATE TABLE test(
id TEXT PRIMARY KEY,
value TEXT
)");
assert(db.getVersion() == 0);
db.setVersion(1);
assert(db.getVersion() == 1);
auto s = db.prepare("INSERT INTO test VALUES (?, ?)");
s.bind(1, "key1");
s.bind(2, "value");
s.exec();
s.bind(1, "key2");
s.bind(2, null);
s.exec();
s = db.prepare("SELECT * FROM test ORDER BY id ASC");
auto r = s.exec();
assert(r.front[0] == "key1");
r.popFront();
assert(r.front[1] == null);
r.popFront();
assert(r.empty);
}
}
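The unit test block dropped at the end still documents the intended Database/Statement API, and the new two-argument SqliteException now carries the SQLite return code alongside the message. A minimal usage sketch, assuming the in-tree sqlite module:

import sqlite;

void main() {
	auto db = Database(":memory:");
	db.exec("CREATE TABLE test (id TEXT PRIMARY KEY, value TEXT)");
	auto s = db.prepare("INSERT INTO test VALUES (?, ?)");
	s.bind(1, "key1");
	s.bind(2, "value");
	s.exec();
	s = db.prepare("SELECT * FROM test ORDER BY id ASC");
	auto r = s.exec();
	assert(r.front[0] == "key1");
}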

src/sync.d: 14431 changed lines (diff suppressed because it is too large)


@@ -1,302 +0,0 @@
import std.algorithm, std.conv, std.datetime, std.file, std.json;
import std.stdio, core.thread, std.string;
import progress, onedrive, util;
static import log;
private long fragmentSize = 10 * 2^^20; // 10 MiB
struct UploadSession
{
private OneDriveApi onedrive;
private bool verbose;
// https://dev.onedrive.com/resources/uploadSession.htm
private JSONValue session;
// path where to save the session
private string sessionFilePath;
this(OneDriveApi onedrive, string sessionFilePath)
{
assert(onedrive);
this.onedrive = onedrive;
this.sessionFilePath = sessionFilePath;
this.verbose = verbose;
}
JSONValue upload(string localPath, const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null)
{
// Fix https://github.com/abraunegg/onedrive/issues/2
// More Details https://github.com/OneDrive/onedrive-api-docs/issues/778
SysTime localFileLastModifiedTime = timeLastModified(localPath).toUTC();
localFileLastModifiedTime.fracSecs = Duration.zero;
JSONValue fileSystemInfo = [
"item": JSONValue([
"@name.conflictBehavior": JSONValue("replace"),
"fileSystemInfo": JSONValue([
"lastModifiedDateTime": localFileLastModifiedTime.toISOExtString()
])
])
];
// Try to create the upload session for this file
session = onedrive.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo);
if ("uploadUrl" in session){
session["localPath"] = localPath;
save();
return upload();
} else {
// there was an error
log.vlog("Create file upload session failed ... skipping file upload");
// return upload() will return a JSONValue response, create an empty JSONValue response to return
JSONValue response;
return response;
}
}
/* Restore the previous upload session.
* Returns true if the session is valid. Call upload() to resume it.
* Returns false if there is no session or the session is expired. */
bool restore()
{
if (exists(sessionFilePath)) {
log.vlog("Trying to restore the upload session ...");
// We can't use a JSONType.object check, as this is currently a string
// We can't use a try & catch block, as it does not catch std.json.JSONException
auto sessionFileText = readText(sessionFilePath);
if(canFind(sessionFileText,"@odata.context")) {
session = readText(sessionFilePath).parseJSON();
} else {
log.vlog("Upload session resume data is invalid");
remove(sessionFilePath);
return false;
}
// Check the session resume file for expirationDateTime
if ("expirationDateTime" in session){
// expirationDateTime in the file
auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str);
if (expiration < Clock.currTime()) {
log.vlog("The upload session is expired");
return false;
}
if (!exists(session["localPath"].str)) {
log.vlog("The file does not exist anymore");
return false;
}
// Can we read the file - as a permissions issue or file corruption will cause a failure on resume
// https://github.com/abraunegg/onedrive/issues/113
if (readLocalFile(session["localPath"].str)){
// able to read the file
// request the session status
JSONValue response;
try {
response = onedrive.requestUploadStatus(session["uploadUrl"].str);
} catch (OneDriveException e) {
// handle any onedrive error response
if (e.httpStatusCode == 400) {
log.vlog("Upload session not found");
return false;
}
}
// do we have a valid response from OneDrive?
if (response.type() == JSONType.object){
// JSON object
if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)){
// has the elements we need
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
if (session["nextExpectedRanges"].array.length == 0) {
log.vlog("The upload session is completed");
return false;
}
} else {
// bad data
log.vlog("Restore file upload session failed - invalid data response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
} else {
// not a JSON object
log.vlog("Restore file upload session failed - invalid response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
return true;
} else {
// unable to read the local file
log.vlog("Restore file upload session failed - unable to read the local file");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
} else {
// session file contains an error - can't resume
log.vlog("Restore file upload session failed - cleaning up session resume");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
}
return false;
}
JSONValue upload()
{
// Response for upload
JSONValue response;
// session JSON needs to contain valid elements
long offset;
long fileSize;
if ("nextExpectedRanges" in session){
offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long;
}
if ("localPath" in session){
fileSize = getSize(session["localPath"].str);
}
if ("uploadUrl" in session){
// Upload file via session created
// Upload Progress Bar
size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1;
Progress p = new Progress(iteration);
p.title = "Uploading";
long fragmentCount = 0;
long fragSize = 0;
// Initialise the upload progress bar at 0%
p.next();
while (true) {
fragmentCount++;
log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration);
p.next();
log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize );
fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
log.vdebugNewLine("Using fragSize: ", fragSize);
// fragSize must not be a negative value
if (fragSize < 0) {
// Session upload will fail
// not a JSON object - fragment upload failed
log.vlog("File upload session failed - invalid calculation of fragment size");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
// set response to null as error
response = null;
return response;
}
// If the resume upload fails, we need to check for a return code here
try {
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,
offset,
fragSize,
fileSize
);
} catch (OneDriveException e) {
// if a 100 response is generated, continue
if (e.httpStatusCode == 100) {
continue;
}
// there was an error response from OneDrive when uploading the file fragment
// handle 'HTTP request returned status code 429 (Too Many Requests)' first
if (e.httpStatusCode == 429) {
auto retryAfterValue = onedrive.getRetryAfterValue();
log.vdebug("Fragment upload failed - received throttle request response from OneDrive");
log.vdebug("Using Retry-After Value = ", retryAfterValue);
// Sleep thread as per request
log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled");
log.log("Sleeping for ", retryAfterValue, " seconds");
Thread.sleep(dur!"seconds"(retryAfterValue));
log.log("Retrying fragment upload");
} else {
// insert a new line as well, so that the below error is inserted on the console in the right location
log.vlog("\nFragment upload failed - received an exception response from OneDrive");
// display what the error is
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
// retry fragment upload in case error is transient
log.vlog("Retrying fragment upload");
}
try {
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,
offset,
fragSize,
fileSize
);
} catch (OneDriveException e) {
// OneDrive threw another error on retry
log.vlog("Retry to upload fragment failed");
// display what the error is
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
// set response to null as the fragment upload was in error twice
response = null;
}
}
// was the fragment uploaded without issue?
if (response.type() == JSONType.object){
offset += fragmentSize;
if (offset >= fileSize) break;
// update the session details
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
save();
} else {
// not a JSON object - fragment upload failed
log.vlog("File upload session failed - invalid response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
// set response to null as error
response = null;
return response;
}
}
// upload complete
p.next();
writeln();
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return response;
} else {
// session elements were not present
log.vlog("Session has no valid upload URL ... skipping this file upload");
// return an empty JSON response
response = null;
return response;
}
}
string getUploadSessionLocalFilePath() {
// return the session file path
string localPath = "";
if ("localPath" in session){
localPath = session["localPath"].str;
}
return localPath;
}
// save session details to temp file
private void save()
{
std.file.write(sessionFilePath, session.toString());
}
}
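The removed upload loop sized every chunk as the lesser of the fixed 10 MiB fragment and the bytes still outstanding. A standalone sketch of that arithmetic (the constant mirrors the removed module; file size and names are illustrative):

import std.stdio;

enum long fragmentSize = 10 * 2^^20; // 10 MiB, as in the removed module

void main() {
	long fileSize = 25 * 2^^20; // a 25 MiB example file
	long offset = 0;
	size_t fragmentCount = 0;
	while (offset < fileSize) {
		// never request more than what remains of the file
		long fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
		fragmentCount++;
		writefln("fragment %s: offset=%s size=%s", fragmentCount, offset, fragSize);
		offset += fragSize;
	}
	assert(fragmentCount == 3); // 10 MiB + 10 MiB + 5 MiB
}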

src/util.d: 1151 changed lines (diff suppressed because it is too large)

src/webhook.d: 339 additions (new file)

@@ -0,0 +1,339 @@
module webhook;
// What does this module require to function?
import core.atomic : atomicOp;
import std.datetime;
import std.concurrency;
import std.json;
// What other modules that we have created do we need to import?
import arsd.cgi;
import config;
import onedrive;
import log;
import util;
class OneDriveWebhook {
private RequestServer server;
private string host;
private ushort port;
private Tid parentTid;
private bool started;
private ApplicationConfig appConfig;
private OneDriveApi oneDriveApiInstance;
string subscriptionId = "";
SysTime subscriptionExpiration, subscriptionLastErrorAt;
Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval;
string notificationUrl = "";
private uint count;
this(Tid parentTid, ApplicationConfig appConfig) {
this.host = appConfig.getValueString("webhook_listening_host");
this.port = to!ushort(appConfig.getValueLong("webhook_listening_port"));
this.parentTid = parentTid;
this.appConfig = appConfig;
subscriptionExpiration = Clock.currTime(UTC());
subscriptionLastErrorAt = SysTime.fromUnixTime(0);
subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval"));
subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval"));
subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval"));
notificationUrl = appConfig.getValueString("webhook_public_url");
}
// The static serve() is necessary because spawn() does not like instance methods
void serve() {
if (this.started)
return;
this.started = true;
this.count = 0;
server.listeningHost = this.host;
server.listeningPort = this.port;
spawn(&serveImpl, cast(shared) this);
addLogEntry("Started webhook server");
// Subscriptions
oneDriveApiInstance = new OneDriveApi(this.appConfig);
oneDriveApiInstance.initialise();
createOrRenewSubscription();
}
void stop() {
if (!this.started)
return;
server.stop();
this.started = false;
addLogEntry("Stopped webhook server");
object.destroy(server);
// Delete subscription if there exists any
try {
deleteSubscription();
} catch (OneDriveException e) {
logSubscriptionError(e);
}
oneDriveApiInstance.shutdown();
object.destroy(oneDriveApiInstance);
}
private static void handle(shared OneDriveWebhook _this, Cgi cgi) {
if (debugHTTPResponseOutput) {
addLogEntry("Webhook request: " ~ to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri));
if (!cgi.postBody.empty) {
addLogEntry("Webhook post body: " ~ to!string(cgi.postBody));
}
}
cgi.setResponseContentType("text/plain");
if ("validationToken" in cgi.get) {
// For validation requests, respond with the validation token passed in the query string
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request
cgi.write(cgi.get["validationToken"]);
addLogEntry("Webhook: handled validation request");
} else {
// Notifications don't include any information about the changes that triggered them.
// Put a refresh signal in the queue and let the main monitor loop process it.
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks
_this.count.atomicOp!"+="(1);
send(cast()_this.parentTid, to!ulong(_this.count));
cgi.write("OK");
addLogEntry("Webhook: sent refresh signal #" ~ to!string(_this.count));
}
}
private static void serveImpl(shared OneDriveWebhook _this) {
_this.server.serveEmbeddedHttp!(handle, OneDriveWebhook)(_this);
}
// Create a new subscription or renew the existing subscription
void createOrRenewSubscription() {
auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
if (elapsed < subscriptionRetryInterval) {
return;
}
try {
if (!hasValidSubscription()) {
createSubscription();
} else if (isSubscriptionUpForRenewal()) {
renewSubscription();
}
} catch (OneDriveException e) {
logSubscriptionError(e);
subscriptionLastErrorAt = Clock.currTime(UTC());
addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval));
} catch (JSONException e) {
addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg);
subscriptionLastErrorAt = Clock.currTime(UTC());
addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval));
}
}
// Return the duration to next subscriptionExpiration check
Duration getNextExpirationCheckDuration() {
SysTime now = Clock.currTime(UTC());
if (hasValidSubscription()) {
Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
// Check if we are waiting for the next retry
if (elapsed < subscriptionRetryInterval)
return subscriptionRetryInterval - elapsed;
else
return subscriptionExpiration - now - subscriptionRenewalInterval;
}
else
return subscriptionRetryInterval;
}
private bool hasValidSubscription() {
return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC());
}
private bool isSubscriptionUpForRenewal() {
return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval;
}
private void createSubscription() {
addLogEntry("Initializing subscription for updates ...");
auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval;
try {
JSONValue response = oneDriveApiInstance.createSubscription(notificationUrl, expirationDateTime);
// Save important subscription metadata including id and expiration
subscriptionId = response["id"].str;
subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str);
addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString()));
} catch (OneDriveException e) {
if (e.httpStatusCode == 409) {
// Take over an existing subscription on HTTP 409.
//
// Sample 409 error:
// {
// "error": {
// "code": "ObjectIdentifierInUse",
// "innerError": {
// "client-request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d",
// "date": "2023-09-26T09:27:45",
// "request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d"
// },
// "message": "Subscription Id c0bba80e-57a3-43a7-bac2-e6f525a76e7c already exists for the requested combination"
// }
// }
// Make sure the error code is "ObjectIdentifierInUse"
try {
if (e.error["error"]["code"].str != "ObjectIdentifierInUse") {
throw e;
}
} catch (JSONException jsonEx) {
throw e;
}
// Extract the existing subscription id from the error message
import std.regex;
auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i");
auto m = matchFirst(e.error["error"]["message"].str, idReg);
if (!m) {
throw e;
}
// Save the subscription id and renew it immediately since we don't know the expiration timestamp
subscriptionId = m[0];
addLogEntry("Found existing subscription " ~ subscriptionId);
renewSubscription();
} else {
throw e;
}
}
}
private void renewSubscription() {
addLogEntry("Renewing subscription for updates ...");
auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval;
try {
JSONValue response = oneDriveApiInstance.renewSubscription(subscriptionId, expirationDateTime);
// Update subscription expiration from the response
subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str);
addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString()));
} catch (OneDriveException e) {
if (e.httpStatusCode == 404) {
addLogEntry("The subscription is not found on the server. Recreating subscription ...");
subscriptionId = null;
subscriptionExpiration = Clock.currTime(UTC());
createSubscription();
} else {
throw e;
}
}
}
private void deleteSubscription() {
if (!hasValidSubscription()) {
return;
}
oneDriveApiInstance.deleteSubscription(subscriptionId);
addLogEntry("Deleted subscription");
}
private void logSubscriptionError(OneDriveException e) {
if (e.httpStatusCode == 400) {
// Log known 400 error where Microsoft cannot get a 200 OK from the webhook endpoint
//
// Sample 400 error:
// {
// "error": {
// "code": "InvalidRequest",
// "innerError": {
// "client-request-id": "<uuid>",
// "date": "<timestamp>",
// "request-id": "<uuid>"
// },
// "message": "Subscription validation request failed. Notification endpoint must respond with 200 OK to validation request."
// }
// }
try {
if (e.error["error"]["code"].str == "InvalidRequest") {
import std.regex;
auto msgReg = ctRegex!(r"Subscription validation request failed", "i");
auto m = matchFirst(e.error["error"]["message"].str, msgReg);
if (m) {
addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint.");
return;
}
}
} catch (JSONException) {
// fallthrough
}
} else if (e.httpStatusCode == 401) {
// Log known 401 error where authentication failed
//
// Sample 401 error:
// {
// "error": {
// "code": "ExtensionError",
// "innerError": {
// "client-request-id": "<uuid>",
// "date": "<timestamp>",
// "request-id": "<uuid>"
// },
// "message": "Operation: Create; Exception: [Status Code: Unauthorized; Reason: Authentication failed]"
// }
// }
try {
if (e.error["error"]["code"].str == "ExtensionError") {
import std.regex;
auto msgReg = ctRegex!(r"Authentication failed", "i");
auto m = matchFirst(e.error["error"]["message"].str, msgReg);
if (m) {
addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed.");
return;
}
}
} catch (JSONException) {
// fallthrough
}
} else if (e.httpStatusCode == 403) {
// Log known 403 error where the number of subscriptions on item has exceeded limit
//
// Sample 403 error:
// {
// "error": {
// "code": "ExtensionError",
// "innerError": {
// "client-request-id": "<uuid>",
// "date": "<timestamp>",
// "request-id": "<uuid>"
// },
// "message": "Operation: Create; Exception: [Status Code: Forbidden; Reason: Number of subscriptions on item has exceeded limit]"
// }
// }
try {
if (e.error["error"]["code"].str == "ExtensionError") {
import std.regex;
auto msgReg = ctRegex!(r"Number of subscriptions on item has exceeded limit", "i");
auto m = matchFirst(e.error["error"]["message"].str, msgReg);
if (m) {
addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded limit.");
return;
}
}
} catch (JSONException) {
// fallthrough
}
}
// Log detailed message for unknown errors
addLogEntry("ERROR: Cannot create or renew subscription.");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
}
}
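Everything the webhook tells the main loop travels over std.concurrency message passing: each notification bumps a counter and send()s it to parentTid, and getNextExpirationCheckDuration() bounds how long the receiver may sleep before the subscription needs attention. A hedged wiring sketch follows; it assumes an ApplicationConfig initialised elsewhere and uses only the calls shown in this module.

import std.concurrency;
import config;
import webhook;

void monitorLoop(ApplicationConfig appConfig) {
	auto webhook = new OneDriveWebhook(thisTid, appConfig);
	webhook.serve(); // starts the listener and creates the subscription
	for (;;) {
		// wake early enough to renew the subscription even with no notifications
		receiveTimeout(webhook.getNextExpirationCheckDuration(),
			(ulong signalCount) { /* trigger a sync pass here */ });
		webhook.createOrRenewSubscription();
	}
}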