Uplift to v2.5.0-alpha-2

* Uplift to v2.5.0-alpha-2
abraunegg 2023-10-05 17:24:30 +11:00
parent cbe3e6ea84
commit 51f0ffcb1f
38 changed files with 32081 additions and 23 deletions

Makefile

@@ -55,7 +55,7 @@ endif
system_unit_files = contrib/systemd/onedrive@.service
user_unit_files = contrib/systemd/onedrive.service
DOCFILES = README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md docs/application-security.md
DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-folders.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md
ifneq ("$(wildcard /etc/redhat-release)","")
RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l)
@@ -66,20 +66,19 @@ RHEL_VERSION = 0
endif
SOURCES = \
src/config.d \
src/itemdb.d \
src/log.d \
src/main.d \
src/monitor.d \
src/onedrive.d \
src/qxor.d \
src/selective.d \
src/sqlite.d \
src/sync.d \
src/upload.d \
src/config.d \
src/log.d \
src/util.d \
src/qxor.d \
src/curlEngine.d \
src/onedrive.d \
src/sync.d \
src/itemdb.d \
src/sqlite.d \
src/clientSideFiltering.d \
src/progress.d \
src/arsd/cgi.d
src/monitor.d
ifeq ($(NOTIFICATIONS),yes)
SOURCES += src/notifications/notify.d src/notifications/dnotify.d

changelog.md (new file, 1041 lines)

File diff suppressed because it is too large

configure (vendored, 20 lines changed)

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for onedrive v2.4.25.
# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-alpha-2.
#
# Report bugs to <https://github.com/abraunegg/onedrive>.
#
@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='onedrive'
PACKAGE_TARNAME='onedrive'
PACKAGE_VERSION='v2.4.25'
PACKAGE_STRING='onedrive v2.4.25'
PACKAGE_VERSION='v2.5.0-alpha-2'
PACKAGE_STRING='onedrive v2.5.0-alpha-2'
PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
PACKAGE_URL=''
@@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems.
\`configure' configures onedrive v2.5.0-alpha-2 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1280,7 +1280,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of onedrive v2.4.25:";;
short | recursive ) echo "Configuration of onedrive v2.5.0-alpha-2:";;
esac
cat <<\_ACEOF
@@ -1393,7 +1393,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
onedrive configure v2.4.25
onedrive configure v2.5.0-alpha-2
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by onedrive $as_me v2.4.25, which was
It was created by onedrive $as_me v2.5.0-alpha-2, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2162,7 +2162,7 @@ fi
PACKAGE_DATE="June 2023"
PACKAGE_DATE="October 2023"
@@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by onedrive $as_me v2.4.25, which was
This file was extended by onedrive $as_me v2.5.0-alpha-2, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -3212,7 +3212,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
onedrive config.status v2.4.25
onedrive config.status v2.5.0-alpha-2
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

configure.ac

@@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
dnl - tag the release
AC_PREREQ([2.69])
AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive])
AC_INIT([onedrive],[v2.5.0-alpha-2], [https://github.com/abraunegg/onedrive], [onedrive])
AC_CONFIG_SRCDIR([src/main.d])

docs/advanced-usage.md (new file, 302 lines)

@@ -0,0 +1,302 @@
# Advanced Configuration of the OneDrive Free Client
This document covers the following scenarios:
* [Configuring the client to use multiple OneDrive accounts / configurations](#configuring-the-client-to-use-multiple-onedrive-accounts--configurations)
* [Configuring the client to use multiple OneDrive accounts / configurations using Docker](#configuring-the-client-to-use-multiple-onedrive-accounts--configurations-using-docker)
* [Configuring the client for use in dual-boot (Windows / Linux) situations](#configuring-the-client-for-use-in-dual-boot-windows--linux-situations)
* [Configuring the client for use when 'sync_dir' is a mounted directory](#configuring-the-client-for-use-when-sync_dir-is-a-mounted-directory)
* [Upload data from the local ~/OneDrive folder to a specific location on OneDrive](#upload-data-from-the-local-onedrive-folder-to-a-specific-location-on-onedrive)
## Configuring the client to use multiple OneDrive accounts / configurations
Essentially, each OneDrive account or SharePoint Shared Library that you require to be synced needs its own unique configuration, local sync directory and service files. To do this, the following steps are needed:
1. Create a unique configuration folder for each onedrive client configuration that you need
2. Copy to this folder a copy of the default configuration file
3. Update the default configuration file as required, changing the required minimum config options and any additional options as needed to support your multi-account configuration
4. Authenticate the client using the new configuration directory
5. Test the configuration using '--display-config' and '--dry-run'
6. Sync the OneDrive account data as required using `--synchronize` or `--monitor`
7. Configure a unique systemd service file for this account configuration
### 1. Create a unique configuration folder for each onedrive client configuration that you need
Make the configuration folder as required for this new configuration, for example:
```text
mkdir ~/.config/my-new-config
```
### 2. Copy to this folder a copy of the default configuration file
Download the default configuration file from GitHub and save it in the directory created above:
```text
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/my-new-config/config
```
### 3. Update the default configuration file
The following config options *must* be updated to ensure that individual account data is not cross-populated with other OneDrive accounts or other configurations (a minimal example follows this list):
* sync_dir
Other options that may need to be updated, depending on the OneDrive account that is being configured:
* drive_id
* application_id
* sync_business_shared_folders
* skip_dir
* skip_file
* Creation of a 'sync_list' file if required
* Creation of a 'business_shared_folders' file if required
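As a sketch, a minimal `~/.config/my-new-config/config` for a second account might look as follows (the 'sync_dir' value is illustrative; any unique local path will do):
```text
# ~/.config/my-new-config/config - example values only
sync_dir = "~/OneDrive-my-new-config"
```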
### 4. Authenticate the client
Authenticate the client using the specific configuration file:
```text
onedrive --confdir="~/.config/my-new-config"
```
You will be asked to open a specific URL in your web browser, where you will have to log in to your Microsoft Account and give the application permission to access your files. After granting permission, you will be redirected to a blank page. Copy the URI of the blank page into the application.
```text
[user@hostname ~]$ onedrive --confdir="~/.config/my-new-config"
Configuration file successfully loaded
Configuring Global Azure AD Endpoints
Authorize this app visiting:
https://.....
Enter the response uri:
```
### 5. Display and Test the configuration
Test the configuration using '--display-config' and '--dry-run'. This allows you to validate the configuration you have made and fix any issues before relying on it.
#### Display the configuration
```text
onedrive --confdir="~/.config/my-new-config" --display-config
```
#### Test the configuration by performing a dry-run
```text
onedrive --confdir="~/.config/my-new-config" --synchronize --verbose --dry-run
```
If both of these operate as expected, the configuration of this client setup is complete and validated. If not, amend your configuration as required.
### 6. Sync the OneDrive account data as required
Sync the data for the new account configuration as required:
```text
onedrive --confdir="~/.config/my-new-config" --synchronize --verbose
```
or
```text
onedrive --confdir="~/.config/my-new-config" --monitor --verbose
```
* `--synchronize` does a one-time sync
* `--monitor` keeps the application running and monitoring for changes both local and remote
### 7. Automatic syncing of new OneDrive configuration
To automatically start syncing your OneDrive accounts, you will need to create a service file for each account, based on the systemd service file installed in the applicable systemd folder:
* RHEL / CentOS: `/usr/lib/systemd/system`
* Others: `/usr/lib/systemd/user` and `/lib/systemd/system`
### Step 1: Create a new systemd service file
#### Red Hat Enterprise Linux, CentOS Linux
Copy the required service file to a new name:
```text
sudo cp /usr/lib/systemd/system/onedrive.service /usr/lib/systemd/system/onedrive-my-new-config.service
```
or
```text
sudo cp /usr/lib/systemd/system/onedrive@.service /usr/lib/systemd/system/onedrive-my-new-config@.service
```
#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
Copy the required service file to a new name:
```text
sudo cp /usr/lib/systemd/user/onedrive.service /usr/lib/systemd/user/onedrive-my-new-config.service
```
or
```text
sudo cp /lib/systemd/system/onedrive@.service /lib/systemd/system/onedrive-my-new-config@.service
```
### Step 2: Edit new systemd service file
Edit the new systemd file, updating the line beginning with `ExecStart` so that the confdir mirrors the one you used above:
```text
ExecStart=/usr/local/bin/onedrive --monitor --confdir="/full/path/to/config/dir"
```
Example:
```text
ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/my-new-config"
```
**Note:** When running the client manually, `--confdir="~/.config/..."` is acceptable. In a systemd configuration file, the full path must be used; the `~` must be expanded.
### Step 3: Enable the new systemd service
Once the file is correctly edited, you can enable the new systemd service using the following commands.
#### Red Hat Enterprise Linux, CentOS Linux
```text
systemctl enable onedrive-my-new-config
systemctl start onedrive-my-new-config
```
#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
```text
systemctl --user enable onedrive-my-new-config
systemctl --user start onedrive-my-new-config
```
or
```text
systemctl --user enable onedrive-my-new-config@myusername.service
systemctl --user start onedrive-my-new-config@myusername.service
```
### Step 4: Viewing systemd status and logs for the custom service
#### Viewing systemd service status - Red Hat Enterprise Linux, CentOS Linux
```text
systemctl status onedrive-my-new-config
```
#### Viewing systemd service status - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
```text
systemctl --user status onedrive-my-new-config
```
#### Viewing journalctl systemd logs - Red Hat Enterprise Linux, CentOS Linux
```text
journalctl --unit=onedrive-my-new-config -f
```
#### Viewing journalctl systemd logs - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
```text
journalctl --user --unit=onedrive-my-new-config -f
```
### Step 5: (Optional) Run custom systemd service at boot without user login
In some cases it may be desirable for the systemd service to start without having to log in as your 'user'.
All the systemd steps above that utilise the `--user` option will run the systemd service as your particular user. As such, the systemd service will not start unless you actually log in to your system.
To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will start up without you having to log in to your system:
```text
loginctl enable-linger <your_user_name>
```
Example:
```text
alex@ubuntu-headless:~$ loginctl enable-linger alex
```
Repeat these steps for each new OneDrive account that you wish to use.
## Configuring the client to use multiple OneDrive accounts / configurations using Docker
In some situations it may be desirable to run multiple Docker containers at the same time, each with their own configuration.
To run the Docker container successfully, it needs two unique Docker volumes to operate:
* Your configuration Docker volume
* Your data Docker volume
When running multiple Docker containers this is no different - each Docker container must have its own configuration and data volume.
### High level steps:
1. Create the required unique Docker volumes for the configuration volume
2. Create the required unique local path used for the Docker data volume
3. Start the multiple Docker containers with the required configuration for each container
#### Create the required unique Docker volumes for the configuration volume
Create the required unique Docker volumes for the configuration volume(s):
```text
docker volume create onedrive_conf_sharepoint_site1
docker volume create onedrive_conf_sharepoint_site2
docker volume create onedrive_conf_sharepoint_site3
...
docker volume create onedrive_conf_sharepoint_site50
```
#### Create the required unique local path used for the Docker data volume
Create the required unique local path used for the Docker data volume:
```text
mkdir -p /use/full/local/path/no/tilde/SharePointSite1
mkdir -p /use/full/local/path/no/tilde/SharePointSite2
mkdir -p /use/full/local/path/no/tilde/SharePointSite3
...
mkdir -p /use/full/local/path/no/tilde/SharePointSite50
```
#### Start the Docker container with the required configuration (example)
```text
docker run -it --name onedrive_sharepoint_site1 -v onedrive_conf_sharepoint_site1:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite1:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive_sharepoint_site2 -v onedrive_conf_sharepoint_site2:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite2:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive_sharepoint_site3 -v onedrive_conf_sharepoint_site3:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite3:/onedrive/data" driveone/onedrive:latest
...
docker run -it --name onedrive_sharepoint_site50 -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite50:/onedrive/data" driveone/onedrive:latest
```
#### TIP
If all the Docker containers use the same OneDrive credentials, you can avoid re-authenticating and authorising each individual container by copying the 'refresh_token' file from one container's configuration Docker volume to each of the others.
If the account credentials are different, you will need to re-authenticate each Docker container individually.
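As a sketch, assuming the default Docker 'local' volume driver (which stores volume data under `/var/lib/docker/volumes/<volume>/_data`), the token could be copied as follows:
```text
sudo cp /var/lib/docker/volumes/onedrive_conf_sharepoint_site1/_data/refresh_token \
        /var/lib/docker/volumes/onedrive_conf_sharepoint_site2/_data/refresh_token
```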
## Configuring the client for use in dual-boot (Windows / Linux) situations
When dual booting Windows and Linux, depending on the Windows OneDrive account configuration, the 'Files On-Demand' option may be enabled when running OneDrive within your Windows environment.
When this option is enabled in Windows, if you are sharing this location between your Windows and Linux systems, all files will be 0-byte links and cannot be used under Linux.
To fix the problem of Windows turning all files (that should be kept offline) into links, you have to uncheck a specific option in the OneDrive settings window. The option in question is `Save space and download files as you use them`.
To find this setting, open the OneDrive pop-up window from the taskbar, click "Help & Settings" > "Settings". This opens a new window. Go to the tab "Settings" and look for the section "Files On-Demand".
After unchecking the option and clicking "OK", the Windows OneDrive client should restart itself and start actually downloading your files so they will truly be available on your disk when offline. These files will then be fully accessible under Linux and the Linux OneDrive client.
| OneDrive Personal | OneDrive Business<br>SharePoint |
|---|---|
| ![Uncheck-Personal](./images/personal-files-on-demand.png) | ![Uncheck-Business](./images/business-files-on-demand.png) |
## Configuring the client for use when 'sync_dir' is a mounted directory
In some environments, your configured 'sync_dir' might point to another mounted file system - an NFS or CIFS location, or an external drive (USB stick, eSATA, etc). In such a setup, you configure your 'sync_dir' as follows:
```text
sync_dir = "/path/to/mountpoint/OneDrive"
```
The issue here is: how does the client react if the mount point becomes unavailable - network loss, device removal?
The client has no knowledge of any event that causes a mount point to become unavailable. Thus, the client (if you are running as a service) will assume that you deleted the files and will go ahead and delete all your files on OneDrive. This is most certainly an undesirable action.
There are a few options which you can configure in your 'config' file to help prevent this sort of event from occurring:
1. classify_as_big_delete
2. check_nomount
3. check_nosync
**Note:** Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running.
### classify_as_big_delete
By default, this uses a value of 1000 files|folders. If an undesirable unmount occurs and your mount point contained more than 1000 files, this default level will prevent the client from executing the online delete. Modify this value up or down as desired.
### check_nomount & check_nosync
These two options are really the right safeguards to use.
In your 'mount point', *before* you mount your external folder|device, create an empty `.nosync` file, so that this is the *only* file present in the mount location before you mount your data. When you mount your data, this '.nosync' file will not be visible; but if the device you are mounting goes away, this '.nosync' file is the only file visible.
Next, in your 'config' file, configure the following options: `check_nomount = "true"` and `check_nosync = "true"`
This tells the client to stop syncing if, at *any* point, it sees this file - thus protecting your online data from being deleted when the mounted device suddenly becomes unavailable.
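A sketch of this setup, assuming your mount point is `/path/to/mountpoint`:
```text
# With the device or network share unmounted, create the sentinel file
touch /path/to/mountpoint/.nosync

# Then add the following to your 'config' file
check_nomount = "true"
check_nosync = "true"
```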
After making this sort of change - test with `--dry-run` so you can see the impacts of your mount point being unavailable, and how the client is now reacting. Once you are happy with how the system will react, restart your sync processes.
## Upload data from the local ~/OneDrive folder to a specific location on OneDrive
In some environments, you may not want your local ~/OneDrive folder to be uploaded directly to the root of your OneDrive account online.
Unfortunately, the OneDrive API lacks any facility to perform a redirection of data during upload.
The workaround for this is to structure your local filesystem and reconfigure your client to achieve the desired goal.
### High level steps:
1. Create a new folder, for example `/opt/OneDrive`
2. Configure your application config 'sync_dir' to look at this folder
3. Inside `/opt/OneDrive` create the folder you wish to sync the data online to, for example: `/opt/OneDrive/RemoteOnlineDestination`
4. Configure the application to only sync `/opt/OneDrive/RemoteOnlineDestination` via 'sync_list'
5. Symbolically link `~/OneDrive` -> `/opt/OneDrive/RemoteOnlineDestination`
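A shell sketch of the five steps above (assuming `~/OneDrive` does not already exist as a regular directory, and that your application config and 'sync_list' live in `~/.config/onedrive/`):
```text
mkdir -p /opt/OneDrive/RemoteOnlineDestination
# in ~/.config/onedrive/config: sync_dir = "/opt/OneDrive"
echo '/RemoteOnlineDestination/' > ~/.config/onedrive/sync_list
ln -s /opt/OneDrive/RemoteOnlineDestination ~/OneDrive
```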
### Outcome:
* Your `~/OneDrive` will look / feel as per normal
* The data will be stored online under `/RemoteOnlineDestination`
### Testing:
* Validate your configuration with `onedrive --display-config`
* Test your configuration with `onedrive --dry-run`

docs/application-config-options.md (new file, 871 lines)

@@ -0,0 +1,871 @@
# Application Configuration Options for the OneDrive Client for Linux
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Table of Contents
TABLE OF CONTENTS GOES HERE
## Configuration File Options
### application_id
_**Description:**_ This is the config option for the application id that is used by this client to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option.
_**Value Type:**_ String
_**Default Value:**_ d50ca740-c83f-4d1b-b616-12c519384f0c
_**Config Example:**_ `application_id = "d50ca740-c83f-4d1b-b616-12c519384f0c"`
### azure_ad_endpoint
_**Description:**_ This is the config option to change the Microsoft Azure Authentication Endpoint that the client uses, to conform with data and security requirements that require data to reside within the geographic borders of that country.
_**Value Type:**_ String
_**Default Value:**_ *Empty* - not required for normal operation
_**Valid Values:**_ USL4, USL5, DE, CN
_**Config Example:**_ `azure_ad_endpoint = "DE"`
### azure_tenant_id
_**Description:**_ This config option allows the locking of the client to a specific single tenant and will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID or the fully qualified tenant name.
_**Value Type:**_ String
_**Default Value:**_ *Empty* - not required for normal operation
_**Config Example:**_ `azure_tenant_id = "example.onmicrosoft.us"` or `azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"`
_**Additional Usage Requirement:**_ Must be configured if 'azure_ad_endpoint' is configured.
### bypass_data_preservation
_**Description:**_ This config option allows the disabling of preserving local data by renaming the local file in the event of data conflict. If this is enabled, you will experience data loss on your local data as the local file will be over-written with data from OneDrive online. Use with care and caution.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `bypass_data_preservation = "false"` or `bypass_data_preservation = "true"`
### check_nomount
_**Description:**_ This config option is useful to prevent application startup & ongoing use in 'Monitor Mode' if the configured 'sync_dir' is a separate disk that is being mounted by your system. This option will check for the presence of a `.nosync` file in your mount point, and if present, abort any sync process to preserve data.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `check_nomount = "false"` or `check_nomount = "true"`
_**CLI Option:**_ `--check-for-nomount`
_**Additional Usage Requirement:**_ Create a `.nosync` file in your mount point *before* you mount your disk, so that this file is visible in your mount point if your disk is unmounted.
### check_nosync
_**Description:**_ This config option is useful to prevent the sync of a *local* directory to Microsoft OneDrive. It will *not* check for this file online to prevent the download of directories to your local system.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `check_nosync = "false"` or `check_nosync = "true"`
_**CLI Option Use:**_ `--check-for-nosync`
_**Additional Usage Requirement:**_ Create a `.nosync` file in any *local* directory that you wish to not sync to Microsoft OneDrive when you enable this option.
### classify_as_big_delete
_**Description:**_ This config option defines the number of children in a locally removed path that will cause the removal to be classified as a 'big data delete', safeguarding against large data removals - which are typically accidental local delete events.
_**Value Type:**_ Integer
_**Default Value:**_ 1000
_**Config Example:**_ `classify_as_big_delete = "2000"`
_**CLI Option Use:**_ `--classify-as-big-delete 2000`
_**Additional Usage Requirement:**_ If this option is triggered, you will need to add `--force` to force a sync to occur.
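For example, if a 'big delete' is detected and the removal is intentional, a one-off forced standalone sync might look like this (a sketch):
```text
onedrive --sync --verbose --force
```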
### cleanup_local_files
_**Description:**_ This config option provides the capability to clean up local files and folders if they are removed online.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `cleanup_local_files = "false"` or `cleanup_local_files = "true"`
_**CLI Option Use:**_ `--cleanup-local-files`
_**Additional Usage Requirement:**_ This configuration option can only be used with 'download_only'. It cannot be used with any other application option.
### connect_timeout
_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library.
_**Value Type:**_ Integer
_**Default Value:**_ 30
_**Config Example:**_ `connect_timeout = "20"`
### data_timeout
_**Description:**_ This setting controls the timeout duration, in seconds, for when no data is received on an active HTTPS connection to Microsoft OneDrive (when using the curl library) before that connection is timed out.
_**Value Type:**_ Integer
_**Default Value:**_ 240
_**Config Example:**_ `data_timeout = "300"`
### debug_https
_**Description:**_ This setting controls whether the curl library is configured to output additional data to assist with diagnosing HTTPS issues and problems.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `debug_https = "false"` or `debug_https = "true"`
_**CLI Option Use:**_ `--debug-https`
_**Additional Usage Notes:**_ Whilst this option can be used at any time, it is advisable to only use it when advised, as the output will include your `Authorization: bearer` value - which is your authentication token to Microsoft OneDrive.
### disable_download_validation
_**Description:**_ This option determines whether the client will conduct integrity validation on files downloaded from Microsoft OneDrive. Sometimes, when downloading files, particularly from SharePoint, there is a discrepancy between the file size reported by the OneDrive API and the byte count received from the SharePoint HTTP Server for the same file. Enable this option to disable the integrity checks performed by this client.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `disable_download_validation = "false"` or `disable_download_validation = "true"`
_**CLI Option Use:**_ `--disable-download-validation`
_**Additional Usage Notes:**_ If you're downloading data from SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack.
### disable_notifications
_**Description:**_ This setting controls whether GUI notifications are sent from the client to your display manager session.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `disable_notifications = "false"` or `disable_notifications = "true"`
_**CLI Option Use:**_ `--disable-notifications`
### disable_upload_validation
_**Description:**_ This option determines whether the client will conduct integrity validation on files uploaded to Microsoft OneDrive. Sometimes, when uploading files, particularly to SharePoint, SharePoint will modify your file post upload by adding new data to your file which breaks the integrity checking of the upload performed by this client. Enable this option to disable the integrity checks performed by this client.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `disable_upload_validation = "false"` or `disable_upload_validation = "true"`
_**CLI Option Use:**_ `--disable-upload-validation`
_**Additional Usage Notes:**_ If you're uploading data to SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack.
### display_running_config
_**Description:**_ This option will include the running config of the application at application startup. This may be desirable in containerised environments, so that the application configuration consumed at startup is written out to any applicable log file.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `display_running_config = "false"` or `display_running_config = "true"`
_**CLI Option Use:**_ `--display-running-config`
### dns_timeout
_**Description:**_ This setting controls the libcurl DNS cache value. By default, libcurl caches this info for 60 seconds. This cache timeout is entirely speculative - it assumes a name will resolve to the same address for a short time into the future, as libcurl does not use DNS TTL properties. We recommend users not to tamper with this option unless strictly necessary.
_**Value Type:**_ Integer
_**Default Value:**_ 60
_**Config Example:**_ `dns_timeout = "90"`
### download_only
_**Description:**_ This setting forces the client to only download data from Microsoft OneDrive and replicate that data locally. No changes made locally will be uploaded to Microsoft OneDrive when using this option.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `download_only = "false"` or `download_only = "true"`
_**CLI Option Use:**_ `--download-only`
### drive_id
_**Description:**_ This setting controls the specific drive identifier the client will use when syncing with Microsoft OneDrive.
_**Value Type:**_ String
_**Default Value:**_ *None*
_**Config Example:**_ `drive_id = "b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB"`
_**Additional Usage Notes:**_ This option is typically only used when configuring the client to sync a specific SharePoint Library. If this configuration option is specified in your config file, a value must be specified, otherwise the application will exit citing a fatal error has occurred.
### dry_run
_**Description:**_ This setting controls the application capability to test your application configuration without performing any actual activity (download, upload, move, delete, folder creation).
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `dry_run = "false"` or `dry_run = "true"`
_**CLI Option Use:**_ `--dry-run`
### enable_logging
_**Description:**_ This setting controls whether the application logs all actions to a separate file. By default, all log files will be written to `/var/log/onedrive`, however this can be changed by using the 'log_dir' config option.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `enable_logging = "false"` or `enable_logging = "true"`
_**CLI Option Use:**_ `--enable-logging`
_**Additional Usage Notes:**_ Additional configuration is potentially required to configure the default log directory. Refer to usage.md for details (ADD LINK)
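As a sketch, assuming the default log location is kept and your account is a member of the 'users' group, the log directory could be prepared as follows:
```text
sudo mkdir -p /var/log/onedrive
sudo chown root:users /var/log/onedrive
sudo chmod 0775 /var/log/onedrive
```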
### force_http_11
_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP protocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `force_http_11 = "false"` or `force_http_11 = "true"`
_**CLI Option Use:**_ `--force-http-11`
### ip_protocol_version
_**Description:**_ This setting controls the application IP protocol that should be used when communicating with Microsoft OneDrive. The default is to use IPv4 and IPv6 networks for communicating to Microsoft OneDrive.
_**Value Type:**_ Integer
_**Default Value:**_ 0
_**Valid Values:**_ 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
_**Config Example:**_ `ip_protocol_version = "0"` or `ip_protocol_version = "1"` or `ip_protocol_version = "2"`
_**Additional Usage Notes:**_ In some environments where IPv4 and IPv6 are configured at the same time, this causes resolution and routing issues to Microsoft OneDrive. If this is the case, it is advisable to change 'ip_protocol_version' to match your environment.
### local_first
_**Description:**_ This setting controls what the application considers the 'source of truth' for your data. By default, what is stored online will be considered as the 'source of truth' when syncing to your local machine. When using this option, your local data will be considered the 'source of truth'.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `local_first = "false"` or `local_first = "true"`
_**CLI Option Use:**_ `--local-first`
### log_dir
_**Description:**_ This setting controls the custom application log path when 'enable_logging' has been enabled. By default, all log files will be written to `/var/log/onedrive`.
_**Value Type:**_ String
_**Default Value:**_ *None*
_**Config Example:**_ `log_dir = "~/logs/"`
_**CLI Option Use:**_ `--log-dir "~/logs/"`
### monitor_fullscan_frequency
_**Description:**_ This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency.
_**Value Type:**_ Integer
_**Default Value:**_ 12
_**Config Example:**_ `monitor_fullscan_frequency = "24"`
_**CLI Option Use:**_ `--monitor-fullscan-frequency '24'`
_**Additional Usage Notes:**_ By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This setting is only applicable when running in `--monitor` mode.
### monitor_interval
_**Description:**_ This configuration setting determines how often the synchronisation loops run in --monitor mode, measured in seconds. When this time period elapses, the client will check for online changes in Microsoft OneDrive, conduct integrity checks on local data and scan the local 'sync_dir' to identify any new content that hasn't been uploaded yet.
_**Value Type:**_ Integer
_**Default Value:**_ 300
_**Config Example:**_ `monitor_interval = "600"`
_**CLI Option Use:**_ `--monitor-interval '600'`
_**Additional Usage Notes:**_ A minimum value of 300 is enforced for this configuration setting.
### monitor_log_frequency
_**Description:**_ This configuration option controls the suppression of frequently printed log items to the system console when using `--monitor` mode. The aim of this configuration item is to reduce the log output when near-zero sync activity is occurring.
_**Value Type:**_ Integer
_**Default Value:**_ 12
_**Config Example:**_ `monitor_log_frequency = "24"`
_**CLI Option Use:**_ `--monitor-log-frequency '24'`
_**Additional Usage Notes:**_
By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and has performed all the initial processing steps:
```text
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
Configuring Global Azure AD Endpoints
Sync Engine Initialised with new Onedrive API instance
All application operations will be performed in: /home/user/OneDrive
OneDrive synchronisation interval (seconds): 300
Initialising filesystem inotify monitoring ...
Performing initial syncronisation to ensure consistent local state ...
Starting a sync with Microsoft OneDrive
Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
Processing changes and items received from Microsoft OneDrive ...
Performing a database consistency and integrity check on locally stored data ...
Scanning the local file system '~/OneDrive' for new data to upload ...
Performing a final true-up scan of online data from Microsoft OneDrive
Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
Processing changes and items received from Microsoft OneDrive ...
Sync with Microsoft OneDrive is complete
```
Then, based on 'monitor_log_frequency', the following output will be logged until the suppression loop value is reached:
```text
Starting a sync with Microsoft OneDrive
Syncing changes from Microsoft OneDrive ...
Sync with Microsoft OneDrive is complete
```
**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring, which is triggered by 'monitor_fullscan_frequency'.
**Note:** If verbose application output is being used (`--verbose`), then this configuration setting has zero effect, as application verbose output takes priority over application output suppression.
### no_remote_delete
_**Description:**_ This configuration option controls whether local file and folder deletes are actioned on Microsoft OneDrive.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `no_remote_delete = "false"` or `no_remote_delete = "true"`
_**CLI Option Use:**_ `--no-remote-delete`
_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only`
### operation_timeout
_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary.
_**Value Type:**_ Integer
_**Default Value:**_ 3600
_**Config Example:**_ `operation_timeout = "3600"`
### rate_limit
_**Description:**_ This configuration option controls the bandwidth used by the application, per thread, when interacting with Microsoft OneDrive.
_**Value Type:**_ Integer
_**Default Value:**_ 0 (unlimited, use available bandwidth per thread)
_**Valid Values:**_ Valid tested values for this configuration option are as follows:
* 131072 = 128 KB/s - absolute minimum for basic application operations to prevent timeouts
* 262144 = 256 KB/s
* 524288 = 512 KB/s
* 1048576 = 1 MB/s
* 10485760 = 10 MB/s
* 104857600 = 100 MB/s
_**Config Example:**_ `rate_limit = "131072"`
### read_only_auth_scope
_**Description:**_ This configuration option controls whether the OneDrive Client for Linux operates in a totally read-only manner.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `read_only_auth_scope = "false"` or `read_only_auth_scope = "true"`
_**Additional Usage Notes:**_ When using 'read_only_auth_scope' you will also need to remove your existing application access consent, otherwise the old authentication consent will remain valid and will be used. This means the application would technically still have consent to upload data until you revoke that consent.
### remove_source_files
_**Description:**_ This configuration option controls whether the OneDrive Client for Linux removes the local file post successful transfer to Microsoft OneDrive.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `remove_source_files = "false"` or `remove_source_files = "true"`
_**CLI Option Use:**_ `--remove-source-files`
_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only`
### resync
_**Description:**_ This configuration option controls whether the known local sync state with Microsoft OneDrive is removed at application startup. When this option is used, a full scan of your data online is performed to ensure that the local sync state is correctly built back up.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `resync = "false"` or `resync = "true"`
_**CLI Option Use:**_ `--resync`
_**Additional Usage Notes:**_ It's highly recommended to use this option only if the application prompts you to do so. Don't blindly use this option as a default option. If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration:
* drive_id
* sync_dir
* skip_file
* skip_dir
* skip_dotfiles
* skip_symlinks
* sync_business_shared_items
* Creating, Modifying or Deleting the 'sync_list' file
### resync_auth
_**Description:**_ This configuration option controls the approval of performing a 'resync' which can be beneficial in automated environments.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `resync_auth = "false"` or `resync_auth = "true"`
_**CLI Option Use:**_ `--resync-auth`
_**Additional Usage Notes:**_ In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' resync requirement, this option allows you to automatically acknowledge the resync prompt.
### skip_dir
_**Description:**_ This configuration option controls whether the application skips certain directories from being synced.
_**Value Type:**_ String
_**Default Value:**_ *Empty* - not required for normal operation
_**Config Example:**_
Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. Entries for 'skip_dir' are *relative* to your 'sync_dir' path.
```text
# When changing a config option below, remove the '#' from the start of the line
# For explanations of all config options below see docs/USAGE.md or the man page.
#
# sync_dir = "~/OneDrive"
# skip_file = "~*|.~*|*.tmp"
# monitor_interval = "300"
skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell"
# log_dir = "/var/log/onedrive/"
```
The 'skip_dir' option can be specified multiple times within your config file, for example:
```text
skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir"
skip_dir = "/Path/To/A/Directory"
skip_dir = "/Another/Path/To/Different/Directory"
```
This will be interpreted the same as:
```text
skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory"
```
_**CLI Option Use:**_ `--skip-dir 'SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory'`
_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and, if configured, is utilised for all sync operations. If both the config file entry and the CLI option are used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync.
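For example, after modifying 'skip_dir', a resync could be performed as follows (a sketch; `--resync-auth` can additionally be used to auto-acknowledge the resync prompt in automated environments):
```text
onedrive --sync --verbose --resync
```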
### skip_dir_strict_match
_**Description:**_ This configuration option controls whether the application performs strict directory matching when checking 'skip_dir' items. When enabled, the 'skip_dir' item must be a full path match to the path to be skipped.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `skip_dir_strict_match = "false"` or `skip_dir_strict_match = "true"`
_**CLI Option Use:**_ `--skip-dir-strict-match`
### skip_dotfiles
_**Description:**_ This configuration option controls whether the application will skip all .files and .folders when performing sync operations.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `skip_dotfiles = "false"` or `skip_dotfiles = "true"`
_**CLI Option Use:**_ `--skip-dot-files`
_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
### skip_file
_**Description:**_ This configuration option controls whether the application skips certain files from being synced.
_**Value Type:**_ String
_**Default Value:**_ `~*|.~*|*.tmp|*.swp|*.partial`
_**Config Example:**_
Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
By default, the following files will be skipped:
* Files that start with ~
* Files that start with .~ (like .~lock.* files generated by LibreOffice)
* Files that end in .tmp, .swp and .partial
Files can be skipped in the following fashion:
* Specify a wildcard, eg: '*.txt' (skip all txt files)
* Explicitly specify the filename and its full path relative to your sync_dir, eg: '/path/to/file/filename.ext'
* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext'
```text
# When changing a config option below, remove the '#' from the start of the line
# For explanations of all config options below see docs/USAGE.md or the man page.
#
# sync_dir = "~/OneDrive"
skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx"
# monitor_interval = "300"
# skip_dir = ""
# log_dir = "/var/log/onedrive/"
```
The 'skip_file' option can be specified multiple times within your config file, for example:
```text
skip_file = "~*|.~*|*.tmp|*.swp"
skip_file = "*.blah"
skip_file = "never_sync.file"
skip_file = "/Documents/keepass.kdbx"
```
This will be interpreted the same as:
```text
skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx"
```
_**CLI Option Use:**_ `--skip-file '~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx'`
_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and, if configured, is utilised for all sync operations. If both the config file entry and the CLI option are used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync.
### skip_size
_**Description:**_ This configuration option controls whether the application skips syncing certain files larger than the specified size. The value specified is in MB.
_**Value Type:**_ Integer
_**Default Value:**_ 0 (all files, regardless of size, are synced)
_**Config Example:**_ `skip_size = "50"`
_**CLI Option Use:**_ `--skip-size '50'`
### skip_symlinks
_**Description:**_ This configuration option controls whether the application will skip all symbolic links when performing sync operations. Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `skip_symlinks = "false"` or `skip_symlinks = "true"`
_**CLI Option Use:**_ `--skip-symlinks`
_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
### space_reservation
_**Description:**_ This configuration option controls how much local disk space should be reserved, to prevent the application from filling up your entire disk due to misconfiguration.
_**Value Type:**_ Integer
_**Default Value:**_ 50 MB (expressed as bytes when using `--display-config`)
_**Config Example:**_ `space_reservation = "100"`
_**CLI Option Use:**_ `--space-reservation '100'`
### sync_business_shared_items
_**Description:**_ This configuration option controls whether OneDrive Business | Office 365 Shared Folders, when added as a 'shortcut' to your 'My Files', will be synced to your local system.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `sync_business_shared_items = "false"` or `sync_business_shared_items = "true"`
_**CLI Option Use:**_ *none* - this is a config file option only
_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
### sync_dir
_**Description:**_ This configuration option determines the location on your local filesystem where your data from Microsoft OneDrive will be saved.
_**Value Type:**_ String
_**Default Value:**_ `~/OneDrive`
_**Config Example:**_ `sync_dir = "~/MyDirToSync"`
_**CLI Option Use:**_ `--syncdir '~/MyDirToSync'`
_**Additional Usage Notes:**_ After changing this option, you will be required to perform a resync.
### sync_dir_permissions
_**Description:**_ This configuration option defines the directory permissions applied when a new directory is created locally during the process of syncing your data from Microsoft OneDrive.
_**Value Type:**_ Integer
_**Default Value:**_ `700` - This provides the following permissions: `drwx------`
_**Config Example:**_ `sync_dir_permissions = "700"`
_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value.
### sync_file_permissions
_**Description:**_ This configuration option defines the file permissions applied when a new file is created locally during the process of syncing your data from Microsoft OneDrive.
_**Value Type:**_ Integer
_**Default Value:**_ `600` - This provides the following permissions: `-rw-------`
_**Config Example:**_ `sync_file_permissions = "600"`
_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing file permissions if you modify this value.
### sync_root_files
_**Description:**_ This configuration option manages the synchronisation of files located in the 'sync_dir' root when using a 'sync_list'. It enables you to sync all these files by default, eliminating the need to repeatedly modify your 'sync_list' and initiate resynchronisation.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `sync_root_files = "false"` or `sync_root_files = "true"`
_**CLI Option Use:**_ `--sync-root-files`
_**Additional Usage Notes:**_ Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process.
### upload_only
_**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the local state online. By default, this will also remove content online that has been removed locally.
_**Value Type:**_ Boolean
_**Default Value:**_ False
_**Config Example:**_ `upload_only = "false"` or `upload_only = "true"`
_**CLI Option Use:**_ `--upload-only`
_**Additional Usage Notes:**_ To ensure that data deleted locally remains accessible online, you can use the 'no_remote_delete' option. If you want to delete the data from your local storage after a successful upload to Microsoft OneDrive, you can use the 'remove_source_files' option.
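A sketch of a common 'archive to OneDrive' combination, where local data is pushed online and local deletes are never propagated:
```text
upload_only = "true"
no_remote_delete = "true"
```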
### user_agent
_**Description:**_ This configuration option controls the 'User-Agent' request header that is presented to Microsoft Graph API when accessing the Microsoft OneDrive service. This string lets servers and network peers identify the application, operating system, vendor, and/or version of the application making the request. We recommend users not to tamper with this option unless strictly necessary.
_**Value Type:**_ String
_**Default Value:**_ `ISV|abraunegg|OneDrive Client for Linux/vX.Y.Z-A-bcdefghi`
_**Config Example:**_ `user_agent = "ISV|CompanyName|AppName/Version"`
_**Additional Usage Notes:**_ The current value conforms to the Microsoft Graph API documentation for presenting an appropriate 'User-Agent' header and aligns to the registered 'application_id' that this application uses.
### webhook_enabled
_**Description:**_
_**Value Type:**_
_**Default Value:**_
_**Config Example:**_
### webhook_expiration_interval
_**Description:**_
_**Value Type:**_
_**Default Value:**_
_**Config Example:**_
### webhook_listening_host
_**Description:**_
_**Value Type:**_
_**Default Value:**_
_**Config Example:**_
### webhook_listening_port
_**Description:**_
_**Value Type:**_
_**Default Value:**_
_**Config Example:**_
### webhook_public_url
_**Description:**_
_**Value Type:**_
_**Default Value:**_
_**Config Example:**_
### webhook_renewal_interval
_**Description:**_
_**Value Type:**_
_**Default Value:**_
_**Config Example:**_
## Command Line Interface (CLI) Only Options
### CLI Option: --auth-files
_**Description:**_
_**Usage Example:**_
### CLI Option: --auth-response
_**Description:**_
_**Usage Example:**_
### CLI Option: --confdir
_**Description:**_ This CLI option allows the user to specify where all the application configuration and relevant components are stored.
_**Usage Example:**_ `onedrive --confdir '~/.config/onedrive-business/'`
_**Additional Usage Notes:**_ If using this option, it must be specified each and every time the application is used. If this is omitted, the application default configuration directory will be used.
### CLI Option: --create-directory
_**Description:**_
_**Usage Example:**_
### CLI Option: --create-share-link
_**Description:**_
_**Usage Example:**_
### CLI Option: --destination-directory
_**Description:**_
_**Usage Example:**_
### CLI Option: --display-config
_**Description:**_ This CLI option will display the effective application configuration.
_**Usage Example:**_ `onedrive --display-config`
### CLI Option: --display-sync-status
_**Description:**_
_**Usage Example:**_
### CLI Option: --force
_**Description:**_ This CLI option forces the deletion of data when a 'big delete' is detected.
_**Usage Example:**_
### CLI Option: --force-sync
_**Description:**_
_**Usage Example:**_
### CLI Option: --get-file-link
_**Description:**_
_**Usage Example:**_
### CLI Option: --get-sharepoint-drive-id
_**Description:**_ This CLI option queries the OneDrive API and returns the Office 365 Drive ID for a given Office 365 SharePoint Shared Library, which can then be used with 'drive_id' to sync a specific SharePoint Library.
_**Usage Example:**_ `onedrive --get-sharepoint-drive-id '*'` or `onedrive --get-sharepoint-drive-id 'PointPublishing Hub Site'`
### CLI Option: --logout
_**Description:**_ This CLI option removes this client's authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
_**Usage Example:**_ `onedrive --logout`
### CLI Option: --modified-by
_**Description:**_
_**Usage Example:**_
### CLI Option: --monitor | -m
_**Description:**_ This CLI option controls the 'Monitor Mode' operational aspect of the client. When this option is used, the client will perform on-going syncs of data between Microsoft OneDrive and your local system. Local changes will be uploaded in near-realtime, whilst online changes will be downloaded on the next sync process. The frequency of these checks is governed by the 'monitor_interval' value.
_**Usage Example:**_ `onedrive --monitor` or `onedrive -m`
### CLI Option: --print-access-token
_**Description:**_ Print the current access token being used to access Microsoft OneDrive.
_**Usage Example:**_ `onedrive --verbose --verbose --debug-https --print-access-token`
_**Additional Usage Notes:**_ Do not use this option unless you know why you need it. Be highly cautious of exposing this token. Change your password if you feel that you have inadvertently exposed it.
### CLI Option: --reauth
_**Description:**_ This CLI option controls the ability to re-authenticate your client with Microsoft OneDrive.
_**Usage Example:**_ `onedrive --reauth`
### CLI Option: --remove-directory
_**Description:**_
_**Usage Example:**_
### CLI Option: --single-directory
_**Description:**_
_**Usage Example:**_
### CLI Option: --source-directory
_**Description:**_
_**Usage Example:**_
### CLI Option: --sync | -s
_**Description:**_ This CLI option controls the 'Standalone Mode' operational aspect of the client. When this option is used, the client will perform a one-time sync of data between Microsoft OneDrive and your local system.
_**Usage Example:**_ `onedrive --sync` or `onedrive -s`
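This option can be combined with other CLI options documented above; a sketch (the directory name is only an example):
```text
onedrive --sync --verbose --single-directory 'Documents'
```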
### CLI Option: --verbose | -v+
_**Description:**_
_**Usage Example:**_
### CLI Option: --with-editing-perms
_**Description:**_
_**Usage Example:**_
## Deprecated Configuration File and CLI Options
The following configuration options are no longer supported:
### min_notify_changes
_**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification.
_**Deprecated Config Example:**_ `min_notify_changes = "50"`
_**Deprecated CLI Option:**_ `--min-notify-changes '50'`
_**Reason for deprecation:**_ The application has been totally re-written. When this item was introduced, it was done to reduce spamming of all events to the GUI desktop.
### CLI Option: --synchronize
_**Description:**_ Perform a synchronisation with Microsoft OneDrive
_**Deprecated CLI Option:**_ `--synchronize`
_**Reason for deprecation:**_ `--synchronize` has been deprecated in favour of `--sync` or `-s`
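A minimal migration sketch for any scripts that still use the deprecated option:
```text
# Before (deprecated)
onedrive --synchronize --verbose
# After
onedrive --sync --verbose
```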
View file
@ -0,0 +1,97 @@
# OneDrive Client for Linux Application Security
This document details the following information:
* Why is this application an 'unverified publisher'?
* Application Security and Permission Scopes
* How to change Permission Scopes
* How to review your existing application access consent
## Why is this application an 'unverified publisher'?
Publisher Verification, as per the Microsoft [process](https://learn.microsoft.com/en-us/azure/active-directory/develop/publisher-verification-overview), has been configured and has actually been verified!
### Verified Publisher Configuration Evidence
As per the image below, the Azure portal shows that the 'Publisher Domain' has actually been verified:
![confirmed_verified_publisher](./images/confirmed_verified_publisher.jpg)
* The 'Publisher Domain' is: https://abraunegg.github.io/
* The required 'Microsoft Identity Association' is: https://abraunegg.github.io/.well-known/microsoft-identity-association.json
## Application Security and Permission Scopes
There are 2 main components regarding security for this application:
* Azure Application Permissions
* User Authentication Permissions
Keeping this in mind, security options should follow the security principle of 'least privilege':
> The principle that a security architecture should be designed so that each entity
> is granted the minimum system resources and authorizations that the entity needs
> to perform its function.
Reference: [https://csrc.nist.gov/glossary/term/least_privilege](https://csrc.nist.gov/glossary/term/least_privilege)
As such, the following API permissions are used by default:
### Default Azure Application Permissions
| API / Permissions name | Type | Description | Admin consent required |
|---|---|---|---|
| Files.Read | Delegated | Have read-only access to user files | No |
| Files.Read.All | Delegated | Have read-only access to all files user can access | No |
| Sites.Read.All | Delegated | Have read-only access to all items in all site collections | No |
| offline_access | Delegated | Maintain access to data you have given it access to | No |
![default_authentication_scopes](./images/default_authentication_scopes.jpg)
### Default User Authentication Permissions
When a user authenticates with Microsoft OneDrive, additional account permissions are provided by the service to give the user specific access to their data. These are delegated permissions provided by the platform:
| API / Permissions name | Type | Description | Admin consent required |
|---|---|---|---|
| Files.ReadWrite | Delegated | Have full access to user files | No |
| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No |
| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No |
| offline_access | Delegated | Maintain access to data you have given it access to | No |
When these delegated API permissions are combined, these provide the effective authentication scope for the OneDrive Client for Linux to access your data. The resulting effective 'default' permissions will be:
| API / Permissions name | Type | Description | Admin consent required |
|---|---|---|---|
| Files.ReadWrite | Delegated | Have full access to user files | No |
| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No |
| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No |
| offline_access | Delegated | Maintain access to data you have given it access to | No |
These 'default' permissions will allow the OneDrive Client for Linux to read, write and delete data associated with your OneDrive Account.
## Configuring read-only access to your OneDrive data
In some situations, it may be desirable to configure the OneDrive Client for Linux to operate in a totally read-only manner.
To change the application to 'read-only' access, add the following to your configuration file:
```text
read_only_auth_scope = "true"
```
This will change the user authentication scope request to use read-only access.
**Note:** When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes.
When using read-only authentication scopes, the uploading of any data or local change to OneDrive will fail with the following error:
```
2022-Aug-06 13:16:45.3349625 ERROR: Microsoft OneDrive API returned an error with the following message:
2022-Aug-06 13:16:45.3351661 Error Message: HTTP request returned status code 403 (Forbidden)
2022-Aug-06 13:16:45.3352467 Error Reason: Access denied
2022-Aug-06 13:16:45.3352838 Error Timestamp: 2022-06-12T13:16:45
2022-Aug-06 13:16:45.3353171 API Request ID: <redacted>
```
As such, it is also advisable for you to add the following to your configuration file so that 'uploads' are prevented:
```text
download_only = "true"
```
**Important:** Additionally, when using 'read_only_auth_scope', you will also need to remove your existing application access consent, otherwise your old authentication consent will remain valid and will be used. This would mean the application still technically has the consent to upload data. See below on how to remove your prior application consent.
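Putting the above together, a minimal sketch of the full read-only change, assuming the default configuration file location and that your prior application consent has first been removed as described in the next section:
```text
# Added to ~/.config/onedrive/config
read_only_auth_scope = "true"
download_only = "true"
```
Then re-authenticate the client so the read-only scopes take effect:
```text
onedrive --reauth
```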
## Reviewing your existing application access consent
To review your existing application access consent, you need to access the following URL: https://account.live.com/consent/Manage
From here, you are able to review what applications have been given what access to your data, and remove application access as required.
379
docs/build-rpm-howto.md Normal file
View file
@ -0,0 +1,379 @@
# RPM Package Build Process
The instructions below have been tested on the following systems:
* CentOS 7 x86_64
* CentOS 8 x86_64
These instructions should also be applicable for RedHat & Fedora platforms, or any other RedHat RPM based distribution.
## Prepare Package Development Environment (CentOS 7, 8)
Install the following dependencies on your build system:
```text
sudo yum groupinstall -y 'Development Tools'
sudo yum install -y libcurl-devel
sudo yum install -y sqlite-devel
sudo yum install -y libnotify-devel
sudo yum install -y wget
sudo yum install -y http://downloads.dlang.org/releases/2.x/2.088.0/dmd-2.088.0-0.fedora.x86_64.rpm
mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
```
## Build RPM from spec file
Build the RPM from the provided spec file:
```text
wget https://github.com/abraunegg/onedrive/archive/refs/tags/v2.4.22.tar.gz -O ~/rpmbuild/SOURCES/v2.4.22.tar.gz
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/contrib/spec/onedrive.spec.in -O ~/rpmbuild/SPECS/onedrive.spec
rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec
```
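Once the build completes, the resulting package can be installed directly; the exact filename is printed by `rpmbuild`, as shown in the example output below:
```text
sudo yum -y install ~/rpmbuild/RPMS/x86_64/onedrive-*.rpm
```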
## RPM Build Example Results
Below are example output results of building, installing and running the RPM package on the respective platforms:
### CentOS 7
```text
[alex@localhost ~]$ rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.wi6Tdz
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd /home/alex/rpmbuild/BUILD
+ rm -rf onedrive-2.4.15
+ /usr/bin/tar -xf -
+ /usr/bin/gzip -dc /home/alex/rpmbuild/SOURCES/v2.4.15.tar.gz
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd onedrive-2.4.15
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ exit 0
Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.dyeEuM
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd onedrive-2.4.15
+ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic'
+ export CFLAGS
+ CXXFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic'
+ export CXXFLAGS
+ FFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -I/usr/lib64/gfortran/modules'
+ export FFLAGS
+ FCFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -I/usr/lib64/gfortran/modules'
+ export FCFLAGS
+ LDFLAGS='-Wl,-z,relro '
+ export LDFLAGS
+ '[' 1 == 1 ']'
+ '[' x86_64 == ppc64le ']'
++ find . -name config.guess -o -name config.sub
+ ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info
configure: WARNING: unrecognized options: --disable-dependency-tracking
checking for a BSD-compatible install... /usr/bin/install -c
checking for x86_64-redhat-linux-gnu-pkg-config... no
checking for pkg-config... /usr/bin/pkg-config
checking pkg-config is at least version 0.9.0... yes
checking for dmd... dmd
checking version of D compiler... 2.087.0
checking for curl... yes
checking for sqlite... yes
configure: creating ./config.status
config.status: creating Makefile
config.status: creating contrib/pacman/PKGBUILD
config.status: creating contrib/spec/onedrive.spec
config.status: creating onedrive.1
config.status: creating contrib/systemd/onedrive.service
config.status: creating contrib/systemd/onedrive@.service
configure: WARNING: unrecognized options: --disable-dependency-tracking
+ make
if [ -f .git/HEAD ] ; then \
git describe --tags > version ; \
else \
echo v2.4.15 > version ; \
fi
dmd -w -g -O -J. -L-lcurl -L-lsqlite3 -L-ldl src/config.d src/itemdb.d src/log.d src/main.d src/monitor.d src/onedrive.d src/qxor.d src/selective.d src/sqlite.d src/sync.d src/upload.d src/util.d src/progress.d src/arsd/cgi.d -ofonedrive
+ exit 0
Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.L3JbHy
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ '[' /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 '!=' / ']'
+ rm -rf /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64
++ dirname /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64
+ mkdir -p /home/alex/rpmbuild/BUILDROOT
+ mkdir /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64
+ cd onedrive-2.4.15
+ /usr/bin/make install DESTDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 PREFIX=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64
/usr/bin/install -c -D onedrive /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/bin/onedrive
/usr/bin/install -c -D onedrive.1 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/man/man1/onedrive.1
/usr/bin/install -c -D -m 644 contrib/logrotate/onedrive.logrotate /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/etc/logrotate.d/onedrive
mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive
/usr/bin/install -c -D -m 644 README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive
/usr/bin/install -c -d -m 0755 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/user /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system
/usr/bin/install -c -m 0644 contrib/systemd/onedrive@.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system
/usr/bin/install -c -m 0644 contrib/systemd/onedrive.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system
+ /usr/lib/rpm/check-buildroot
+ /usr/lib/rpm/redhat/brp-compress
+ /usr/lib/rpm/redhat/brp-strip /usr/bin/strip
+ /usr/lib/rpm/redhat/brp-strip-comment-note /usr/bin/strip /usr/bin/objdump
+ /usr/lib/rpm/redhat/brp-strip-static-archive /usr/bin/strip
+ /usr/lib/rpm/brp-python-bytecompile /usr/bin/python 1
+ /usr/lib/rpm/redhat/brp-python-hardlink
+ /usr/lib/rpm/redhat/brp-java-repack-jars
Processing files: onedrive-2.4.15-1.el7.x86_64
Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.cpSXho
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd onedrive-2.4.15
+ DOCDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15
+ export DOCDIR
+ /usr/bin/mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15
+ cp -pr README.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15
+ cp -pr LICENSE /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15
+ cp -pr CHANGELOG.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15
+ exit 0
Provides: config(onedrive) = 2.4.15-1.el7 onedrive = 2.4.15-1.el7 onedrive(x86-64) = 2.4.15-1.el7
Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.15)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3.2)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.6)(64bit) libc.so.6(GLIBC_2.8)(64bit) libc.so.6(GLIBC_2.9)(64bit) libcurl.so.4()(64bit) libdl.so.2()(64bit) libdl.so.2(GLIBC_2.2.5)(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgcc_s.so.1(GCC_4.2.0)(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libpthread.so.0()(64bit) libpthread.so.0(GLIBC_2.2.5)(64bit) libpthread.so.0(GLIBC_2.3.2)(64bit) libpthread.so.0(GLIBC_2.3.4)(64bit) librt.so.1()(64bit) librt.so.1(GLIBC_2.2.5)(64bit) libsqlite3.so.0()(64bit) rtld(GNU_HASH)
Checking for unpackaged file(s): /usr/lib/rpm/check-files /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64
Wrote: /home/alex/rpmbuild/SRPMS/onedrive-2.4.15-1.el7.src.rpm
Wrote: /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm
Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.nWoW33
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd onedrive-2.4.15
+ exit 0
[alex@localhost ~]$ sudo yum -y install /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm
Loaded plugins: fastestmirror
Examining /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm: onedrive-2.4.15-1.el7.x86_64
Marking /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package onedrive.x86_64 0:2.4.15-1.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
==============================================================================================================================================================================================
Package Arch Version Repository Size
==============================================================================================================================================================================================
Installing:
onedrive x86_64 2.4.15-1.el7 /onedrive-2.4.15-1.el7.x86_64 7.2 M
Transaction Summary
==============================================================================================================================================================================================
Install 1 Package
Total size: 7.2 M
Installed size: 7.2 M
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : onedrive-2.4.15-1.el7.x86_64 1/1
Verifying : onedrive-2.4.15-1.el7.x86_64 1/1
Installed:
onedrive.x86_64 0:2.4.15-1.el7
Complete!
[alex@localhost ~]$ which onedrive
/usr/bin/onedrive
[alex@localhost ~]$ onedrive --version
onedrive v2.4.15
[alex@localhost ~]$ onedrive --display-config
onedrive version = v2.4.15
Config path = /home/alex/.config/onedrive
Config file found in config path = false
Config option 'check_nosync' = false
Config option 'sync_dir' = /home/alex/OneDrive
Config option 'skip_dir' =
Config option 'skip_file' = ~*|.~*|*.tmp
Config option 'skip_dotfiles' = false
Config option 'skip_symlinks' = false
Config option 'monitor_interval' = 300
Config option 'min_notify_changes' = 5
Config option 'log_dir' = /var/log/onedrive/
Config option 'classify_as_big_delete' = 1000
Config option 'upload_only' = false
Config option 'no_remote_delete' = false
Config option 'remove_source_files' = false
Config option 'sync_root_files' = false
Selective sync 'sync_list' configured = false
Business Shared Folders configured = false
[alex@localhost ~]$
```
### CentOS 8
```text
[alex@localhost ~]$ rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec
Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.UINFyE
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd /home/alex/rpmbuild/BUILD
+ rm -rf onedrive-2.4.15
+ /usr/bin/gzip -dc /home/alex/rpmbuild/SOURCES/v2.4.15.tar.gz
+ /usr/bin/tar -xof -
+ STATUS=0
+ '[' 0 -ne 0 ']'
+ cd onedrive-2.4.15
+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
+ exit 0
Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.cX1WQa
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd onedrive-2.4.15
+ CFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection'
+ export CFLAGS
+ CXXFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection'
+ export CXXFLAGS
+ FFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules'
+ export FFLAGS
+ FCFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules'
+ export FCFLAGS
+ LDFLAGS='-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld'
+ export LDFLAGS
+ '[' 1 = 1 ']'
+++ dirname ./configure
++ find . -name config.guess -o -name config.sub
+ '[' 1 = 1 ']'
+ '[' x '!=' 'x-Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld' ']'
++ find . -name ltmain.sh
+ ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info
configure: WARNING: unrecognized options: --disable-dependency-tracking
checking for a BSD-compatible install... /usr/bin/install -c
checking for x86_64-redhat-linux-gnu-pkg-config... /usr/bin/x86_64-redhat-linux-gnu-pkg-config
checking pkg-config is at least version 0.9.0... yes
checking for dmd... dmd
checking version of D compiler... 2.087.0
checking for curl... yes
checking for sqlite... yes
configure: creating ./config.status
config.status: creating Makefile
config.status: creating contrib/pacman/PKGBUILD
config.status: creating contrib/spec/onedrive.spec
config.status: creating onedrive.1
config.status: creating contrib/systemd/onedrive.service
config.status: creating contrib/systemd/onedrive@.service
configure: WARNING: unrecognized options: --disable-dependency-tracking
+ make
if [ -f .git/HEAD ] ; then \
git describe --tags > version ; \
else \
echo v2.4.15 > version ; \
fi
dmd -w -g -O -J. -L-lcurl -L-lsqlite3 -L-ldl src/config.d src/itemdb.d src/log.d src/main.d src/monitor.d src/onedrive.d src/qxor.d src/selective.d src/sqlite.d src/sync.d src/upload.d src/util.d src/progress.d src/arsd/cgi.d -ofonedrive
+ exit 0
Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.dNFPdx
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ '[' /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 '!=' / ']'
+ rm -rf /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64
++ dirname /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64
+ mkdir -p /home/alex/rpmbuild/BUILDROOT
+ mkdir /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64
+ cd onedrive-2.4.15
+ /usr/bin/make install DESTDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 'INSTALL=/usr/bin/install -p' PREFIX=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64
/usr/bin/install -p -D onedrive /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/bin/onedrive
/usr/bin/install -p -D onedrive.1 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/man/man1/onedrive.1
/usr/bin/install -p -D -m 644 contrib/logrotate/onedrive.logrotate /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/etc/logrotate.d/onedrive
mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
/usr/bin/install -p -D -m 644 README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
/usr/bin/install -p -d -m 0755 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/user /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system
/usr/bin/install -p -m 0644 contrib/systemd/onedrive@.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system
/usr/bin/install -p -m 0644 contrib/systemd/onedrive.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system
+ /usr/lib/rpm/check-buildroot
+ /usr/lib/rpm/redhat/brp-ldconfig
/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/etc/ld.so.conf: No such file or directory
+ /usr/lib/rpm/brp-compress
+ /usr/lib/rpm/brp-strip /usr/bin/strip
+ /usr/lib/rpm/brp-strip-comment-note /usr/bin/strip /usr/bin/objdump
+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip
+ /usr/lib/rpm/brp-python-bytecompile 1
+ /usr/lib/rpm/brp-python-hardlink
+ PYTHON3=/usr/libexec/platform-python
+ /usr/lib/rpm/redhat/brp-mangle-shebangs
Processing files: onedrive-2.4.15-1.el8.x86_64
Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.TnFKbZ
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd onedrive-2.4.15
+ DOCDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
+ export LC_ALL=C
+ LC_ALL=C
+ export DOCDIR
+ /usr/bin/mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
+ cp -pr README.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
+ cp -pr LICENSE /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
+ cp -pr CHANGELOG.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive
+ exit 0
warning: File listed twice: /usr/share/doc/onedrive
warning: File listed twice: /usr/share/doc/onedrive/CHANGELOG.md
warning: File listed twice: /usr/share/doc/onedrive/LICENSE
warning: File listed twice: /usr/share/doc/onedrive/README.md
Provides: config(onedrive) = 2.4.15-1.el8 onedrive = 2.4.15-1.el8 onedrive(x86-64) = 2.4.15-1.el8
Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.15)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3.2)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.6)(64bit) libc.so.6(GLIBC_2.8)(64bit) libc.so.6(GLIBC_2.9)(64bit) libcurl.so.4()(64bit) libdl.so.2()(64bit) libdl.so.2(GLIBC_2.2.5)(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgcc_s.so.1(GCC_4.2.0)(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libpthread.so.0()(64bit) libpthread.so.0(GLIBC_2.2.5)(64bit) libpthread.so.0(GLIBC_2.3.2)(64bit) libpthread.so.0(GLIBC_2.3.4)(64bit) librt.so.1()(64bit) librt.so.1(GLIBC_2.2.5)(64bit) libsqlite3.so.0()(64bit) rtld(GNU_HASH)
Checking for unpackaged file(s): /usr/lib/rpm/check-files /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64
Wrote: /home/alex/rpmbuild/SRPMS/onedrive-2.4.15-1.el8.src.rpm
Wrote: /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el8.x86_64.rpm
Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.FAMTFz
+ umask 022
+ cd /home/alex/rpmbuild/BUILD
+ cd onedrive-2.4.15
+ exit 0
[alex@localhost ~]$ sudo yum -y install /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el8.x86_64.rpm
Last metadata expiration check: 0:04:07 ago on Fri 14 Jan 2022 14:22:13 EST.
Dependencies resolved.
==============================================================================================================================================================================================
Package Architecture Version Repository Size
==============================================================================================================================================================================================
Installing:
onedrive x86_64 2.4.15-1.el8 @commandline 1.5 M
Transaction Summary
==============================================================================================================================================================================================
Install 1 Package
Total size: 1.5 M
Installed size: 7.1 M
Downloading Packages:
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
Preparing : 1/1
Installing : onedrive-2.4.15-1.el8.x86_64 1/1
Running scriptlet: onedrive-2.4.15-1.el8.x86_64 1/1
Verifying : onedrive-2.4.15-1.el8.x86_64 1/1
Installed:
onedrive-2.4.15-1.el8.x86_64
Complete!
[alex@localhost ~]$ which onedrive
/usr/bin/onedrive
[alex@localhost ~]$ onedrive --version
onedrive v2.4.15
[alex@localhost ~]$ onedrive --display-config
onedrive version = v2.4.15
Config path = /home/alex/.config/onedrive
Config file found in config path = false
Config option 'check_nosync' = false
Config option 'sync_dir' = /home/alex/OneDrive
Config option 'skip_dir' =
Config option 'skip_file' = ~*|.~*|*.tmp
Config option 'skip_dotfiles' = false
Config option 'skip_symlinks' = false
Config option 'monitor_interval' = 300
Config option 'min_notify_changes' = 5
Config option 'log_dir' = /var/log/onedrive/
Config option 'classify_as_big_delete' = 1000
Config option 'upload_only' = false
Config option 'no_remote_delete' = false
Config option 'remove_source_files' = false
Config option 'sync_root_files' = false
Selective sync 'sync_list' configured = false
Business Shared Folders configured = false
[alex@localhost ~]$
```
View file
@ -0,0 +1,40 @@
# How to configure OneDrive Business Shared Folder Sync
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Important Note
This feature has been 100% re-written from v2.5.0 onwards. A prerequisite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to:
* Removing `sync_business_shared_folders = "true|false"` from your 'config' file
* Removing the 'business_shared_folders' file
* Removing any local data and shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues.
## Process Overview
Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you.
2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder
3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement.
4. Test the configuration using '--dry-run' (see the sketch after the config examples below)
5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required
**NOTE:** This documentation will be updated as this feature progresses.
### Enable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_items = "true"
```
### Disable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_items = "false"
```
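As a sketch of steps 4 and 5 from the process overview above (remembering that enabling the option triggers a `--resync` requirement):
```text
# Step 4: test the new configuration without making any changes
onedrive --sync --resync --dry-run
# Step 5: if the dry run output looks correct, perform the real sync
onedrive --sync --resync
```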
## Known Issues
Shared folders that were shared with you by people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders.
Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below:
![shared_with_me](./images/shared_with_me.JPG)
This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)
320
docs/docker.md Normal file
View file
@ -0,0 +1,320 @@
# Run the OneDrive Client for Linux under Docker
This client can be run as a Docker container, with 3 available container base options for you to choose from:
| Container Base | Docker Tag | Description | i686 | x86_64 | ARMHF | AARCH64 |
|----------------|-------------|----------------------------------------------------------------|:------:|:------:|:-----:|:-------:|
| Alpine Linux | edge-alpine | Docker container based on Alpine 3.18 using 'master' |❌|✔|❌|✔|
| Alpine Linux | alpine | Docker container based on Alpine 3.18 using latest release |❌|✔|❌|✔|
| Debian | debian | Docker container based on Debian Stable using latest release |✔|✔|✔|✔|
| Debian | edge | Docker container based on Debian Stable using 'master' |✔|✔|✔|✔|
| Debian | edge-debian | Docker container based on Debian Stable using 'master' |✔|✔|✔|✔|
| Debian | latest | Docker container based on Debian Stable using latest release |✔|✔|✔|✔|
| Fedora | edge-fedora | Docker container based on Fedora 38 using 'master' |❌|✔|❌|✔|
| Fedora | fedora | Docker container based on Fedora 38 using latest release |❌|✔|❌|✔|
These containers offer a simple monitoring-mode service for the OneDrive Client for Linux.
The instructions below have been validated on:
* Red Hat Enterprise Linux 8.x
* Ubuntu Server 22.04
The instructions below will utilise the 'edge' tag, however this can be substituted for any of the other docker tags such as 'latest' from the table above if desired.
The 'edge' Docker Container will align closer to all documentation and features, whereas 'latest' is the release version from a static point in time. The 'latest' tag, however, may contain bugs and/or issues that have since been fixed; those fixes are contained in 'edge'.
Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in.
## Basic Setup
### 0. Install docker using your distribution platform's instructions
1. Ensure that SELinux has been disabled on your system. A reboot may be required to ensure that this is correctly disabled.
2. Install Docker as required for your platform. Refer to https://docs.docker.com/engine/install/ for assistance.
3. Obtain your normal, non-root user UID and GID by using the `id` command
4. As your normal, non-root user, ensure that you can run `docker run hello-world` *without* using `sudo`
Once the above 4 steps are complete and you can successfully run `docker run hello-world` without sudo, only then proceed to 'Pulling and Running the Docker Image'
## Pulling and Running the Docker Image
### 1. Pull the image
```bash
docker pull driveone/onedrive:edge
```
**NOTE:** SELinux context needs to be configured or disabled for Docker to be able to write to OneDrive host directory.
### 2. Prepare config volume
The Docker container requires 2 Docker volumes:
* Config Volume
* Data Volume
Create the config volume with the following command:
```bash
docker volume create onedrive_conf
```
This will create a docker volume labeled `onedrive_conf`, where all configuration of your onedrive account will be stored. You can add a custom config file and other things later.
The second docker volume is for your data folder and is created in the next step. This volume needs to be a path to a directory on your local filesystem, and this is where your data will be stored from OneDrive. Keep in mind that:
* The owner of this specified folder must not be root
* The owner of this specified folder must have permissions for its parent directory
**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message:
```bash
ROOT level privileges prohibited!
```
### 3. First run
The 'onedrive' client within the Docker container needs to be authorized with your Microsoft account. This is achieved by initially running docker in interactive mode.
Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
mkdir -p ${ONEDRIVE_DATA_DIR}
docker run -it --name onedrive -v onedrive_conf:/onedrive/conf \
-v "${ONEDRIVE_DATA_DIR}:/onedrive/data" \
-e "ONEDRIVE_UID=${ONEDRIVE_UID}" \
-e "ONEDRIVE_GID=${ONEDRIVE_GID}" \
driveone/onedrive:edge
```
**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the Docker container, otherwise, Docker will create the target folder, and the folder will be given 'root' permissions, which then causes the Docker container to fail upon startup with the following error message:
```bash
ROOT level privileges prohibited!
```
**NOTE:** It is also highly advisable for you to replace `${ONEDRIVE_UID}` and `${ONEDRIVE_GID}` with your actual UID and GID as specified by your `id` command output to avoid any potential user or group conflicts.
**Example:**
```bash
export ONEDRIVE_UID=`id -u`
export ONEDRIVE_GID=`id -g`
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
mkdir -p ${ONEDRIVE_DATA_DIR}
docker run -it --name onedrive -v onedrive_conf:/onedrive/conf \
-v "${ONEDRIVE_DATA_DIR}:/onedrive/data" \
-e "ONEDRIVE_UID=${ONEDRIVE_UID}" \
-e "ONEDRIVE_GID=${ONEDRIVE_GID}" \
driveone/onedrive:edge
```
When the Docker container successfully starts:
* You will be asked to open a specific link using your web browser
* Login to your Microsoft Account and give the application the permission
* After giving the permission, you will be redirected to a blank page
* Copy the URI of the blank page into the application prompt to authorise the application
Once the 'onedrive' application is authorised, the client will automatically start monitoring your `ONEDRIVE_DATA_DIR` for data changes to be uploaded to OneDrive. Files stored on OneDrive will be downloaded to this location.
If the client is working as expected, you can detach from the container with Ctrl+p, Ctrl+q.
### 4. Docker Container Status, stop, and restart
Check if the monitor service is running
```bash
docker ps -f name=onedrive
```
Show monitor run logs
```bash
docker logs onedrive
```
Stop running monitor
```bash
docker stop onedrive
```
Resume monitor
```bash
docker start onedrive
```
Remove onedrive Docker container
```bash
docker rm -f onedrive
```
## Advanced Setup
### 5. Docker-compose
The client also supports docker-compose, using compose file schema version 3 or later.
In the following example it is assumed you have an `ONEDRIVE_DATA_DIR` environment variable set and an `onedrive_conf` volume created.
However, you can also use bind mounts for the configuration folder, e.g. `export ONEDRIVE_CONF="${HOME}/OneDriveConfig"`.
```
version: "3"
services:
onedrive:
image: driveone/onedrive:edge
restart: unless-stopped
environment:
- ONEDRIVE_UID=${PUID}
- ONEDRIVE_GID=${PGID}
volumes:
- onedrive_conf:/onedrive/conf
- ${ONEDRIVE_DATA_DIR}:/onedrive/data
```
Note that you still have to perform step 3: First Run.
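A minimal sketch of bringing the stack up and following its logs, assuming the compose file above is saved as `docker-compose.yml` in the current directory:
```bash
docker-compose up -d onedrive
docker-compose logs -f onedrive
```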
### 6. Edit the config
The 'onedrive' client should run in its default configuration; however, you can change this default configuration by placing a custom config file in the `onedrive_conf` docker volume. First, download the default config from [here](https://raw.githubusercontent.com/abraunegg/onedrive/master/config)
Then put it into your onedrive_conf volume path, which can be found with:
```bash
docker volume inspect onedrive_conf
```
Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first.
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)
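A minimal sketch of placing a custom config into the volume; the mountpoint shown in the last command is typical, but use the path reported by the inspect command on your system:
```bash
# Download the default config as a starting point, then edit it
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ./config
# Locate the volume mountpoint on the host
docker volume inspect onedrive_conf --format '{{ .Mountpoint }}'
# Copy the edited config into the volume (path taken from the command above)
sudo cp ./config /var/lib/docker/volumes/onedrive_conf/_data/config
```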
### 7. Sync multiple accounts
There are many ways to do this; the easiest is probably to:
1. Create a second docker config volume (replace `Work` with your desired name): `docker volume create onedrive_conf_Work`
2. And start a second docker monitor container (again replace `Work` with your desired name):
```
export ONEDRIVE_DATA_DIR_WORK="/home/abraunegg/OneDriveWork"
mkdir -p ${ONEDRIVE_DATA_DIR_WORK}
docker run -it --restart unless-stopped --name onedrive_Work -v onedrive_conf_Work:/onedrive/conf -v "${ONEDRIVE_DATA_DIR_WORK}:/onedrive/data" driveone/onedrive:edge
```
## Run or update with one script
If you are experienced with docker and onedrive, you can use the following script:
```bash
# Update ONEDRIVE_DATA_DIR with correct OneDrive directory path
ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
# Create directory if non-existent
mkdir -p ${ONEDRIVE_DATA_DIR}
firstRun='-d'
docker pull driveone/onedrive:edge
docker inspect onedrive_conf > /dev/null 2>&1 || { docker volume create onedrive_conf; firstRun='-it'; }
docker inspect onedrive > /dev/null 2>&1 && docker rm -f onedrive
docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge
```
## Environment Variables
| Variable | Purpose | Sample Value |
| ---------------- | --------------------------------------------------- |:--------------------------------------------------------------------------------------------------------------------------------:|
| <B>ONEDRIVE_UID</B> | UserID (UID) to run as | 1000 |
| <B>ONEDRIVE_GID</B> | GroupID (GID) to run as | 1000 |
| <B>ONEDRIVE_VERBOSE</B> | Controls "--verbose" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_DEBUG</B> | Controls "--verbose --verbose" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_DEBUG_HTTPS</B> | Controls "--debug-https" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_RESYNC</B> | Controls "--resync" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_DOWNLOADONLY</B> | Controls "--download-only" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_UPLOADONLY</B> | Controls "--upload-only" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_NOREMOTEDELETE</B> | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_LOGOUT</B> | Controls "--logout" switch. Default is 0 | 1 |
| <B>ONEDRIVE_REAUTH</B> | Controls "--reauth" switch. Default is 0 | 1 |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_DISPLAY_CONFIG</B> | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_SINGLE_DIRECTORY</B> | Controls "--single-directory" option. Default = "" | "mydir" |
### Usage Examples
**Verbose Output:**
```bash
docker container run -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge
```
**Debug Output:**
```bash
docker container run -e ONEDRIVE_DEBUG=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge
```
**Perform a --resync:**
```bash
docker container run -e ONEDRIVE_RESYNC=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge
```
**Perform a --resync and --verbose:**
```bash
docker container run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge
```
**Perform a --logout and re-authenticate:**
```bash
docker container run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge
```
## Build instructions
### Build Environment Requirements
* Build environment must have at least 1GB of memory & 2GB swap space
There are two ways to satisfy this requirement:
* Modify the file `/etc/dphys-swapfile` and edit the `CONF_SWAPSIZE`, for example: `CONF_SWAPSIZE=2048`. A reboot is required to make this change effective.
* Dynamically allocate a swapfile for building:
```bash
cd /var
sudo fallocate -l 1.5G swapfile
sudo chmod 600 swapfile
sudo mkswap swapfile
sudo swapon swapfile
# make swap permanent
sudo nano /etc/fstab
# add "/swapfile swap swap defaults 0 0" at the end of file
# check it has been assigned
swapon -s
free -h
```
### Building a custom Docker image
You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive):
```bash
git clone https://github.com/abraunegg/onedrive
cd onedrive
docker build . -t local-onedrive -f contrib/docker/Dockerfile
```
There are alternate, smaller images available by building
Dockerfile-debian or Dockerfile-alpine. These [multi-stage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/)
Dockerfiles require Docker version 17.05 or later.
#### How to build and run a custom Docker image based on Debian
``` bash
docker build . -t local-onedrive-debian -f contrib/docker/Dockerfile-debian
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-debian:latest
```
#### How to build and run a custom Docker image based on Alpine Linux
``` bash
docker build . -t local-onedrive-alpine -f contrib/docker/Dockerfile-alpine
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-alpine:latest
```
#### How to build and run a custom Docker image for ARMHF (Raspberry Pi)
Compatible with:
* Raspberry Pi
* Raspberry Pi 2
* Raspberry Pi Zero
* Raspberry Pi 3
* Raspberry Pi 4
``` bash
docker build . -t local-onedrive-armhf -f contrib/docker/Dockerfile-debian
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-armhf:latest
```
#### How to build and run a custom Docker image for AARCH64 Platforms
``` bash
docker build . -t local-onedrive-aarch64 -f contrib/docker/Dockerfile-debian
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-aarch64:latest
```
#### How to support double-byte languages
In some geographic regions, you may need to change and/or update the locale specification of the Docker container to better support the local language used for your local filesystem. To do this, follow the example below:
```
FROM driveone/onedrive
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update
RUN apt-get install -y locales
RUN echo "ja_JP.UTF-8 UTF-8" > /etc/locale.gen && \
locale-gen ja_JP.UTF-8 && \
dpkg-reconfigure locales && \
/usr/sbin/update-locale LANG=ja_JP.UTF-8
ENV LC_ALL ja_JP.UTF-8
```
The above example changes the Docker container to support Japanese. To support your local language, change `ja_JP.UTF-8` to the required entry.
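A sketch of building and running the customised image, assuming the snippet above has been saved as `Dockerfile-locale` (both the filename and the image tag here are only examples):
``` bash
docker build . -t local-onedrive-locale -f Dockerfile-locale
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-locale:latest
```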
281
docs/install.md Normal file
View file
@ -0,0 +1,281 @@
# Installing or Upgrading using Distribution Packages or Building the OneDrive Client for Linux from source
## Installing or Upgrading using Distribution Packages
This project has been packaged for the following Linux distributions as per below. The current client release is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases)
Only the current release version or greater is supported. Earlier versions are not supported and should not be installed or used.
#### Important Note:
Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution.
| Distribution | Package Name & Package Link | &nbsp;&nbsp;PKG_Version&nbsp;&nbsp; | &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 | Extra Details |
|---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |<a href="https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge"><img src="https://repology.org/badge/version-for-repo/alpine_edge/onedrive.svg?header=" alt="Alpine Linux Edge package" width="46" height="20"></a>|❌|✔|❌|✔ | |
| Arch Linux<br><br>Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |<a href="https://aur.archlinux.org/packages/onedrive-abraunegg"><img src="https://repology.org/badge/version-for-repo/aur/onedrive-abraunegg.svg?header=" alt="AUR package" width="46" height="20"></a>|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)<br><br>**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'<br><br>**Note:** System must have at least 1GB of memory & 1GB swap space
| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |<a href="https://packages.debian.org/bullseye/source/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_11/onedrive.svg?header=" alt="Debian 11 package" width="46" height="20"></a>|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |<a href="https://packages.debian.org/bookworm/source/onedrive"><img src="https://repology.org/badge/version-for-repo/debian_12/onedrive.svg?header=" alt="Debian 12 package" width="46" height="20"></a>|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |<a href="https://koji.fedoraproject.org/koji/packageinfo?packageID=26044"><img src="https://repology.org/badge/version-for-repo/fedora_rawhide/onedrive.svg?header=" alt="Fedora Rawhide package" width="46" height="20"></a>|✔|✔|✔|✔| |
| Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| |
| Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | <a href="https://formulae.brew.sh/formula/onedrive"><img src="https://repology.org/badge/version-for-repo/homebrew/onedrive.svg?header=" alt="Homebrew package" width="46" height="20"></a> |❌|✔|❌|❌| |
| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |<a href="https://community.linuxmint.com/software/view/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories<br><br>It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |<a href="https://community.linuxmint.com/software/view/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories<br><br>It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| NixOS | [onedrive](https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive)|<a href="https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive"><img src="https://repology.org/badge/version-for-repo/nix_unstable/onedrive.svg?header=" alt="nixpkgs unstable package" width="46" height="20"></a>|❌|✔|❌|❌| Use package `onedrive` either by adding it to `configuration.nix` or by using the command `nix-env -iA <channel name>.onedrive`. This does not install a service. To install a service, use unstable channel (will stabilize in 20.09) and add `services.onedrive.enable=true` in `configuration.nix`. You can also add a custom package using the `services.onedrive.package` option (recommended since package lags upstream). Enabling the service installs a default package too (based on the channel). You can also add multiple onedrive accounts trivially, see [documentation](https://github.com/NixOS/nixpkgs/pull/77734#issuecomment-575874225). |
| OpenSuSE | [onedrive](https://software.opensuse.org/package/onedrive) |<a href="https://software.opensuse.org/package/onedrive"><img src="https://repology.org/badge/version-for-repo/opensuse_network_tumbleweed/onedrive.svg?header=" alt="openSUSE Tumbleweed package" width="46" height="20"></a>|✔|✔|❌|❌| |
| OpenSuSE Build Service | [onedrive](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive) | No API Available |✔|✔|✔|✔| Package Build Service for Debian and Ubuntu |
| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |<a href="https://archive.raspbian.org/raspbian/pool/main/o/onedrive/"><img src="https://repology.org/badge/version-for-repo/raspbian_stable/onedrive.svg?header=" alt="Raspbian Stable package" width="46" height="20"></a> |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories<br><br>It is recommended that for Raspbian that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
| Slackware | [onedrive](https://slackbuilds.org/result/?search=onedrive&sv=) |<a href="https://slackbuilds.org/result/?search=onedrive&sv="><img src="https://repology.org/badge/version-for-repo/slackbuilds/onedrive.svg?header=" alt="SlackBuilds package" width="46" height="20"></a>|✔|✔|❌|❌| |
| Solus | [onedrive](https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R) |<a href="https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R"><img src="https://repology.org/badge/version-for-repo/solus/onedrive.svg?header=" alt="Solus package" width="46" height="20"></a>|✔|✔|❌|❌| |
| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |<a href="https://packages.ubuntu.com/focal/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |<a href="https://packages.ubuntu.com/jammy/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |<a href="https://packages.ubuntu.com/lunar/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_23_04/onedrive.svg?header=" alt="Ubuntu 23.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |<a href="https://voidlinux.org/packages/?arch=x86_64&q=onedrive"><img src="https://repology.org/badge/version-for-repo/void_x86_64/onedrive.svg?header=" alt="Void Linux x86_64 package" width="46" height="20"></a>|✔|✔|❌|❌| |
#### Important information for all Ubuntu and Ubuntu based distribution users:
This information is specifically for the following platforms and distributions:
* Ubuntu
* Lubuntu
* Linux Mint
* POP OS
* Peppermint OS
Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Universe packages are out-of-date, are not supported, and should not be used. If you wish to use a package, it is highly recommended that you utilise the [OpenSuSE Build Service](ubuntu-package-install.md) to install packages for these platforms. If the OpenSuSE Build Service does not cater for your version, your only option is to build from source.
If you wish to change this situation so that you can just use the Universe packages via 'apt install onedrive', consider becoming the Ubuntu package maintainer and contribute back to your community.
## Building from Source - High Level Requirements
* Build environment must have at least 1GB of memory & 1GB swap space
* Install the required distribution package dependencies
* [libcurl](http://curl.haxx.se/libcurl/)
* [SQLite 3](https://www.sqlite.org/) >= 3.7.15
* [Digital Mars D Compiler (DMD)](http://dlang.org/download.html) or [LDC the LLVM-based D Compiler](https://github.com/ldc-developers/ldc)
**Note:** DMD version >= 2.088.0 or LDC version >= 1.18.0 is required to compile this application
### Example for installing DMD Compiler
```text
curl -fsS https://dlang.org/install.sh | bash -s dmd
```
### Example for installing LDC Compiler
```text
curl -fsS https://dlang.org/install.sh | bash -s ldc
```
## Distribution Package Dependencies
### Dependencies: Ubuntu 16.x
Ubuntu Linux 16.x LTS reached the end of its five-year LTS window on April 30th 2021 and is no longer supported.
### Dependencies: Ubuntu 18.x / Lubuntu 18.x
Ubuntu Linux 18.x LTS reached the end of its five-year LTS window on May 31st 2023 and is no longer supported.
### Dependencies: Debian 9
Debian 9 reached the end of its five-year support window on June 30th 2022 and is no longer supported.
### Dependencies: Ubuntu 20.x -> Ubuntu 23.x / Debian 10 -> Debian 12 - x86_64
These dependencies are also applicable for all Ubuntu based distributions such as:
* Lubuntu
* Linux Mint
* POP OS
* Peppermint OS
```text
sudo apt install build-essential
sudo apt install libcurl4-openssl-dev libsqlite3-dev pkg-config git curl
curl -fsS https://dlang.org/install.sh | bash -s dmd
```
For notifications the following is also necessary:
```text
sudo apt install libnotify-dev
```
### Dependencies: CentOS 6.x / RHEL 6.x
CentOS 6.x and RHEL 6.x reached End of Life status on November 30th 2020 and are no longer supported.
### Dependencies: Fedora < Version 18 / CentOS 7.x / RHEL 7.x
```text
sudo yum groupinstall 'Development Tools'
sudo yum install libcurl-devel sqlite-devel
curl -fsS https://dlang.org/install.sh | bash -s dmd-2.099.0
```
For notifications the following is also necessary:
```text
sudo yum install libnotify-devel
```
### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x
```text
sudo dnf groupinstall 'Development Tools'
sudo dnf install libcurl-devel sqlite-devel
curl -fsS https://dlang.org/install.sh | bash -s dmd
```
For notifications the following is also necessary:
```text
sudo dnf install libnotify-devel
```
### Dependencies: Arch Linux & Manjaro Linux
```text
sudo pacman -S make pkg-config curl sqlite ldc
```
For notifications the following is also necessary:
```text
sudo pacman -S libnotify
```
### Dependencies: Raspbian (ARMHF) and Ubuntu 22.x / Debian 11 / Debian 12 / Raspbian (ARM64)
**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
These instructions were validated using:
* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-armhf-lite) using Raspberry Pi 3B (revision 1.2)
* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-arm64-lite) using Raspberry Pi 3B (revision 1.2)
* `Linux ubuntu 5.15.0-1005-raspi #5-Ubuntu SMP PREEMPT Mon Apr 4 12:21:48 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux` (ubuntu-22.04-preinstalled-server-arm64+raspi) using Raspberry Pi 3B (revision 1.2)
**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`.
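To confirm the available memory and active swap space, for example:
```text
free -h
swapon --show
```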
```text
sudo apt install build-essential
sudo apt install libcurl4-openssl-dev libsqlite3-dev pkg-config git curl ldc
```
For notifications the following is also necessary:
```text
sudo apt install libnotify-dev
```
### Dependencies: Gentoo
```text
sudo emerge app-portage/layman
sudo layman -a dlang
```
Add ebuild from contrib/gentoo to a local overlay to use.
For notifications the following is also necessary:
```text
sudo emerge x11-libs/libnotify
```
### Dependencies: OpenSuSE Leap 15.0
```text
sudo zypper addrepo https://download.opensuse.org/repositories/devel:languages:D/openSUSE_Leap_15.0/devel:languages:D.repo
sudo zypper refresh
sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static
```
For notifications the following is also necessary:
```text
sudo zypper install libnotify-devel
```
### Dependencies: OpenSuSE Leap 15.1
```text
sudo zypper addrepo https://download.opensuse.org/repositories/devel:languages:D/openSUSE_Leap_15.1/devel:languages:D.repo
sudo zypper refresh
sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static
```
For notifications the following is also necessary:
```text
sudo zypper install libnotify-devel
```
### Dependencies: OpenSuSE Leap 15.2
```text
sudo zypper refresh
sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static
```
For notifications the following is also necessary:
```text
sudo zypper install libnotify-devel
```
## Compilation & Installation
### High Level Steps
1. Install the platform dependencies for your Linux OS
2. Activate your DMD or LDC compiler
3. Clone the GitHub repository, run configure and make, then install
4. Deactivate your DMD or LDC compiler
### Building using DMD Reference Compiler
Before cloning and compiling, if you have installed DMD via curl for your OS, you will need to activate DMD as per the example below:
```text
Run `source ~/dlang/dmd-2.088.0/activate` in your shell to use dmd-2.088.0.
This will setup PATH, LIBRARY_PATH, LD_LIBRARY_PATH, DMD, DC, and PS1.
Run `deactivate` later on to restore your environment.
```
Without performing this step, the compilation process will fail.
**Note:** Depending on your DMD version, substitute `2.088.0` above with the version of DMD that you have installed.
```text
git clone https://github.com/abraunegg/onedrive.git
cd onedrive
./configure
make clean; make;
sudo make install
```
### Build options
#### GUI Notification Support
GUI notification support can be enabled using the `configure` switch `--enable-notifications`.
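For example:
```text
./configure --enable-notifications
```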
#### systemd service directory customisation support
Systemd service files are installed into the appropriate directories on the system,
as provided by the `pkg-config systemd` settings. If overriding the deduced paths is
necessary, the two options `--with-systemdsystemunitdir` (for the systemd system unit
location) and `--with-systemduserunitdir` (for the systemd user unit location) can be
specified. Passing `no` to one of these options disables service file installation.
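For example, to override the system unit directory and disable installation of the user unit (the path shown is illustrative; substitute the correct location for your distribution):
```text
./configure --with-systemdsystemunitdir=/usr/lib/systemd/system --with-systemduserunitdir=no
```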
#### Additional Compiler Debug
By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug
information, useful (for example) for obtaining `perf` profiling figures.
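For example:
```text
./configure --enable-debug
```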
#### Shell Completion Support
By passing `--enable-completions` to the `configure` call, shell completion functions are
installed for `bash`, `zsh` and `fish`. The installation directories are determined
as far as possible automatically, but can be overridden by passing
`--with-bash-completion-dir=<DIR>`, `--with-zsh-completion-dir=<DIR>`, and
`--with-fish-completion-dir=<DIR>` to `configure`.
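For example, to enable completions and override the `bash` completion directory (the path shown is illustrative):
```text
./configure --enable-completions --with-bash-completion-dir=/usr/share/bash-completion/completions
```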
### Building using a different compiler (for example [LDC](https://wiki.dlang.org/LDC))
#### ARMHF Architecture (Raspbian) and ARM64 Architecture (Ubuntu 22.x / Debian 11 / Raspbian)
**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`.
```text
git clone https://github.com/abraunegg/onedrive.git
cd onedrive
./configure DC=/usr/bin/ldmd2
make clean; make
sudo make install
```
## Upgrading the client
If you have installed the client from a distribution package, the client will be updated to the new application version when the package maintainer updates the distribution package and you perform your package update.
If you have built the client from source, it is recommended that you first uninstall your existing 'onedrive' binary (see below), then install the new version by re-cloning, re-compiling and re-installing the client.
**Note:** Following the uninstall process will remove all client components, including *all* systemd files and any custom files created for specific access such as SharePoint Libraries.
You can optionally choose to skip this uninstallation step and simply re-install the client by re-cloning, re-compiling and re-installing - however, the risk is that you end up with two 'onedrive' binaries on your system, and your system search path preferences will determine which binary is used.
**Important:** Before performing any upgrade, it is highly recommended for you to stop any running systemd service if applicable to ensure that these services are restarted using the updated client version.
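For example, to stop a running systemd user service before upgrading (assuming the default user service name; adjust if you use the system service or a custom service):
```text
systemctl --user stop onedrive
```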
Post re-install, to confirm that you have the new version of the client installed, use `onedrive --version` to determine the client version that is now installed.
## Uninstalling the client
### Uninstalling the client if installed from distribution package
Follow your distribution documentation to uninstall the package that you installed.
### Uninstalling the client if installed and built from source
From within your GitHub repository clone, perform the following to remove the 'onedrive' binary:
```text
sudo make uninstall
```
If you are not upgrading your client, to remove your application state and configuration, perform the following additional step:
```text
rm -rf ~/.config/onedrive
```
**Note:** If you are using the `--confdir` option, replace `~/.config/onedrive` with the correct directory storing your client configuration.
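For example, if a custom configuration directory was used (the directory name below is purely illustrative):
```text
rm -rf ~/.config/my-onedrive-business
```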
If you want to just delete the application key, but keep the items database:
```text
rm -f ~/.config/onedrive/refresh_token
```

54
docs/known-issues.md Normal file
View file

@ -0,0 +1,54 @@
# Known Issues
The below are known issues with this client:
## Moving files into different folders should not cause data to delete and be re-uploaded
**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876)
**Description:**
When running the client in standalone mode (`--synchronize`), moving folders that have already been successfully synced between subsequent standalone syncs causes the data to be deleted and re-uploaded.
**Explanation:**
Technically, the client is 'working' correctly: when moving files, you are 'deleting' them from their current location and copying them to the 'new' location. As the client is running in standalone sync mode, there is no way to track what OS operations were performed while the client was not running - this is why the 'delete and upload' occurs.
**Workaround:**
If tracking the movement of data to new local directories is required, it is better to run the client in service mode (`--monitor`) rather than in standalone mode, as the 'move' of files can then be handled at the point when it occurs, so that the data is moved to the new location on OneDrive without the need to be deleted and re-uploaded.
## Application 'stops' running without any visible reason
**Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526)
**Description:**
When running the client and performing an upload or download operation, the application just stops working without any reason or explanation. If `echo $?` is used after the application has exited without visible reason, an exit status of 141 may be reported.
Additionally, this issue has mainly been seen when the client is operating against Microsoft's European data centres.
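For example, checking the exit status immediately after the application stops can confirm this behaviour; an exit status of 141 equals 128 + 13, i.e. termination by SIGPIPE:
```
onedrive --synchronize
echo $?
# 141 = 128 + 13, i.e. the process was terminated by SIGPIPE
```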
**Explanation:**
The client is heavily dependent on Curl and OpenSSL to perform its activities with the Microsoft OneDrive service. Generally, when this issue occurs, the following is found in the HTTPS Debug Log:
```
OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 104
```
The only way to determine that this is the cause of the application ceasing to work is to generate a HTTPS debug log using the following additional flags:
```
--verbose --verbose --debug-https
```
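For example, to capture the HTTPS debug log to a file for later analysis (the log file path below is illustrative):
```
onedrive --synchronize --verbose --verbose --debug-https > ~/onedrive-https-debug.log 2>&1
```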
The `SSL_ERROR_SYSCALL` error above is indicative of the following:
* Some sort of flaky Internet connection somewhere between you and the OneDrive service
* Some sort of 'broken' HTTPS transparent inspection service inspecting your traffic somewhere between you and the OneDrive service
**How to resolve:**
The best avenues of action here are:
* Ensure your OS is as up-to-date as possible
* Get support from your OS vendor
* Speak to your ISP or Help Desk for assistance
* Open a ticket with OpenSSL and/or Curl teams to better handle this sort of connection failure
* Generate a HTTPS Debug Log for this application and open a new support request with Microsoft and provide the debug log file for their analysis.
If you wish to diagnose this issue further, refer to the following:
https://maulwuff.de/research/ssl-debugging.html

View file

@ -0,0 +1,145 @@
# How to configure access to specific Microsoft Azure deployments
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Process Overview
In some cases it is a requirement to utilise specific Microsoft Azure cloud deployments to conform with data and security requirements that require data to reside within the geographic borders of a particular country.
Current national clouds that are supported are:
* Microsoft Cloud for US Government
* Microsoft Cloud Germany
* Azure and Office365 operated by 21Vianet in China
In order to successfully use these specific Microsoft Azure deployments, the following steps are required:
1. Register an application with the Microsoft identity platform using the Azure portal
2. Configure the new application with the appropriate authentication scopes
3. Validate that the authentication / redirect URI is correct for your application registration
4. Configure the onedrive client to use the new application id as provided during application registration
5. Configure the onedrive client to use the right Microsoft Azure deployment region that your application was registered with
6. Authenticate the client
## Step 1: Register a new application with Microsoft Azure
1. Log into your applicable Microsoft Azure Portal with your applicable Office365 identity:
| National Cloud Environment | Microsoft Azure Portal |
|---|---|
| Microsoft Cloud for US Government | https://portal.azure.com/ |
| Microsoft Cloud Germany | https://portal.azure.com/ |
| Azure and Office365 operated by 21Vianet | https://portal.azure.cn/ |
2. Select 'Azure Active Directory' as the service you wish to configure
3. Under 'Manage', select 'App registrations' to register a new application
4. Click 'New registration'
5. Type in the appropriate details required as per below:
![application_registration](./images/application_registration.jpg)
6. To save the application registration, click 'Register' and something similar to the following will be displayed:
![application_registration_done](./images/application_registration_done.jpg)
**Note:** The Application (client) ID UUID, as displayed after application registration, is what is required as the 'application_id' for Step 4 below.
## Step 2: Configure application authentication scopes
Configure the API permissions as per the following:
| API / Permissions name | Type | Description | Admin consent required |
|---|---|---|---|
| Files.ReadWrite | Delegated | Have full access to user files | No |
| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No |
| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No |
| offline_access | Delegated | Maintain access to data you have given it access to | No |
![authentication_scopes](./images/authentication_scopes.jpg)
## Step 3: Validate that the authentication / redirect URI is correct
Add the appropriate redirect URI for your Azure deployment:
![authentication_response_uri](./images/authentication_response_uri.jpg)
A valid entry for the response URI should be one of:
* https://login.microsoftonline.us/common/oauth2/nativeclient (Microsoft Cloud for US Government)
* https://login.microsoftonline.de/common/oauth2/nativeclient (Microsoft Cloud Germany)
* https://login.chinacloudapi.cn/common/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China)
For a single-tenant application, it may be necessary to use your specific tenant id instead of "common":
* https://login.microsoftonline.us/example.onmicrosoft.us/oauth2/nativeclient (Microsoft Cloud for US Government)
* https://login.microsoftonline.de/example.onmicrosoft.de/oauth2/nativeclient (Microsoft Cloud Germany)
* https://login.chinacloudapi.cn/example.onmicrosoft.cn/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China)
## Step 4: Configure the onedrive client to use new application registration
Update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following:
```text
application_id = "insert valid entry here"
```
This will reconfigure the client to use the new application registration you have created.
**Example:**
```text
application_id = "22c49a0d-d21c-4792-aed1-8f163c982546"
```
## Step 5: Configure the onedrive client to use the specific Microsoft Azure deployment
Update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following:
```text
azure_ad_endpoint = "insert valid entry here"
```
Valid entries are:
* USL4 (Microsoft Cloud for US Government)
* USL5 (Microsoft Cloud for US Government - DOD)
* DE (Microsoft Cloud Germany)
* CN (Azure and Office365 operated by 21Vianet in China)
This will configure your client to use the correct Azure AD and Graph endpoints as per [https://docs.microsoft.com/en-us/graph/deployments](https://docs.microsoft.com/en-us/graph/deployments)
**Example:**
```text
azure_ad_endpoint = "USL4"
```
If the Microsoft Azure deployment does not support multi-tenant applications, update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following:
```text
azure_tenant_id = "insert valid entry here"
```
This will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common".
The tenant id may be the GUID Directory ID (formatted "00000000-0000-0000-0000-000000000000"), or the fully qualified tenant name (e.g. "example.onmicrosoft.us").
The GUID Directory ID may be located in the Azure administration page as per [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id). Note that you may need to go to your national-deployment-specific administration page, rather than following the links within that document.
The tenant name may be obtained by following the PowerShell instructions on [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id); it is shown as the "TenantDomain" upon completion of the "Connect-AzureAD" command.
**Example:**
```text
azure_tenant_id = "example.onmicrosoft.us"
# or
azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"
```
## Step 6: Authenticate the client
Run the application without any additional command switches.
You will be asked to open a specific URL in your web browser, where you will have to log in to your Microsoft account and give the application permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application.
```text
[user@hostname ~]$ onedrive
Authorize this app visiting:
https://.....
Enter the response uri:
```
**Example:**
```
[user@hostname ~]$ onedrive
Authorize this app visiting:
https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
Enter the response uri: https://login.microsoftonline.com/common/oauth2/nativeclient?code=<redacted>
Application has been successfully authorised, however no additional command switches were provided.
Please use --help for further assistance in regards to running this application.
```

289
docs/podman.md Normal file
View file

@ -0,0 +1,289 @@
# Run the OneDrive Client for Linux under Podman
This client can be run as a Podman container, with 3 available container base options for you to choose from:
| Container Base | Docker Tag | Description | i686 | x86_64 | ARMHF | AARCH64 |
|----------------|-------------|----------------------------------------------------------------|:------:|:------:|:-----:|:-------:|
| Alpine Linux | edge-alpine | Podman container based on Alpine 3.18 using 'master' |❌|✔|❌|✔|
| Alpine Linux | alpine | Podman container based on Alpine 3.18 using latest release |❌|✔|❌|✔|
| Debian | debian | Podman container based on Debian Stable using latest release |✔|✔|✔|✔|
| Debian | edge | Podman container based on Debian Stable using 'master' |✔|✔|✔|✔|
| Debian | edge-debian | Podman container based on Debian Stable using 'master' |✔|✔|✔|✔|
| Debian | latest | Podman container based on Debian Stable using latest release |✔|✔|✔|✔|
| Fedora | edge-fedora | Podman container based on Fedora 38 using 'master' |❌|✔|❌|✔|
| Fedora | fedora | Podman container based on Fedora 38 using latest release |❌|✔|❌|✔|
These containers offer a simple monitoring-mode service for the OneDrive Client for Linux.
The instructions below have been validated on:
* Fedora 35
The instructions below will utilise the 'latest' tag; however, this can be substituted for any of the other docker tags from the table above if desired.
Additionally, there are version-specific tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in.
**Note:** The below instructions for podman have only been tested as the root user while running the containers themselves as non-root users.
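If you wish to fetch the container image ahead of time, it can be pulled explicitly using the fully qualified image name, for example:
```bash
podman pull docker.io/driveone/onedrive:latest
```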
## Basic Setup
### 0. Install podman using your distribution platform's instructions if not already installed
1. Ensure that SELinux has been disabled on your system. A reboot may be required to ensure that this is correctly disabled.
2. Install Podman as required for your platform
3. Obtain your normal, non-root user UID and GID by using the `id` command or select another non-root id to run the container as
**NOTE:** The SELinux context needs to be configured or disabled for Podman to be able to write to the OneDrive host directory.
### 1.1 Prepare data volume
The container requires 2 Podman volumes:
* Config Volume
* Data Volume
The first volume is for your data folder and is created in the next step. This volume needs to be a path to a directory on your local filesystem, and this is where your data will be stored from OneDrive. Keep in mind that:
* The owner of this specified folder must not be root
* Podman will attempt to change the permissions of the volume to the user the container is configured to run as
**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Podman container will fail to start with the following error message:
```bash
ROOT level privileges prohibited!
```
### 1.2 Prepare config volume
Although not required, you can prepare the config volume before starting the container. Otherwise it will be created automatically during initial startup of the container.
Create the config volume with the following command:
```bash
podman volume create onedrive_conf
```
This will create a podman volume labeled `onedrive_conf`, where all configuration of your onedrive account will be stored. You can add a custom config file and other things later.
### 2. First run
The 'onedrive' client within the container needs to be authorized with your Microsoft account. This is achieved by initially running podman in interactive mode.
Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
It is a requirement that the container be run using a non-root UID and GID; you must insert a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`).
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
export ONEDRIVE_UID=1000
export ONEDRIVE_GID=1000
mkdir -p ${ONEDRIVE_DATA_DIR}
podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
-v onedrive_conf:/onedrive/conf:U,Z \
-v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \
driveone/onedrive:latest
```
**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the podman container
**If you plan to use podman's built-in auto-updating of container images described in step 5, you must pass an additional argument to set a label during the first run.**
**Important:** In some scenarios, 'podman' sets the configuration and data directories to a different UID & GID as specified. To resolve this situation, you must run 'podman' with the `--userns=keep-id` flag to ensure 'podman' uses the UID and GID as specified.
The run command would instead look as follows:
```
podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
-v onedrive_conf:/onedrive/conf:U,Z \
-v "onedrive-test-data:/onedrive/data:U,Z" \
-e PODMAN=1 \
--label "io.containers.autoupdate=image"
driveone/onedrive:latest
```
When the Podman container successfully starts:
* You will be asked to open a specific link using your web browser
* Login to your Microsoft Account and give the application the permission
* After giving the permission, you will be redirected to a blank page
* Copy the URI of the blank page into the application prompt to authorise the application
Once the 'onedrive' application is authorised, the client will automatically start monitoring your `ONEDRIVE_DATA_DIR` for data changes to be uploaded to OneDrive. Files stored on OneDrive will be downloaded to this location.
If the client is working as expected, you can detach from the container with Ctrl+p, Ctrl+q.
### 4. Podman Container Status, stop, and restart
Check if the monitor service is running
```bash
podman ps -f name=onedrive
```
Show monitor run logs
```bash
podman logs onedrive
```
Stop running monitor
```bash
podman stop onedrive
```
Resume monitor
```bash
podman start onedrive
```
Remove onedrive container
```bash
podman rm -f onedrive
```
## Advanced Setup
### 5. Systemd Service & Auto Updating
Podman supports running containers as a systemd service and also auto updating of the container images. Using the existing running container you can generate a systemd unit file to be installed by the **root** user. To have your container image auto-update with podman, it must first be created with the label `"io.containers.autoupdate=image"` mentioned in step 2.
```
cd /tmp
podman generate systemd --new --restart-policy on-failure --name -f onedrive
/tmp/container-onedrive.service
# copy the generated systemd unit file to the systemd path and reload the daemon
cp -Z /tmp/container-onedrive.service /usr/lib/systemd/system
systemctl daemon-reload
#optionally enable it to startup on boot
systemctl enable container-onedrive.service
#check status
systemctl status container-onedrive
#start/stop/restart container as a systemd service
systemctl stop container-onedrive
systemctl start container-onedrive
```
To update the image using podman (Ad-hoc)
```
podman auto-update
```
To update the image using systemd (Automatic/Scheduled)
```
# Enable the podman-auto-update.timer service at system start:
systemctl enable podman-auto-update.timer
# Start the service
systemctl start podman-auto-update.timer
# Containers with the autoupdate label will be updated on the next scheduled timer
systemctl list-timers --all
```
### 6. Edit the config
The 'onedrive' client will run with a default configuration; however, you can change this default configuration by placing a custom config file in the `onedrive_conf` podman volume. First, download the default config from [here](https://raw.githubusercontent.com/abraunegg/onedrive/master/config)
Then put it into your onedrive_conf volume path, which can be found with:
```bash
podman volume inspect onedrive_conf
```
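For example, with a root-run Podman installation the volume data typically resides under the path shown below; this exact path is an assumption, so always use the `Mountpoint` value reported by the inspect command:
```bash
# Illustrative path - substitute the 'Mountpoint' reported by 'podman volume inspect onedrive_conf'
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O /var/lib/containers/storage/volumes/onedrive_conf/_data/config
```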
Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first.
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)
### 7. Sync multiple accounts
There are many ways to do this; the easiest is probably to:
1. Create a second podman config volume (replace `Work` with your desired name): `podman volume create onedrive_conf_Work`
2. And start a second podman monitor container (again replace `Work` with your desired name):
```
export ONEDRIVE_DATA_DIR_WORK="/home/abraunegg/OneDriveWork"
mkdir -p ${ONEDRIVE_DATA_DIR_WORK}
podman run -it --restart unless-stopped --name onedrive_work \
-v onedrive_conf_Work:/onedrive/conf \
-v "${ONEDRIVE_DATA_DIR_WORK}:/onedrive/data" \
--user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
driveone/onedrive:latest
```
## Environment Variables
| Variable | Purpose | Sample Value |
| ---------------- | --------------------------------------------------- |:-------------:|
| <B>ONEDRIVE_UID</B> | UserID (UID) to run as | 1000 |
| <B>ONEDRIVE_GID</B> | GroupID (GID) to run as | 1000 |
| <B>ONEDRIVE_VERBOSE</B> | Controls "--verbose" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_DEBUG</B> | Controls "--verbose --verbose" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_DEBUG_HTTPS</B> | Controls "--debug-https" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_RESYNC</B> | Controls "--resync" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_DOWNLOADONLY</B> | Controls "--download-only" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_UPLOADONLY</B> | Controls "--upload-only" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_NOREMOTEDELETE</B> | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_LOGOUT</B> | Controls "--logout" switch. Default is 0 | 1 |
| <B>ONEDRIVE_REAUTH</B> | Controls "--reauth" switch. Default is 0 | 1 |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_DISPLAY_CONFIG</B> | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_SINGLE_DIRECTORY</B> | Controls "--single-directory" option. Default = "" | "mydir" |
### Usage Examples
**Verbose Output:**
```bash
podman run -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:latest
```
**Debug Output:**
```bash
podman run -e ONEDRIVE_DEBUG=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:latest
```
**Perform a --resync:**
```bash
podman run -e ONEDRIVE_RESYNC=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:latest
```
**Perform a --resync and --verbose:**
```bash
podman run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:latest
```
**Perform a --logout and re-authenticate:**
```bash
podman run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:latest
```
## Build instructions
### Building a custom Podman image
You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive):
```bash
git clone https://github.com/abraunegg/onedrive
cd onedrive
podman build . -t local-onedrive -f contrib/docker/Dockerfile
```
There are alternate, smaller images available by building
Dockerfile-debian or Dockerfile-alpine. These [multi-stage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/)
Dockerfiles require at least Docker version 17.05.
#### How to build and run a custom Podman image based on Debian
``` bash
podman build . -t local-onedrive-debian -f contrib/docker/Dockerfile-debian
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" local-onedrive-debian:latest
```
#### How to build and run a custom Podman image based on Alpine Linux
``` bash
podman build . -t local-onedrive-alpine -f contrib/docker/Dockerfile-alpine
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" local-onedrive-alpine:latest
```
#### How to build and run a custom Podman image for ARMHF (Raspberry Pi)
Compatible with:
* Raspberry Pi
* Raspberry Pi 2
* Raspberry Pi Zero
* Raspberry Pi 3
* Raspberry Pi 4
``` bash
podman build . -t local-onedrive-armhf -f contrib/docker/Dockerfile-debian
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" local-onedrive-armhf:latest
```
#### How to build and run a custom Podman image for AARCH64 Platforms
``` bash
podman build . -t local-onedrive-aarch64 -f contrib/docker/Dockerfile-debian
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" local-onedrive-aarch64:latest
```

65
docs/privacy-policy.md Normal file
View file

@ -0,0 +1,65 @@
# Privacy Policy
Effective Date: May 16 2018
## Introduction
This Privacy Policy outlines how OneDrive Client for Linux ("we," "our," or "us") collects, uses, and protects information when you use our software ("OneDrive Client for Linux"). We respect your privacy and are committed to ensuring the confidentiality and security of any information you provide while using the Software.
## Information We Do Not Collect
We want to be transparent about the fact that we do not collect any personal data, usage data, or tracking data through the Software. This means:
1. **No Personal Data**: We do not collect any information that can be used to personally identify you, such as your name, email address, phone number, or physical address.
2. **No Usage Data**: We do not collect data about how you use the Software, such as the features you use, the duration of your sessions, or any interactions within the Software.
3. **No Tracking Data**: We do not use cookies or similar tracking technologies to monitor your online behavior or track your activities across websites or apps.
## How We Use Your Information
Since we do not collect any personal, usage, or tracking data, there is no information for us to use for any purpose.
## Third-Party Services
The Software may include links to third-party websites or services, but we do not have control over the privacy practices or content of these third-party services. We encourage you to review the privacy policies of any third-party services you access through the Software.
## Children's Privacy
Since we do not collect any personal, usage, or tracking data, there is no restriction on the use of this application by anyone under the age of 18.
## Information You Choose to Share
While we do not collect personal data, usage data, or tracking data through the Software, there may be instances where you voluntarily choose to share information with us, particularly when submitting bug reports. These bug reports may contain sensitive information such as account details, file names, and directory names. It's important to note that these details are included in the logs and debug logs solely for the purpose of diagnosing and resolving technical issues with the Software.
We want to emphasize that, even in these cases, we do not have access to your actual data. The logs and debug logs provided in bug reports are used exclusively for technical troubleshooting and debugging purposes. We take measures to treat this information with the utmost care, and it is only accessible to our technical support and development teams. We do not use this information for any other purpose, and we have strict security measures in place to protect it.
## Protecting Your Sensitive Data
We are committed to safeguarding your sensitive data and maintaining its confidentiality. To ensure its protection:
1. **Limited Access**: Only authorized personnel within our technical support and development teams have access to the logs and debug logs containing sensitive data, and they are trained in handling this information securely.
2. **Data Encryption**: We use industry-standard encryption protocols to protect the transmission and storage of sensitive data.
3. **Data Retention**: We retain bug report data for a limited time necessary for resolving the reported issue. Once the issue is resolved, we promptly delete or anonymize the data.
4. **Security Measures**: We employ robust security measures to prevent unauthorized access, disclosure, or alteration of sensitive data.
By submitting a bug report, you acknowledge and consent to the inclusion of sensitive information in logs and debug logs for the sole purpose of addressing technical issues with the Software.
## Your Responsibilities
While we take measures to protect your sensitive data, it is essential for you to exercise caution when submitting bug reports. Please refrain from including any sensitive or personally identifiable information that is not directly related to the technical issue you are reporting. You have the option to redact or obfuscate sensitive details in bug reports to further protect your data.
## Changes to this Privacy Policy
We may update this Privacy Policy from time to time to reflect changes in our practices or for other operational, legal, or regulatory reasons. We will notify you of any material changes by posting the updated Privacy Policy on our website or through the Software. We encourage you to review this Privacy Policy periodically.
## Contact Us
If you have any questions or concerns about this Privacy Policy or our privacy practices, please contact us at support@mynas.com.au or via GitHub (https://github.com/abraunegg/onedrive)
## Conclusion
By using the Software, you agree to the terms outlined in this Privacy Policy. If you do not agree with any part of this policy, please discontinue the use of the Software.

View file

@ -0,0 +1,228 @@
# How to configure OneDrive SharePoint Shared Library sync
**WARNING:** Several users have reported files being overwritten causing data loss as a result of using this client with SharePoint Libraries when running as a systemd service.
When this has been investigated, the following has been noted as potential root causes:
* File indexing application such as Baloo File Indexer or Tracker3 constantly indexing your OneDrive data
* The use of WPS Office and how it 'saves' files by deleting the existing item and replacing it with the saved data
Additionally, there could be an as-yet-unknown bug with the client; however, all debugging and data provided previously show that a process 'external' to the 'onedrive' application modifies the files, triggering the undesirable upload to occur.
**Possible Preventative Actions:**
* Disable all File Indexing for your SharePoint Library data. It is out of scope to detail on how you should do this.
* Disable using a systemd service for syncing your SharePoint Library data.
* Do not use WPS Office to edit your documents. Use OpenOffice or LibreOffice as these do not exhibit the same 'delete to save' action that WPS Office has.
Additionally, please use caution when using this client with SharePoint.
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Process Overview
Syncing a OneDrive SharePoint library requires additional configuration for your 'onedrive' client:
1. Login to OneDrive and under 'Shared Libraries' obtain the shared library name
2. Query that shared library name using the client to obtain the required configuration details
3. Create a unique local folder which will be the SharePoint Library 'root'
4. Configure the client's config file with the required 'drive_id'
5. Test the configuration using '--dry-run'
6. Sync the SharePoint Library as required
**Note:** The `--get-O365-drive-id` process below requires a fully configured 'onedrive' client so that the applicable Drive ID for the given Office 365 SharePoint Shared Library can be determined. It is highly recommended that you do not use the application 'default' configuration directory for any SharePoint Site, and instead configure a separate configuration directory for each site you wish to use.
## 1. Listing available OneDrive SharePoint Libraries
Login to the OneDrive web interface and determine which shared library you wish to configure the client for:
![shared_libraries](./images/SharedLibraries.jpg)
## 2. Query OneDrive API to obtain required configuration details
Run the following command using the 'onedrive' client to query the OneDrive API to obtain the required 'drive_id' of the SharePoint Library that you wish to sync:
```text
onedrive --get-O365-drive-id '<your site name to search>'
```
This will return something similar to the following:
```text
Configuration file successfully loaded
Configuring Global Azure AD Endpoints
Initializing the Synchronization Engine ...
Office 365 Library Name Query: <your site name to search>
-----------------------------------------------
Site Name: <your site name>
Library Name: <your library name>
drive_id: b!6H_y8B...xU5
Library URL: <your library URL>
-----------------------------------------------
```
If there are no matches to the site you are attempting to search, the following will be displayed:
```text
Configuration file successfully loaded
Configuring Global Azure AD Endpoints
Initializing the Synchronization Engine ...
Office 365 Library Name Query: blah
ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site.
The following SharePoint site names were returned:
* <site name 1>
* <site name 2>
...
* <site name X>
```
This list of site names can be used as a basis to determine the correct name of the site you are searching for.
## 3. Create a new configuration directory and sync location for this SharePoint Library
Create a new configuration directory for this SharePoint Library in the following manner:
```text
mkdir ~/.config/SharePoint_My_Library_Name
```
Create a new local folder to store the SharePoint Library data in:
```text
mkdir ~/SharePoint_My_Library_Name
```
**Note:** Do not use spaces in the directory name, use '_' as a replacement
## 4. Configure SharePoint Library config file with the required 'drive_id' & 'sync_dir' options
Download a copy of the default configuration file from GitHub and save it in the directory created above:
```text
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/SharePoint_My_Library_Name/config
```
Update your 'onedrive' configuration file (`~/.config/SharePoint_My_Library_Name/config`) with the local folder where you will store your data:
```text
sync_dir = "~/SharePoint_My_Library_Name"
```
Update your 'onedrive' configuration file (`~/.config/SharePoint_My_Library_Name/config`) with the 'drive_id' value obtained in the steps above:
```text
drive_id = "insert the drive_id value from above here"
```
The OneDrive client will now be configured to sync this SharePoint shared library to your local system and the location you have configured.
**Note:** After changing `drive_id`, you must perform a full re-synchronization by adding `--resync` to your existing command line.
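For example, using the configuration created above:
```text
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --resync
```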
## 5. Validate and Test the configuration
Validate your new configuration using the `--display-config` option to validate you have configured the application correctly:
```text
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --display-config
```
Test your new configuration using the `--dry-run` option to validate the application configuration:
```text
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose --dry-run
```
**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration.
## 6. Sync the SharePoint Library as required
Sync the SharePoint Library to your system with either `--synchronize` or `--monitor` operations:
```text
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose
```
```text
onedrive --confdir="~/.config/SharePoint_My_Library_Name" --monitor --verbose
```
**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration.
## 7. Enable custom systemd service for SharePoint Library
Systemd can be used to automatically run this configuration in the background; however, a unique systemd service will need to be set up for this SharePoint Library instance.
In order to automatically start syncing each SharePoint Library, you will need to create a service file for each SharePoint Library, copied from the applicable 'systemd folder' where the existing systemd service file resides:
* RHEL / CentOS: `/usr/lib/systemd/system`
* Others: `/usr/lib/systemd/user` and `/lib/systemd/system`
### Step 1: Create a new systemd service file
#### Red Hat Enterprise Linux, CentOS Linux
Copy the required service file to a new name:
```text
sudo cp /usr/lib/systemd/system/onedrive.service /usr/lib/systemd/system/onedrive-SharePoint_My_Library_Name.service
```
or
```text
sudo cp /usr/lib/systemd/system/onedrive@.service /usr/lib/systemd/system/onedrive-SharePoint_My_Library_Name@.service
```
#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
Copy the required service file to a new name:
```text
sudo cp /usr/lib/systemd/user/onedrive.service /usr/lib/systemd/user/onedrive-SharePoint_My_Library_Name.service
```
or
```text
sudo cp /lib/systemd/system/onedrive@.service /lib/systemd/system/onedrive-SharePoint_My_Library_Name@.service
```
### Step 2: Edit new systemd service file
Edit the new systemd file, updating the line beginning with `ExecStart` so that the confdir mirrors the one you used above:
```text
ExecStart=/usr/local/bin/onedrive --monitor --confdir="/full/path/to/config/dir"
```
Example:
```text
ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/SharePoint_My_Library_Name"
```
**Note:** When running the client manually, `--confdir="~/.config/......"` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded.
### Step 3: Enable the new systemd service
Once the file is correctly edited, you can enable the new systemd service using the following commands.
#### Red Hat Enterprise Linux, CentOS Linux
```text
systemctl enable onedrive-SharePoint_My_Library_Name
systemctl start onedrive-SharePoint_My_Library_Name
```
#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
```text
systemctl --user enable onedrive-SharePoint_My_Library_Name
systemctl --user start onedrive-SharePoint_My_Library_Name
```
or
```text
systemctl --user enable onedrive-SharePoint_My_Library_Name@myusername.service
systemctl --user start onedrive-SharePoint_My_Library_Name@myusername.service
```
### Step 4: Viewing systemd status and logs for the custom service
#### Viewing systemd service status - Red Hat Enterprise Linux, CentOS Linux
```text
systemctl status onedrive-SharePoint_My_Library_Name
```
#### Viewing systemd service status - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
```text
systemctl --user status onedrive-SharePoint_My_Library_Name
```
#### Viewing journalctl systemd logs - Red Hat Enterprise Linux, CentOS Linux
```text
journalctl --unit=onedrive-SharePoint_My_Library_Name -f
```
#### Viewing journalctl systemd logs - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
```text
journalctl --user --unit=onedrive-SharePoint_My_Library_Name -f
```
### Step 5: (Optional) Run custom systemd service at boot without user login
In some cases it may be desirable for the systemd service to start without having to log in as your 'user'.
All the systemd steps above that utilise the `--user` option will run the systemd service as your particular user. As such, the systemd service will not start unless you actually log in to your system.
To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will start without you having to log in to your system:
```text
loginctl enable-linger <your_user_name>
```
Example:
```text
alex@ubuntu-headless:~$ loginctl enable-linger alex
```
## 8. Configuration for a SharePoint Library is complete
The 'onedrive' client configuration for this particular SharePoint Library is now complete.
# How to configure multiple OneDrive SharePoint Shared Library sync
Create a new configuration as per the process above. Repeat these steps for each SharePoint Library that you wish to use.

54
docs/terms-of-service.md Normal file
View file

@ -0,0 +1,54 @@
# OneDrive Client for Linux - Software Service Terms of Service
## 1. Introduction
These Terms of Service ("Terms") govern your use of the OneDrive Client for Linux ("Application") software and related Microsoft OneDrive services ("Service") provided by Microsoft. By accessing or using the Service, you agree to comply with and be bound by these Terms. If you do not agree to these Terms, please do not use the Service.
## 2. License Compliance
The OneDrive Client for Linux software is licensed under the GNU General Public License, version 3.0 (the "GPLv3"). Your use of the software must comply with the terms and conditions of the GPLv3. A copy of the GPLv3 can be found here: https://www.gnu.org/licenses/gpl-3.0.en.html
## 3. Use of the Service
### 3.1. Access and Accounts
You may need to create an account or provide personal information to access certain features of the Service. You are responsible for maintaining the confidentiality of your account information and are solely responsible for all activities that occur under your account.
### 3.2. Prohibited Activities
You agree not to:
- Use the Service in any way that violates applicable laws or regulations.
- Use the Service to engage in any unlawful, harmful, or fraudulent activity.
- Use the Service in any manner that disrupts, damages, or impairs the Service.
## 4. Intellectual Property
The OneDrive Client for Linux software is subject to the GPLv3, and you must respect all copyrights, trademarks, and other intellectual property rights associated with the software. Any contributions you make to the software must also comply with the GPLv3.
## 5. Disclaimer of Warranties
The OneDrive Client for Linux software is provided "as is" without any warranties, either expressed or implied. We do not guarantee that the use of the Application will be error-free or uninterrupted.
Microsoft is not responsible for OneDrive Client for Linux. Any issues or problems with OneDrive Client for Linux should be raised on GitHub at https://github.com/abraunegg/onedrive or email support@mynas.com.au
OneDrive Client for Linux is not responsible for the Microsoft OneDrive Service or the Microsoft Graph API Service that this Application utilizes. Any issue with either Microsoft OneDrive or Microsoft Graph API should be raised with Microsoft via their support channel in your country.
## 6. Limitation of Liability
To the fullest extent permitted by law, we shall not be liable for any direct, indirect, incidental, special, consequential, or punitive damages, or any loss of profits or revenues, whether incurred directly or indirectly, or any loss of data, use, goodwill, or other intangible losses, resulting from (a) your use or inability to use the Service, or (b) any other matter relating to the Service.
This limitation of liability explicitly relates to the use of the OneDrive Client for Linux software and does not affect your rights under the GPLv3.
## 7. Changes to Terms
We reserve the right to update or modify these Terms at any time without prior notice. Any changes will be effective immediately upon posting on GitHub. Your continued use of the Service after the posting of changes constitutes your acceptance of such changes. Changes can be reviewed on GitHub.
## 8. Governing Law
These Terms shall be governed by and construed in accordance with the laws of Australia, without regard to its conflict of law principles.
## 9. Contact Us
If you have any questions or concerns about these Terms, please contact us at https://github.com/abraunegg/onedrive or email support@mynas.com.au

View file

@ -0,0 +1,383 @@
# Installation of 'onedrive' package on Debian and Ubuntu
This document covers the appropriate steps to install the 'onedrive' client using the provided packages for Debian and Ubuntu.
#### Important information for all Ubuntu and Ubuntu based distribution users:
This information is specifically for the following platforms and distributions:
* Lubuntu
* Linux Mint
* POP OS
* Peppermint OS
* Raspbian
* Ubuntu
Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Ubuntu Universe packages are out-of-date, are not supported, and should not be used.
## Determine which instructions to use
Ubuntu and its clones are based on various different releases; thus, you must use the correct instructions below, otherwise you may run into package dependency issues and be unable to install the client.
### Step 1: Remove any configured PPA and associated 'onedrive' package and systemd service files
Many Internet 'help' pages provide inconsistent details on how to install the OneDrive Client for Linux. A number of these websites continue to point users to install the client via the yann1ck PPA repository; however, this PPA no longer exists and should not be used.
To remove the PPA repository and the older client, perform the following actions:
```text
sudo apt remove onedrive
sudo add-apt-repository --remove ppa:yann1ck/onedrive
```
Additionally, Ubuntu and its clones have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package, so that the client will automatically run once it has been authenticated. This systemd entry is erroneous and needs to be removed.
```
Created symlink /etc/systemd/user/default.target.wants/onedrive.service → /usr/lib/systemd/user/onedrive.service.
```
To remove this symbolic link, run the following command:
```
sudo rm /etc/systemd/user/default.target.wants/onedrive.service
```
### Step 2: Ensure your system is up-to-date
Use a script similar to the following to ensure your system is updated correctly:
```text
#!/bin/bash
rm -rf /var/lib/dpkg/lock-frontend
rm -rf /var/lib/dpkg/lock
apt-get update
apt-get upgrade -y
apt-get dist-upgrade -y
apt-get autoremove -y
apt-get autoclean -y
```
Run this script as 'root' by using `su -` to elevate to 'root'. Example below:
```text
Welcome to Ubuntu 20.04.1 LTS (GNU/Linux 5.4.0-48-generic x86_64)
* Documentation: https://help.ubuntu.com
* Management: https://landscape.canonical.com
* Support: https://ubuntu.com/advantage
425 updates can be installed immediately.
208 of these updates are security updates.
To see these additional updates run: apt list --upgradable
Your Hardware Enablement Stack (HWE) is supported until April 2025.
Last login: Thu Jan 20 14:21:48 2022 from my.ip.address
alex@ubuntu-20-LTS:~$ su -
Password:
root@ubuntu-20-LTS:~# ls -la
total 28
drwx------ 3 root root 4096 Oct 10 2020 .
drwxr-xr-x 20 root root 4096 Oct 10 2020 ..
-rw------- 1 root root 175 Jan 20 14:23 .bash_history
-rw-r--r-- 1 root root 3106 Dec 6 2019 .bashrc
drwx------ 2 root root 4096 Apr 23 2020 .cache
-rw-r--r-- 1 root root 161 Dec 6 2019 .profile
-rwxr-xr-x 1 root root 174 Oct 10 2020 update-os.sh
root@ubuntu-20-LTS:~# cat update-os.sh
#!/bin/bash
rm -rf /var/lib/dpkg/lock-frontend
rm -rf /var/lib/dpkg/lock
apt-get update
apt-get upgrade -y
apt-get dist-upgrade -y
apt-get autoremove -y
apt-get autoclean -y
root@ubuntu-20-LTS:~# ./update-os.sh
Hit:1 http://au.archive.ubuntu.com/ubuntu focal InRelease
Hit:2 http://au.archive.ubuntu.com/ubuntu focal-updates InRelease
Hit:3 http://au.archive.ubuntu.com/ubuntu focal-backports InRelease
Hit:4 http://security.ubuntu.com/ubuntu focal-security InRelease
Reading package lists... 96%
...
Sourcing file `/etc/default/grub'
Sourcing file `/etc/default/grub.d/init-select.cfg'
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-5.13.0-27-generic
Found initrd image: /boot/initrd.img-5.13.0-27-generic
Found linux image: /boot/vmlinuz-5.4.0-48-generic
Found initrd image: /boot/initrd.img-5.4.0-48-generic
Found memtest86+ image: /boot/memtest86+.elf
Found memtest86+ image: /boot/memtest86+.bin
done
Removing linux-modules-5.4.0-26-generic (5.4.0-26.30) ...
Processing triggers for libc-bin (2.31-0ubuntu9.2) ...
Reading package lists... Done
Building dependency tree
Reading state information... Done
root@ubuntu-20-LTS:~#
```
Reboot your system after running this process before continuing with Step 3.
```text
reboot
```
### Step 3: Determine what your OS is based on
Determine what your OS is based on. To do this, run the following command:
```text
lsb_release -a
```
**Example:**
```text
alex@ubuntu-system:~$ lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 22.04 LTS
Release: 22.04
Codename: jammy
```
### Step 4: Pick the correct instructions to use
If required, review the table below based on your 'lsb_release' information to pick the appropriate instructions to use:
| Release & Codename | Instructions to use |
|--------------------|---------------------|
| Linux Mint 19.x | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Linux Mint 21.x |
| Linux Mint 20.x | Use [Ubuntu 20.04](#distribution-ubuntu-2004) instructions below |
| Linux Mint 21.x | Use [Ubuntu 22.04](#distribution-ubuntu-2204) instructions below |
| Debian 9 | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Debian 11 |
| Debian 10 | You must build from source or upgrade your Operating System to Debian 11 |
| Debian 11 | Use [Debian 11](#distribution-debian-11) instructions below |
| Debian 12 | Use [Debian 12](#distribution-debian-12) instructions below |
| Raspbian GNU/Linux 10 | You must build from source or upgrade your Operating System to Raspbian GNU/Linux 11 |
| Raspbian GNU/Linux 11 | Use [Debian 11](#distribution-debian-11) instructions below |
| Raspbian GNU/Linux 12 | Use [Debian 12](#distribution-debian-12) instructions below |
| Ubuntu 18.04 / Bionic | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Ubuntu 22.x |
| Ubuntu 20.04 / Focal | Use [Ubuntu 20.04](#distribution-ubuntu-2004) instructions below |
| Ubuntu 21.04 / Hirsute | Use [Ubuntu 21.04](#distribution-ubuntu-2104) instructions below |
| Ubuntu 21.10 / Impish | Use [Ubuntu 21.10](#distribution-ubuntu-2110) instructions below |
| Ubuntu 22.04 / Jammy | Use [Ubuntu 22.04](#distribution-ubuntu-2204) instructions below |
| Ubuntu 22.10 / Kinetic | Use [Ubuntu 22.10](#distribution-ubuntu-2210) instructions below |
| Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below |
## Distribution Package Install Instructions
### Distribution: Debian 11
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|✔|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_11/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_11/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Debian 12
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|✔|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_12/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_12/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 20.04
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/Release.key | sudo apt-key add -
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo 'deb https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/ ./' | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 21.04
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 21.10
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 22.04
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 22.10
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 23.04
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|✔|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
## Known Issues with Installing from the above packages
### 1. The client may segfault | core-dump when exiting
When the client is run in `--monitor` mode manually, or when using the systemd service, the client may segfault on exit.
This issue is caused by the way the 'onedrive' packages are built: the distribution LDC package and the default distribution compiler options are the root cause of this issue. Refer to: https://bugs.launchpad.net/ubuntu/+source/ldc/+bug/1895969
**Additional references:**
* https://github.com/abraunegg/onedrive/issues/1053
* https://github.com/abraunegg/onedrive/issues/1609
**Resolution Options:**
* Uninstall the package and build the client from source

848
docs/usage.md Normal file
View file

@ -0,0 +1,848 @@
# Using the OneDrive Client for Linux
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Table of Contents
TABLE OF CONTENTS GOES HERE
## Important Notes
### Upgrading from the 'skilion' Client
The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system.
Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below:
**Invalid 'skilion' configuration:**
```text
skip_file = ".*|~*"
```
**Minimum valid configuration:**
```text
skip_file = "~*"
```
**Default valid configuration:**
```text
skip_file = "~*|.~*|*.tmp|*.swp|*.partial"
```
Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed:
```text
ERROR: Invalid skip_file entry '.*' detected
```
### Naming Conventions for Local Files and Folders
In the synchronisation directory, it's crucial to adhere to the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file) for your files and folders.
If you happen to have two files with the same names but different capitalisation, our application will make an effort to handle it. However, when there's a clash within the namespace, the file causing the conflict won't be synchronised. Please note that this behaviour is intentional and won't be addressed.
### Compatibility with curl
If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1.
For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well.
However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client.
If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution.
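To confirm which curl version your system provides, you can run:
```text
curl --version
```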
## First Steps
### Authorise the Application with Your Microsoft OneDrive Account
Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches.
Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department.
When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application.
**Example:**
```text
[user@hostname ~]$ onedrive
Authorise this app by visiting:
https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code=<redacted>
The application has been successfully authorised, but no additional command switches were provided.
Please use 'onedrive --help' for further assistance on how to run this application.
```
### Display Your Applicable Runtime Configuration
To verify the configuration that the application will use, use the following command:
```text
onedrive --display-config
```
This command will display all the relevant runtime interpretations of the options and configurations you are using. An example output is as follows:
```text
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
onedrive version = vX.Y.Z-A-bcdefghi
Config path = /home/user/.config/onedrive
Config file found in config path = true
Config option 'drive_id' =
Config option 'sync_dir' = ~/OneDrive
...
Config option 'webhook_enabled' = false
```
### Understanding OneDrive Client for Linux Operational Modes
There are two modes of operation when using the client:
1. Standalone sync mode that performs a single sync action against Microsoft OneDrive.
2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive.
#### Standalone Synchronisation Operational Mode (Standalone Mode)
This method of use can be employed by issuing the following option to the client:
```text
onedrive --sync
```
For simplicity, this can be reduced to the following:
```text
onedrive -s
```
#### Ongoing Synchronisation Operational Mode (Monitor Mode)
This method of use can be utilised by issuing the following option to the client:
```text
onedrive --monitor
```
For simplicity, this can be shortened to the following:
```text
onedrive -m
```
**Note:** This method of use is typically employed when enabling a systemd service to run the application in the background.
Two common errors can occur when using monitor mode:
* Initialisation failure
* Unable to add a new inotify watch
Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low:
* `fs.file-max`
* `fs.inotify.max_user_watches`
To determine what the existing values are on your system, use the following commands:
```text
sysctl fs.file-max
sysctl fs.inotify.max_user_watches
```
Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are:
```text
...
All application operations will be performed in: /home/user/OneDrive
OneDrive synchronisation interval (seconds): 300
Maximum allowed open files: 393370 <-- This is the fs.file-max value
Maximum allowed inotify watches: 29374 <-- This is the fs.inotify.max_user_watches value
Initialising filesystem inotify monitoring ...
...
```
To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir':
```text
cd /path/to/your/sync/dir
ls -laR | wc -l
```
To make a change to these variables using your file and folder count, use the following process:
```text
sudo sysctl fs.file-max=<new_value>
sudo sysctl fs.inotify.max_user_watches=<new_value>
```
Once these values are changed, you will need to restart your client so that the new values are detected and used.
To make these changes permanent on your system, refer to your OS reference documentation.
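On many distributions, these values can be made persistent with a drop-in file under `/etc/sysctl.d/`. The following is a sketch only; the file name and the values shown are placeholders, so substitute the values you calculated above:
```text
echo "fs.file-max = 524288" | sudo tee /etc/sysctl.d/90-onedrive.conf
echo "fs.inotify.max_user_watches = 524288" | sudo tee -a /etc/sysctl.d/90-onedrive.conf
sudo sysctl -p /etc/sysctl.d/90-onedrive.conf
```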
### Increasing application logging level
When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be:
```text
onedrive --sync --verbose
```
For simplicity, this can be shortened to the following:
```
onedrive -s -v
```
Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem.
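For example, to run a standalone sync with debug logging output enabled:
```text
onedrive --sync --verbose --verbose
```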
### Testing your configuration
You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred. For example:
```text
onedrive --sync --verbose --dry-run
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
Using 'user' Config Dir: /home/user/.config/onedrive
DRY-RUN Configured. Output below shows what 'would' have occurred.
DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations
DRY RUN: Not creating backup config file as --dry-run has been used
DRY RUN: Not updating hash files as --dry-run has been used
Checking Application Version ...
Attempting to initialise the OneDrive API ...
Configuring Global Azure AD Endpoints
The OneDrive API was initialised successfully
Opening the item database ...
Sync Engine Initialised with new Onedrive API instance
Application version: vX.Y.Z-A-bcdefghi
Account Type: <account-type>
Default Drive ID: <drive-id>
Default Root ID: <root-id>
Remaining Free Space: 1058488129 KB
All application operations will be performed in: /home/user/OneDrive
Fetching items from the OneDrive API for Drive ID: <drive-id> ..
...
Performing a database consistency and integrity check on locally stored data ...
Processing DB entries for this Drive ID: <drive-id>
Processing ~/OneDrive
The directory has not changed
...
Scanning local filesystem '~/OneDrive' for new data to upload ...
...
Performing a final true-up scan of online data from Microsoft OneDrive
Fetching items from the OneDrive API for Drive ID: <drive-id> ..
Sync with Microsoft OneDrive is complete
```
### Performing a sync with Microsoft OneDrive
By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option.
After authorising the application, a sync of your data can be performed by running:
```text
onedrive --sync
```
This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location.
If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command:
```text
onedrive --sync --local-first
```
### Performing a single directory synchronisation with Microsoft OneDrive
In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. To do this, use the following command:
```text
onedrive --sync --single-directory '<dir_name>'
```
**Example:** If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'`
### Performing a 'one-way' download synchronisation with Microsoft OneDrive
In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command:
```text
onedrive --sync --download-only
```
This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files deleted online will remain locally and will not be removed.
However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command:
```text
onedrive --sync --download-only --cleanup-local-files
```
### Performing a 'one-way' upload synchronisation with Microsoft OneDrive
In some cases, it may be desirable to 'upload only' to Microsoft OneDrive. To do this, use the following command:
```text
onedrive --sync --upload-only
```
**Note:** If a file or folder is present on Microsoft OneDrive, which was previously synchronised and now does not exist locally, that item will be removed from Microsoft OneDrive online. If the data on Microsoft OneDrive should be kept, the following should be used:
```text
onedrive --sync --upload-only --no-remote-delete
```
**Note:** The operation of 'upload only' does not request data from Microsoft OneDrive about what 'other' data exists online. The client only knows about the data that 'this' client uploaded, thus any files or folders created or uploaded outside of this client will remain untouched online.
### Performing a selective synchronisation via 'sync_list' file
Selective synchronisation allows you to sync only specific files and directories.
To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`).
Important points to understand before using 'sync_list':
* 'sync_list' excludes _everything_ by default on OneDrive.
* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**.
* Order exclusions before inclusions, so that anything _specifically included_ is included.
* How and where you place your `/` matters for excludes and includes in subdirectories.
Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations.
Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns.
Here is an example of `sync_list`:
```text
# sync_list supports comments
#
# The ordering of entries is highly recommended - exclusions before inclusions
#
# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive
!Documents/temp*
#
# Exclude secret data folder in root directory only
!/Secret_data/*
#
# Include everything else in root directory
/*
#
# Include my Backup folder(s) or file(s) anywhere on OneDrive
Backup
#
# Include my Backup folder in root
/Backup/
#
# Include Documents folder(s) anywhere in OneDrive
Documents/
#
# Include all PDF files in Documents folder(s), anywhere in OneDrive
Documents/*.pdf
#
# Include this single document in Documents folder(s), anywhere in OneDrive
Documents/latest_report.docx
#
# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive
Work/Project*
#
# Include all "notes.txt" files, anywhere in OneDrive
notes.txt
#
# Include /Blender in the ~/OneDrive root but not if elsewhere in OneDrive
/Blender
#
# Include these directories(or files) in 'Pictures' folder(s), that have a space in their name
Pictures/Camera Roll
Pictures/Saved Pictures
#
# Include these names if they match any file or folder
Cinema Soc
Codes
Textbooks
Year 2
```
The following are supported for pattern matching and exclusion rules:
* Use the `*` to wildcard select any characters to match for the item to be included
* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item
**Note:** When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement.
**Note:** After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync`
**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file:
```text
sync_root_files = "true"
```
This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file.
### Performing a --resync
If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration:
* drive_id
* sync_dir
* skip_file
* skip_dir
* skip_dotfiles
* skip_symlinks
* sync_business_shared_items
* Creating, Modifying or Deleting the 'sync_list' file
Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`.
When you use `--resync`, you'll encounter the following warning and advice:
```text
Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.'
This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss.
If in doubt, back up your local data before using --resync.
Are you sure you want to proceed with --resync? [Y/N]
```
To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue.
**Note:** It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as the default option.
**Note:** In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt.
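For example, a fully non-interactive resync in such an automated environment would be:
```text
onedrive --sync --resync --resync-auth
```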
### Performing a --force-sync without a --resync or changing your configuration
In some cases and situations, you may have configured the application to skip certain files and folders using the 'skip_file' and 'skip_dir' configuration options. You may then have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform two full `--resync` operations (one to apply a temporary configuration and another to revert it).
The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`.
To use this option, you must run the application manually in the following manner:
```text
onedrive --sync --single-directory '<directory_to_sync>' --force-sync <add any other options needed or required>
```
When using `--force-sync`, you'll encounter the following warning and advice:
```text
WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used
Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts.
By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync.
Are you sure you want to proceed with --force-sync [Y/N]
```
To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue.
### Enabling the Client Activity Log
When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client, allowing the client activity log of each user to be easily identified.
**Note:** You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed:
```text
ERROR: Unable to access /var/log/onedrive
ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access
ERROR: The requested client activity log will instead be located in your user's home directory
```
On many systems, this can be achieved by performing the following:
```text
sudo mkdir /var/log/onedrive
sudo chown root:users /var/log/onedrive
sudo chmod 0775 /var/log/onedrive
```
Additionally, you need to ensure that your user account is part of the 'users' group:
```
cat /etc/group | grep users
```
If your user is not part of this group, then you need to add your user to this group:
```
sudo usermod -a -G users <your-user-name>
```
If you need to make a group modification, you will need to log out of all sessions / SSH sessions and log in again to have the new group membership applied.
If the client is unable to write the client activity log, the following error message will be printed:
```text
ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log
ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account
ERROR: The requested client activity log will instead be located in your user's home directory
```
If you receive this error message, you will need to diagnose why your system cannot write to the specified file location.
#### Client Activity Log Example:
An example of a client activity log for the command `onedrive --sync --enable-logging` is below:
```text
2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints
2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance
2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive
2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ...
2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ...
2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ...
2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive
2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ...
2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete
```
An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below:
```text
2023-Sep-27 08:20:05.4600464 Checking Application Version ...
2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ...
2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints
2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully
2023-Sep-27 08:20:05.5238536 Opening the item database ...
2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance
2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi
2023-Sep-27 08:20:05.9227079 Account Type: <account-type>
2023-Sep-27 08:20:05.9227360 Default Drive ID: <redacted>
2023-Sep-27 08:20:05.9227550 Default Root ID: <redacted>
2023-Sep-27 08:20:05.9227862 Remaining Free Space: <space-available>
2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive
2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ...
2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: <redacted>
2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive
2023-Sep-27 08:20:06.2078739 The directory has not changed
2023-Sep-27 08:20:06.2079783 Processing Attachments
2023-Sep-27 08:20:06.2080071 The directory has not changed
2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx
2023-Sep-27 08:20:06.2082079 The file has not changed
2023-Sep-27 08:20:06.2082760 Processing Documents
2023-Sep-27 08:20:06.2083225 The directory has not changed
2023-Sep-27 08:20:06.2084284 Processing Documents/file.log
2023-Sep-27 08:20:06.2084886 The file has not changed
2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ...
2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files
2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive
2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete
```
#### Client Activity Log Differences
Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output:
**No user configuration file:**
```text
No user or system config file found, using application defaults
Using 'user' configuration path for application state data: /home/user/.config/onedrive
Using the following path to store the runtime application log: /var/log/onedrive
```
**User configuration file:**
```text
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
Using 'user' configuration path for application state data: /home/user/.config/onedrive
Using the following path to store the runtime application log: /var/log/onedrive
```
### GUI Notifications
If notification support has been compiled in (refer to GUI Notification Support in install.md .. ADD LINK LATER), the following events will trigger a GUI notification within the display manager session:
* Aborting a sync if .nosync file is found
* Skipping a particular item due to an invalid name
* Skipping a particular item due to an invalid symbolic link
* Skipping a particular item due to an invalid UTF sequence
* Skipping a particular item due to an invalid character encoding sequence
* Cannot create remote directory
* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length)
* Cannot delete remote file / folder
* Cannot move remote file / folder
* When a re-authentication is required
* When a new client version is available
* Files that fail to upload
* Files that fail to download
### Handling a Microsoft OneDrive Account Password Change
If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon next application run:
```text
AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '<date-and-timestamp>' and the TokensValidFrom date (before which tokens are not valid) for this user is '<date-and-timestamp>'.
ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token.
```
To re-authorise the client, follow the steps below:
1. If running the client as a system service (init.d or systemd), stop the applicable system service
2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. Please note, if you are using `--confdir` as part of your application runtime configuration, you must include this when telling the client to re-authenticate.
3. Restart the client if running as a system service or perform the standalone sync operation again
The application will now sync with OneDrive with the new credentials.
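For example, if a custom configuration directory is in use, the re-authentication command would look like the following (the path shown is illustrative only):
```text
onedrive --confdir="~/.config/my-onedrive" --reauth
```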
### Determining the synchronisation result
When the client has finished syncing without errors, the following will be displayed:
```
Sync with Microsoft OneDrive is complete
```
If any items failed to sync, the following will be displayed:
```
Sync with Microsoft OneDrive has completed, however there are items that failed to sync.
```
A list of the failed upload or download items will then be displayed to allow you to determine your next steps.
In order to fix the upload or download failures, you may need to re-try your command and perform a resync to ensure your system is correctly synced with your Microsoft OneDrive Account.
## Frequently Asked Configuration Questions
### How to change the default configuration of the client?
Configuration is determined by three layers, and applied in the following order:
* Application default values
* Values that are set in the configuration file
* Values that are passed in via the command line at application runtime. These values will override any configuration file set value.
The default application values provide a reasonable operational default, and additional configuration is entirely optional.
If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. Valid default directories for the config file are:
* `~/.config/onedrive`
* `/etc/onedrive`
**Example:** To download a copy of the config file, use the following:
```text
mkdir -p ~/.config/onedrive
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config
```
For full configuration options and CLI switches, please refer to application-config-options.md
### How to change where my data from Microsoft OneDrive is stored?
By default, your Microsoft OneDrive data is stored within your Home Directory under a directory called 'OneDrive'. This replicates as closely as possible where the Microsoft Windows OneDrive client stores data.
To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored.
**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix.
### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?
The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive:
* Directories: 700 - This provides the following permissions: `drwx------`
* Files: 600 - This provides the following permissions: `-rw-------`
These default permissions align with the security principle of 'least privilege', so that only you should have access to the data that you download from Microsoft OneDrive.
To alter these default permissions, you can adjust the values of two configuration options as follows. You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions.
```text
sync_dir_permissions = "700"
sync_file_permissions = "600"
```
**Important:** Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. Valid permission values range from `000` to `777` only.
### How to only sync a specific directory?
There are two methods to achieve this:
* Employ the '--single-directory' option to only sync this specific path
* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded
### How to 'skip' files from syncing?
There are two methods to achieve this:
* Employ 'skip_file' as part of your 'config' file to configure what files to skip
* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
### How to 'skip' directories from syncing?
There are three methods available to 'skip' a directory from the sync process:
* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip
* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory
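As a sketch of the '.nosync' method from the last option above, first enable the feature in your 'config' file:
```text
check_nosync = "true"
```
Then create an empty '.nosync' file inside the directory you wish to exclude (the path below is illustrative):
```text
touch ~/OneDrive/ExcludedFolder/.nosync
```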
### How to 'skip' dot files and folders from syncing?
There are three methods to achieve this:
* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip
* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive
### How to 'skip' files larger than a certain size from syncing?
Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped.
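For example, to skip any file larger than 50 MB (an illustrative value), add the following to your 'config' file:
```text
skip_size = "50"
```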
### How to 'rate limit' the application to control bandwidth consumed for upload & download operations?
To minimise the Internet bandwidth for upload and download operations, you can add the 'rate_limit' configuration option as part of your 'config' file.
The default value is '0' which means use all available bandwidth for the application.
The value being used can be reviewed when using `--display-config`.
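As a sketch, assuming the value is expressed in bytes per second (refer to application-config-options.md to confirm the exact unit and any minimum value), the following would limit the application to roughly 1 MB/s:
```text
rate_limit = "1048576"
```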
### How can I prevent my local disk from filling up?
By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space.
This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file.
You can review the value being used when using `--display-config`.
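As a sketch, assuming the value is specified in MB consistent with the 50MB default described above (refer to application-config-options.md to confirm the unit), the following would reserve 100MB:
```text
space_reservation = "100"
```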
### How does the client handle symbolic links?
Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
As such, there are only two methods to support symbolic links with this client:
1. Follow the Linux symbolic link and upload to Microsoft OneDrive whatever the local symbolic link points to. This is the default behaviour.
2. Skip symbolic links by configuring the application to do so. When skipping, no data, no link, no reference is uploaded to OneDrive.
Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing.
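For example, add the following to your 'config' file:
```text
skip_symlinks = "true"
```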
### How to synchronise shared folders (OneDrive Personal)?
Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
### How to synchronise shared folders (OneDrive Business or Office 365)?
Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
Refer to [./business-shared-folders.md](business-shared-folders.md) for further details.
### How to synchronise SharePoint / Office 365 Shared Libraries?
There are two methods to achieve this:
* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive".
* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [./sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance.
### How to Create a Shareable Link?
In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file.
To accomplish this, employ the following command:
```text
onedrive --create-share-link <path/to/file>
```
**Note:** By default, the access permissions for the file link will be read-only.
To make it a read-write link, execute the following command:
```text
onedrive --create-share-link <path/to/file> --with-editing-perms
```
**Note:** The order of the file path and option flag is crucial.
### How to Synchronise Both Personal and Business Accounts at once?
You need to set up separate instances of the application configuration for each account.
Refer to [./advanced-usage.md](advanced-usage.md) for guidance on configuration.
### How to Synchronise Multiple SharePoint Libraries simultaneously?
For each SharePoint Library, configure a separate instance of the application configuration.
Refer to [./advanced-usage.md](advanced-usage.md) for configuration instructions.
### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?
When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data. A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates.
Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle.
This is accomplished by:
* Using 'webhook_enabled' as part of your 'config' file to enable this feature
* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates
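A minimal config sketch for this feature is shown below; the URL is a placeholder only and must be an HTTPS endpoint that the Microsoft OneDrive service can reach:
```text
webhook_enabled = "true"
webhook_public_url = "https://example.com/webhook"
```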
### How to initiate the client as a background service?
There are a few ways to employ onedrive as a service:
* via init.d
* via systemd
* via runit
#### OneDrive service running as root user via init.d
```text
chkconfig onedrive on
service onedrive start
```
To view the logs, execute:
```text
tail -f /var/log/onedrive/<username>.onedrive.log
```
To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user.
#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
Initially, switch to the root user with `su - root`, then activate the systemd service:
```text
systemctl --user enable onedrive
systemctl --user start onedrive
```
**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below.
**Note:** This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'.
To monitor the service's status, use the following:
```text
systemctl --user status onedrive.service
```
To observe the systemd application logs, use:
```text
journalctl --user-unit=onedrive -f
```
**Note:** For systemd to function correctly, it requires the presence of XDG environment variables. If you encounter the following error while enabling the systemd service:
```text
Failed to connect to bus: No such file or directory
```
The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login:
```text
export XDG_RUNTIME_DIR="/run/user/$UID"
export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus"
```
To apply this change, you must log out of all user accounts where it has been made.
**Note:** On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows:
1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`.
2. Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`.
3. Start the service: `systemctl start onedrive@root.service`.
This ensures that the service correctly restarts upon system reboot.
To examine the systemd application logs, run:
```text
journalctl --unit=onedrive@<username> -f
```
#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)
```text
systemctl enable onedrive
systemctl start onedrive
```
**Note:** This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'.
To view the systemd application logs, execute:
```text
journalctl --unit=onedrive -f
```
#### OneDrive service running as a non-root user via systemd (All Linux Distributions)
In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login.
1. As the user who will run the service, launch the application in standalone mode, authorise it for use, and verify that synchronisation is functioning as expected:
```text
onedrive --sync --verbose
```
2. After validating the application for your user, switch to the 'root' user and enable the service, where `<username>` is your username from step 1 above:
```text
systemctl enable onedrive@<username>.service
systemctl start onedrive@<username>.service
```
3. To check the service's status for the user, use the following:
```text
systemctl status onedrive@<username>.service
```
To observe the systemd application logs, use:
```text
journalctl --unit=onedrive@<username> -f
```
#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. In this case, follow these steps:
1. Log in via the graphical UI as the user you want to enable the service for.
2. Disable any `onedrive@` service files for your username, e.g.:
```text
sudo systemctl stop onedrive@alex.service
sudo systemctl disable onedrive@alex.service
```
3. Enable the service as follows:
```text
systemctl --user enable onedrive
systemctl --user start onedrive
```
To check the service's status for the user, use the following:
```text
systemctl --user status onedrive.service
```
To view the systemd application logs, execute:
```text
journalctl --user-unit=onedrive -f
```
**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms.
#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)
1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-<username>`
- where `<username>` is the `USER` targeted for the service
- e.g., `# mkdir /etc/sv/runsvdir-nolan`
2. Create a file called `run` under the previously created folder with executable permissions
- `# touch /etc/sv/runsvdir-<username>/run`
- `# chmod 0755 /etc/sv/runsvdir-<username>/run`
3. Edit the `run` file to contain the following (root permissions are needed):
```sh
#!/bin/sh
export USER="<username>"
export HOME="/home/<username>"
groups="$(id -Gn "${USER}" | tr ' ' ':')"
svdir="${HOME}/service"
exec chpst -u "${USER}:${groups}" runsvdir "${svdir}"
```
- Ensure you replace `<username>` with the `USER` set in step #1.
4. Enable the previously created folder as a service
- `# ln -fs /etc/sv/runsvdir-<username> /var/service/`
5. Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks)
- `$ mkdir ~/service`
6. Create a subfolder specifically for OneDrive
- `$ mkdir ~/service/onedrive/`
7. Create a file called `run` under the previously created folder with executable permissions
- `$ touch ~/service/onedrive/run`
- `$ chmod 0755 ~/service/onedrive/run`
8. Append the following contents to the `run` file
```sh
#!/usr/bin/env sh
exec /usr/bin/onedrive --monitor
```
- In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`.
9. Reboot to apply the changes
10. Check the status of user-defined services
- `$ sv status ~/service/*`
For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html).
### How to start a user systemd service at boot without user login?
In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in.
To address this issue, you need to reconfigure your 'user' account so that the systemd services you've created launch without the need for you to log in to your system:
```text
loginctl enable-linger <your_user_name>
```
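To verify that lingering is now enabled for the account, the following check can be used; the output should include `Linger=yes`:
```text
loginctl show-user <your_user_name> | grep Linger
```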

84
readme.md Normal file
View file

@@ -0,0 +1,84 @@
# OneDrive Client for Linux
[![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases)
[![Release Date](https://img.shields.io/github/release-date/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases)
[![Test Build](https://github.com/abraunegg/onedrive/actions/workflows/testbuild.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/testbuild.yaml)
[![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml)
[![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive)
Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries.
This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services.
Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)).
This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018.
## Features
* State caching
* Real-Time local file monitoring with inotify
* Real-Time syncing of remote updates via webhooks
* File upload / download validation to ensure data integrity
* Resumable uploads
* Support OneDrive for Business (part of Office 365)
* Shared Folder support for OneDrive Personal and OneDrive Business accounts
* SharePoint / Office365 Shared Libraries
* Desktop notifications via libnotify
* Dry-run capability to test configuration changes
* Prevent major OneDrive accidental data deletion after configuration change
* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China)
* Supports single & multi-tenanted applications
* Supports rate limiting of traffic
* Supports multi-threaded uploads and downloads
## What's missing
* Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive
* Support for Windows 'On-Demand' functionality so a file is only downloaded when accessed locally
## External Enhancements
* A GUI for configuration management: [OneDrive Client for Linux GUI](https://github.com/bpozdena/OneDriveGUI)
* Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log)
* System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray)
## Frequently Asked Questions
Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions)
## Have a question
If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
## Reporting an Issue or Bug
If you encounter any bugs, you can report them here on GitHub. Before filing an issue, be sure to:
1. Check the version of the application you are using with `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or a build from master.
2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support)
* If you are in *any* way concerned regarding the sensitivity of the data contained within the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue
* If you are still concerned, provide an NDA or confidentiality document to sign
4. Upload the debug log to [pastebin](https://pastebin.com/) or archive and email to support@mynas.com.au
* If you are concerned regarding the sensitivity of your debug data, encrypt + password protect the archive file and provide the decryption password via an out-of-band (OOB) mechanism. Email support@mynas.com.au for an OOB method for the password to be sent.
* If you are still concerned, provide an NDA or confidentiality document to sign
## Known issues
Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/master/docs/known-issues.md)
## Documentation and Configuration Assistance
### Installing from Distribution Packages or Building the OneDrive Client for Linux from source
Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md)
### Configuration and Usage
Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md)
### Configure OneDrive Business Shared Folders
Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md)
### Configure SharePoint / Office 365 Shared Libraries (Business or Education)
Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md)
### Configure National Cloud support
Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md)
### Docker support
Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md)
### Podman support
Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md)

8
src/arsd/README.md Normal file
View file

@@ -0,0 +1,8 @@
The files in this directory have been obtained from the following places:
cgi.d
https://github.com/adamdruppe/arsd/blob/a870179988b8881b04126856105f0fad2cc0018d/cgi.d
License: Boost Software License - Version 1.0
Copyright 2008-2021, Adam D. Ruppe
see https://github.com/adamdruppe/arsd/blob/a870179988b8881b04126856105f0fad2cc0018d/LICENSE

11810
src/arsd/cgi.d Normal file

File diff suppressed because it is too large Load diff

384
src/clientSideFiltering.d Normal file
View file

@@ -0,0 +1,384 @@
// What is this module called?
module clientSideFiltering;
// What does this module require to function?
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
class ClientSideFiltering {
// Class variables
ApplicationConfig appConfig;
string[] paths;
string[] businessSharedItemsList;
Regex!char fileMask;
Regex!char directoryMask;
bool skipDirStrictMatch = false;
bool skipDotfiles = false;
this(ApplicationConfig appConfig) {
// Configure the class variable to consume the application configuration
this.appConfig = appConfig;
}
// Initialise the required items
bool initialise() {
// Log that we are configuring the client side filtering (selective sync) rules
log.vdebug("Configuring Client Side Filtering (Selective Sync)");
// Load the sync_list file if it exists
if (exists(appConfig.syncListFilePath)){
loadSyncList(appConfig.syncListFilePath);
}
// Load the Business Shared Items file if it exists
if (exists(appConfig.businessSharedItemsFilePath)){
loadBusinessSharedItems(appConfig.businessSharedItemsFilePath);
}
// Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries
// Handle skip_dir configuration in config file
log.vdebug("Configuring skip_dir ...");
log.vdebug("skip_dir: ", appConfig.getValueString("skip_dir"));
setDirMask(appConfig.getValueString("skip_dir"));
// Was --skip-dir-strict-match configured?
log.vdebug("Configuring skip_dir_strict_match ...");
log.vdebug("skip_dir_strict_match: ", appConfig.getValueBool("skip_dir_strict_match"));
if (appConfig.getValueBool("skip_dir_strict_match")) {
setSkipDirStrictMatch();
}
// Was --skip-dot-files configured?
log.vdebug("Configuring skip_dotfiles ...");
log.vdebug("skip_dotfiles: ", appConfig.getValueBool("skip_dotfiles"));
if (appConfig.getValueBool("skip_dotfiles")) {
setSkipDotfiles();
}
// Handle skip_file configuration in config file
log.vdebug("Configuring skip_file ...");
// Validate skip_file to ensure that this does not contain an invalid configuration
// Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process.
foreach(entry; appConfig.getValueString("skip_file").split("|")){
if (entry == ".*") {
// invalid entry element detected
log.error("ERROR: Invalid skip_file entry '.*' detected");
return false;
}
}
// All skip_file entries are valid
log.vdebug("skip_file: ", appConfig.getValueString("skip_file"));
setFileMask(appConfig.getValueString("skip_file"));
// All configured OK
return true;
}
// Load sync_list file if it exists
void loadSyncList(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
// load business_shared_folders file
void loadBusinessSharedItems(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
businessSharedItemsList ~= buildNormalizedPath(line);
}
file.close();
}
// Configure the regex that will be used for 'skip_file'
void setFileMask(const(char)[] mask) {
fileMask = wild2regex(mask);
log.vdebug("Selective Sync File Mask: ", fileMask);
}
// Configure the regex that will be used for 'skip_dir'
void setDirMask(const(char)[] dirmask) {
directoryMask = wild2regex(dirmask);
log.vdebug("Selective Sync Directory Mask: ", directoryMask);
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch() {
skipDirStrictMatch = true;
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles() {
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles() {
return skipDotfiles;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path) {
// Debug output that we are performing a 'sync_list' inclusion / exclusion test
return isPathExcluded(path, paths);
}
// config file skip_dir parameter
bool isDirNameExcluded(string name) {
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
log.vdebug("skip_dir evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(directoryMask).empty) {
log.vdebug("'!name.matchFirst(directoryMask).empty' returned true = matched");
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
log.vdebug("No Strict Matching Enforced");
// Test the entire path working backwards from child
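// e.g. for name '/a/b/c' this evaluates 'c', then 'b/c', then 'a/b/c' against the directory mask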
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(directoryMask).empty) {
log.vdebug("'!checkPath.matchFirst(directoryMask).empty' returned true = matched");
return true;
}
}
}
} else {
log.vdebug("Strict Matching Enforced - No Match");
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name) {
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
log.vdebug("skip_file evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(fileMask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(fileMask).empty) {
return true;
}
}
// no match
return false;
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
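// Worked example: with sync_list entries ["!/Documents/Private", "/Documents"],
// the path "Documents/Report.docx" is included via the parental path match on
// '/Documents', whereas "Documents/Private/secret.txt" first sets excludeMatched
// via the '!' entry, so its final result remains excluded even though '/Documents' also matches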
private bool isPathExcluded(string path, string[] allowedPaths) {
// function variables
bool exclude = false;
bool excludeDirectMatch = false; // will get updated to true, if there is a direct pattern match to a sync_list exclusion entry
bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
log.vdebug("Evaluation against 'sync_list' for this path: ", path);
log.vdebug("[S]exclude = ", exclude);
log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[S]excludeMatched = ", excludeMatched);
// unless path is an exact match, entire sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is the path an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
log.vdebug("exact path match");
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded");
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag excludeDirectMatch so that a 'wildcard match' will not override this exclude
excludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
log.vdebug("something 'common' matches the input path");
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded");
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem/sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: parental path match");
finalResult = false;
// parental path matches, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' with '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !exludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match");
finalResult = false;
} else {
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
log.vdebug("[F]exclude = ", exclude);
log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[F]excludeMatched = ", excludeMatched);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (excludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED");
} else {
log.vdebug("Evaluation against 'sync_list' final result: included for sync");
}
return finalResult;
}
}

2107
src/config.d Normal file

File diff suppressed because it is too large Load diff

111
src/curlEngine.d Normal file
View file

@@ -0,0 +1,111 @@
// What is this module called?
module curlEngine;
// What does this module require to function?
import std.net.curl;
import etc.c.curl: CurlOption;
import std.datetime;
// What other modules that we have created do we need to import?
import log;
import std.stdio;
class CurlEngine {
HTTP http;
this() {
http = HTTP();
}
void initialise(long dnsTimeout, long connectTimeout, long dataTimeout, long operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, long userRateLimit, long protocolVersion) {
// Curl Timeout Handling
// libcurl dns_cache_timeout timeout
// https://curl.se/libcurl/c/CURLOPT_DNS_CACHE_TIMEOUT.html
// https://dlang.org/library/std/net/curl/http.dns_timeout.html
http.dnsTimeout = (dur!"seconds"(dnsTimeout));
// Timeout for HTTPS connections
// https://curl.se/libcurl/c/CURLOPT_CONNECTTIMEOUT.html
// https://dlang.org/library/std/net/curl/http.connect_timeout.html
http.connectTimeout = (dur!"seconds"(connectTimeout));
// Timeout for activity on connection
// This is a DMD | DLANG specific item, not a libcurl item
// https://dlang.org/library/std/net/curl/http.data_timeout.html
// https://raw.githubusercontent.com/dlang/phobos/master/std/net/curl.d - private enum _defaultDataTimeout = dur!"minutes"(2);
http.dataTimeout = (dur!"seconds"(dataTimeout));
// Maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
// https://curl.se/libcurl/c/CURLOPT_TIMEOUT_MS.html
// https://dlang.org/library/std/net/curl/http.operation_timeout.html
http.operationTimeout = (dur!"seconds"(operationTimeout));
// Specify how many redirects should be allowed
http.maxRedirects(maxRedirects);
// Debug HTTPS
http.verbose = httpsDebug;
// Use the configured 'user_agent' value
http.setUserAgent = userAgent;
// What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6
http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// What version of HTTP protocol do we use?
// Curl >= 7.62.0 defaults to http2 for a significant number of operations
if (httpProtocol) {
// Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1
http.handle.set(CurlOption.http_version,2);
}
// Configure upload / download rate limits if configured
// 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts
// A 0 value means rate is unlimited, and is the curl default
if (userRateLimit > 0) {
// set rate limit
http.handle.set(CurlOption.max_send_speed_large,userRateLimit);
http.handle.set(CurlOption.max_recv_speed_large,userRateLimit);
}
// Explicitly set these libcurl options
// https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html
// Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals
http.handle.set(CurlOption.nosignal,0);
// https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html
// Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled
http.handle.set(CurlOption.tcp_nodelay,0);
// https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html
// CURLOPT_FORBID_REUSE - make connection get closed at once after use
// Ensure that we ARE NOT reusing TCP socket connections - setting this to 0 would reuse connections (the v2.4.xx behaviour), keeping connections open and usable
// Setting this to 1 ensures that when we close the curl instance, any open sockets are closed - which we need to do when running
// multiple threads and API instances at the same time, otherwise we run out of local file handles | sockets pretty quickly
// The libcurl default is 0 - we explicitly forbid reuse so that unused sockets are not left open
http.handle.set(CurlOption.forbid_reuse,1);
if (httpsDebug) {
// Output what options we are using so that in the debug log this can be tracked
log.vdebug("http.dnsTimeout = ", dnsTimeout);
log.vdebug("http.connectTimeout = ", connectTimeout);
log.vdebug("http.dataTimeout = ", dataTimeout);
log.vdebug("http.operationTimeout = ", operationTimeout);
log.vdebug("http.maxRedirects = ", maxRedirects);
log.vdebug("http.CurlOption.ipresolve = ", protocolVersion);
}
}
void setMethodPost() {
http.method = HTTP.Method.post;
}
void setMethodPatch() {
http.method = HTTP.Method.patch;
}
void setDisableSSLVerifyPeer() {
log.vdebug("Switching off CurlOption.ssl_verifypeer");
http.handle.set(CurlOption.ssl_verifypeer, 0);
}
}
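A minimal sketch of how this class might be driven; the timeout, rate and user-agent values below are hypothetical, not the application defaults:
```d
import curlEngine;

void main() {
	auto engine = new CurlEngine();
	// Arguments: dnsTimeout, connectTimeout, dataTimeout, operationTimeout (seconds),
	// maxRedirects, httpsDebug, userAgent, httpProtocol (true = downgrade to HTTP/1.1),
	// userRateLimit (bytes/sec, 0 = unlimited), protocolVersion (0 = IPv4 + IPv6)
	engine.initialise(60, 10, 240, 3600, 5, false, "OneDrive-Client-Sketch", false, 0, 0);
	engine.setMethodPost(); // subsequent requests on this handle will use HTTP POST
}
```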

696
src/itemdb.d Normal file
View file

@@ -0,0 +1,696 @@
// What is this module called?
module itemdb;
// What does this module require to function?
import std.datetime;
import std.exception;
import std.path;
import std.string;
import std.stdio;
import std.algorithm.searching;
import core.stdc.stdlib;
import std.json;
import std.conv;
// What other modules that we have created do we need to import?
import sqlite;
import util;
import log;
enum ItemType {
file,
dir,
remote,
unknown
}
struct Item {
string driveId;
string id;
string name;
string remoteName;
ItemType type;
string eTag;
string cTag;
SysTime mtime;
string parentId;
string quickXorHash;
string sha256Hash;
string remoteDriveId;
string remoteId;
string syncStatus;
string size;
}
// Construct an Item struct from a JSON driveItem
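// A minimal illustrative driveItem payload (fields abbreviated):
//   {"id": "01ABC", "name": "report.docx",
//    "parentReference": {"driveId": "b!xyz", "id": "01ROOT"},
//    "file": {"hashes": {"quickXorHash": "..."}}, "size": 1024,
//    "fileSystemInfo": {"lastModifiedDateTime": "2023-10-05T17:24:30Z"}}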
Item makeDatabaseItem(JSONValue driveItem) {
Item item = {
id: driveItem["id"].str,
name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business
eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business
cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business)
remoteName: "actualOnlineName" in driveItem ? driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders
};
// OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834
// OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive
if(isItemDeleted(driveItem)) {
// Set mtime to SysTime(0)
item.mtime = SysTime(0);
} else {
// Item is not in a deleted state
// Resolve 'Key not found: fileSystemInfo' when the item is a remote item
// https://github.com/abraunegg/onedrive/issues/11
if (isItemRemote(driveItem)) {
// remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default
// Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI
// to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to an application crash
// See: https://github.com/abraunegg/onedrive/issues/1533
if ("fileSystemInfo" in driveItem["remoteItem"]) {
// 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases
item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
} else {
// is a remote item, but 'fileSystemInfo' is missing from 'remoteItem'
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
} else {
// Does fileSystemInfo exist at all ?
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
}
// Set this item object type
bool typeSet = false;
if (isItemFile(driveItem)) {
// 'file' object exists in the JSON
log.vdebug("Flagging object as a file");
typeSet = true;
item.type = ItemType.file;
}
if (isItemFolder(driveItem)) {
// 'folder' object exists in the JSON
log.vdebug("Flagging object as a directory");
typeSet = true;
item.type = ItemType.dir;
}
if (isItemRemote(driveItem)) {
// 'remote' object exists in the JSON
log.vdebug("Flagging object as a remote");
typeSet = true;
item.type = ItemType.remote;
}
// root and remote items do not have parentReference
if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) {
item.driveId = driveItem["parentReference"]["driveId"].str;
if (hasParentReferenceId(driveItem)) {
item.parentId = driveItem["parentReference"]["id"].str;
}
}
// extract the file hash and file size
if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) {
// Get file size
if (hasFileSize(driveItem)) {
item.size = to!string(driveItem["size"].integer);
// Get quickXorHash as default
if ("quickXorHash" in driveItem["file"]["hashes"]) {
item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str;
} else {
log.vdebug("quickXorHash is missing from ", driveItem["id"].str);
}
// If quickXorHash is empty ..
if (item.quickXorHash.empty) {
// Is there a sha256Hash?
if ("sha256Hash" in driveItem["file"]["hashes"]) {
item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str;
} else {
log.vdebug("sha256Hash is missing from ", driveItem["id"].str);
}
}
} else {
// So that we have at least a zero value here as the API provided no 'size' data for this file item
item.size = "0";
}
}
// Is the object a remote drive item - living on another driveId ?
if (isItemRemote(driveItem)) {
item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str;
item.remoteId = driveItem["remoteItem"]["id"].str;
}
// We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not:
// - National Cloud Deployments do not support /delta as a query
// - When using --single-directory
// - When using --download-only --cleanup-local-files
//
// Thus we need to track in the database that this item is in sync
// As we are making an item, set the syncStatus to Y
// ONLY when any of the three modes above is being used, all the existing DB entries will get set to N
// so when processing /children, it can be identified what the 'deleted' difference is
item.syncStatus = "Y";
// Return the created item
return item;
}
final class ItemDatabase {
// increment this for every change in the db schema
immutable int itemDatabaseVersion = 12;
Database db;
string insertItemStmt;
string updateItemStmt;
string selectItemByIdStmt;
string selectItemByRemoteIdStmt;
string selectItemByParentIdStmt;
string deleteItemByIdStmt;
bool databaseInitialised = false;
this(const(char)[] filename) {
db = Database(filename);
int dbVersion;
try {
dbVersion = db.getVersion();
} catch (SqliteException e) {
// An error was generated - what was the error?
if (e.msg == "database is locked") {
writeln();
log.error("ERROR: onedrive application is already running - check system process list for active application instances");
log.vlog(" - Use 'sudo ps aufxw | grep onedrive' to potentially determine acive running process");
writeln();
} else {
writeln();
log.error("ERROR: An internal database error occurred: " ~ e.msg);
writeln();
}
return;
}
if (dbVersion == 0) {
createTable();
} else if (db.getVersion() != itemDatabaseVersion) {
log.log("The item database is incompatible, re-creating database table structures");
db.exec("DROP TABLE item");
createTable();
}
// Set the enforcement of foreign key constraints.
// https://www.sqlite.org/pragma.html#pragma_foreign_keys
// PRAGMA foreign_keys = boolean;
db.exec("PRAGMA foreign_keys = TRUE");
// Set the recursive trigger capability
// https://www.sqlite.org/pragma.html#pragma_recursive_triggers
// PRAGMA recursive_triggers = boolean;
db.exec("PRAGMA recursive_triggers = TRUE");
// Set the journal mode for databases associated with the current connection
// https://www.sqlite.org/pragma.html#pragma_journal_mode
db.exec("PRAGMA journal_mode = WAL");
// Automatic indexing is enabled by default as of version 3.7.17
// https://www.sqlite.org/pragma.html#pragma_automatic_index
// PRAGMA automatic_index = boolean;
db.exec("PRAGMA automatic_index = FALSE");
// Tell SQLite to store temporary tables in memory. This will speed up many read operations that rely on temporary tables, indices, and views.
// https://www.sqlite.org/pragma.html#pragma_temp_store
db.exec("PRAGMA temp_store = MEMORY");
// Tell SQlite to cleanup database table size
// https://www.sqlite.org/pragma.html#pragma_auto_vacuum
// PRAGMA schema.auto_vacuum = 0 | NONE | 1 | FULL | 2 | INCREMENTAL;
db.exec("PRAGMA auto_vacuum = FULL");
// This pragma sets or queries the database connection locking-mode. The locking-mode is either NORMAL or EXCLUSIVE.
// https://www.sqlite.org/pragma.html#pragma_locking_mode
// PRAGMA schema.locking_mode = NORMAL | EXCLUSIVE
db.exec("PRAGMA locking_mode = EXCLUSIVE");
insertItemStmt = "
INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus, size)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)
";
updateItemStmt = "
UPDATE item
SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteId = ?13, syncStatus = ?14, size = ?15
WHERE driveId = ?1 AND id = ?2
";
selectItemByIdStmt = "
SELECT *
FROM item
WHERE driveId = ?1 AND id = ?2
";
selectItemByRemoteIdStmt = "
SELECT *
FROM item
WHERE remoteDriveId = ?1 AND remoteId = ?2
";
selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?";
deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? AND id = ?";
// flag that the database is accessible and we have control
databaseInitialised = true;
}
bool isDatabaseInitialised() {
return databaseInitialised;
}
void createTable() {
db.exec("CREATE TABLE item (
driveId TEXT NOT NULL,
id TEXT NOT NULL,
name TEXT NOT NULL,
remoteName TEXT,
type TEXT NOT NULL,
eTag TEXT,
cTag TEXT,
mtime TEXT NOT NULL,
parentId TEXT,
quickXorHash TEXT,
sha256Hash TEXT,
remoteDriveId TEXT,
remoteId TEXT,
deltaLink TEXT,
syncStatus TEXT,
size TEXT,
PRIMARY KEY (driveId, id),
FOREIGN KEY (driveId, parentId)
REFERENCES item (driveId, id)
ON DELETE CASCADE
ON UPDATE RESTRICT
)");
db.exec("CREATE INDEX name_idx ON item (name)");
db.exec("CREATE INDEX remote_idx ON item (remoteDriveId, remoteId)");
db.exec("CREATE INDEX item_children_idx ON item (driveId, parentId)");
db.exec("CREATE INDEX selectByPath_idx ON item (name, driveId, parentId)");
db.setVersion(itemDatabaseVersion);
}
void insert(const ref Item item) {
auto p = db.prepare(insertItemStmt);
bindItem(item, p);
p.exec();
}
void update(const ref Item item) {
auto p = db.prepare(updateItemStmt);
bindItem(item, p);
p.exec();
}
void dump_open_statements() {
db.dump_open_statements();
}
int db_checkpoint() {
return db.db_checkpoint();
}
void upsert(const ref Item item) {
auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?");
s.bind(1, item.driveId);
s.bind(2, item.id);
auto r = s.exec();
Statement stmt;
if (r.front[0] == "0") stmt = db.prepare(insertItemStmt);
else stmt = db.prepare(updateItemStmt);
bindItem(item, stmt);
stmt.exec();
}
Item[] selectChildren(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(selectItemByParentIdStmt);
p.bind(1, driveId);
p.bind(2, id);
auto res = p.exec();
Item[] items;
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
bool selectById(const(char)[] driveId, const(char)[] id, out Item item) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
auto r = p.exec();
if (!r.empty) {
item = buildItem(r);
return true;
}
return false;
}
bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) {
auto p = db.prepare(selectItemByRemoteIdStmt);
p.bind(1, remoteDriveId);
p.bind(2, remoteId);
auto r = p.exec();
if (!r.empty) {
item = buildItem(r);
return true;
}
return false;
}
// returns true if an item id is in the database
bool idInLocalDatabase(const(string) driveId, const(string) id) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
auto r = p.exec();
if (!r.empty) {
return true;
}
return false;
}
// returns the item with the given path
// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
bool selectByPath(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
if (startsWith(path, "./") || path == ".") {
// Need to remove the . from the path prefix
path = "root/" ~ path.chompPrefix(".");
} else {
// Leave path as it is
path = "root/" ~ path;
}
auto s = db.prepare("SELECT * FROM item WHERE name = ?1 AND driveId IS ?2 AND parentId IS ?3");
foreach (name; pathSplitter(path)) {
s.bind(1, name);
s.bind(2, currItem.driveId);
s.bind(3, currItem.id);
auto r = s.exec();
if (r.empty) return false;
currItem = buildItem(r);
// if the item is of type remote substitute it with the child
if (currItem.type == ItemType.remote) {
Item child;
if (selectById(currItem.remoteDriveId, currItem.remoteId, child)) {
assert(child.type != ItemType.remote, "The type of the child cannot be remote");
currItem = child;
}
}
}
item = currItem;
return true;
}
// same as selectByPath() but it does not traverse remote folders
bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
if (startsWith(path, "./") || path == ".") {
// Need to remove the . from the path prefix
path = "root/" ~ path.chompPrefix(".");
} else {
// Leave path as it is
path = "root/" ~ path;
}
auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND driveId IS ?2 AND parentId IS ?3");
foreach (name; pathSplitter(path)) {
s.bind(1, name);
s.bind(2, currItem.driveId);
s.bind(3, currItem.id);
auto r = s.exec();
if (r.empty) return false;
currItem = buildItem(r);
}
item = currItem;
return true;
}
void deleteById(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(deleteItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
p.exec();
}
private void bindItem(const ref Item item, ref Statement stmt) {
with (stmt) with (item) {
bind(1, driveId);
bind(2, id);
bind(3, name);
bind(4, remoteName);
string typeStr = null;
final switch (type) with (ItemType) {
case file: typeStr = "file"; break;
case dir: typeStr = "dir"; break;
case remote: typeStr = "remote"; break;
case unknown: typeStr = "unknown"; break;
}
bind(5, typeStr);
bind(6, eTag);
bind(7, cTag);
bind(8, mtime.toISOExtString());
bind(9, parentId);
bind(10, quickXorHash);
bind(11, sha256Hash);
bind(12, remoteDriveId);
bind(13, remoteId);
bind(14, syncStatus);
bind(15, size);
}
}
private Item buildItem(Statement.Result result) {
assert(!result.empty, "The result must not be empty");
assert(result.front.length == 16, "The result must have 16 columns");
Item item = {
// column 0: driveId
// column 1: id
// column 2: name
// column 3: remoteName - only used when there is a difference in the local name & remote shared folder name
// column 4: type
// column 5: eTag
// column 6: cTag
// column 7: mtime
// column 8: parentId
// column 9: quickXorHash
// column 10: sha256Hash
// column 11: remoteDriveId
// column 12: remoteId
// column 13: deltaLink
// column 14: syncStatus
// column 15: size
driveId: result.front[0].dup,
id: result.front[1].dup,
name: result.front[2].dup,
remoteName: result.front[3].dup,
// Column 4 is type - not set here
eTag: result.front[5].dup,
cTag: result.front[6].dup,
mtime: SysTime.fromISOExtString(result.front[7]),
parentId: result.front[8].dup,
quickXorHash: result.front[9].dup,
sha256Hash: result.front[10].dup,
remoteDriveId: result.front[11].dup,
remoteId: result.front[12].dup,
// Column 13 is deltaLink - not set here
syncStatus: result.front[14].dup,
size: result.front[15].dup
};
switch (result.front[4]) {
case "file": item.type = ItemType.file; break;
case "dir": item.type = ItemType.dir; break;
case "remote": item.type = ItemType.remote; break;
default: assert(0, "Invalid item type");
}
return item;
}
// computes the path of the given item id
// the path is relative to the sync directory ex: "Music/Turbo Killer.mp3"
// the trailing slash is not added even if the item is a directory
string computePath(const(char)[] driveId, const(char)[] id) {
assert(driveId && id);
string path;
Item item;
auto s = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND id = ?2");
auto s2 = db.prepare("SELECT driveId, id FROM item WHERE remoteDriveId = ?1 AND remoteId = ?2");
while (true) {
s.bind(1, driveId);
s.bind(2, id);
auto r = s.exec();
if (!r.empty) {
item = buildItem(r);
if (item.type == ItemType.remote) {
// substitute the last name with the current
ptrdiff_t idx = indexOf(path, '/');
path = idx >= 0 ? item.name ~ path[idx .. $] : item.name;
} else {
if (path) path = item.name ~ "/" ~ path;
else path = item.name;
}
id = item.parentId;
} else {
if (id == null) {
// check for remoteItem
s2.bind(1, item.driveId);
s2.bind(2, item.id);
auto r2 = s2.exec();
if (r2.empty) {
// root reached
assert(path.length >= 4);
// remove "root/" from path string if it exists
if (path.length >= 5) {
if (canFind(path, "root/")){
path = path[5 .. $];
}
} else {
path = path[4 .. $];
}
// special case of computing the path of the root itself
if (path.length == 0) path = ".";
break;
} else {
// remote folder
driveId = r2.front[0].dup;
id = r2.front[1].dup;
}
} else {
// broken tree
log.vdebug("The following generated a broken tree query:");
log.vdebug("Drive ID: ", driveId);
log.vdebug("Item ID: ", id);
assert(0);
}
}
}
return path;
}
Item[] selectRemoteItems() {
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL");
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
string getDeltaLink(const(char)[] driveId, const(char)[] id) {
// Log what we received
log.vdebug("DeltaLink Query (driveId): ", driveId);
log.vdebug("DeltaLink Query (id): ", id);
assert(driveId && id);
auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
stmt.bind(2, id);
auto res = stmt.exec();
if (res.empty) return null;
return res.front[0].dup;
}
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) {
assert(driveId && id);
assert(deltaLink);
auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
stmt.bind(2, id);
stmt.bind(3, deltaLink);
stmt.exec();
}
// National Cloud Deployments (US and DE) do not support /delta as a query
// We need to track in the database that this item is in sync
// As we query /children to get all children from OneDrive, update anything in the database
// to be flagged as not-in-sync; we can then use that flag to determine what was previously
// in-sync but is now deleted on OneDrive
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) {
assert(driveId);
auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
stmt.bind(2, id);
stmt.exec();
}
// National Cloud Deployments (US and DE) do not support /delta as a query
// Select items that have an out-of-sync flag set
Item[] selectOutOfSyncItems(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1");
stmt.bind(1, driveId);
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
// OneDrive Business Folders are stored in the database potentially without a root | parentRoot link
// Select items associated with the provided driveId
Item[] selectByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL");
stmt.bind(1, driveId);
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
// Select all items associated with the provided driveId
Item[] selectAllItemsByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1");
stmt.bind(1, driveId);
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
// Perform a vacuum on the database, commit WAL / SHM to file
void performVacuum() {
try {
auto stmt = db.prepare("VACUUM;");
stmt.exec();
} catch (SqliteException e) {
writeln();
log.error("ERROR: Unable to perform a database vacuum: " ~ e.msg);
writeln();
}
}
// Select distinct driveId items from database
string[] selectDistinctDriveIds() {
string[] driveIdArray;
auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;");
auto res = stmt.exec();
if (res.empty) return driveIdArray;
while (!res.empty) {
driveIdArray ~= res.front[0].dup;
res.step();
}
return driveIdArray;
}
}
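An illustrative sketch of exercising the database class; the database path and drive id below are placeholders, not values the client actually derives:
```d
import itemdb;

void main() {
	auto itemDB = new ItemDatabase("/home/user/.config/onedrive/items.sqlite3"); // placeholder path
	if (!itemDB.isDatabaseInitialised()) return; // database locked or an internal error occurred
	Item item;
	// 'placeholderDriveId' stands in for the account's default drive id
	if (itemDB.selectByPath("Documents/Report.docx", "placeholderDriveId", item)) {
		// computePath() walks the parentId links back up to the root
		string relativePath = itemDB.computePath(item.driveId, item.id);
	}
}
```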

252
src/log.d Normal file
View file

@@ -0,0 +1,252 @@
// What is this module called?
module log;
// What does this module require to function?
import std.stdio;
import std.file;
import std.datetime;
import std.process;
import std.conv;
import std.path;
import std.string;
import core.memory;
import core.sys.posix.pwd;
import core.sys.posix.unistd;
import core.stdc.string : strlen;
import std.algorithm : splitter;
version(Notifications) {
import dnotify;
}
// module variables
// verbose logging count
long verbose;
// do we write a log file? ... this should be a config value
bool writeLogFile = false;
// did the log file write fail?
bool logFileWriteFailFlag = false;
private bool triggerNotification;
// shared string variable for username
string username;
string logFilePath;
string logFileName;
string logFileFullPath;
void initialise(string logDir) {
writeLogFile = true;
// Configure various variables
username = getUserName();
logFilePath = logDir;
logFileName = username ~ ".onedrive.log";
logFileFullPath = buildPath(logFilePath, logFileName);
if (!exists(logFilePath)){
// logfile path does not exist
try {
mkdirRecurse(logFilePath);
}
catch (std.file.FileException e) {
// we got an error ..
writeln();
writeln("ERROR: Unable to access ", logFilePath);
writeln("ERROR: Please manually create '",logFilePath, "' and set appropriate permissions to allow write access");
writeln("ERROR: The requested client activity log will instead be located in your users home directory");
writeln();
// set the flag so we don't keep printing this sort of message
logFileWriteFailFlag = true;
}
}
}
void enableNotifications(bool value) {
version(Notifications) {
// if we try to enable notifications, check for server availability
// and disable in case dbus server is not reachable
if (value) {
auto serverAvailable = dnotify.check_availability();
if (!serverAvailable) {
log("Notification (dbus) server not available, disabling");
value = false;
}
}
}
triggerNotification = value;
}
void log(T...)(T args) {
writeln(args);
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void logAndNotify(T...)(T args) {
notify(args);
log(args);
}
void fileOnly(T...)(T args) {
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void vlog(T...)(T args) {
if (verbose >= 1) {
writeln(args);
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
}
void vdebug(T...)(T args) {
if (verbose >= 2) {
writeln("[DEBUG] ", args);
if(writeLogFile){
// Write to log file
logfileWriteLine("[DEBUG] ", args);
}
}
}
void vdebugNewLine(T...)(T args) {
if (verbose >= 2) {
writeln("\n[DEBUG] ", args);
if(writeLogFile){
// Write to log file
logfileWriteLine("\n[DEBUG] ", args);
}
}
}
void error(T...)(T args) {
stderr.writeln(args);
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void errorAndNotify(T...)(T args) {
notify(args);
error(args);
}
void notify(T...)(T args) {
version(Notifications) {
if (triggerNotification) {
string result;
foreach (index, arg; args) {
result ~= to!string(arg);
if (index != args.length - 1)
result ~= " ";
}
auto n = new Notification("OneDrive", result, "IGNORED");
try {
n.show();
// Sent message to notification daemon
if (verbose >= 2) {
writeln("[DEBUG] Sent notification to notification service. If notification is not displayed, check dbus or notification-daemon for errors");
}
} catch (Throwable e) {
vlog("Got exception from showing notification: ", e);
}
}
}
}
private void logfileWriteLine(T...)(T args) {
static import std.exception;
// Write to log file
auto currentTime = Clock.currTime();
auto timeString = leftJustify(currentTime.toString(), 28, '0');
File logFile;
// Resolve: std.exception.ErrnoException@std/stdio.d(423): Cannot open file `/var/log/onedrive/xxxxx.onedrive.log' in mode `a' (Permission denied)
try {
logFile = File(logFileFullPath, "a");
}
catch (std.exception.ErrnoException e) {
// We cannot open the log file logFileFullPath for writing
// The user is not part of the standard 'users' group (GID 100)
// Change logfile to ~/onedrive.log putting the log file in the users home directory
if (!logFileWriteFailFlag) {
// write out error message that we cant log to the requested file
writeln();
writeln("ERROR: Unable to write activity log to ", logFileFullPath);
writeln("ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account");
writeln("ERROR: The requested client activity log will instead be located in your users home directory");
writeln();
// set the flag so we don't keep printing this error message
logFileWriteFailFlag = true;
}
string homePath = environment.get("HOME");
string logFileFullPathAlternate = homePath ~ "/onedrive.log";
logFile = File(logFileFullPathAlternate, "a");
}
// Write to the log file
logFile.writeln(timeString, "\t", args);
logFile.close();
}
private string getUserName() {
auto pw = getpwuid(getuid);
// get required details
auto runtime_pw_name = pw.pw_name[0 .. strlen(pw.pw_name)].splitter(',');
auto runtime_pw_uid = pw.pw_uid;
auto runtime_pw_gid = pw.pw_gid;
// user identifiers from process
vdebug("Process ID: ", pw);
vdebug("User UID: ", runtime_pw_uid);
vdebug("User GID: ", runtime_pw_gid);
// What should be returned as username?
if (!runtime_pw_name.empty && runtime_pw_name.front.length){
// user resolved
vdebug("User Name: ", runtime_pw_name.front.idup);
return runtime_pw_name.front.idup;
} else {
// Unknown user?
vdebug("User Name: unknown");
return "unknown";
}
}
void displayMemoryUsagePreGC() {
// Display memory usage
writeln();
writeln("Memory Usage pre GC (KB)");
writeln("------------------------");
writeMemoryStats();
writeln();
}
void displayMemoryUsagePostGC() {
// Display memory usage
writeln();
writeln("Memory Usage post GC (KB)");
writeln("-------------------------");
writeMemoryStats();
writeln();
}
void writeMemoryStats() {
// write memory stats
writeln("memory usedSize = ", (GC.stats.usedSize/1024));
writeln("memory freeSize = ", (GC.stats.freeSize/1024));
writeln("memory allocatedInCurrentThread = ", (GC.stats.allocatedInCurrentThread/1024));
}
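An illustrative sketch of the logging levels this module exposes; the log directory is hypothetical:
```d
import log;

void main() {
	log.initialise("/var/log/onedrive"); // falls back to ~/onedrive.log if this is not writable
	log.verbose = 2;                     // 0 = normal, 1 = --verbose, >= 2 = debug output
	log.log("always written to stdout and, when file logging is enabled, to the log file");
	log.vlog("written when verbose >= 1");
	log.vdebug("written when verbose >= 2, prefixed with [DEBUG]");
}
```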

1001
src/main.d Normal file

File diff suppressed because it is too large Load diff

413
src/monitor.d Normal file
View file

@@ -0,0 +1,413 @@
// What is this module called?
module monitor;
// What does this module require to function?
import core.stdc.errno;
import core.stdc.stdlib;
import core.sys.linux.sys.inotify;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import std.algorithm;
import std.exception;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
import std.conv;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
import clientSideFiltering;
// Relevant inotify events
private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW;
class MonitorException: ErrnoException {
@safe this(string msg, string file = __FILE__, size_t line = __LINE__) {
super(msg, file, line);
}
}
final class Monitor {
// Class variables
ApplicationConfig appConfig;
ClientSideFiltering selectiveSync;
// Are we verbose in logging output
bool verbose = false;
// skip symbolic links
bool skip_symlinks = false;
// check for .nosync if enabled
bool check_nosync = false;
// Configure Private Class Variables
// inotify file descriptor
private int fd;
// map every inotify watch descriptor to its directory
private string[int] wdToDirName;
// map the inotify cookies of move_from events to their path
private string[int] cookieToPath;
// buffer to receive the inotify events
private void[] buffer;
// Configure function delegates
void delegate(string path) onDirCreated;
void delegate(string path) onFileChanged;
void delegate(string path) onDelete;
void delegate(string from, string to) onMove;
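// Illustrative wiring: the sync engine assigns all four delegates before initialise(),
// which asserts that they are set, e.g.:
//   monitor.onFileChanged = (string path) { /* queue 'path' for upload */ };
//   monitor.onMove = (string from, string to) { /* rename the remote item */ };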
// Configure the class variable to consume the application configuration including selective sync
this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
this.appConfig = appConfig;
this.selectiveSync = selectiveSync;
}
// Initialise the monitor class
void initialise() {
// Configure the variables
skip_symlinks = appConfig.getValueBool("skip_symlinks");
check_nosync = appConfig.getValueBool("check_nosync");
if (appConfig.getValueLong("verbose") > 0) {
verbose = true;
}
assert(onDirCreated && onFileChanged && onDelete && onMove);
fd = inotify_init();
if (fd < 0) throw new MonitorException("inotify_init failed");
if (!buffer) buffer = new void[4096];
// from which point do we start watching for changes?
string monitorPath;
if (appConfig.getValueString("single_directory") != ""){
// single directory in use, monitor only this path
monitorPath = "./" ~ appConfig.getValueString("single_directory");
} else {
// default
monitorPath = ".";
}
addRecursive(monitorPath);
}
// Shutdown the monitor class
void shutdown() {
if (fd > 0) close(fd);
wdToDirName = null;
}
// Recursively add this path to be monitored
private void addRecursive(string dirname) {
// skip non existing/disappeared items
if (!exists(dirname)) {
log.vlog("Not adding non-existing/disappeared directory: ", dirname);
return;
}
// Skip the monitoring of any user filtered items
if (dirname != ".") {
// Is the directory name a match to a skip_dir entry?
// The path that needs to be checked needs to include the '/'
// This is because if the user has specified an exclusive path in skip_dir, e.g. '/path', that is what must be matched
if (isDir(dirname)) {
if (selectiveSync.isDirNameExcluded(dirname.strip('.'))) {
// dont add a watch for this item
log.vdebug("Skipping monitoring due to skip_dir match: ", dirname);
return;
}
}
if (isFile(dirname)) {
// Is the filename a match to a skip_file entry?
// The path that needs to be checked needs to include the '/'
// This is because, if the user has specified an exclusive path in skip_file ('/path/file'), that is what must be matched
if (selectiveSync.isFileNameExcluded(dirname.strip('.'))) {
// don't add a watch for this item
log.vdebug("Skipping monitoring due to skip_file match: ", dirname);
return;
}
}
// is the path excluded by sync_list?
if (selectiveSync.isPathExcludedViaSyncList(buildNormalizedPath(dirname))) {
// don't add a watch for this item
log.vdebug("Skipping monitoring due to sync_list match: ", dirname);
return;
}
}
// skip symlinks if configured
if (isSymlink(dirname)) {
// if config says so we skip all symlinked items
if (skip_symlinks) {
// don't add a watch for this directory
return;
}
}
// Do we need to check for .nosync? Only if check_nosync is true
if (check_nosync) {
if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) {
log.vlog("Skipping watching path - .nosync found & --check-for-nosync enabled: ", buildNormalizedPath(dirname));
return;
}
}
// passed all potential exclusions
// add inotify watch for this path / directory / file
log.vdebug("Calling add() for this dirname: ", dirname);
add(dirname);
// if this is a directory, recursively add this path
if (isDir(dirname)) {
// try and get all the directory entities for this path
try {
auto pathList = dirEntries(dirname, SpanMode.shallow, false);
foreach(DirEntry entry; pathList) {
if (entry.isDir) {
log.vdebug("Calling addRecursive() for this directory: ", entry.name);
addRecursive(entry.name);
}
}
// catch any error which is generated
} catch (std.file.FileException e) {
// Standard filesystem error
displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
return;
} catch (Exception e) {
// Issue #1154 handling
// Need to check for 'Failed to stat file' in the error message
if (canFind(e.msg, "Failed to stat file")) {
// File system access issue
log.error("ERROR: The local file system returned an error with the following message:");
log.error(" Error Message: ", e.msg);
log.error("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file is preventing this application to read it");
log.error("\nFATAL: Exiting application to avoid deleting data due to local file system access issues\n");
// Must exit here
exit(-1);
} else {
// some other error
displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
return;
}
}
}
}
// Add this path to be monitored
private void add(string pathname) {
int wd = inotify_add_watch(fd, toStringz(pathname), mask);
if (wd < 0) {
if (errno() == ENOSPC) {
// Get the current value
ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/user/max_inotify_watches")));
log.log("The user limit on the total number of inotify watches has been reached.");
log.log("Your current limit of inotify watches is: ", maxInotifyWatches);
log.log("It is recommended that you change the max number of inotify watches to at least double your existing value.");
log.log("To change the current max number of watches to " , (maxInotifyWatches * 2) , " run:");
log.log("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=", (maxInotifyWatches * 2));
}
if (errno() == EACCES) {
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
// no misleading output that we could not add a watch due to permission denied
return;
} else {
log.vlog("WARNING: inotify_add_watch failed - permission denied: ", pathname);
return;
}
}
// Flag any other errors
log.error("ERROR: inotify_add_watch failed: ", pathname);
return;
}
// Add path to inotify watch - required regardless of whether this is a '.folder' or 'folder'
wdToDirName[wd] = buildNormalizedPath(pathname) ~ "/";
log.vdebug("inotify_add_watch successfully added for: ", pathname);
// Do we log that we are monitoring this directory?
if (isDir(pathname)) {
// This is a directory
// is the path excluded if skip_dotfiles is configured and the path is a .folder?
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
// no misleading output that we are monitoring this directory
return;
}
// Log that this directory is being monitored
log.vlog("Monitoring directory: ", pathname);
}
}
// Remove a watch descriptor
private void remove(int wd) {
assert(wd in wdToDirName);
int ret = inotify_rm_watch(fd, wd);
if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
log.vlog("Monitored directory removed: ", wdToDirName[wd]);
wdToDirName.remove(wd);
}
// Remove the watch descriptors associated to the given path
private void remove(const(char)[] path) {
path ~= "/";
foreach (wd, dirname; wdToDirName) {
if (dirname.startsWith(path)) {
int ret = inotify_rm_watch(fd, wd);
if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
wdToDirName.remove(wd);
log.vlog("Monitored directory removed: ", dirname);
}
}
}
// Return the file path from an inotify event
private string getPath(const(inotify_event)* event) {
string path = wdToDirName[event.wd];
if (event.len > 0) path ~= fromStringz(event.name.ptr);
log.vdebug("inotify path event for: ", path);
return path;
}
// Update
void update(bool useCallbacks = true) {
pollfd fds = {
fd: fd,
events: POLLIN
};
while (true) {
int ret = poll(&fds, 1, 0);
if (ret == -1) throw new MonitorException("poll failed");
else if (ret == 0) break; // no events available
size_t length = read(fd, buffer.ptr, buffer.length);
if (length == -1) throw new MonitorException("read failed");
int i = 0;
while (i < length) {
inotify_event *event = cast(inotify_event*) &buffer[i];
string path;
string evalPath;
// inotify event debug
log.vdebug("inotify event wd: ", event.wd);
log.vdebug("inotify event mask: ", event.mask);
log.vdebug("inotify event cookie: ", event.cookie);
log.vdebug("inotify event len: ", event.len);
log.vdebug("inotify event name: ", event.name);
if (event.mask & IN_ACCESS) log.vdebug("inotify event flag: IN_ACCESS");
if (event.mask & IN_MODIFY) log.vdebug("inotify event flag: IN_MODIFY");
if (event.mask & IN_ATTRIB) log.vdebug("inotify event flag: IN_ATTRIB");
if (event.mask & IN_CLOSE_WRITE) log.vdebug("inotify event flag: IN_CLOSE_WRITE");
if (event.mask & IN_CLOSE_NOWRITE) log.vdebug("inotify event flag: IN_CLOSE_NOWRITE");
if (event.mask & IN_MOVED_FROM) log.vdebug("inotify event flag: IN_MOVED_FROM");
if (event.mask & IN_MOVED_TO) log.vdebug("inotify event flag: IN_MOVED_TO");
if (event.mask & IN_CREATE) log.vdebug("inotify event flag: IN_CREATE");
if (event.mask & IN_DELETE) log.vdebug("inotify event flag: IN_DELETE");
if (event.mask & IN_DELETE_SELF) log.vdebug("inotify event flag: IN_DELETE_SELF");
if (event.mask & IN_MOVE_SELF) log.vdebug("inotify event flag: IN_MOVE_SELF");
if (event.mask & IN_UNMOUNT) log.vdebug("inotify event flag: IN_UNMOUNT");
if (event.mask & IN_Q_OVERFLOW) log.vdebug("inotify event flag: IN_Q_OVERFLOW");
if (event.mask & IN_IGNORED) log.vdebug("inotify event flag: IN_IGNORED");
if (event.mask & IN_CLOSE) log.vdebug("inotify event flag: IN_CLOSE");
if (event.mask & IN_MOVE) log.vdebug("inotify event flag: IN_MOVE");
if (event.mask & IN_ONLYDIR) log.vdebug("inotify event flag: IN_ONLYDIR");
if (event.mask & IN_DONT_FOLLOW) log.vdebug("inotify event flag: IN_DONT_FOLLOW");
if (event.mask & IN_EXCL_UNLINK) log.vdebug("inotify event flag: IN_EXCL_UNLINK");
if (event.mask & IN_MASK_ADD) log.vdebug("inotify event flag: IN_MASK_ADD");
if (event.mask & IN_ISDIR) log.vdebug("inotify event flag: IN_ISDIR");
if (event.mask & IN_ONESHOT) log.vdebug("inotify event flag: IN_ONESHOT");
if (event.mask & IN_ALL_EVENTS) log.vdebug("inotify event flag: IN_ALL_EVENTS");
// skip events that need to be ignored
if (event.mask & IN_IGNORED) {
// forget the directory associated to the watch descriptor
wdToDirName.remove(event.wd);
goto skip;
} else if (event.mask & IN_Q_OVERFLOW) {
throw new MonitorException("Inotify overflow, events missing");
}
// if the event is not to be ignored, obtain path
path = getPath(event);
// configure the skip_dir & skip skip_file comparison item
evalPath = path.strip('.');
// Skip events that should be excluded based on application configuration
// We can't use isDir or isFile, as this information is missing from the inotify event itself
// Attempting to query it therefore causes a segfault - https://github.com/abraunegg/onedrive/issues/995
// Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions
// Directory events should only be compared against skip_dir and file events should only be compared against skip_file
if (event.mask & IN_ISDIR) {
// The event in question contains the IN_ISDIR event mask, so this is highly likely an event on a directory
// This is because, if the user has specified an exclusive path in skip_dir ('/path'), that is what must be matched
if (selectiveSync.isDirNameExcluded(evalPath)) {
// The path to evaluate matches a path that the user has configured to skip
goto skip;
}
} else {
// The event in question is missing the IN_ISDIR event mask, so this is highly likely an event on a file
// This is because, if the user has specified an exclusive path in skip_file ('/path/file'), that is what must be matched
if (selectiveSync.isFileNameExcluded(evalPath)) {
// The path to evaluate matches a file that the user has configured to skip
goto skip;
}
}
// is the path excluded via sync_list?
if (selectiveSync.isPathExcludedViaSyncList(path)) {
// The path to evaluate matches a directory or file that the user has configured not to include in the sync
goto skip;
}
// handle the inotify events
if (event.mask & IN_MOVED_FROM) {
log.vdebug("event IN_MOVED_FROM: ", path);
cookieToPath[event.cookie] = path;
} else if (event.mask & IN_MOVED_TO) {
log.vdebug("event IN_MOVED_TO: ", path);
if (event.mask & IN_ISDIR) addRecursive(path);
auto from = event.cookie in cookieToPath;
if (from) {
cookieToPath.remove(event.cookie);
if (useCallbacks) onMove(*from, path);
} else {
// item moved from the outside
if (event.mask & IN_ISDIR) {
if (useCallbacks) onDirCreated(path);
} else {
if (useCallbacks) onFileChanged(path);
}
}
} else if (event.mask & IN_CREATE) {
log.vdebug("event IN_CREATE: ", path);
if (event.mask & IN_ISDIR) {
addRecursive(path);
if (useCallbacks) onDirCreated(path);
}
} else if (event.mask & IN_DELETE) {
log.vdebug("event IN_DELETE: ", path);
if (useCallbacks) onDelete(path);
} else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) {
log.vdebug("event IN_CLOSE_WRITE and ...: ", path);
if (useCallbacks) onFileChanged(path);
} else {
log.vdebug("event unhandled: ", path);
assert(0);
}
skip:
i += inotify_event.sizeof + event.len;
}
// assume that the items moved outside the watched directory have been deleted
foreach (cookie, path; cookieToPath) {
log.vdebug("deleting (post loop): ", path);
if (useCallbacks) onDelete(path);
remove(path);
cookieToPath.remove(cookie);
}
// Debug Log that all inotify events are flushed
log.vdebug("inotify events flushed");
}
}
}
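// A hedged usage sketch (illustrative only, not part of the upstream file):
// shows how a caller is expected to wire up the Monitor class above. All four
// delegates must be assigned before initialise() is called (the assert in
// initialise() enforces this); appConfig and selectiveSync are assumed to be
// already-initialised application objects.
version(none) {
void exampleMonitorLoop(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
import core.thread : Thread;
import core.time : dur;
auto fsMonitor = new Monitor(appConfig, selectiveSync);
fsMonitor.onDirCreated = (string path) { log.log("new directory: ", path); };
fsMonitor.onFileChanged = (string path) { log.log("changed file: ", path); };
fsMonitor.onDelete = (string path) { log.log("deleted item: ", path); };
fsMonitor.onMove = (string from, string to) { log.log("moved: ", from, " -> ", to); };
fsMonitor.initialise();
scope(exit) fsMonitor.shutdown();
while (true) {
// drain any pending inotify events, firing the callbacks above
fsMonitor.update();
Thread.sleep(dur!"seconds"(1));
}
}
}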

10
src/notifications/README Normal file

@ -0,0 +1,10 @@
The files in this directory have been obtained from the following places:
dnotify.d
https://github.com/Dav1dde/dnotify/blob/master/dnotify.d
License: Creative Commons Zero 1.0 Universal
see https://github.com/Dav1dde/dnotify/blob/master/LICENSE
notify.d
https://github.com/D-Programming-Deimos/libnotify/blob/master/deimos/notify/notify.d
License: GNU Lesser General Public License (LGPL) 2.1 or later, see file

323
src/notifications/dnotify.d Normal file

@ -0,0 +1,323 @@
module dnotify;
private {
import std.string : toStringz;
import std.conv : to;
import std.traits : isPointer, isArray;
import std.variant : Variant;
import std.array : appender;
import deimos.notify.notify;
}
public import deimos.notify.notify : NOTIFY_EXPIRES_DEFAULT, NOTIFY_EXPIRES_NEVER,
NotifyUrgency;
version(NoPragma) {
} else {
pragma(lib, "notify");
pragma(lib, "gmodule");
pragma(lib, "glib-2.0");
}
extern (C) {
private void g_free(void* mem);
private void g_list_free(GList* glist);
}
version(NoGdk) {
} else {
version(NoPragma) {
} else {
pragma(lib, "gdk_pixbuf");
}
private:
extern (C) {
GdkPixbuf* gdk_pixbuf_new_from_file(const(char)* filename, GError **error);
}
}
class NotificationError : Exception {
string message;
GError* gerror;
this(GError* gerror) {
this.message = to!(string)(gerror.message);
this.gerror = gerror;
super(this.message);
}
this(string message) {
this.message = message;
super(message);
}
}
bool check_availability() {
// notify_init might return without dbus server actually started
// try to check for running dbus server
char **ret_name;
char **ret_vendor;
char **ret_version;
char **ret_spec_version;
bool ret;
try {
return notify_get_server_info(ret_name, ret_vendor, ret_version, ret_spec_version);
} catch (NotificationError e) {
throw new NotificationError("Cannot find dbus server!");
}
}
void init(in char[] name) {
notify_init(name.toStringz());
}
alias notify_is_initted is_initted;
alias notify_uninit uninit;
static this() {
init(__FILE__);
}
static ~this() {
uninit();
}
string get_app_name() {
return to!(string)(notify_get_app_name());
}
void set_app_name(in char[] app_name) {
notify_set_app_name(app_name.toStringz());
}
string[] get_server_caps() {
auto result = appender!(string[])();
GList* list = notify_get_server_caps();
if(list !is null) {
for(GList* c = list; c !is null; c = c.next) {
result.put(to!(string)(cast(char*)c.data));
g_free(c.data);
}
g_list_free(list);
}
return result.data;
}
struct ServerInfo {
string name;
string vendor;
string version_;
string spec_version;
}
ServerInfo get_server_info() {
char* name;
char* vendor;
char* version_;
char* spec_version;
notify_get_server_info(&name, &vendor, &version_, &spec_version);
scope(exit) {
g_free(name);
g_free(vendor);
g_free(version_);
g_free(spec_version);
}
return ServerInfo(to!string(name), to!string(vendor), to!string(version_), to!string(spec_version));
}
struct Action {
const(char[]) id;
const(char[]) label;
NotifyActionCallback callback;
void* user_ptr;
}
class Notification {
NotifyNotification* notify_notification;
const(char)[] summary;
const(char)[] body_;
const(char)[] icon;
bool closed = true;
private int _timeout = NOTIFY_EXPIRES_DEFAULT;
const(char)[] _category;
NotifyUrgency _urgency;
GdkPixbuf* _image;
Variant[const(char)[]] _hints;
const(char)[] _app_name;
Action[] _actions;
this(in char[] summary, in char[] body_, in char[] icon="")
in { assert(is_initted(), "call dnotify.init() before using Notification"); }
do {
this.summary = summary;
this.body_ = body_;
this.icon = icon;
notify_notification = notify_notification_new(summary.toStringz(), body_.toStringz(), icon.toStringz());
}
bool update(in char[] summary, in char[] body_, in char[] icon="") {
this.summary = summary;
this.body_ = body_;
this.icon = icon;
return notify_notification_update(notify_notification, summary.toStringz(), body_.toStringz(), icon.toStringz());
}
void show() {
GError* ge;
if(!notify_notification_show(notify_notification, &ge)) {
throw new NotificationError(ge);
}
}
@property int timeout() { return _timeout; }
@property void timeout(int timeout) {
this._timeout = timeout;
notify_notification_set_timeout(notify_notification, timeout);
}
@property const(char[]) category() { return _category; }
@property void category(in char[] category) {
this._category = category;
notify_notification_set_category(notify_notification, category.toStringz());
}
@property NotifyUrgency urgency() { return _urgency; }
@property void urgency(NotifyUrgency urgency) {
this._urgency = urgency;
notify_notification_set_urgency(notify_notification, urgency);
}
void set_image(GdkPixbuf* pixbuf) {
notify_notification_set_image_from_pixbuf(notify_notification, pixbuf);
//_image = pixbuf;
}
version(NoGdk) {
} else {
void set_image(in char[] filename) {
GError* ge;
// TODO: free pixbuf
GdkPixbuf* pixbuf = gdk_pixbuf_new_from_file(filename.toStringz(), &ge);
if(pixbuf is null) {
if(ge is null) {
throw new NotificationError("Unable to load file: " ~ filename.idup);
} else {
throw new NotificationError(ge);
}
}
assert(notify_notification !is null);
notify_notification_set_image_from_pixbuf(notify_notification, pixbuf); // TODO: fix segfault
//_image = pixbuf;
}
}
@property GdkPixbuf* image() { return _image; }
// using deprecated set_hint_* functions (GVariant is an opaque structure, which needs the glib)
void set_hint(T)(in char[] key, T value) {
static if(is(T == int)) {
notify_notification_set_hint_int32(notify_notification, key, value);
} else static if(is(T == uint)) {
notify_notification_set_hint_uint32(notify_notification, key, value);
} else static if(is(T == double)) {
notify_notification_set_hint_double(notify_notification, key, value);
} else static if(is(T : const(char)[])) {
notify_notification_set_hint_string(notify_notification, key, value.toStringz());
} else static if(is(T == ubyte)) {
notify_notification_set_hint_byte(notify_notification, key, value);
} else static if(is(T == ubyte[])) {
notify_notification_set_hint_byte_array(notify_notification, key, value.ptr, value.length);
} else {
static assert(false, "unsupported value for Notification.set_hint");
}
_hints[key] = Variant(value);
}
// unset hint?
Variant get_hint(in char[] key) {
return _hints[key];
}
@property const(char)[] app_name() { return _app_name; }
@property void app_name(in char[] name) {
this._app_name = name;
notify_notification_set_app_name(notify_notification, name.toStringz());
}
void add_action(T)(in char[] action, in char[] label, NotifyActionCallback callback, T user_data) {
static if(isPointer!T) {
void* user_ptr = cast(void*)user_data;
} else static if(isArray!T) {
void* user_ptr = cast(void*)user_data.ptr;
} else {
void* user_ptr = cast(void*)&user_data;
}
notify_notification_add_action(notify_notification, action.toStringz(), label.toStringz(),
callback, user_ptr, null);
_actions ~= Action(action, label, callback, user_ptr);
}
void add_action()(Action action) {
notify_notification_add_action(notify_notification, action.id.toStringz(), action.label.toStringz(),
action.callback, action.user_ptr, null);
_actions ~= action;
}
@property Action[] actions() { return _actions; }
void clear_actions() {
notify_notification_clear_actions(notify_notification);
}
void close() {
GError* ge;
if(!notify_notification_close(notify_notification, &ge)) {
throw new NotificationError(ge);
}
}
@property int closed_reason() {
return notify_notification_get_closed_reason(notify_notification);
}
}
version(TestMain) {
import std.stdio;
void main() {
writeln(get_app_name());
set_app_name("bla");
writeln(get_app_name());
writeln(get_server_caps());
writeln(get_server_info());
auto n = new Notification("foo", "bar", "notification-message-im");
n.timeout = 3;
n.show();
}
}

195
src/notifications/notify.d Normal file

@ -0,0 +1,195 @@
/**
* Copyright (C) 2004-2006 Christian Hammond
* Copyright (C) 2010 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
module deimos.notify.notify;
enum NOTIFY_VERSION_MAJOR = 0;
enum NOTIFY_VERSION_MINOR = 7;
enum NOTIFY_VERSION_MICRO = 5;
template NOTIFY_CHECK_VERSION(int major, int minor, int micro) {
enum NOTIFY_CHECK_VERSION = ((NOTIFY_VERSION_MAJOR > major) ||
(NOTIFY_VERSION_MAJOR == major && NOTIFY_VERSION_MINOR > minor) ||
(NOTIFY_VERSION_MAJOR == major && NOTIFY_VERSION_MINOR == minor &&
NOTIFY_VERSION_MICRO >= micro));
}
alias ulong GType;
alias void function(void*) GFreeFunc;
struct GError {
uint domain;
int code;
char* message;
}
struct GList {
void* data;
GList* next;
GList* prev;
}
// dummies
struct GdkPixbuf {}
struct GObject {}
struct GObjectClass {}
struct GVariant {}
GType notify_urgency_get_type();
/**
* NOTIFY_EXPIRES_DEFAULT:
*
* The default expiration time on a notification.
*/
enum NOTIFY_EXPIRES_DEFAULT = -1;
/**
* NOTIFY_EXPIRES_NEVER:
*
* The notification never expires. It stays open until closed by the calling API
* or the user.
*/
enum NOTIFY_EXPIRES_NEVER = 0;
// #define NOTIFY_TYPE_NOTIFICATION (notify_notification_get_type ())
// #define NOTIFY_NOTIFICATION(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), NOTIFY_TYPE_NOTIFICATION, NotifyNotification))
// #define NOTIFY_NOTIFICATION_CLASS(k) (G_TYPE_CHECK_CLASS_CAST((k), NOTIFY_TYPE_NOTIFICATION, NotifyNotificationClass))
// #define NOTIFY_IS_NOTIFICATION(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), NOTIFY_TYPE_NOTIFICATION))
// #define NOTIFY_IS_NOTIFICATION_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), NOTIFY_TYPE_NOTIFICATION))
// #define NOTIFY_NOTIFICATION_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), NOTIFY_TYPE_NOTIFICATION, NotifyNotificationClass))
extern (C) {
struct NotifyNotificationPrivate;
struct NotifyNotification {
/*< private >*/
GObject parent_object;
NotifyNotificationPrivate *priv;
}
struct NotifyNotificationClass {
GObjectClass parent_class;
/* Signals */
void function(NotifyNotification *notification) closed;
}
/**
* NotifyUrgency:
* @NOTIFY_URGENCY_LOW: Low urgency. Used for unimportant notifications.
* @NOTIFY_URGENCY_NORMAL: Normal urgency. Used for most standard notifications.
* @NOTIFY_URGENCY_CRITICAL: Critical urgency. Used for very important notifications.
*
* The urgency level of the notification.
*/
enum NotifyUrgency {
NOTIFY_URGENCY_LOW,
NOTIFY_URGENCY_NORMAL,
NOTIFY_URGENCY_CRITICAL,
}
/**
* NotifyActionCallback:
* @notification:
* @action:
* @user_data:
*
* An action callback function.
*/
alias void function(NotifyNotification* notification, char* action, void* user_data) NotifyActionCallback;
GType notify_notification_get_type();
NotifyNotification* notify_notification_new(const(char)* summary, const(char)* body_, const(char)* icon);
bool notify_notification_update(NotifyNotification* notification, const(char)* summary, const(char)* body_, const(char)* icon);
bool notify_notification_show(NotifyNotification* notification, GError** error);
void notify_notification_set_timeout(NotifyNotification* notification, int timeout);
void notify_notification_set_category(NotifyNotification* notification, const(char)* category);
void notify_notification_set_urgency(NotifyNotification* notification, NotifyUrgency urgency);
void notify_notification_set_image_from_pixbuf(NotifyNotification* notification, GdkPixbuf* pixbuf);
void notify_notification_set_icon_from_pixbuf(NotifyNotification* notification, GdkPixbuf* icon);
void notify_notification_set_hint_int32(NotifyNotification* notification, const(char)* key, int value);
void notify_notification_set_hint_uint32(NotifyNotification* notification, const(char)* key, uint value);
void notify_notification_set_hint_double(NotifyNotification* notification, const(char)* key, double value);
void notify_notification_set_hint_string(NotifyNotification* notification, const(char)* key, const(char)* value);
void notify_notification_set_hint_byte(NotifyNotification* notification, const(char)* key, ubyte value);
void notify_notification_set_hint_byte_array(NotifyNotification* notification, const(char)* key, const(ubyte)* value, ulong len);
void notify_notification_set_hint(NotifyNotification* notification, const(char)* key, GVariant* value);
void notify_notification_set_app_name(NotifyNotification* notification, const(char)* app_name);
void notify_notification_clear_hints(NotifyNotification* notification);
void notify_notification_add_action(NotifyNotification* notification, const(char)* action, const(char)* label,
NotifyActionCallback callback, void* user_data, GFreeFunc free_func);
void notify_notification_clear_actions(NotifyNotification* notification);
bool notify_notification_close(NotifyNotification* notification, GError** error);
int notify_notification_get_closed_reason(const NotifyNotification* notification);
bool notify_init(const(char)* app_name);
void notify_uninit();
bool notify_is_initted();
const(char)* notify_get_app_name();
void notify_set_app_name(const(char)* app_name);
GList *notify_get_server_caps();
bool notify_get_server_info(char** ret_name, char** ret_vendor, char** ret_version, char** ret_spec_version);
}
version(MainTest) {
import std.string;
void main() {
notify_init("test".toStringz());
auto n = notify_notification_new("summary".toStringz(), "body".toStringz(), "none".toStringz());
GError* ge;
notify_notification_show(n, &ge);
scope(success) notify_uninit();
}
}

1504
src/onedrive.d Normal file

File diff suppressed because it is too large

160
src/progress.d Normal file

@ -0,0 +1,160 @@
// What is this module called?
module progress;
// What does this module require to function?
import std.stdio;
import std.range;
import std.format;
import std.datetime;
import core.sys.posix.unistd;
import core.sys.posix.sys.ioctl;
// What other modules that we have created do we need to import?
class Progress
{
private:
immutable static size_t default_width = 80;
size_t max_width = 40;
size_t width = default_width;
ulong start_time;
string caption = "Progress";
size_t iterations;
size_t counter;
size_t getTerminalWidth() {
size_t column = default_width;
version (CRuntime_Musl) {
} else version(Android) {
} else {
winsize ws;
if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0) {
column = ws.ws_col;
}
}
return column;
}
void clear() {
write("\r");
for(auto i = 0; i < width; i++) write(" ");
write("\r");
}
int calc_eta() {
immutable auto ratio = cast(double)counter / iterations;
auto current_time = Clock.currTime.toUnixTime();
auto duration = cast(int)(current_time - start_time);
int hours, minutes, seconds;
double elapsed = (current_time - start_time);
int eta_sec = cast(int)((elapsed / ratio) - elapsed);
// Return an ETA or Duration?
if (eta_sec != 0){
return eta_sec;
} else {
return duration;
}
}
string progressbarText(string header_text, string footer_text) {
immutable auto ratio = cast(double)counter / iterations;
string result = "";
double bar_length = width - header_text.length - footer_text.length;
if(bar_length > max_width && max_width > 0) {
bar_length = max_width;
}
size_t i = 0;
for(; i < ratio * bar_length; i++) result ~= "o";
for(; i < bar_length; i++) result ~= " ";
return header_text ~ result ~ footer_text;
}
void print() {
immutable auto ratio = cast(double)counter / iterations;
auto header = appender!string();
auto footer = appender!string();
header.formattedWrite("%s %3d%% |", caption, cast(int)(ratio * 100));
if(counter <= 0 || ratio == 0.0) {
footer.formattedWrite("| ETA --:--:--:");
} else {
int h, m, s;
dur!"seconds"(calc_eta())
.split!("hours", "minutes", "seconds")(h, m, s);
if (counter != iterations){
footer.formattedWrite("| ETA %02d:%02d:%02d ", h, m, s);
} else {
footer.formattedWrite("| DONE IN %02d:%02d:%02d ", h, m, s);
}
}
write(progressbarText(header.data, footer.data));
}
void update() {
width = getTerminalWidth();
clear();
print();
stdout.flush();
}
public:
this(size_t iterations) {
if(iterations <= 0) iterations = 1;
counter = -1;
this.iterations = iterations;
start_time = Clock.currTime.toUnixTime;
}
@property {
string title() { return caption; }
string title(string text) { return caption = text; }
}
@property {
size_t count() { return counter; }
size_t count(size_t val) {
if(val > iterations) val = iterations;
return counter = val;
}
}
@property {
size_t maxWidth() { return max_width; }
size_t maxWidth(size_t w) {
return max_width = w;
}
}
void reset() {
counter = -1;
start_time = Clock.currTime.toUnixTime;
}
void next() {
counter++;
if(counter > iterations) counter = iterations;
update();
}
}
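// A hedged usage sketch (illustrative only, not part of the upstream file):
// drives the Progress bar defined above across 100 notional work items.
unittest {
auto uploadProgress = new Progress(100);
uploadProgress.title = "Uploading";
foreach (i; 0 .. 100) {
// each call advances the counter, redraws the bar and recomputes the ETA
uploadProgress.next();
}
writeln(); // leave the finished bar on its own line
}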

78
src/qxor.d Normal file

@ -0,0 +1,78 @@
// What is this module called?
module qxor;
// What does this module require to function?
import std.algorithm;
import std.digest;
// Implementation of the QuickXorHash algorithm in D
// https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md
struct QuickXor
{
private enum int widthInBits = 160;
private enum size_t lengthInBytes = (widthInBits - 1) / 8 + 1;
private enum size_t lengthInQWords = (widthInBits - 1) / 64 + 1;
private enum int bitsInLastCell = widthInBits % 64; // 32
private enum int shift = 11;
private ulong[lengthInQWords] _data;
private ulong _lengthSoFar;
private int _shiftSoFar;
nothrow @safe void put(scope const(ubyte)[] array...)
{
int vectorArrayIndex = _shiftSoFar / 64;
int vectorOffset = _shiftSoFar % 64;
immutable size_t iterations = min(array.length, widthInBits);
for (size_t i = 0; i < iterations; i++) {
immutable bool isLastCell = vectorArrayIndex == _data.length - 1;
immutable int bitsInVectorCell = isLastCell ? bitsInLastCell : 64;
if (vectorOffset <= bitsInVectorCell - 8) {
for (size_t j = i; j < array.length; j += widthInBits) {
_data[vectorArrayIndex] ^= cast(ulong) array[j] << vectorOffset;
}
} else {
int index1 = vectorArrayIndex;
int index2 = isLastCell ? 0 : (vectorArrayIndex + 1);
ubyte low = cast(ubyte) (bitsInVectorCell - vectorOffset);
ubyte xoredByte = 0;
for (size_t j = i; j < array.length; j += widthInBits) {
xoredByte ^= array[j];
}
_data[index1] ^= cast(ulong) xoredByte << vectorOffset;
_data[index2] ^= cast(ulong) xoredByte >> low;
}
vectorOffset += shift;
if (vectorOffset >= bitsInVectorCell) {
vectorArrayIndex = isLastCell ? 0 : vectorArrayIndex + 1;
vectorOffset -= bitsInVectorCell;
}
}
_shiftSoFar = cast(int) (_shiftSoFar + shift * (array.length % widthInBits)) % widthInBits;
_lengthSoFar += array.length;
}
nothrow @safe void start()
{
_data = _data.init;
_shiftSoFar = 0;
_lengthSoFar = 0;
}
nothrow @trusted ubyte[lengthInBytes] finish()
{
ubyte[lengthInBytes] tmp;
tmp[0 .. lengthInBytes] = (cast(ubyte*) _data)[0 .. lengthInBytes];
for (size_t i = 0; i < 8; i++) {
tmp[lengthInBytes - 8 + i] ^= (cast(ubyte*) &_lengthSoFar)[i];
}
return tmp;
}
}
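// A hedged usage sketch (illustrative only, not part of the upstream file):
// QuickXor follows the std.digest start/put/finish convention, so hashing a
// byte buffer and base64-encoding the 20-byte result - the form OneDrive
// reports as quickXorHash - looks like this.
unittest {
import std.base64 : Base64;
QuickXor hasher;
hasher.start();
hasher.put(cast(const(ubyte)[]) "hello world");
ubyte[20] rawHash = hasher.finish(); // 160-bit digest = 20 bytes
auto encoded = Base64.encode(rawHash[]);
assert(encoded.length == 28); // 20 raw bytes -> 28 base64 characters
}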

207
src/sqlite.d Normal file

@ -0,0 +1,207 @@
// What is this module called?
module sqlite;
// What does this module require to function?
import std.stdio;
import etc.c.sqlite3;
import std.string: fromStringz, toStringz;
import core.stdc.stdlib;
import std.conv;
// What other modules that we have created do we need to import?
import log;
extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library
static this() {
if (sqlite3_libversion_number() < 3006019) {
throw new SqliteException("sqlite 3.6.19 or newer is required");
}
}
private string ifromStringz(const(char)* cstr) {
return fromStringz(cstr).dup;
}
class SqliteException: Exception {
@safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null)
{
super(msg, file, line, next);
}
@safe pure nothrow this(string msg, Throwable next, string file = __FILE__, size_t line = __LINE__)
{
super(msg, file, line, next);
}
}
struct Database {
private sqlite3* pDb;
this(const(char)[] filename) {
open(filename);
}
~this() {
close();
}
int db_checkpoint() {
return sqlite3_wal_checkpoint(pDb, null);
}
void dump_open_statements() {
log.log("Dumping open statements: \n");
auto p = sqlite3_next_stmt(pDb, null);
while (p != null) {
log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n");
p = sqlite3_next_stmt(pDb, p);
}
}
void open(const(char)[] filename) {
// https://www.sqlite.org/c3ref/open.html
int rc = sqlite3_open(toStringz(filename), &pDb);
if (rc == SQLITE_CANTOPEN) {
// Database cannot be opened
log.error("\nThe database cannot be opened. Please check the permissions of " ~ filename ~ "\n");
close();
exit(-1);
}
if (rc != SQLITE_OK) {
log.error("\nA database access error occurred: " ~ getErrorMessage() ~ "\n");
close();
exit(-1);
}
sqlite3_extended_result_codes(pDb, 1); // always use extended result codes
}
void exec(const(char)[] sql) {
// https://www.sqlite.org/c3ref/exec.html
int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null);
if (rc != SQLITE_OK) {
log.error("\nA database execution error occurred: "~ getErrorMessage() ~ "\n");
log.error("Please retry your command with --resync to fix any local database corruption issues.\n");
close();
exit(-1);
}
}
int getVersion() {
int userVersion;
extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) {
import core.stdc.stdlib: atoi;
*(cast(int*) user_version) = atoi(*column_text);
return 0;
}
int rc = sqlite3_exec(pDb, "PRAGMA user_version", &callback, &userVersion, null);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(pDb)));
}
return userVersion;
}
string getErrorMessage() {
return ifromStringz(sqlite3_errmsg(pDb));
}
void setVersion(int userVersion) {
import std.conv: to;
exec("PRAGMA user_version=" ~ to!string(userVersion));
}
Statement prepare(const(char)[] zSql) {
Statement s;
// https://www.sqlite.org/c3ref/prepare.html
int rc = sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(pDb)));
}
return s;
}
void close() {
// https://www.sqlite.org/c3ref/close.html
sqlite3_close_v2(pDb);
pDb = null;
}
}
struct Statement {
struct Result {
private sqlite3_stmt* pStmt;
private const(char)[][] row;
private this(sqlite3_stmt* pStmt) {
this.pStmt = pStmt;
step(); // initialize the range
}
@property bool empty() {
return row.length == 0;
}
@property auto front() {
return row;
}
alias step popFront;
void step() {
// https://www.sqlite.org/c3ref/step.html
int rc = sqlite3_step(pStmt);
if (rc == SQLITE_BUSY) {
// Database is locked by another onedrive process
log.error("The database is currently locked by another process - cannot sync");
return;
}
if (rc == SQLITE_DONE) {
row.length = 0;
} else if (rc == SQLITE_ROW) {
// https://www.sqlite.org/c3ref/data_count.html
int count = 0;
count = sqlite3_data_count(pStmt);
row = new const(char)[][count];
foreach (size_t i, ref column; row) {
// https://www.sqlite.org/c3ref/column_blob.html
column = fromStringz(sqlite3_column_text(pStmt, to!int(i)));
}
} else {
string errorMessage = ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)));
log.error("\nA database statement execution error occurred: "~ errorMessage ~ "\n");
log.error("Please retry your command with --resync to fix any local database corruption issues.\n");
exit(-1);
}
}
}
private sqlite3_stmt* pStmt;
~this() {
// https://www.sqlite.org/c3ref/finalize.html
sqlite3_finalize(pStmt);
}
void bind(int index, const(char)[] value) {
reset();
// https://www.sqlite.org/c3ref/bind_blob.html
int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
}
}
Result exec() {
reset();
return Result(pStmt);
}
private void reset() {
// https://www.sqlite.org/c3ref/reset.html
int rc = sqlite3_reset(pStmt);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
}
}
}
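// A hedged usage sketch (illustrative only, not part of the upstream file):
// exercises Database and Statement as defined above against a throwaway
// in-memory database (':memory:' is standard SQLite syntax for one).
version(none) {
void exampleSqliteUsage() {
auto db = Database(":memory:");
db.exec("CREATE TABLE item (id TEXT PRIMARY KEY, name TEXT)");
auto insertStmt = db.prepare("INSERT INTO item (id, name) VALUES (?1, ?2)");
insertStmt.bind(1, "abc123");
insertStmt.bind(2, "readme.md");
insertStmt.exec();
auto query = db.prepare("SELECT name FROM item WHERE id = ?1");
query.bind(1, "abc123");
// Result is an input range; each row is an array of column text values
foreach (row; query.exec()) {
writeln("name: ", row[0]);
}
}
}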

6535
src/sync.d Normal file

File diff suppressed because it is too large

784
src/util.d Normal file

@ -0,0 +1,784 @@
// What is this module called?
module util;
// What does this module require to function?
import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit;
import std.base64;
import std.conv;
import std.digest.crc;
import std.digest.sha;
import std.net.curl;
import std.datetime;
import std.file;
import std.path;
import std.regex;
import std.socket;
import std.stdio;
import std.string;
import std.algorithm;
import std.uri;
import std.json;
import std.traits;
import core.stdc.stdlib;
import core.thread;
// What other modules that we have created do we need to import?
import log;
import config;
import qxor;
import curlEngine;
// module variables
shared string deviceName;
static this() {
deviceName = Socket.hostName;
}
// Creates a safe backup of the given item, and only performs the backup if not in a --dry-run scenario
void safeBackup(const(char)[] path, bool dryRun) {
auto ext = extension(path);
auto newPath = path.chomp(ext) ~ "-" ~ deviceName;
if (exists(newPath ~ ext)) {
int n = 2;
char[] newPath2;
do {
newPath2 = newPath ~ "-" ~ n.to!string;
n++;
} while (exists(newPath2 ~ ext));
newPath = newPath2;
}
newPath ~= ext;
// Perform the backup
log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath);
if (!dryRun) {
rename(path, newPath);
} else {
log.vdebug("DRY-RUN: Skipping local file backup");
}
}
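// Worked example for safeBackup above (illustrative comment, not upstream):
// on a host named 'mylaptop', backing up 'notes.txt' produces
// 'notes-mylaptop.txt'; if that already exists, 'notes-mylaptop-2.txt', then '-3', and so on.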
// Renames the given item, and only performs the rename if not in a --dry-run scenario
void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) {
// Perform the rename
if (!dryRun) {
log.vdebug("Calling rename(oldPath, newPath)");
// rename physical path on disk
rename(oldPath, newPath);
} else {
log.vdebug("DRY-RUN: Skipping local file rename");
}
}
// deletes the specified file without throwing an exception if it does not exist
void safeRemove(const(char)[] path) {
if (exists(path)) remove(path);
}
// returns the CRC32 hex string of a file
string computeCRC32(string path) {
CRC32 crc;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
crc.put(data);
}
return crc.finish().toHexString().dup;
}
// returns the SHA1 hash hex string of a file
string computeSha1Hash(string path) {
SHA1 sha;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
sha.put(data);
}
return sha.finish().toHexString().dup;
}
// returns the quickXorHash base64 string of a file
string computeQuickXorHash(string path) {
QuickXor qxor;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
qxor.put(data);
}
return Base64.encode(qxor.finish());
}
// returns the SHA256 hex string of a file
string computeSHA256Hash(string path) {
SHA256 sha256;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
sha256.put(data);
}
return sha256.finish().toHexString().dup;
}
// converts wildcards (*, ?) to regex
Regex!char wild2regex(const(char)[] pattern) {
string str;
str.reserve(pattern.length + 2);
str ~= "^";
foreach (c; pattern) {
switch (c) {
case '*':
str ~= "[^/]*";
break;
case '.':
str ~= "\\.";
break;
case '?':
str ~= "[^/]";
break;
case '|':
str ~= "$|^";
break;
case '+':
str ~= "\\+";
break;
case ' ':
str ~= "\\s+";
break;
case '/':
str ~= "\\/";
break;
case '(':
str ~= "\\(";
break;
case ')':
str ~= "\\)";
break;
default:
str ~= c;
break;
}
}
str ~= "$";
return regex(str, "i");
}
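// A hedged sanity-check sketch (illustrative only, not part of the upstream
// file): '*' matches within a single path segment, '?' matches one non-'/'
// character, and '|' separates alternative patterns, so "*.txt|backup?"
// becomes the case-insensitive regex ^[^/]*\.txt$|^backup[^/]$.
unittest {
auto re = wild2regex("*.txt|backup?");
assert(match("notes.txt", re));
assert(match("backup1", re));
assert(!match("dir/notes.txt", re)); // '*' does not cross '/' boundaries
}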
// Test Internet access to Microsoft OneDrive
bool testInternetReachability(ApplicationConfig appConfig) {
// Use preconfigured object with all the correct http values assigned
auto curlEngine = new CurlEngine();
curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version"));
// Configure the remaining items required
// URL to use
curlEngine.http.url = "https://login.microsoftonline.com";
// HTTP connection test method
curlEngine.http.method = HTTP.Method.head;
// Attempt to contact the Microsoft Online Service
try {
log.vdebug("Attempting to contact Microsoft OneDrive Login Service");
curlEngine.http.perform();
log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Login Service");
curlEngine.http.shutdown();
// Free object and memory
object.destroy(curlEngine);
return true;
} catch (SocketException e) {
// Socket issue
log.vdebug("HTTP Socket Issue");
log.error("Cannot connect to Microsoft OneDrive Login Service - Socket Issue");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
return false;
} catch (CurlException e) {
// No network connection to OneDrive Service
log.vdebug("No Network Connection");
log.error("Cannot connect to Microsoft OneDrive Login Service - Network Connection Issue");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
return false;
}
}
// Retry Internet access test to Microsoft OneDrive
bool retryInternetConnectivtyTest(ApplicationConfig appConfig) {
// re-try network connection to OneDrive
// https://github.com/abraunegg/onedrive/issues/1184
// Back off & retry with incremental delay
int retryCount = 10000;
int retryAttempts = 1;
int backoffInterval = 1;
int maxBackoffInterval = 3600;
bool onlineRetry = false;
bool retrySuccess = false;
while (!retrySuccess){
// retry to access OneDrive API
backoffInterval++;
int thisBackOffInterval = retryAttempts*backoffInterval;
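// Illustrative note (not upstream): with retryAttempts starting at 1 and
// backoffInterval pre-incremented on each pass, the delay schedule grows
// roughly quadratically - 2s, 6s, 12s, 20s, 30s, ... - until it is capped
// at maxBackoffInterval (3600 seconds).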
log.vdebug(" Retry Attempt: ", retryAttempts);
if (thisBackOffInterval <= maxBackoffInterval) {
log.vdebug(" Retry In (seconds): ", thisBackOffInterval);
Thread.sleep(dur!"seconds"(thisBackOffInterval));
} else {
log.vdebug(" Retry In (seconds): ", maxBackoffInterval);
Thread.sleep(dur!"seconds"(maxBackoffInterval));
}
// perform the re-try
onlineRetry = testInternetReachability(appConfig);
if (onlineRetry) {
// We are now online
log.log("Internet connectivity to Microsoft OneDrive service has been restored");
retrySuccess = true;
} else {
// We are still offline
if (retryAttempts == retryCount) {
// we have attempted to re-connect X number of times
// set this to true to break out of the while loop
retrySuccess = true;
}
}
// Increment & loop around
retryAttempts++;
}
if (!onlineRetry) {
// Not online after 1.2 years of trying
log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts lasting over 1.2 years!");
}
// return the state
return onlineRetry;
}
// Can we read the file - as a permissions issue or file corruption will cause a failure
// https://github.com/abraunegg/onedrive/issues/113
// returns true if file can be accessed
bool readLocalFile(string path) {
try {
// attempt to read the first byte of the file
// validates we can 'read' the file based on file permissions
read(path,1);
} catch (std.file.FileException e) {
// unable to read the new local file
displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
return false;
}
return true;
}
// calls globMatch for each string in pattern separated by '|'
bool multiGlobMatch(const(char)[] path, const(char)[] pattern) {
foreach (glob; pattern.split('|')) {
if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) {
return true;
}
}
return false;
}
bool isValidName(string path) {
// Restriction and limitations about windows naming files
// https://msdn.microsoft.com/en-us/library/aa365247
// https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders
// allow root item
if (path == ".") {
return true;
}
bool matched = true;
string itemName = baseName(path);
auto invalidNameReg =
ctRegex!(
// Leading whitespace and trailing whitespace/dot
`^\s.*|^.*[\s\.]$|` ~
// Invalid characters
`.*[<>:"\|\?*/\\].*|` ~
// Reserved device name and trailing .~
`(?:^CON|^PRN|^AUX|^NUL|^COM[0-9]|^LPT[0-9])(?:[.].+)?$`
);
auto m = match(itemName, invalidNameReg);
matched = m.empty;
// Additional explicit validation checks
if (itemName == ".lock") {matched = false;}
if (itemName == "desktop.ini") {matched = false;}
// _vti_ cannot appear anywhere in a file or folder name
if(canFind(itemName, "_vti_")){matched = false;}
// Item name cannot equal '~'
if (itemName == "~") {matched = false;}
// return response
return matched;
}
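// A hedged sanity-check sketch (illustrative only, not part of the upstream
// file): examples of names accepted and rejected by the validation above.
unittest {
assert(isValidName(".")); // the root item is always allowed
assert(isValidName("notes.txt"));
assert(!isValidName("CON")); // reserved device name
assert(!isValidName("bad:name")); // ':' is an invalid character
assert(!isValidName("trailing.")); // trailing dot is rejected
assert(!isValidName("~")); // explicitly rejected
assert(!isValidName("a_vti_b")); // '_vti_' cannot appear anywhere
}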
bool containsBadWhiteSpace(string path) {
// allow root item
if (path == ".") {
return true;
}
// https://github.com/abraunegg/onedrive/issues/35
// Issue #35 presented an interesting issue where the filename contained a newline item
// 'State-of-the-art, challenges, and open issues in the integration of Internet of'$'\n''Things and Cloud Computing.pdf'
// When the check to see if this file was present the GET request queries as follows:
// /v1.0/me/drive/root:/.%2FState-of-the-art%2C%20challenges%2C%20and%20open%20issues%20in%20the%20integration%20of%20Internet%20of%0AThings%20and%20Cloud%20Computing.pdf
// The '$'\n'' is translated to %0A which causes the OneDrive query to fail
// Check for the presence of '%0A' via regex
string itemName = encodeComponent(baseName(path));
auto invalidWhitespaceReg =
ctRegex!(
// Check for \n which is %0A when encoded
`%0A`
);
auto m = match(itemName, invalidWhitespaceReg);
return m.empty;
}
bool containsASCIIHTMLCodes(string path) {
// https://github.com/abraunegg/onedrive/issues/151
// If a filename contains ASCII HTML codes, regardless of if it gets encoded, it generates an error
// Check if the filename contains an ASCII HTML code sequence
auto invalidASCIICode =
ctRegex!(
// Check to see if &#XXXX is in the filename
`(?:&#|&#[0-9][0-9]|&#[0-9][0-9][0-9]|&#[0-9][0-9][0-9][0-9])`
);
auto m = match(path, invalidASCIICode);
return m.empty;
}
// Parse and display error message received from OneDrive
void displayOneDriveErrorMessage(string message, string callingFunction) {
writeln();
log.error("ERROR: Microsoft OneDrive API returned an error with the following message:");
auto errorArray = splitLines(message);
log.error(" Error Message: ", errorArray[0]);
// Extract 'message' as the reason
JSONValue errorMessage = parseJSON(replace(message, errorArray[0], ""));
// What is the reason for the error
if (errorMessage.type() == JSONType.object) {
// configure the error reason
string errorReason;
string requestDate;
string requestId;
// set the reason for the error
try {
// Use error_description as reason
errorReason = errorMessage["error_description"].str;
} catch (JSONException e) {
// we don't want to do anything here
}
// set the reason for the error
try {
// Use ["error"]["message"] as reason
errorReason = errorMessage["error"]["message"].str;
} catch (JSONException e) {
// we don't want to do anything here
}
// Display the error reason
if (errorReason.startsWith("<!DOCTYPE")) {
// a HTML Error Reason was given
log.error(" Error Reason: A HTML Error response was provided. Use debug logging (--verbose --verbose) to view this error");
log.vdebug(errorReason);
} else {
// a non HTML Error Reason was given
log.error(" Error Reason: ", errorReason);
}
// Get the date of request if available
try {
// Use ["error"]["innerError"]["date"] as date
requestDate = errorMessage["error"]["innerError"]["date"].str;
} catch (JSONException e) {
// we don't want to do anything here
}
// Get the request-id if available
try {
// Use ["error"]["innerError"]["request-id"] as request-id
requestId = errorMessage["error"]["innerError"]["request-id"].str;
} catch (JSONException e) {
// we don't want to do anything here
}
// Display the date and request id if available
if (requestDate != "") log.error(" Error Timestamp: ", requestDate);
if (requestId != "") log.error(" API Request ID: ", requestId);
}
// Where in the code was this error generated
log.log(" Calling Function: ", callingFunction);
// Extra Debug if we are using --verbose --verbose
log.vdebug("Raw Error Data: ", message);
log.vdebug("JSON Message: ", errorMessage);
}
// Common code for handling when a client is unauthorised
void handleClientUnauthorised(int httpStatusCode, string message) {
// Split the lines of the error message
auto errorArray = splitLines(message);
// Extract 'message' as the reason
JSONValue errorMessage = parseJSON(replace(message, errorArray[0], ""));
log.vdebug("errorMessage: ", errorMessage);
if (httpStatusCode == 400) {
// bad request or a new auth token is needed
// configure the error reason
writeln();
string[] errorReason = splitLines(errorMessage["error_description"].str);
log.errorAndNotify(errorReason[0]);
writeln();
log.errorAndNotify("ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token.");
writeln();
}
if (httpStatusCode == 401) {
writeln("CODING TO DO: Triggered a 401 HTTP unauthorised response when client was unauthorised");
writeln();
log.errorAndNotify("ERROR: Check your configuration as your refresh_token may be empty or invalid. You may need to issue a --reauth and re-authorise this client.");
writeln();
}
// Must exit here
exit(EXIT_FAILURE);
}
// Parse and display error message received from the local file system
void displayFileSystemErrorMessage(string message, string callingFunction) {
writeln();
log.error("ERROR: The local file system returned an error with the following message:");
auto errorArray = splitLines(message);
// What was the error message
log.error(" Error Message: ", errorArray[0]);
// Where in the code was this error generated
log.vlog(" Calling Function: ", callingFunction);
// If we are out of disk space (despite download reservations) we need to exit the application
ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace("."));
if (localActualFreeSpace == 0) {
// force exit
exit(EXIT_FAILURE);
}
}
// Display the POSIX Error Message
void displayPosixErrorMessage(string message) {
writeln();
log.error("ERROR: Microsoft OneDrive API returned data that highlights a POSIX compliance issue:");
log.error(" Error Message: ", message);
}
// Get the function name that is being called to assist with identifying where an error is being generated
string getFunctionName(alias func)() {
return __traits(identifier, __traits(parent, func)) ~ "()\n";
}
// Get the latest release version from GitHub
JSONValue getLatestReleaseDetails() {
// Import curl just for this function
import std.net.curl;
char[] content;
JSONValue githubLatest;
JSONValue versionDetails;
string latestTag;
string publishedDate;
try {
content = get("https://api.github.com/repos/abraunegg/onedrive/releases/latest");
} catch (CurlException e) {
// curl generated an error - meaning we could not query GitHub
log.vdebug("Unable to query GitHub for latest release");
}
try {
githubLatest = content.parseJSON();
} catch (JSONException e) {
// unable to parse the content JSON, set to blank JSON
log.vdebug("Unable to parse GitHub JSON response");
githubLatest = parseJSON("{}");
}
// githubLatest has to be a valid JSON object
if (githubLatest.type() == JSONType.object){
// use the returned tag_name
if ("tag_name" in githubLatest) {
// use the provided tag
// "tag_name": "vA.B.CC" and strip 'v'
latestTag = strip(githubLatest["tag_name"].str, "v");
} else {
// set latestTag to zeros
log.vdebug("'tag_name' unavailable in JSON response. Setting GitHub 'tag_name' release version to 0.0.0");
latestTag = "0.0.0";
}
// use the returned published_at date
if ("published_at" in githubLatest) {
// use the provided value
publishedDate = githubLatest["published_at"].str;
} else {
// set to v2.0.0 release date
log.vdebug("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
publishedDate = "2018-07-18T18:00:00Z";
}
} else {
// JSONValue is not an object
log.vdebug("Invalid JSON Object. Setting GitHub 'tag_name' release version to 0.0.0");
latestTag = "0.0.0";
log.vdebug("Invalid JSON Object. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
publishedDate = "2018-07-18T18:00:00Z";
}
// return the latest github version and published date as our own JSON
versionDetails = [
"latestTag": JSONValue(latestTag),
"publishedDate": JSONValue(publishedDate)
];
// return JSON
return versionDetails;
}
// Get the release details from the 'current' running version
JSONValue getCurrentVersionDetails(string thisVersion) {
// Import curl just for this function
import std.net.curl;
char[] content;
JSONValue githubDetails;
JSONValue versionDetails;
string versionTag = "v" ~ thisVersion;
string publishedDate;
try {
content = get("https://api.github.com/repos/abraunegg/onedrive/releases");
} catch (CurlException e) {
// curl generated an error - meaning we could not query GitHub
log.vdebug("Unable to query GitHub for release details");
}
try {
githubDetails = content.parseJSON();
} catch (JSONException e) {
// unable to parse the content JSON, set to blank JSON
log.vdebug("Unable to parse GitHub JSON response");
githubDetails = parseJSON("{}");
}
// githubDetails has to be a valid JSON array
if (githubDetails.type() == JSONType.array){
foreach (searchResult; githubDetails.array) {
// searchResult["tag_name"].str;
if (searchResult["tag_name"].str == versionTag) {
log.vdebug("MATCHED version");
log.vdebug("tag_name: ", searchResult["tag_name"].str);
log.vdebug("published_at: ", searchResult["published_at"].str);
publishedDate = searchResult["published_at"].str;
}
}
if (publishedDate.empty) {
// empty .. no version match ?
// set to v2.0.0 release date
log.vdebug("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
publishedDate = "2018-07-18T18:00:00Z";
}
} else {
// JSONValue is not an Array
log.vdebug("Invalid JSON Array. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z");
publishedDate = "2018-07-18T18:00:00Z";
}
// return the latest github version and published date as our own JSON
versionDetails = [
"versionTag": JSONValue(thisVersion),
"publishedDate": JSONValue(publishedDate)
];
// return JSON
return versionDetails;
}
// Check the application version versus GitHub latestTag
void checkApplicationVersion() {
// Get the latest details from GitHub
JSONValue latestVersionDetails = getLatestReleaseDetails();
string latestVersion = latestVersionDetails["latestTag"].str;
SysTime publishedDate = SysTime.fromISOExtString(latestVersionDetails["publishedDate"].str).toUTC();
SysTime releaseGracePeriod = publishedDate;
SysTime currentTime = Clock.currTime().toUTC();
// drop fraction seconds
publishedDate.fracSecs = Duration.zero;
currentTime.fracSecs = Duration.zero;
releaseGracePeriod.fracSecs = Duration.zero;
// roll the grace period forward to allow distributions to catch up based on their release cycles
releaseGracePeriod = releaseGracePeriod.add!"months"(1);
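// Illustrative note (not upstream): a latest release published on 2023-09-10
// would therefore not cause older clients to be flagged as obsolete until
// after 2023-10-10.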
// what is this client's version?
auto currentVersionArray = strip(strip(import("version"), "v")).split("-");
string applicationVersion = currentVersionArray[0];
// debug output
log.vdebug("applicationVersion: ", applicationVersion);
log.vdebug("latestVersion: ", latestVersion);
log.vdebug("publishedDate: ", publishedDate);
log.vdebug("currentTime: ", currentTime);
log.vdebug("releaseGracePeriod: ", releaseGracePeriod);
// display details if not current
// is the application version older than the version available on GitHub?
if (applicationVersion != latestVersion) {
// application version is different
bool displayObsolete = false;
// what warning do we present?
if (applicationVersion < latestVersion) {
// go get this running version details
JSONValue thisVersionDetails = getCurrentVersionDetails(applicationVersion);
SysTime thisVersionPublishedDate = SysTime.fromISOExtString(thisVersionDetails["publishedDate"].str).toUTC();
thisVersionPublishedDate.fracSecs = Duration.zero;
log.vdebug("thisVersionPublishedDate: ", thisVersionPublishedDate);
// the running version grace period is its release date + 1 month
SysTime thisVersionReleaseGracePeriod = thisVersionPublishedDate;
thisVersionReleaseGracePeriod = thisVersionReleaseGracePeriod.add!"months"(1);
log.vdebug("thisVersionReleaseGracePeriod: ", thisVersionReleaseGracePeriod);
// Is this running version obsolete ?
if (!displayObsolete) {
// if releaseGracePeriod > currentTime
// display an information warning that there is a new release available
if (releaseGracePeriod.toUnixTime() > currentTime.toUnixTime()) {
// inside release grace period ... set flag to false
displayObsolete = false;
} else {
// outside grace period
displayObsolete = true;
}
}
// display version response
writeln();
if (!displayObsolete) {
// display the new version is available message
log.logAndNotify("INFO: A new onedrive client version is available. Please upgrade your client version when possible.");
} else {
// display the obsolete message
log.logAndNotify("WARNING: Your onedrive client version is now obsolete and unsupported. Please upgrade your client version.");
}
log.log("Current Application Version: ", applicationVersion);
log.log("Version Available: ", latestVersion);
writeln();
}
}
}
bool hasId(JSONValue item) {
return ("id" in item) != null;
}
bool hasQuota(JSONValue item) {
return ("quota" in item) != null;
}
bool isItemDeleted(JSONValue item) {
return ("deleted" in item) != null;
}
bool isItemRoot(JSONValue item) {
return ("root" in item) != null;
}
bool hasParentReference(const ref JSONValue item) {
return ("parentReference" in item) != null;
}
bool hasParentReferenceId(JSONValue item) {
return ("id" in item["parentReference"]) != null;
}
bool hasParentReferencePath(JSONValue item) {
return ("path" in item["parentReference"]) != null;
}
bool isFolderItem(const ref JSONValue item) {
return ("folder" in item) != null;
}
bool isFileItem(const ref JSONValue item) {
return ("file" in item) != null;
}
bool isItemRemote(const ref JSONValue item) {
return ("remoteItem" in item) != null;
}
bool isItemFile(const ref JSONValue item) {
return ("file" in item) != null;
}
bool isItemFolder(const ref JSONValue item) {
return ("folder" in item) != null;
}
bool hasFileSize(const ref JSONValue item) {
return ("size" in item) != null;
}
bool isDotFile(const(string) path) {
// always allow the root
if (path == ".") return false;
auto paths = pathSplitter(buildNormalizedPath(path));
foreach(base; paths) {
if (startsWith(base, ".")){
return true;
}
}
return false;
}
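// Worked example for isDotFile above (illustrative comment, not upstream):
// './.config/settings' is a dotfile path because the '.config' segment starts
// with '.', while './documents/file.txt' is not; the root '.' never is.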
bool isMalware(const ref JSONValue item) {
return ("malware" in item) != null;
}
bool hasHashes(const ref JSONValue item) {
return ("hashes" in item["file"]) != null;
}
bool hasQuickXorHash(const ref JSONValue item) {
return ("quickXorHash" in item["file"]["hashes"]) != null;
}
bool hasSHA256Hash(const ref JSONValue item) {
return ("sha256Hash" in item["file"]["hashes"]) != null;
}
bool isMicrosoftOneNoteMimeType1(const ref JSONValue item) {
return (item["file"]["mimeType"].str) == "application/msonenote";
}
bool isMicrosoftOneNoteMimeType2(const ref JSONValue item) {
return (item["file"]["mimeType"].str) == "application/octet-stream";
}
bool hasUploadURL(const ref JSONValue item) {
return ("uploadUrl" in item) != null;
}
bool hasNextExpectedRanges(const ref JSONValue item) {
return ("nextExpectedRanges" in item) != null;
}
bool hasLocalPath(const ref JSONValue item) {
return ("localPath" in item) != null;
}
bool hasETag(const ref JSONValue item) {
return ("eTag" in item) != null;
}
bool hasSharedElement(const ref JSONValue item) {
return ("eTag" in item) != null;
}