Fix spelling problems as identified by GitHub Action check-spelling (#2946)

* Fix spelling problems as identified by GitHub Action check-spelling
This commit is contained in:
abraunegg 2024-11-04 11:32:18 +11:00 committed by GitHub
commit ae84b8cfef
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
24 changed files with 582 additions and 128 deletions

View file

@ -6,7 +6,7 @@ body:
- type: markdown
attributes:
value: |
**Note:** Before submitting a bug report, please ensure you are running the latest 'onedrive' client as built from 'master' and compile by using the latest available DMD or LDC compiler. Refer to the the [install](https://github.com/abraunegg/onedrive/blob/master/docs/install.md) document on how to build the client for your system.
**Note:** Before submitting a bug report, please ensure you are running the latest 'onedrive' client as built from 'master' and compile by using the latest available DMD or LDC compiler. Refer to the [install](https://github.com/abraunegg/onedrive/blob/master/docs/install.md) document on how to build the client for your system.
- type: textarea
id: bugDescription

407
.github/actions/spelling/allow.txt vendored Normal file
View file

@ -0,0 +1,407 @@
#
# Other allow entries
#
AADSTS
aarch
abraunegg
accrights
accrightslen
adamdruppe
addrepo
adr
alex
alpinelinux
annobin
antix
aothmane
apng
archlinux
ARequest
armhf
ARMv
arsd
arsdnet
artix
ASCIIHTML
ASpurious
AThings
aufxw
aur
autoclean
autoprocess
autoupdate
avmkfdiitirnrenzljwc
baus
bcdefghi
bindir
bir
blargh
bools
bpozdena
brp
btw
bugzilla
buildfiles
buildroot
bytecompile
cancelfd
CApath
cattr
ccc
certbot
checkinterval
chkconfig
chpst
classinfo
cloexec
Cloudflare
cmptr
cmsghdr
codacy
commandline
concated
confdir
constness
controllen
crt
ctl
ctls
cyb
datadir
dchar
debian
dechunk
Deepin
deimos
devuan
dhparams
dirmask
dlang
dlnow
dltotal
dmd
dnf
dnotify
Dockerfiles
dotfile
dphys
driveid
driveone
druntime
drwx
drwxr
drwxrwxr
dryrun
DTime
Dynu
eis
ele
endinaness
enduml
envp
epfd
estr
eventfd
evt
fasynchronous
fcf
fcgid
fcgienv
FCGX
fcontext
fedoraproject
fefefe
fexceptions
ffat
fhandler
flto
fstack
FState
fullchain
fullscan
gdc
gdk
gerror
getenforce
gfortran
GFree
Gibi
gmodule
GObject
gpg
GPLv
GPOs
grecord
groupinstall
gshared
GVariant
hideonindex
hnsecs
hskrieg
htons
idk
idlol
idup
ifrom
includedir
ine
infodir
initted
initval
intercambio
iocp
ioop
ioops
iov
iovec
iovlen
ipresolve
isv
itemdb
itimerspec
journalctl
jsvar
kdbx
keepass
lalen
lbl
lcurl
ldc
ldconfig
ldl
letsencrypt
lgdk
lgio
lglib
lgobject
libdir
libexec
libexecdir
libgcc
libgdk
libgio
libglib
libgobject
liblphobos
libm
libnotify
libsqlite
Lighttpd
lintian
llclose
llsend
lnotify
localstatedir
lol
lpdw
lpfn
lrwxrwxrwx
lsb
lsqlite
ltmain
Lyncredible
makepkg
mangleof
mayne
mbr
memtest
microsoftonline
mountpoint
mozram
msghdr
msonenote
mswsock
mtune
mydir
mynasau
myusername
nadded
nadjusted
namelen
nas
nativeclient
nbd
nbytes
ncache
ndata
netinet
nev
nevent
newfd
newfile
nexisting
nfds
nfor
nintegrity
niop
nobj
nodelay
nolan
nomount
nosignal
nosync
notif
nph
nrecords
nto
nuntil
nupload
nvia
objectsx
odata
ofonedrive
onbinarymessage
onedrive
onmicrosoft
ontextmessage
opensuse
overallocated
pacman
pamac
parentid
passoff
pastebin
pfa
phlibi
phobos
pidx
pixbuf
pki
pkolmann
podman
pollfd
pollfds
postun
prefork
preun
privkey
Privs
prueba
prw
Pseudoblocking
puml
qewrqwerwqer
QWords
qxor
raf
ralen
raspberrypi
raspi
raspios
rbtree
rdo
readdata
readln
readret
reauth
Recvd
recvfd
redhat
relro
restorecon
retu
revents
rko
rlc
robertschulze
rpcing
rpmbuild
rpmlib
Rproj
rrodrigueznt
rsv
rtud
rul
runsvdir
Ruppe
rwxr
sargs
sbindir
scgi
sdlang
semanage
sendfd
setsebool
settime
sev
sfn
sharedstatedir
sharepoint
shortnames
sigaction
sigchldhandler
sigemptyset
signo
sigpipe
skilion
skinparam
Sockaddrs
somevar
sooooo
startuml
statm
stdc
stringof
Stringz
subobject
subobjects
svdir
swapfile
swapon
swp
symcode
syncable
syncdir
sysconf
sysconfdir
systemdsystemunitdir
systemduserunitdir
tbh
tdcockers
templ
Thh
thnk
tidx
timerfd
tlsv
Tting
typecons
uda
ulnow
uload
ultotal
undefiened
unistd
unittests
urlify
userinfo
usermod
userns
userpass
usl
valgrind
vti
wal
websockets
webtemplate
weburl
Werror
wpath
writefln
wrt
wtf
xca
xcbac
xdeadbeef
xdg
xlaunch
xof
xored
XXYYZZ
yann
yourapp
yourdomain
yourfile
yourprogram
Zorin
zypper

4
.github/actions/spelling/excludes.txt vendored Normal file
View file

@ -0,0 +1,4 @@
# Ignore the action's configuration data
^\.github/actions/spelling/
# Ignore all GitHub workflow files
^\.github/workflows/

9
.github/actions/spelling/only.txt vendored Normal file
View file

@ -0,0 +1,9 @@
# Only process the following files
# *.md
# onedrive.1.in
# *.d
# *.puml
\.md$
\.d$
\.puml$
^onedrive\.1\.in$

36
.github/actions/spelling/patterns.txt vendored Normal file
View file

@ -0,0 +1,36 @@
# https/http/file urls
(?:\b(?:https?|ftp|file)://)[-A-Za-z0-9+&@#/*%?=~_|!:,.;]+[-A-Za-z0-9+&@#/*%=~_|]
# uuid:
\b[0-9a-fA-F]{8}-(?:[0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}\b
# sha256:<sha string>
\bsha256:[0-9a-fA-F]{64}\b
# sha256 string without any prefix
\b[0-9a-fA-F]{64}\b
# 32 character string as generated from /dev/urandom
\b[a-zA-Z0-9]{32}\b
# 16 character string as generated from /dev/urandom
\b[a-zA-Z0-9]{16}\b
# Microsoft URL's
\b(?:https?://|)(?:(?:download\.visualstudio|docs|msdn2?|research)\.microsoft|blogs\.msdn)\.com/[-_a-zA-Z0-9()=./%]*
# Microsoft OneDrive Business Account DriveID identifiers
\b[bB]![A-Za-z0-9_-]{64}\b
# UTF-16 Hex Values
\b0x(?:D[89A-F][0-9A-F]{2}|E[0-9A-F]{3}|F[0-9A-F]{3})\b
# man troff content
# https://www.gnu.org/software/groff/manual/groff.html
\\f[BCIPR]
# All UPPERCASE letters only (no underscores)
\b[A-Z]+\b
# Ignore UPPERCASE letters separated by an underscore '_'
\b[A-Z]+(?:_[A-Z]+)*\b

View file

@ -31,7 +31,7 @@ A special thankyou to @phlibi for assistance with diagnosing and troubleshooting
* Fix Regression: Fix regression that real-time synchronization is not occurring when using --monitor and sync_list
* Fix Regression: Fix regression that --remove-source-files doesnt work
* Fix Bug: Application crash when run synchronize due to negative free space online
* Fix Bug: Application crash when performing a URL dedocde
* Fix Bug: Application crash when performing a URL decode
* Fix Bug: Application crash when using sync_list and Personal Shared Folders the root folder fails to present the item id
* Fix Bug: Application crash when attempting to read timestamp from database as invalid data was written
@ -226,7 +226,7 @@ A special thankyou to all those who helped with testing and providing feedback d
## 2.4.20 - 2022-07-20
### Fixed
* Fix 'foreign key constraint failed' when using OneDrive Business Shared Folders due to change to using /delta query
* Fix various little spelling fixes (check with lintian during Debian packaging)
* Fix various little spelling errors (checked with lintian during Debian packaging)
* Fix handling of a custom configuration directory when using --confdir
* Fix to ensure that any active http instance is shutdown before any application exit
* Fix to enforce that --confdir must be a directory
@ -257,7 +257,7 @@ A special thankyou to all those who helped with testing and providing feedback d
## 2.4.18 - 2022-06-02
### Fixed
* Fixed various database related access issues steming from running multiple instances of the application at the same time using the same configuration data
* Fixed various database related access issues stemming from running multiple instances of the application at the same time using the same configuration data
* Fixed --display-config being impacted by --resync flag
* Fixed installation permissions for onedrive man-pages file
* Fixed that in some situations that users try --upload-only and --download-only together which is not possible
@ -270,7 +270,7 @@ A special thankyou to all those who helped with testing and providing feedback d
### Updated
* Updated all Docker build files to current distributions, using updated distribution LDC version
* Updated logging output to logfiles when an actual sync process is occuring
* Updated logging output to logfiles when an actual sync process is occurring
* Updated output of --display-config to be more relevant
* Updated manpage to align with application configuration
* Updated documentation and Docker files based on minimum compiler versions to dmd-2.088.0 and ldc-1.18.0
@ -335,7 +335,7 @@ A special thankyou to all those who helped with testing and providing feedback d
* Updated minimum compiler versions to dmd-2.087.0 and ldc-1.17.0
### Updated
* Updated Dockerfile-alpine to use Apline 3.14
* Updated Dockerfile-alpine to use Alpine 3.14
* Updated documentation (various)
## 2.4.14 - 2021-11-24
@ -462,7 +462,7 @@ A special thankyou to all those who helped with testing and providing feedback d
* Fix application crash in --monitor mode due to 'Failed to stat file' when setgid is used on a directory and data cannot be read
### Added
* Added advanced-usage.md to document advaced client usage such as multi account configurations and Windows dual-boot
* Added advanced-usage.md to document advanced client usage such as multi account configurations and Windows dual-boot
### Updated
* Updated --verbose logging output for config options when set
@ -519,7 +519,7 @@ A special thankyou to all those who helped with testing and providing feedback d
## 2.4.4 - 2020-08-11
### Fixed
* Fix 'skip_dir' & 'skip_file' pattern matching to ensure correct matching is performed
* Fix 'skip_dir' & 'skip_file' so that each directive is only used against directories or files as requried in --monitor
* Fix 'skip_dir' & 'skip_file' so that each directive is only used against directories or files as required in --monitor
* Fix client hang when attempting to sync a Unix pipe file
* Fix --single-directory & 'sync_list' performance
* Fix erroneous 'return' statements which could prematurely end processing all changes returned from OneDrive
@ -619,7 +619,7 @@ A special thankyou to all those who helped with testing and providing feedback d
* Update known-issues.md regarding 'SSL_ERROR_SYSCALL, errno 104'
* Update progress bar to be more accurate when downloading large files
* Updated #658 and #865 handling of when to trigger a directory walk when changes occur on OneDrive
* Updated handling of when a full scan is requried due to utilising sync_list
* Updated handling of when a full scan is required due to utilising sync_list
* Updated handling of when OneDrive service throws a 429 or 504 response to retry original request after a delay
## 2.4.0 - 2020-03-22
@ -633,7 +633,7 @@ A special thankyou to all those who helped with testing and providing feedback d
* Fixed the regex parsing of response URI to avoid potentially generating a bad request to OneDrive, leading to a 'AADSTS9002313: Invalid request. Request is malformed or invalid.' response.
### Added
* Added a Dockerfile for building on Rasberry Pi / ARM platforms
* Added a Dockerfile for building on Raspberry Pi / ARM platforms
* Implement Feature: warning on big deletes to safeguard data on OneDrive
* Implement Feature: delete local files after sync
* Implement Feature: perform skip_dir explicit match only
@ -1078,7 +1078,7 @@ A special thankyou to all those who helped with testing and providing feedback d
* Resolve Microsoft Naming Convention not being followed correctly
* Resolve Error when trying to upload a file with weird non printable characters present
* Resolve Crash if file is locked by online editing (status code 423)
* Resolve Resolve compilation issue with dmd-2.081.0
* Resolve compilation issue with dmd-2.081.0
* Resolve skip_file configuration doesn't handle spaces or specified directory paths
### Added
* Implement Feature: Add a flag to detect when the sync-folder is missing

View file

@ -213,20 +213,20 @@ docker volume create onedrive_conf_sharepoint_site50
#### Create the required unique local path used for the Docker data volume
Create the required unique local path used for the Docker data volume
```text
mkdir -p /use/full/local/path/no/tilda/SharePointSite1
mkdir -p /use/full/local/path/no/tilda/SharePointSite2
mkdir -p /use/full/local/path/no/tilda/SharePointSite3
mkdir -p /use/full/local/path/no/tilde/SharePointSite1
mkdir -p /use/full/local/path/no/tilde/SharePointSite2
mkdir -p /use/full/local/path/no/tilde/SharePointSite3
...
mkdir -p /use/full/local/path/no/tilda/SharePointSite50
mkdir -p /use/full/local/path/no/tilde/SharePointSite50
```
#### Start the Docker container with the required configuration (example)
```text
docker run -it --name onedrive -v onedrive_conf_sharepoint_site1:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite1:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive -v onedrive_conf_sharepoint_site2:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite2:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive -v onedrive_conf_sharepoint_site3:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite3:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive -v onedrive_conf_sharepoint_site1:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite1:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive -v onedrive_conf_sharepoint_site2:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite2:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive -v onedrive_conf_sharepoint_site3:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite3:/onedrive/data" driveone/onedrive:latest
...
docker run -it --name onedrive -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite50:/onedrive/data" driveone/onedrive:latest
docker run -it --name onedrive -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilde/SharePointSite50:/onedrive/data" driveone/onedrive:latest
```
> [!TIP]
@ -250,7 +250,7 @@ After unchecking the option and clicking "OK", the Windows OneDrive client shoul
| ![Uncheck-Personal](./images/personal-files-on-demand.png) | ![Uncheck-Business](./images/business-files-on-demand.png) |
## Configuring the client for use when 'sync_dir' is a mounted directory
In some environments, your setup might be that your configured 'sync_dir' is pointing to another mounted file system - a NFS|CIFS location, an external drive (USB stuc, eSATA etc). As such, you configure your 'sync_dir' as follows:
In some environments, your setup might be that your configured 'sync_dir' is pointing to another mounted file system - a NFS|CIFS location, an external drive (USB stick, eSATA etc). As such, you configure your 'sync_dir' as follows:
```text
sync_dir = "/path/to/mountpoint/OneDrive"
```

View file

@ -98,7 +98,7 @@ Before reading this document, please ensure you are running application version
## Configuration File Options
### application_id
_**Description:**_ This is the config option for application id that used used to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option.
_**Description:**_ This is the config option for the application id that is used to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option.
_**Value Type:**_ String
@ -337,7 +337,7 @@ _**CLI Option Use:**_ `--enable-logging`
> Additional configuration is potentially required to configure the default log directory. Refer to the [Enabling the Client Activity Log](./usage.md#enabling-the-client-activity-log) section in usage.md for details
### force_http_11
_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP prodocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1.
_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP protocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1.
_**Value Type:**_ Boolean
@ -724,7 +724,7 @@ _**Description:**_ This configuration option controls how much local disk space
_**Value Type:**_ Integer
_**Default Value:**_ 50 MB (expressesed as Bytes when using `--display-config`)
_**Default Value:**_ 50 MB (expressed as Bytes when using `--display-config`)
_**Config Example:**_ `space_reservation = "100"`
@ -1112,7 +1112,7 @@ Shared By: test user (testuser@domain.tld)
```
### CLI Option: --logout
_**Description:**_ This CLI option removes this clients authentictaion status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
_**Description:**_ This CLI option removes this clients authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
_**Usage Example:**_ `onedrive --logout`

View file

@ -6,7 +6,7 @@
> [!CAUTION]
> This feature has been 100% re-written from v2.5.0 onwards and is not backwards compatible with v2.4.x client versions. If enabling this feature, you must upgrade to v2.5.0 or above on all systems that are running this client.
>
> An additional pre-requesite before using this capability in v2.5.0 and above is for you to revert any v2.4.x Shared Business Folder configuration you may be currently using, including, but not limited to:
> An additional pre-requisite before using this capability in v2.5.0 and above is for you to revert any v2.4.x Shared Business Folder configuration you may be currently using, including, but not limited to:
> * Removing `sync_business_shared_folders = "true|false"` from your 'config' file
> * Removing the 'business_shared_folders' file
> * Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues.
@ -67,8 +67,8 @@ Processing OneDrive JSON item batch [1/1] to ensure consistent local state
Creating local directory: ./my_shared_folder/asdf
Creating local directory: ./my_shared_folder/original_data
Number of items to download from OneDrive: 3
Downloading file: my_shared_folder/asdf/asdfasdfhashdkfasdf.txt ... done
Downloading file: my_shared_folder/asdf/asdfasdf.txt ... done
Downloading file: my_shared_folder/my_folder/file_one.txt ... done
Downloading file: my_shared_folder/my_folder/file_two.txt ... done
Downloading file: my_shared_folder/original_data/file1.data ... done
Performing a database consistency and integrity check on locally stored data
...
@ -87,7 +87,7 @@ There are two methods to support the syncing OneDrive Business Shared Files with
2. Use `--sync-shared-files` option to sync all files shared with you to your local disk. If you use this method, you can utilise any 'client side filtering' rules that you have created to filter out files you do not want locally. This option will create a new folder locally, with sub-folders named after the person who shared the data with you.
### Syncing OneDrive Business Shared Files using Option 1
1. As per the above method for adding folders, select the shared file, then select to 'Add shorcut' to the file
1. As per the above method for adding folders, select the shared file, then select to 'Add shortcut' to the file
![add_shared_file_shortcut](./images/add_shared_file_shortcut.png)
@ -132,7 +132,7 @@ Any shared file link you add can utilise any 'client side filtering' rules that
### Syncing OneDrive Business Shared Files using Option 2
> [!IMPORTANT]
> When using option 2, all files that have been shared with you will be downloaded by default. To reduce this, first use `--list-shared-items` to list all shared items with your account, then use 'client side filtering' rules such as 'sync_list' configuration to selectivly sync all the files to your local system.
> When using option 2, all files that have been shared with you will be downloaded by default. To reduce this, first use `--list-shared-items` to list all shared items with your account, then use 'client side filtering' rules such as 'sync_list' configuration to selectively sync all the files to your local system.
1. Review all items that have been shared with you by using `onedrive --list-shared-items`. This should display output similar to the following:
```

View file

@ -53,7 +53,7 @@ When using this coding style, even when the code of the `if`, `else`, `for`, or
}
```
## Naming Conventsions
## Naming Conventions
### Variables and Functions
Please use `camelCase` for variable and function names.

View file

@ -367,14 +367,14 @@ There are alternate, smaller images available by using `Dockerfile-debian` or `D
### How to build and run a custom Docker image based on Debian
``` bash
docker build . -t local-ondrive-debian -f contrib/docker/Dockerfile-debian
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-ondrive-debian:latest
docker build . -t local-onedrive-debian -f contrib/docker/Dockerfile-debian
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-debian:latest
```
### How to build and run a custom Docker image based on Alpine Linux
``` bash
docker build . -t local-ondrive-alpine -f contrib/docker/Dockerfile-alpine
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-ondrive-alpine:latest
docker build . -t local-onedrive-alpine -f contrib/docker/Dockerfile-alpine
docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-alpine:latest
```
### How to build and run a custom Docker image for ARMHF (Raspberry Pi)

View file

@ -37,7 +37,7 @@ Only the current release version or greater is supported. Earlier versions are n
## Building from Source - High Level Requirements
* For successful compilation of this application, it's crucial that the build environment is equipped with a minimum of 1GB of memory and an additional 1GB of swap space.
* Install the required distribution package dependencies coverering the required development tools and development libraries for curl and sqlite
* Install the required distribution package dependencies covering the required development tools and development libraries for curl and sqlite
* Install the [Digital Mars D Compiler (DMD)](https://dlang.org/download.html) or [LDC the LLVM-based D Compiler](https://github.com/ldc-developers/ldc)
> [!IMPORTANT]

View file

@ -345,14 +345,14 @@ Dockerfiles require Docker version at least 17.05.
### How to build and run a custom Podman image based on Debian
``` bash
podman build . -t local-ondrive-debian -f contrib/docker/Dockerfile-debian
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-ondrive-debian:latest
podman build . -t local-onedrive-debian -f contrib/docker/Dockerfile-debian
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-onedrive-debian:latest
```
### How to build and run a custom Podman image based on Alpine Linux
``` bash
podman build . -t local-ondrive-alpine -f contrib/docker/Dockerfile-alpine
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-ondrive-alpine:latest
podman build . -t local-onedrive-alpine -f contrib/docker/Dockerfile-alpine
podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-onedrive-alpine:latest
```
### How to build and run a custom Podman image for ARMHF (Raspberry Pi)

View file

@ -34,7 +34,7 @@ sudo add-apt-repository --remove ppa:yann1ck/onedrive
#### Step 1b: Remove errant systemd service file installed by PPA or distribution package
Additionally, the distributon packages have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package so that the client will automatically run the client post being authenticated:
Additionally, the distribution packages have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package so that the client will automatically run the client post being authenticated:
```
Created symlink /etc/systemd/user/default.target.wants/onedrive.service → /usr/lib/systemd/user/onedrive.service.
```

View file

@ -6,7 +6,7 @@ Before reading this document, please ensure you are running application version
- [Important Notes](#important-notes)
- [Memory Usage](#memory-usage)
- [Upgrading from the 'skilion' Client](#upgrading-from-the-sklion-client)
- [Upgrading from the 'skilion' Client](#upgrading-from-the-skilion-client)
- [Guidelines for Local File and Folder Naming in the Synchronisation Directory](#guidelines-for-local-file-and-folder-naming-in-the-synchronisation-directory)
- [Compatibility with curl](#compatibility-with-curl)
- [First Steps](#first-steps)
@ -314,7 +314,7 @@ onedrive -s -v
### Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive
Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this:
* **check_nosync:** This option allows you you create a `.nosync` file in local directories, to skip that directory from being included in sync operations.
* **check_nosync:** This option allows you to create a `.nosync` file in local directories, to skip that directory from being included in sync operations.
* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process.
* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local.
* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage.

View file

@ -104,7 +104,7 @@ The configuration above will:
* Once tested, reload your 'nginx' configuration to activate the webhook reverse proxy configuration.
### Step 4: Initial Firewall/Router Configuration
* Configure your firewall or router to forward all incomming HTTP and HTTPS traffic to the internal address of your system where 'nginx' is running. This is required for to allow the Let's Encrypt `certbot` tool to create a valid HTTPS certificate for your system.
* Configure your firewall or router to forward all incoming HTTP and HTTPS traffic to the internal address of your system where 'nginx' is running. This is required to allow the Let's Encrypt `certbot` tool to create a valid HTTPS certificate for your system.
![initial_firewall_config](./images/initial_firewall_config.png)
@ -305,7 +305,7 @@ Starting a sync with Microsoft OneDrive
.....
```
* Review the 'nginx' logs to validate that applicable communication is occuring:
* Review the 'nginx' logs to validate that applicable communication is occurring:
```
70.37.95.11 - - [28/Aug/2024:18:26:07 +1000] "POST /webhooks/onedrive?validationToken=Validation%3a+Testing+client+application+reachability+for+subscription+Request-Id%3a+25460109-0e8b-4521-8090-dd691b407ed8 HTTP/1.1" 200 128 "-" "-" "-"
137.135.11.116 - - [28/Aug/2024:18:32:02 +1000] "POST /webhooks/onedrive?validationToken=Validation%3a+Testing+client+application+reachability+for+subscription+Request-Id%3a+65e43e3c-cbab-4e74-87ec-0e8fafdef6d3 HTTP/1.1" 200 128 "-" "-" "-"

View file

@ -190,7 +190,7 @@ void main() {
Simulating_requests:
If you are using one of the [GenericMain] or [DispatcherMain] mixins, or main with your own call to [RequestServer.trySimulatedRequest], you can simulate requests from your command-ine shell. Call the program like this:
If you are using one of the [GenericMain] or [DispatcherMain] mixins, or main with your own call to [RequestServer.trySimulatedRequest], you can simulate requests from your command line shell. Call the program like this:
$(CONSOLE
./yourprogram GET / name=adr
@ -1193,7 +1193,7 @@ class Cgi {
get = keepLastOf(getArray);
// NOTE: on shitpache, you need to specifically forward this
// NOTE: on apache, you need to specifically forward this
authorization = getenv("HTTP_AUTHORIZATION");
// this is a hack because Apache is a shitload of fuck and
// refuses to send the real header to us. Compatible
@ -5389,7 +5389,6 @@ class BufferedInputRange {
/// You do not need to call this if you always want to wait for more data when you
/// consume some.
ubyte[] consume(size_t bytes) {
//import std.stdio; writeln("consuime ", bytes, "/", view.length);
view = view[bytes > $ ? $ : bytes .. $];
if(view.length == 0) {
view = underlyingBuffer[0 .. 0]; // go ahead and reuse the beginning
@ -8713,8 +8712,7 @@ ssize_t write_fd(int fd, void *ptr, size_t nbytes, int sendfd) {
iovec[1] iov;
version(OSX) {
//msg.msg_accrights = cast(cattr_t) &sendfd;
//msg.msg_accrightslen = int.sizeof;
// removed
} else version(Android) {
} else {
union ControlUnion {
@ -10858,7 +10856,7 @@ class CollectionOf(Obj) : RestObject!(CollectionOf) {
remove
You will want to be able to customize the HTTP, HTML, and JSON returns but generally shouldn't have to - the defaults
should usually work. The returned JSON will include a field "href" on all returned objects along with "id". Or omething like that.
should usually work. The returned JSON will include a field "href" on all returned objects along with "id". Or something like that.
Usage of this function will add a dependency on [arsd.dom] and [arsd.jsvar].

View file

@ -224,7 +224,7 @@ class ClientSideFiltering {
private bool isPathExcluded(string path) {
// function variables
bool exclude = false;
bool exludeExactMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeExactMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeParentMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
bool anywhereRuleMatched = false; // will get updated if the 'anywhere' rule matches
@ -247,7 +247,7 @@ class ClientSideFiltering {
if (debugLogging) {
addLogEntry("******************* SYNC LIST RULES EVALUATION START *******************", ["debug"]);
addLogEntry("Evaluation against 'sync_list' rules for this input path: " ~ path, ["debug"]);
addLogEntry("[S]exludeExactMatch = " ~ to!string(exludeExactMatch), ["debug"]);
addLogEntry("[S]excludeExactMatch = " ~ to!string(excludeExactMatch), ["debug"]);
addLogEntry("[S]excludeParentMatched = " ~ to!string(excludeParentMatched), ["debug"]);
addLogEntry("[S]excludeAnywhereMatched = " ~ to!string(excludeAnywhereMatched), ["debug"]);
addLogEntry("[S]excludeWildcardMatched = " ~ to!string(excludeWildcardMatched), ["debug"]);
@ -259,16 +259,16 @@ class ClientSideFiltering {
// There are several matches we need to think of here
// Exclusions:
// !foldername/* = As there is no preceding '/' (after the !) .. this is a rule that should exclude 'foldername' and all its children ANYWHERE
// !*.extention = As there is no preceding '/' (after the !) .. this is a rule that should exclude any item that has the specified extention ANYWHERE
// !*.extension = As there is no preceding '/' (after the !) .. this is a rule that should exclude any item that has the specified extension ANYWHERE
// !/path/to/foldername/* = As there IS a preceding '/' (after the !) .. this is a rule that should exclude this specific path and all its children
// !/path/to/foldername/*.extention = As there IS a preceding '/' (after the !) .. this is a rule that should exclude any item that has the specified extention in this path ONLY
// !/path/to/foldername/*.extension = As there IS a preceding '/' (after the !) .. this is a rule that should exclude any item that has the specified extension in this path ONLY
// !/path/to/foldername/*/specific_target/* = As there IS a preceding '/' (after the !) .. this excludes 'specific_target' in any subfolder of '/path/to/foldername/'
//
// Inclusions:
// foldername/* = As there is no preceding '/' .. this is a rule that should INCLUDE 'foldername' and all its children ANYWHERE
// *.extention = As there is no preceding '/' .. this is a rule that should INCLUDE any item that has the specified extention ANYWHERE
// *.extension = As there is no preceding '/' .. this is a rule that should INCLUDE any item that has the specified extension ANYWHERE
// /path/to/foldername/* = As there IS a preceding '/' .. this is a rule that should INCLUDE this specific path and all its children
// /path/to/foldername/*.extention = As there IS a preceding '/' .. this is a rule that should INCLUDE any item that has the specified extention in this path ONLY
// /path/to/foldername/*.extension = As there IS a preceding '/' .. this is a rule that should INCLUDE any item that has the specified extension in this path ONLY
// /path/to/foldername/*/specific_target/* = As there IS a preceding '/' .. this INCLUDES 'specific_target' in any subfolder of '/path/to/foldername/'
if (debugLogging) {addLogEntry("------------------------------ NEW RULE --------------------------------", ["debug"]);}
@ -332,8 +332,8 @@ class ClientSideFiltering {
} else {
// Exclude rule
if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: exclusion direct match - path to be excluded", ["debug"]);}
// flag exludeExactMatch so that a 'wildcard match' will not override this exclude
exludeExactMatch = true;
// flag excludeExactMatch so that a 'wildcard match' will not override this exclude
excludeExactMatch = true;
exclude = true;
// final result
finalResult = true;
@ -398,10 +398,10 @@ class ClientSideFiltering {
// Is the 'sync_list' rule an 'anywhere' rule?
// EXCLUSION
// !foldername/*
// !*.extention
// !*.extension
// INCLUSION
// foldername/*
// *.extention
// *.extension
if (to!string(syncListRuleEntry[0]) != "/") {
// reset anywhereRuleMatched
anywhereRuleMatched = false;
@ -526,14 +526,14 @@ class ClientSideFiltering {
addLogEntry("------------------------------------------------------------------------", ["debug"]);
// Interim results after checking each 'sync_list' rule against the input path
addLogEntry("[F]exludeExactMatch = " ~ to!string(exludeExactMatch), ["debug"]);
addLogEntry("[F]excludeExactMatch = " ~ to!string(excludeExactMatch), ["debug"]);
addLogEntry("[F]excludeParentMatched = " ~ to!string(excludeParentMatched), ["debug"]);
addLogEntry("[F]excludeAnywhereMatched = " ~ to!string(excludeAnywhereMatched), ["debug"]);
addLogEntry("[F]excludeWildcardMatched = " ~ to!string(excludeWildcardMatched), ["debug"]);
}
// If any of these exclude match items is true, then finalResult has to be flagged as true
if ((exclude) || (exludeExactMatch) || (excludeParentMatched) || (excludeAnywhereMatched) || (excludeWildcardMatched)) {
if ((exclude) || (excludeExactMatch) || (excludeParentMatched) || (excludeAnywhereMatched) || (excludeWildcardMatched)) {
finalResult = true;
}

View file

@ -176,7 +176,7 @@ class ApplicationConfig {
private string previousConfigHash = "";
private string previousSyncListHash = "";
// Store items that come in from the 'config' file, otherwise these need to be set the the defaults
// Store items that come in from the 'config' file, otherwise these need to be set to the defaults
private string configFileSyncDir = defaultSyncDir;
private string configFileSkipFile = ""; // Default for now; if, after reading in any user configuration, this is still empty, the default will be used
private string configFileSkipDir = ""; // Default here is no directories are skipped
@ -272,7 +272,7 @@ class ApplicationConfig {
longValues["data_timeout"] = defaultDataTimeout;
// What IP protocol version should be used when communicating with OneDrive
longValues["ip_protocol_version"] = defaultIpProtocol; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// What is the default age that a curl engine should be left idle for, before being being destroyed
// What is the default age that a curl engine should be left idle for, before being destroyed
longValues["max_curl_idle"] = 120;
// Number of concurrent threads
@ -381,7 +381,7 @@ class ApplicationConfig {
// - This is especially beneficial when debugging or performing memory tests with Valgrind
longValues["monitor_max_loop"] = 0;
// display_sync_options = true | false
// - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging
// - It may be desirable to see what options are being passed into performSync() without enabling the full verbose debug logging
boolValues["display_sync_options"] = false;
// force_children_scan = true | false
// - Force client to use /children rather than /delta to query changes on OneDrive
@ -441,7 +441,7 @@ class ApplicationConfig {
// create the directory
mkdirRecurse(configDirName);
// Configure the applicable permissions for the folder
configDirName.setAttributes(returnRequiredDirectoryPermisions());
configDirName.setAttributes(returnRequiredDirectoryPermissions());
} else {
// The config path exists
// The path that exists must be a directory, not a file
@ -650,7 +650,7 @@ class ApplicationConfig {
}
// Configure the directory octal permission value
void configureRequiredDirectoryPermisions() {
void configureRequiredDirectoryPermissions() {
// return the directory permission mode required
// - return octal!defaultDirectoryPermissionMode; ... can't be used .. which is odd
// Error: variable defaultDirectoryPermissionMode cannot be read at compile time
@ -668,7 +668,7 @@ class ApplicationConfig {
}
// Configure the file octal permission value
void configureRequiredFilePermisions() {
void configureRequiredFilePermissions() {
// return the file permission mode required
// - return octal!defaultFilePermissionMode; ... can't be used .. which is odd
// Error: variable defaultFilePermissionMode cannot be read at compile time
@ -686,20 +686,20 @@ class ApplicationConfig {
}
// Read the configuredDirectoryPermissionMode and return
int returnRequiredDirectoryPermisions() {
int returnRequiredDirectoryPermissions() {
if (configuredDirectoryPermissionMode == 0) {
// the configured value is zero, this means that directories would get
// values of d---------
configureRequiredDirectoryPermisions();
configureRequiredDirectoryPermissions();
}
return configuredDirectoryPermissionMode;
}
// Read the configuredFilePermissionMode and return
int returnRequiredFilePermisions() {
int returnRequiredFilePermissions() {
if (configuredFilePermissionMode == 0) {
// the configured value is zero
configureRequiredFilePermisions();
configureRequiredFilePermissions();
}
return configuredFilePermissionMode;
}
@ -1205,7 +1205,7 @@ class ApplicationConfig {
// Is this a running as a container
if (entrypointExists) {
// write this to the config file so that when config optins are checked again, this matches on next run
// write this to the config file so that when config options are checked again, this matches on next run
applicableConfigFilePathFileHandleWrite.writeln(newConfigOptionSyncDirLine);
}
@ -1952,9 +1952,9 @@ class ApplicationConfig {
} else {
// Debug log output what permissions are being set to
if (debugLogging) {addLogEntry("Configuring default new folder permissions as: " ~ to!string(getValueLong("sync_dir_permissions")), ["debug"]);}
configureRequiredDirectoryPermisions();
configureRequiredDirectoryPermissions();
if (debugLogging) {addLogEntry("Configuring default new file permissions as: " ~ to!string(getValueLong("sync_file_permissions")), ["debug"]);}
configureRequiredFilePermisions();
configureRequiredFilePermissions();
}
// --upload-only and --download-only cannot be used together
@ -2043,7 +2043,7 @@ class ApplicationConfig {
// --sync and --display-sync-status cannot be used together
if ((getValueBool("synchronize")) && (getValueBool("display_sync_status"))) {
addLogEntry("ERROR: --sync and and --display-sync-status cannot be used together");
addLogEntry("ERROR: --sync and --display-sync-status cannot be used together");
operationalConflictDetected = true;
}
@ -2055,7 +2055,7 @@ class ApplicationConfig {
// --sync and --display-quota cannot be used together
if ((getValueBool("synchronize")) && (getValueBool("display_quota"))) {
addLogEntry("ERROR: --sync and and --display-quota cannot be used together");
addLogEntry("ERROR: --sync and --display-quota cannot be used together");
operationalConflictDetected = true;
}

View file

@ -185,7 +185,7 @@ int main(string[] cliArgs) {
return EXIT_FAILURE;
}
// Update the current runtime application configuration (default or 'config' fileread-in options) from any passed in command line arguments
// Update the current runtime application configuration (default or 'config' file read in options) from any passed in command line arguments
appConfig.updateFromArgs(cliArgs);
// If --disable-notifications has not been used, check if everything exists to enable notifications
@ -245,7 +245,7 @@ int main(string[] cliArgs) {
// Configure application logging to a log file only if this has been enabled
// This is the earliest point that this can be done, as the client configuration has been read in, and any CLI arguments have been processed.
// Either of those ('confif' file, CPU arguments) could be enabling logging, thus this is the earliest point at which this can be validated and enabled.
// Either of those ('config' file, CLI arguments) could be enabling logging, thus this is the earliest point at which this can be validated and enabled.
// The buffered logging also ensures that all 'output' to this point is also captured and written out to the log file
if (appConfig.getValueBool("enable_logging")) {
// Calculate the application logging directory
@ -464,7 +464,7 @@ int main(string[] cliArgs) {
addLogEntry();
addLogEntry("Unable to reach the Microsoft OneDrive API service at this point in time, re-trying network tests based on applicable intervals");
addLogEntry();
if (!retryInternetConnectivtyTest(appConfig)) {
if (!retryInternetConnectivityTest(appConfig)) {
return EXIT_FAILURE;
}
}
@ -697,7 +697,7 @@ int main(string[] cliArgs) {
mkdirRecurse(runtimeSyncDirectory);
// Configure the applicable permissions for the folder
if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ runtimeSyncDirectory, ["debug"]);}
runtimeSyncDirectory.setAttributes(appConfig.returnRequiredDirectoryPermisions());
runtimeSyncDirectory.setAttributes(appConfig.returnRequiredDirectoryPermissions());
} catch (std.file.FileException e) {
// Creating the sync directory failed
addLogEntry("ERROR: Unable to create the configured local 'sync_dir' directory: " ~ e.msg, ["info", "notify"]);
@ -773,7 +773,7 @@ int main(string[] cliArgs) {
mkdirRecurse(singleDirectoryPath);
// Configure the applicable permissions for the folder
if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ singleDirectoryPath, ["debug"]);}
singleDirectoryPath.setAttributes(appConfig.returnRequiredDirectoryPermisions());
singleDirectoryPath.setAttributes(appConfig.returnRequiredDirectoryPermissions());
}
// Update the paths that we use to perform the sync actions
@ -934,12 +934,12 @@ int main(string[] cliArgs) {
immutable auto checkOnlineInterval = dur!"seconds"(appConfig.getValueLong("monitor_interval"));
immutable auto githubCheckInterval = dur!"seconds"(86400);
immutable ulong fullScanFrequency = appConfig.getValueLong("monitor_fullscan_frequency");
immutable ulong logOutputSupressionInterval = appConfig.getValueLong("monitor_log_frequency");
immutable ulong logOutputSuppressionInterval = appConfig.getValueLong("monitor_log_frequency");
immutable bool webhookEnabled = appConfig.getValueBool("webhook_enabled");
immutable string loopStartOutputMessage = "################################################## NEW LOOP ##################################################";
immutable string loopStopOutputMessage = "################################################ LOOP COMPLETE ###############################################";
// Changables
// Changeable variables
ulong monitorLoopFullCount = 0;
ulong fullScanFrequencyLoopCount = 0;
ulong monitorLogOutputLoopCount = 0;
@ -1012,16 +1012,16 @@ int main(string[] cliArgs) {
// 'monitor_log_frequency' controls how often, in a non-verbose application output mode,
// the full output of what is occurring is done. This is done to lessen the 'verbosity' of non-verbose
// logging, but only when running in --monitor
if (monitorLogOutputLoopCount > logOutputSupressionInterval) {
// unsurpress the logging output
if (monitorLogOutputLoopCount > logOutputSuppressionInterval) {
// re-enable the logging output as required
monitorLogOutputLoopCount = 1;
if (debugLogging) {addLogEntry("Unsuppressing initial sync log output", ["debug"]);}
if (debugLogging) {addLogEntry("Allowing initial sync log output", ["debug"]);}
appConfig.suppressLoggingOutput = false;
} else {
// do we suppress the logging output to absolute minimal
if (monitorLoopFullCount == 1) {
// application startup with --monitor
if (debugLogging) {addLogEntry("Unsuppressing initial sync log output", ["debug"]);}
if (debugLogging) {addLogEntry("Allowing initial sync log output", ["debug"]);}
appConfig.suppressLoggingOutput = false;
} else {
// only suppress if we are not doing --verbose or higher
@ -1029,7 +1029,7 @@ int main(string[] cliArgs) {
if (debugLogging) {addLogEntry("Suppressing --monitor log output", ["debug"]);}
appConfig.suppressLoggingOutput = true;
} else {
if (debugLogging) {addLogEntry("Unsuppressing log output", ["debug"]);}
if (debugLogging) {addLogEntry("Allowing log output", ["debug"]);}
appConfig.suppressLoggingOutput = false;
}
}
@ -1584,9 +1584,9 @@ void shutdownDatabase() {
if (itemDB !is null && itemDB.isDatabaseInitialised()) {
if (debugLogging) {addLogEntry("Shutting down Database instance", ["debug"]);}
if (performDatabaseVacuum) {
// Logging to attempt this is dentoed from performVacuum() - so no need to confirm here
// Logging to attempt this is denoted from performVacuum() - so no need to confirm here
itemDB.performVacuum();
// If this completes, it is dentoed from performVacuum() - so no need to confirm here
// If this completes, it is denoted from performVacuum() - so no need to confirm here
}
itemDB.closeDatabaseFile(); // Close the DB File Handle
object.destroy(itemDB);
@ -1613,7 +1613,7 @@ void shutdownApplicationLogging() {
thread_joinAll();
if (debugLogging) {addLogEntry("Application is exiting", ["debug"]);}
addLogEntry("#######################################################################################################################################", ["logFileOnly"]);
// Destroy the shared logging buffer which flushes any remaing logs
// Destroy the shared logging buffer which flushes any remaining logs
if (debugLogging) {addLogEntry("Shutting down Application Logging instance", ["debug"]);}
// Allow any logging complete before we exit
Thread.sleep(dur!("msecs")(500));

View file

@ -312,7 +312,7 @@ version(TestMain) {
void main() {
writeln(get_app_name());
set_app_name("bla");
set_app_name("blargh");
writeln(get_app_name());
writeln(get_server_caps());
writeln(get_server_info());

View file

@ -778,7 +778,7 @@ class OneDriveApi {
mkdirRecurse(newPath);
// Configure the applicable permissions for the folder
if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ newPath, ["debug"]);}
newPath.setAttributes(appConfig.returnRequiredDirectoryPermisions());
newPath.setAttributes(appConfig.returnRequiredDirectoryPermissions());
} catch (FileException exception) {
// display the error message
displayFileSystemErrorMessage(exception.msg, getFunctionName!({}));
@ -792,7 +792,7 @@ class OneDriveApi {
if (exists(saveToPath)) {
// File was downloaded successfully - configure the applicable permissions for the file
if (debugLogging) {addLogEntry("Setting file permissions for: " ~ saveToPath, ["debug"]);}
saveToPath.setAttributes(appConfig.returnRequiredFilePermisions());
saveToPath.setAttributes(appConfig.returnRequiredFilePermissions());
}
}
@ -901,7 +901,7 @@ class OneDriveApi {
if (debugLogging) {addLogEntry("Updating refreshToken on disk", ["debug"]);}
std.file.write(appConfig.refreshTokenFilePath, refreshToken);
if (debugLogging) {addLogEntry("Setting file permissions for: " ~ appConfig.refreshTokenFilePath, ["debug"]);}
appConfig.refreshTokenFilePath.setAttributes(appConfig.returnRequiredFilePermisions());
appConfig.refreshTokenFilePath.setAttributes(appConfig.returnRequiredFilePermissions());
} catch (FileException exception) {
// display the error message
displayFileSystemErrorMessage(exception.msg, getFunctionName!({}));
@ -1221,7 +1221,7 @@ class OneDriveApi {
}
}
// If retryAtempts is greater than 1, it means we were re-trying the request
// If retryAttempts is greater than 1, it means we were re-trying the request
if (retryAttempts > 1) {
// No error from http.perform() on re-try
if (!transientError) {

View file

@ -108,7 +108,7 @@ class SyncEngine {
string[] pathFakeDeletedArray;
// Array of database Parent Item ID, Item ID & Local Path where the content has changed and needs to be uploaded
string[3][] databaseItemsWhereContentHasChanged;
// Array of local file paths that need to be uploaded as new itemts to OneDrive
// Array of local file paths that need to be uploaded as new items to OneDrive
string[] newLocalFilesToUploadToOneDrive;
// Array of local file paths that failed to be uploaded to OneDrive
string[] fileUploadFailures;
@ -751,7 +751,7 @@ class SyncEngine {
addLogEntry("The OneDrive Client was asked to search for this directory online and create it if it's not located: " ~ normalisedSingleDirectoryPath);
// Query the OneDrive API for the specified path online
// In a --single-directory scenario, we need to travervse the entire path that we are wanting to sync
// In a --single-directory scenario, we need to traverse the entire path that we are wanting to sync
// and then check the path element does it exist online, if it does, is it a POSIX match, or if it does not, create the path
// Once we have searched online, we have the right drive id and item id so that we can downgrade the sync status, then build up
// any object items from that location
@ -994,12 +994,12 @@ class SyncEngine {
// This is an API capability gap:
//
// ..
// @odata.nextLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>F9JRD0zODEyNzg7JTIzOyUyMzA7JTIz
// @odata.nextLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>
// Processing API Response Bundle: 115 - Quantity of 'changes|items' in this bundle to process: 204
// ..
// @odata.nextLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>F9JRD0zODM2Nzg7JTIzOyUyMzA7JTIz
// @odata.nextLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>
// Processing API Response Bundle: 127 - Quantity of 'changes|items' in this bundle to process: 204
// @odata.nextLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>F9JRD0zODM4Nzg7JTIzOyUyMzA7JTIz
// @odata.nextLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>
// Processing API Response Bundle: 128 - Quantity of 'changes|items' in this bundle to process: 176
// @odata.deltaLink: https://graph.microsoft.com/v1.0/drives/<redacted>/items/<redacted>/delta?token=<redacted>
// Finished processing /delta JSON response from the OneDrive API
@ -1082,7 +1082,7 @@ class SyncEngine {
// Perform Garbage Collection
GC.collect();
} else {
// Why are are generating a /delta response
// Why are we generating a /delta response
if (debugLogging) {
addLogEntry("Why are we generating a /delta response:", ["debug"]);
addLogEntry(" singleDirectoryScope: " ~ to!string(singleDirectoryScope), ["debug"]);
@ -2168,7 +2168,7 @@ class SyncEngine {
handleLocalDirectoryCreation(newDatabaseItem, newItemPath, onedriveJSONItem);
break;
case ItemType.remote:
// Add to the directory and relevant detils for processing later
// Add to the directory and relevant details for processing later
if (newDatabaseItem.remoteType == ItemType.dir) {
handleLocalDirectoryCreation(newDatabaseItem, newItemPath, onedriveJSONItem);
} else {
@ -2196,7 +2196,7 @@ class SyncEngine {
mkdirRecurse(newItemPath);
// Configure the applicable permissions for the folder
if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]);}
newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions());
newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermissions());
// Update the time of the folder to match the last modified time as is provided by OneDrive
// If there are any files then downloaded into this folder, the last modified time will get
// updated by the local Operating System with the latest timestamp - as this is normal operation
@ -2307,7 +2307,7 @@ class SyncEngine {
fileJSONItemsToDownload ~= onedriveJSONItem;
} else {
// If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive
// Unfortunately because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
// Unfortunately because of the consequence of National Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
// This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes
// as all records are touched / updated when performing the OneDrive sync operations. The impacted operational modes are:
// - National Cloud Deployments do not support /delta as a query
@ -2341,7 +2341,7 @@ class SyncEngine {
// The existingDatabaseItem.eTag == changedOneDriveItem.eTag .. nothing has changed eTag wise
// If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive
// Unfortunately because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
// Unfortunately because of the consequence of National Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes
// This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes
// as all records are touched / updated when performing the OneDrive sync operations. The impacted operational modes are:
// - National Cloud Deployments do not support /delta as a query
@ -2871,7 +2871,7 @@ class SyncEngine {
forceExit();
}
// Display the pertinant details of the sync engine
// Display the pertinent details of the sync engine
void displaySyncEngineDetails() {
// Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes
if (verboseLogging) {
@ -3216,7 +3216,7 @@ class SyncEngine {
// Log DB items to process
if (debugLogging) {addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]);}
// Process each database database item associated with the driveId
// Process each database item associated with the driveId
foreach(dbItem; driveItems) {
// Does it still exist on disk in the location the DB thinks it is
checkDatabaseItemForConsistency(dbItem);
@ -3229,7 +3229,7 @@ class SyncEngine {
driveItems = itemDB.selectByDriveId(driveId);
if (debugLogging) {addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]);}
// Process each database database item associated with the driveId
// Process each database item associated with the driveId
foreach(dbItem; driveItems) {
// Does it still exist on disk in the location the DB thinks it is
checkDatabaseItemForConsistency(dbItem);
@ -3467,7 +3467,7 @@ class SyncEngine {
} else {
// Directory still exists locally
if (verboseLogging) {addLogEntry("The directory has not changed", ["verbose"]);}
// When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed
// When we are using --single-directory, we use the getChildren() call to get all children of a path, meaning all children are already traversed
// Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal
if (!singleDirectoryScope) {
// loop through the children
@ -3975,7 +3975,7 @@ class SyncEngine {
// What is the path of the new item
string newItemPath;
// Is the parent in the database? If not, we cannot compute the the full path based on the database entries
// Is the parent in the database? If not, we cannot compute the full path based on the database entries
// In a --resync scenario - the database is empty
if (parentInDatabase) {
// Calculate this items path based on database entries
@ -4046,7 +4046,7 @@ class SyncEngine {
}
}
// If this is a Shared Folder, we need to 'trim' the resulting path to that of the 'folder' that is actually shared with us so that this can be appropriatly checked against 'sync_list' entries
// If this is a Shared Folder, we need to 'trim' the resulting path to that of the 'folder' that is actually shared with us so that this can be appropriately checked against 'sync_list' entries
if (sharedFolderDeltaGeneration) {
// Find the index of 'currentSharedFolderName' in 'newItemPath'
int pos = cast(int) newItemPath.indexOf(currentSharedFolderName);
@ -4104,7 +4104,7 @@ class SyncEngine {
if (!parentInDatabase) {
// Parental database structure needs to be created
if (verboseLogging) {addLogEntry("Parental Path structure needs to be created to support included file: " ~ dirName(newItemPath), ["verbose"]);}
// Recursivly, stepping backward from 'thisItemParentId', query online, save entry to DB
// Recursively, stepping backward from 'thisItemParentId', query online, save entry to DB
createLocalPathStructure(onedriveJSONItem);
// If this is --dry-run
@ -4149,7 +4149,7 @@ class SyncEngine {
string thisItemDriveId;
string thisItemParentId;
// Log what we recieved to analyse
// Log what we received to analyse
if (debugLogging) {addLogEntry("createLocalPathStructure input onedriveJSONItem: " ~ to!string(onedriveJSONItem), ["debug"]);}
// Configure these variables based on the JSON input
@ -5013,7 +5013,7 @@ class SyncEngine {
}
}
// Check this path against the Microsoft Naming Conventions & Restristions
// Check this path against the Microsoft Naming Conventions & Restrictions
// - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders
// - Check path for bad whitespace items
// - Check path for HTML ASCII Codes
@ -5393,7 +5393,7 @@ class SyncEngine {
if (childAsLower == thisFolderNameAsLower) {
// This is a POSIX 'case-insensitive match' ..... in folder name only
// - Local item name has a 'case-insensitive match' to an existing item on OneDrive
// The 'parentId' of this JSON object must match the the parentId of where the folder was created
// The 'parentId' of this JSON object must match the parentId of where the folder was created
// - why .. we might have the same folder name, but somewhere totally different
if (queryItem.id == thisChildItem.parentId) {
@ -5703,7 +5703,7 @@ class SyncEngine {
}
}
// If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty
// If the parent path was found in the DB, to ensure we are uploading the right location 'parentItem.driveId' must not be empty
if ((parentPathFoundInDB) && (parentItem.driveId.empty)) {
// switch to using defaultDriveId
if (debugLogging) {addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls", ["debug"]);}
@ -6461,7 +6461,7 @@ class SyncEngine {
addLogEntry("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve your data on Microsoft OneDrive");
addLogEntry("ERROR: The total number of items being deleted is: " ~ to!string(itemsToDelete));
addLogEntry("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value");
addLogEntry("ERROR: Optionally, perform a --resync to reset your local syncronisation state");
addLogEntry("ERROR: Optionally, perform a --resync to reset your local synchronisation state");
// Must exit here to preserve data online, and allow logging to be done
forceExit();
}
@ -6502,7 +6502,7 @@ class SyncEngine {
actualItemToDelete = remoteShortcutLinkItem;
// Delete the shortcut reference in the local database
itemDB.deleteById(remoteShortcutLinkItem.driveId, remoteShortcutLinkItem.id);
if (debugLogging) {addLogEntry("Deleted OneDrive Business Shared Folder 'Shorcut Link'", ["debug"]);}
if (debugLogging) {addLogEntry("Deleted OneDrive Business Shared Folder 'Shortcut Link'", ["debug"]);}
} else {
// No data was returned, use the original data
actualItemToDelete = itemToDelete;
@ -7634,7 +7634,7 @@ class SyncEngine {
unwanted = checkPathAgainstClientSideFiltering(newPath);
}
// Check this path against the Microsoft Naming Conventions & Restristions
// Check this path against the Microsoft Naming Conventions & Restrictions
// - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders
// - Check path for bad whitespace items
// - Check path for HTML ASCII Codes
@ -7666,7 +7666,7 @@ class SyncEngine {
if (!itemDB.selectByPath(dirName(newPath), appConfig.defaultDriveId, parentItem)) {
// the parent item is not in the database
throw new SyncException("Can't move an item to an unsynced directory");
throw new SyncException("Can't move an item to an unsynchronised directory");
}
if (oldItem.driveId != parentItem.driveId) {
@ -8239,7 +8239,7 @@ class SyncEngine {
if (hasHashes(onedriveJSONItem)) {
// At a minimum we require 'quickXorHash' to exist
if (hasQuickXorHash(onedriveJSONItem)) {
// JSON itme has a hash we can use
// JSON item has a hash we can use
thisItemHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str;
}
@ -8554,7 +8554,7 @@ class SyncEngine {
// Does this pass validation?
if (!validateUploadSessionFileData(sessionFilePath)) {
// Remove upload_session file as it is invalid
// upload_session file file contains an error - cant resume this session
// upload_session file contains an error - cant resume this session
if (verboseLogging) {addLogEntry("Restore file upload session failed - cleaning up resumable session data file: " ~ sessionFilePath, ["verbose"]);}
// cleanup session path
@ -8569,7 +8569,7 @@ class SyncEngine {
// At this point we should have an array of JSON items to resume uploading
if (count(jsonItemsToResumeUpload) > 0) {
// there are valid items to resume upload
// Lets deal with all the JSON items that need to be reumed for upload in a batch process
// Let's deal with all the JSON items that need to be resumed for upload in a batch process
size_t batchSize = to!int(appConfig.getValueLong("threads"));
ulong batchCount = (jsonItemsToResumeUpload.length + batchSize - 1) / batchSize;
ulong batchesProcessed = 0;
@ -9515,7 +9515,7 @@ class SyncEngine {
// We do not check this path against the Client Side Filtering Rules as this is 100% an online move only
// Check this path against the Microsoft Naming Conventions & Restristions
// Check this path against the Microsoft Naming Conventions & Restrictions
// - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders
// - Check path for bad whitespace items
// - Check path for HTML ASCII Codes

View file

@ -264,7 +264,7 @@ bool testInternetReachability(ApplicationConfig appConfig) {
}
// Retry Internet access test to Microsoft OneDrive
bool retryInternetConnectivtyTest(ApplicationConfig appConfig) {
bool retryInternetConnectivityTest(ApplicationConfig appConfig) {
int retryAttempts = 0;
int backoffInterval = 1; // initial backoff interval in seconds
int maxBackoffInterval = 3600; // maximum backoff interval in seconds
@ -1173,7 +1173,7 @@ void displayMemoryUsagePostGC() {
// Update previous RSS with the new value
previousRSS = rss;
// Closout
// Closeout
addLogEntry();
}