Mirror of https://framagit.org/ppom/reaction (synced 2026-03-14 20:55:47 +01:00)

Compare commits
148 commits
(The commit table's Author and Date columns were lost in the mirror; only the SHA1 column survived. The listed commits run from 8a34a1fa11 down to fc6a385574.)
89 changed files with 13200 additions and 2446 deletions
.envrc (new file, +1)
@@ -0,0 +1 @@
+use_nix
.gitignore (vendored, 13 lines changed)
@@ -1,22 +1,15 @@
 /reaction
-/ip46tables
-/nft46
-reaction*.db
-reaction*.db.old
+reaction.db
+reaction.db.old
 /data
-/lmdb
-reaction*.export.json
 /reaction*.sock
 /result
 /wiki
-/deb
 *.deb
 *.minisig
 *.qcow2
-debian-packaging/*
 *.swp
-export-go-db/export-go-db
-import-rust-db/target
 /target
 /local
 .ccls-cache
+.direnv
(deleted file; name not captured in the mirror, likely .gitlab-ci.yml, a GitLab CI configuration)
@@ -1,15 +0,0 @@
----
-image: golang:1.20-bookworm
-stages:
-  - build
-
-variables:
-  DEBIAN_FRONTEND: noninteractive
-
-test_building:
-  stage: build
-  before_script:
-    - apt-get -qq -y update
-    - apt-get -qq -y install build-essential devscripts debhelper quilt wget
-  script:
-    - make reaction ip46tables nft46
(file name not captured; the codebase-overview document)
@@ -6,6 +6,7 @@ Here is a high-level overview of the codebase.
 
 ## Build
 
+- `bench/`: Configuration that spawns a very high load on reaction. Useful to test performance improvements and regressions.
 - `build.rs`: permits to create shell completions and man pages on build.
 - `Cargo.toml`, `Cargo.lock`: manifest and dependencies.
 - `config/`: example / test configuration files. Look at its git history to discover more.
@@ -15,8 +16,7 @@ Here is a high-level overview of the codebase.
 
 ## Main source code
 
-- `helpers_c/`: C helpers. I wish to have special IP support in reaction and get rid of them. See #79 and #116.
-- `tests/`: Integration tests. For now they test basic reaction runtime behavior, persistance, and client-daemon communication.
+- `tests/`: Integration tests. They test reaction runtime behavior, persistance, client-daemon communication, plugin integrations.
 - `src/`: The source code, here we go!
 
 ### Top-level files
@@ -25,18 +25,13 @@ Here is a high-level overview of the codebase.
 - `src/lib.rs`: Second main entrypoint
 - `src/cli.rs`: Command-line arguments
 - `src/tests.rs`: Test utilities
+- `src/protocol.rs`: de/serialization and client/daemon protocol messages.
 
 ### `src/concepts/`
 
 reaction really is about its configuration, which is at the center of the code.
 
-There is one file for each of its concepts: configuration, streams, filters, actions, patterns.
-
-### `src/protocol/`
-
-Low-level serialization/deserialization and client-daemon protocol messages.
-
-Shared by the client and daemon's socket. Also used by daemon's database.
+There is one file for each of its concepts: configuration, streams, filters, actions, patterns, plugins.
 
 ### `src/client/`
 
@@ -58,9 +53,9 @@ This code has async code, to handle input streams and communication with clients
 - `mod.rs`: High-level logic
 - `state.rs`: Inner state operations
 - `socket.rs`: The socket task, responsible for communication with clients.
-- `shutdown.rs`: Logic for passing shutdown signal across all tasks
+- `plugin.rs`: Plugin startup, configuration loading and cleanup.
 
-### `src/tree`
+### `crates/treedb`
 
 Persistence layer.
 
@@ -68,5 +63,19 @@ This is a database highly adapted to reaction workload, making reaction faster t
 (heed, sled and fjall crates have been tested).
 Its design is explained in the comments of its files:
 
-- `mod.rs`: main database code, with its two API structs: Tree and Database.
-- `raw.rs` low-level part, directly interacting with de/serializisation and files.
+- `lib.rs`: main database code, with its two API structs: Tree and Database.
+- `raw.rs`: low-level part, directly interacting with de/serializisation and files.
+- `time.rs`: time definitions shared with reaction.
+- `helpers.rs`: utilities to ease db deserialization from disk.
+
+### `plugins/reaction-plugin`
+
+Shared plugin interface between reaction daemon and its plugins.
+
+Also defines some shared logic between them:
+- `shutdown.rs`: Logic for passing shutdown signal across all tasks
+- `parse_duration.rs` Duration parsing
+
+### `plugins/reaction-plugin-*`
+
+All core plugins.
Cargo.lock (generated, 3887 lines changed): file diff suppressed because it is too large.
Cargo.toml (77 lines changed)
@@ -1,7 +1,7 @@
 [package]
 name = "reaction"
-version = "2.2.0"
-edition = "2021"
+version = "2.3.0"
+edition = "2024"
 authors = ["ppom <reaction@ppom.me>"]
 license = "AGPL-3.0"
 description = "Scan logs and take action"
@@ -10,40 +10,58 @@ homepage = "https://reaction.ppom.me"
 repository = "https://framagit.org/ppom/reaction"
 keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"]
 build = "build.rs"
+default-run = "reaction"
 
 [package.metadata.deb]
+section = "net"
+extended-description = """A daemon that scans program outputs for repeated patterns, and takes action.
+A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors.
+reaction aims at being a successor to fail2ban."""
 maintainer-scripts = "packaging/"
 systemd-units = { enable = false }
 assets = [
   # Executables
   [ "target/release/reaction", "/usr/bin/reaction", "755" ],
-  [ "target/release/ip46tables", "/usr/bin/ip46tables", "755" ],
-  [ "target/release/nft46", "/usr/bin/nft46", "755" ],
+  [ "target/release/reaction-plugin-virtual", "/usr/bin/reaction-plugin-virtual", "755" ],
   # Man pages
   [ "target/release/reaction*.1", "/usr/share/man/man1/", "644" ],
   # Shell completions
   [ "target/release/reaction.bash", "/usr/share/bash-completion/completions/reaction", "644" ],
   [ "target/release/reaction.fish", "/usr/share/fish/completions/", "644" ],
   [ "target/release/_reaction", "/usr/share/zsh/vendor-completions/", "644" ],
+  # Slice
+  [ "packaging/system-reaction.slice", "/usr/lib/systemd/system/", "644" ],
 ]
 
 [dependencies]
-chrono = { version = "0.4.38", features = ["std", "clock", "serde"] }
+# Time types
+chrono.workspace = true
+# CLI parsing
 clap = { version = "4.5.4", features = ["derive"] }
-jrsonnet-evaluator = "0.4.2"
+# Unix interfaces
 nix = { version = "0.29.0", features = ["signal"] }
 num_cpus = "1.16.0"
+# Regex matching
 regex = "1.10.4"
-serde = { version = "1.0.203", features = ["derive"] }
-serde_json = "1.0.117"
+# Configuration languages, ser/deserialisation
+serde.workspace = true
+serde_json.workspace = true
 serde_yaml = "0.9.34"
-thiserror = "1.0.63"
-timer = "0.2.0"
-futures = "0.3.30"
-tokio = { version = "1.40.0", features = ["full", "tracing"] }
-tokio-util = { version = "0.7.12", features = ["codec"] }
-tracing = "0.1.40"
+jrsonnet-evaluator = "0.4.2"
+# Error macro
+thiserror.workspace = true
+# Async runtime & helpers
+futures = { workspace = true }
+tokio = { workspace = true, features = ["full", "tracing"] }
+tokio-util = { workspace = true, features = ["codec"] }
+# Async logging
+tracing.workspace = true
 tracing-subscriber = "0.3.18"
+# Database
+treedb.workspace = true
+# Reaction plugin system
+remoc.workspace = true
+reaction-plugin.workspace = true
 
 [build-dependencies]
 clap = { version = "4.5.4", features = ["derive"] }
@@ -54,7 +72,34 @@ tracing = "0.1.40"
 
 [dev-dependencies]
 rand = "0.8.5"
-tempfile = "3.12.0"
-assert_fs = "1.1.3"
+treedb.workspace = true
+treedb.features = ["test"]
+tempfile.workspace = true
+assert_fs.workspace = true
 assert_cmd = "2.0.17"
 predicates = "3.1.3"
+
+[workspace]
+members = [
+  "crates/treedb",
+  "plugins/reaction-plugin",
+  "plugins/reaction-plugin-cluster",
+  "plugins/reaction-plugin-ipset",
+  "plugins/reaction-plugin-nftables",
+  "plugins/reaction-plugin-virtual"
+]
+
+[workspace.dependencies]
+assert_fs = "1.1.3"
+chrono = { version = "0.4.38", features = ["std", "clock", "serde"] }
+futures = "0.3.30"
+remoc = { version = "0.18.3" }
+serde = { version = "1.0.203", features = ["derive"] }
+serde_json = { version = "1.0.117", features = ["arbitrary_precision"] }
+tempfile = "3.12.0"
+thiserror = "1.0.63"
+tokio = { version = "1.40.0" }
+tokio-util = { version = "0.7.12" }
+tracing = "0.1.40"
+reaction-plugin = { path = "plugins/reaction-plugin" }
+treedb = { path = "crates/treedb" }
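A note on the new layout: with `[workspace.dependencies]`, the version and default features of each shared crate are declared once at the workspace root, and every member opts in with `name.workspace = true` (optionally adding member-specific features, as the `tokio` entry above does). This is standard Cargo workspace inheritance, so `reaction`, `crates/treedb` and the `plugins/*` crates all compile against the same dependency versions.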
Dockerfile (new file, +11)
@@ -0,0 +1,11 @@
+# This Dockerfile permits to build reaction and its plugins
+
+# Use debian old-stable, so that it runs on both old-stable and stable
+FROM rust:bookworm
+
+RUN apt update && apt install -y \
+    clang \
+    libipset-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /reaction
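A plausible way to use this image (not spelled out in the changeset) is to build it once and run the normal cargo commands inside it, for example `docker build -t reaction-build .` followed by `docker run --rm -v "$PWD":/reaction reaction-build cargo build --release --bins`. Building against Debian old-stable keeps the produced binaries loadable on both old-stable and stable, as the comment explains.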
Makefile (2 lines changed)
@@ -14,8 +14,6 @@ reaction:
 
 install: reaction
 	install -m755 target/release/reaction $(DESTDIR)$(BINDIR)
-	install -m755 target/release/ip46tables $(DESTDIR)$(BINDIR)
-	install -m755 target/release/nft46 $(DESTDIR)$(BINDIR)
 
 install_systemd: install
 	install -m644 packaging/reaction.service $(SYSTEMDDIR)/system/reaction.service
README.md (25 lines changed)
@@ -4,7 +4,7 @@ A daemon that scans program outputs for repeated patterns, and takes action.
 
 A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors.
 
-🚧 This program hasn't received external security audit. However, it already works well on my servers 🚧
+🚧 This program hasn't received external security audit yet. However, it already works well on many servers 🚧
 
 ## Rationale
 
@@ -67,8 +67,8 @@ streams:
       regex:
         - 'authentication failure;.*rhost=<ip>'
         - 'Failed password for .* from <ip>'
-        - 'Invalid user .* from <ip>',
-        - 'banner exchange: Connection from <ip> port [0-9]*: invalid format',
+        - 'Invalid user .* from <ip>'
+        - 'banner exchange: Connection from <ip> port [0-9]*: invalid format'
       retry: 3
       retryperiod: '6h'
      actions:
@@ -136,6 +136,9 @@ local banFor(time) = {
 
 </details>
 
+> It is recommended to setup reaction with [`nftables`](https://reaction.ppom.me/actions/nftables.html)
+> or [`ipset` + `iptables`](https://reaction.ppom.me/actions/ipset.html), which are much more performant
+> solutions than `iptables` alone.
+
 ### Database
 
@@ -152,19 +155,10 @@ If you don't know where to start reaction, `/var/lib/reaction` should be a sane
 - `reaction test-config` shows loaded configuration
 - `reaction help` for full usage.
 
-### `ip46tables` and `nft46`
+### old binaries
 
-> ⚠️Deprecated since v2.2.0:
-> reaction now provides builtin support for executing different actions on ipv4 and ipv6.
-> They will be removed in a future version.
-
-`ip46tables` and `nft46` are two minimal c programs present in the `helpers_c` directory with only standard posix dependencies.
-
-`ip46tables` permits to configure `iptables` and `ip6tables` at the same time.
-It will execute `iptables` when detecting ipv4, `ip6tables` when detecting ipv6 and both if no ip address is present on the command line.
-
-`nft46` works slightly differently: it will replace the `X` in its argument by 4 or 6 depending on the ip address on the command line.
-This permits to have 2 IP sets, one of type `ipv4_addr` and one of type `ipv6_addr`.
+`ip46tables` and `nft46` binaries are no longer part of reaction. If you really need them, see
+[the last commit that included them](https://framagit.org/ppom/reaction/-/tree/b7d997ca5e9a69c8572bb2ec9d27d0eb03b3cb9f/helpers_c).
 
 ## Wiki
 
@@ -245,6 +239,7 @@ French version: [#reaction-dev-fr:club1.fr](https://matrix.to/#/#reaction-dev-fr
 
 You can ask for help in the issues or in this Matrix room: [#reaction-users-en:club1.fr](https://matrix.to/#/#reaction-users-en:club1.fr).
 French version: [#reaction-users-fr:club1.fr](https://matrix.to/#/#reaction-users-fr:club1.fr).
+You can alternatively send a mail: `reaction` on domain `ppom.me`.
 
 ## Funding
 
TODO (8 lines changed)
@@ -1,9 +1,3 @@
 Test what happens when a Filter's pattern Set changes (I think it's shitty)
 
-stream: test regex ending with $
-
-should an ipv6-mapped ipv4 match a pattern of type ipv6?
-should it be normalized as ipv4 then?
-
-fix order of db writes subject to race condition (make writes async?)
 DB: add tests on stress testing (lines should always be in order)
+conf: merge filters
(file name not captured; a benchmark shell script)
@@ -14,7 +14,7 @@ then
 fi
 
 rm -f reaction.db
-cargo build --release
+cargo build --release --bins
 sudo systemd-run --wait \
     -p User="$(id -nu)" \
    -p MemoryAccounting=yes \
bench/small-heavy-load-virtual.yml (new file, +86)
@@ -0,0 +1,86 @@
+---
+# This configuration permits to test reaction's performance
+# under a very high load
+#
+# It keeps regexes super simple, to avoid benchmarking the `regex` crate,
+# and benchmark reaction's internals instead.
+concurrency: 32
+
+plugins:
+  - path: "/home/ppom/prg/reaction/target/release/reaction-plugin-virtual"
+
+patterns:
+  num:
+    regex: '[0-9]{3}'
+  ip:
+    regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})'
+    ignore:
+      - 1.0.0.1
+
+streams:
+  virtual:
+    type: virtual
+    filters:
+      find0:
+        regex:
+          - '^<num>$'
+        actions:
+          damn:
+            cmd: [ 'sleep', '0.0<num>' ]
+          undamn:
+            cmd: [ 'sleep', '0.0<num>' ]
+            after: 1m
+            onexit: false
+  tailDown1:
+    cmd: [ 'sh', '-c', 'sleep 2; seq 1001 | while read i; do echo found $i; done' ]
+    filters:
+      find1:
+        regex:
+          - '^found <num>'
+        retry: 9
+        retryperiod: 6m
+        actions:
+          virtual:
+            type: virtual
+            options:
+              send: '<num>'
+              to: virtual
+  tailDown2:
+    cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
+    filters:
+      find2:
+        regex:
+          - '^found <num>'
+        retry: 480
+        retryperiod: 6m
+        actions:
+          virtual:
+            type: virtual
+            options:
+              send: '<num>'
+              to: virtual
+  tailDown3:
+    cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
+    filters:
+      find3:
+        regex:
+          - '^found <num>'
+        retry: 480
+        retryperiod: 6m
+        actions:
+          virtual:
+            type: virtual
+            options:
+              send: '<num>'
+              to: virtual
+      find4:
+        regex:
+          - '^trouvé <num>'
+        retry: 480
+        retryperiod: 6m
+        actions:
+          virtual:
+            type: virtual
+            options:
+              send: '<num>'
+              to: virtual
bench/small-heavy-load.yml (new file, +74)
@@ -0,0 +1,74 @@
+---
+# This configuration permits to test reaction's performance
+# under a very high load
+#
+# It keeps regexes super simple, to avoid benchmarking the `regex` crate,
+# and benchmark reaction's internals instead.
+concurrency: 32
+
+patterns:
+  num:
+    regex: '[0-9]{3}'
+  ip:
+    regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})'
+    ignore:
+      - 1.0.0.1
+
+streams:
+  tailDown1:
+    cmd: [ 'sh', '-c', 'sleep 2; seq 1001 | while read i; do echo found $i; done' ]
+    filters:
+      find1:
+        regex:
+          - '^found <num>'
+        retry: 9
+        retryperiod: 6m
+        actions:
+          damn:
+            cmd: [ 'sleep', '0.0<num>' ]
+          undamn:
+            cmd: [ 'sleep', '0.0<num>' ]
+            after: 1m
+            onexit: false
+  tailDown2:
+    cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
+    filters:
+      find2:
+        regex:
+          - '^found <num>'
+        retry: 480
+        retryperiod: 6m
+        actions:
+          damn:
+            cmd: [ 'sleep', '0.0<num>' ]
+          undamn:
+            cmd: [ 'sleep', '0.0<num>' ]
+            after: 1m
+            onexit: false
+  tailDown3:
+    cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
+    filters:
+      find3:
+        regex:
+          - '^found <num>'
+        retry: 480
+        retryperiod: 6m
+        actions:
+          damn:
+            cmd: [ 'sleep', '0.0<num>' ]
+          undamn:
+            cmd: [ 'sleep', '0.0<num>' ]
+            after: 1m
+            onexit: false
+      find4:
+        regex:
+          - '^trouvé <num>'
+        retry: 480
+        retryperiod: 6m
+        actions:
+          damn:
+            cmd: [ 'sleep', '0.0<num>' ]
+          undamn:
+            cmd: [ 'sleep', '0.0<num>' ]
+            after: 1m
+            onexit: false
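Neither profile says how it is meant to be launched; presumably the daemon is started directly on it, e.g. `reaction start -c bench/small-heavy-load.yml` after a `cargo build --release --bins`, as the bench script above does. The virtual variant additionally hard-codes the `reaction-plugin-virtual` binary under `/home/ppom/prg/reaction/`, so that `plugins:` path has to be adjusted to your own checkout.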
build.rs (50 lines changed)
@@ -1,8 +1,6 @@
 use std::{
-    env::{var, var_os},
+    env::var_os,
     io::{self, ErrorKind},
-    path::Path,
-    process,
 };
 
 use clap_complete::shells;
@@ -10,54 +8,10 @@ use clap_complete::shells;
 // SubCommand defined here
 include!("src/cli.rs");
 
-fn cc() -> String {
-    // TARGET looks like aarch64-unknown-linux-musl
-    let cc = match var("TARGET") {
-        Ok(target) => {
-            // We're looking for an environment variable looking like
-            // CC_aarch64_unknown_linux_musl
-            let target = target.replace("-", "_");
-            var(format!("CC_{}", target.replace("-", "_"))).ok()
-        }
-        Err(_) => None,
-    };
-    match cc {
-        Some(cc) => Some(cc),
-        // Else we're looking for CC environment variable
-        None => var("CC").ok(),
-    }
-    // Else we use `cc`
-    .unwrap_or("cc".into())
-}
-
-fn compile_helper(cc: &str, name: &str, out_dir: &Path) -> io::Result<()> {
-    let mut args = vec![
-        format!("helpers_c/{name}.c"),
-        "-o".into(),
-        out_dir
-            .join(name)
-            .to_str()
-            .expect("could not join path")
-            .to_owned(),
-    ];
-    // We can build static executables in cross environment
-    if cc.ends_with("-gcc") {
-        args.push("-static".into());
-    }
-    process::Command::new(cc).args(args).spawn()?;
-    Ok(())
-}
-
 fn main() -> io::Result<()> {
     if var_os("PROFILE").ok_or(ErrorKind::NotFound)? == "release" {
         let out_dir = PathBuf::from(var_os("OUT_DIR").ok_or(ErrorKind::NotFound)?).join("../../..");
 
-        // Compile C helpers
-        let cc = cc();
-        println!("CC is: {}", cc);
-        compile_helper(&cc, "ip46tables", &out_dir)?;
-        compile_helper(&cc, "nft46", &out_dir)?;
-
         // Build CLI
         let cli = clap::Command::new("reaction");
         let cli = SubCommand::augment_subcommands(cli);
@@ -80,8 +34,6 @@ See usage examples, service configurations and good practices on the wiki: https
 
     println!("cargo::rerun-if-changed=build.rs");
     println!("cargo::rerun-if-changed=src/cli.rs");
-    println!("cargo::rerun-if-changed=helpers_c/ip46tables.c");
-    println!("cargo::rerun-if-changed=helpers_c/nft46.c");
 
     Ok(())
 }
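With the C helpers gone, the release-mode work in `build.rs` reduces to generating CLI assets: man pages and the shell completions packaged by the `Cargo.toml` assets above. The generation calls themselves are outside this hunk; purely as an illustration, completion files are typically emitted with `clap_complete::generate_to`, along these lines (the function name `emit_completions` is hypothetical):

```rust
// Hypothetical sketch of build-script completion generation with clap_complete.
// reaction's actual generation code is not shown in this changeset.
use std::path::Path;

use clap_complete::{generate_to, shells};

fn emit_completions(mut cli: clap::Command, out_dir: &Path) -> std::io::Result<()> {
    // Each call writes one file (reaction.bash, _reaction, reaction.fish) into
    // out_dir, matching the paths listed in the [package.metadata.deb] assets.
    generate_to(shells::Bash, &mut cli, "reaction", out_dir)?;
    generate_to(shells::Zsh, &mut cli, "reaction", out_dir)?;
    generate_to(shells::Fish, &mut cli, "reaction", out_dir)?;
    Ok(())
}
```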
(file name not captured; the systemd unit, likely packaging/reaction.service)
@@ -7,7 +7,7 @@ Documentation=https://reaction.ppom.me
 
 # See `man systemd.exec` and `man systemd.service` for most options below
 [Service]
-ExecStart=/usr/local/bin/reaction start -c /etc/reaction.jsonnet
+ExecStart=/usr/local/bin/reaction start -c /etc/reaction/
 
 # Ask systemd to create /var/lib/reaction (/var/lib/ is implicit)
 StateDirectory=reaction
@@ -15,6 +15,8 @@ StateDirectory=reaction
 RuntimeDirectory=reaction
 # Start reaction in its state directory
 WorkingDirectory=/var/lib/reaction
+# Let reaction kill its child processes first
+KillMode=mixed
 
 [Install]
 WantedBy=multi-user.target
crates/treedb/Cargo.toml (new file, +23)
@@ -0,0 +1,23 @@
+[package]
+name = "treedb"
+version = "1.0.0"
+edition = "2024"
+
+[features]
+test = []
+
+[dependencies]
+chrono.workspace = true
+futures.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tokio.features = ["rt-multi-thread", "macros", "io-util", "time", "fs", "tracing"]
+tokio-util.workspace = true
+tokio-util.features = ["rt"]
+tracing.workspace = true
+
+[dev-dependencies]
+tempfile.workspace = true
+
(file name not captured; treedb's JSON-decoding helpers, likely crates/treedb/src/helpers.rs)
@@ -1,9 +1,12 @@
-use std::collections::{BTreeMap, BTreeSet};
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    time::Duration,
+};
 
-use chrono::{DateTime, Local};
+use chrono::DateTime;
 use serde_json::Value;
 
-use crate::concepts::{Match, MatchTime, Time};
+use crate::time::Time;
 
 /// Tries to convert a [`Value`] into a [`String`]
 pub fn to_string(val: &Value) -> Result<String, String> {
@@ -15,19 +18,40 @@ pub fn to_u64(val: &Value) -> Result<u64, String> {
     val.as_u64().ok_or("not a u64".into())
 }
 
+/// Old way of converting time: with chrono's serialization
+fn old_string_to_time(val: &str) -> Result<Time, String> {
+    let time = DateTime::parse_from_rfc3339(val).map_err(|err| err.to_string())?;
+    Ok(Duration::new(time.timestamp() as u64, time.timestamp_subsec_nanos()).into())
+}
+
+/// New way of converting time: with our own implem
+fn new_string_to_time(val: &str) -> Result<Time, String> {
+    let nanos: u128 = val.parse().map_err(|_| "not a number")?;
+    Ok(Duration::new(
+        (nanos / 1_000_000_000) as u64,
+        (nanos % 1_000_000_000) as u32,
+    )
+    .into())
+}
+
+/// Tries to convert a [`&str`] into a [`Time`]
 fn string_to_time(val: &str) -> Result<Time, String> {
-    Ok(DateTime::parse_from_rfc3339(val)
-        .map_err(|err| err.to_string())?
-        .with_timezone(&Local))
+    match new_string_to_time(val) {
+        Err(err) => match old_string_to_time(val) {
+            Err(_) => Err(err),
+            ok => ok,
+        },
+        ok => ok,
+    }
 }
 
 /// Tries to convert a [`Value`] into a [`Time`]
 pub fn to_time(val: &Value) -> Result<Time, String> {
-    string_to_time(val.as_str().ok_or("not a datetime")?)
+    string_to_time(val.as_str().ok_or("not a string number")?)
 }
 
-/// Tries to convert a [`Value`] into a [`Match`]
-pub fn to_match(val: &Value) -> Result<Match, String> {
+/// Tries to convert a [`Value`] into a [`Vec<String>`]
+pub fn to_match(val: &Value) -> Result<Vec<String>, String> {
     val.as_array()
         .ok_or("not an array")?
         .iter()
@@ -35,15 +59,6 @@ pub fn to_match(val: &Value) -> Result<Match, String> {
         .collect()
 }
 
-/// Tries to convert a [`Value`] into a [`MatchTime`]
-pub fn to_matchtime(val: &Value) -> Result<MatchTime, String> {
-    let map = val.as_object().ok_or("not an object")?;
-    Ok(MatchTime {
-        m: to_match(map.get("m").ok_or("no m in object")?)?,
-        t: to_time(map.get("t").ok_or("no t in object")?)?,
-    })
-}
-
 /// Tries to convert a [`Value`] into a [`BTreeSet<Time>`]
 pub fn to_timeset(val: &Value) -> Result<BTreeSet<Time>, String> {
     val.as_array()
@@ -66,9 +81,6 @@ pub fn to_timemap(val: &Value) -> Result<BTreeMap<Time, u64>, String> {
 mod tests {
     use std::collections::BTreeMap;
 
-    use chrono::TimeZone;
-    use serde_json::Map;
-
     use super::*;
 
     #[test]
@@ -96,15 +108,12 @@ mod tests {
 
     #[test]
     fn test_to_time() {
-        assert_eq!(
-            to_time(&"1970-01-01T01:02:03.456+01:00".into()).unwrap(),
-            Local.timestamp_millis_opt(123456).unwrap(),
-        );
+        assert_eq!(to_time(&"123456".into()).unwrap(), Time::from_nanos(123456),);
         assert!(to_time(&(u64::MAX.into())).is_err());
 
         assert!(to_time(&(["ploup"].into())).is_err());
         assert!(to_time(&(true.into())).is_err());
-        assert!(to_time(&(12345.into())).is_err());
+        // assert!(to_time(&(12345.into())).is_err());
         assert!(to_time(&(None::<String>.into())).is_err());
     }
 
@@ -126,22 +135,14 @@ mod tests {
     #[test]
     fn test_to_timeset() {
         assert_eq!(
-            to_timeset(&(["1970-01-01T01:20:34.567+01:00"].into())),
-            Ok(BTreeSet::from([Local
-                .timestamp_millis_opt(1234567)
-                .unwrap()]))
+            to_timeset(&Value::from([Value::from("123456789")])),
+            Ok(BTreeSet::from([Time::from_nanos(123456789)]))
         );
         assert_eq!(
-            to_timeset(
-                &([
-                    "1970-01-01T01:00:00.008+01:00",
-                    "1970-01-01T01:02:03.456+01:00"
-                ]
-                .into())
-            ),
+            to_timeset(&Value::from([Value::from("8"), Value::from("123456")])),
             Ok(BTreeSet::from([
-                Local.timestamp_millis_opt(8).unwrap(),
-                Local.timestamp_millis_opt(123456).unwrap()
+                Time::from_nanos(8),
+                Time::from_nanos(123456),
             ]))
         );
         assert!(to_timeset(&[Value::from("plip"), Value::from(10)].into()).is_err());
@@ -153,76 +154,27 @@ mod tests {
         assert!(to_timeset(&(None::<String>.into())).is_err());
     }
 
-    #[test]
-    fn test_to_matchtime() {
-        assert_eq!(
-            to_matchtime(&Value::Object(Map::from_iter(
-                BTreeMap::from([
-                    ("m".into(), ["plip", "ploup"].into()),
-                    ("t".into(), "1970-01-01T04:25:45.678+01:00".into()),
-                ])
-                .into_iter()
-            ))),
-            Ok(MatchTime {
-                m: vec!["plip".into(), "ploup".into()],
-                t: Local.timestamp_millis_opt(12345678).unwrap(),
-            })
-        );
-
-        assert!(to_matchtime(&Value::Object(Map::from_iter(
-            BTreeMap::from([("m".into(), ["plip", "ploup"].into()),]).into_iter()
-        )))
-        .is_err());
-
-        assert!(to_matchtime(&Value::Object(Map::from_iter(
-            BTreeMap::from([("t".into(), 12345678.into()),]).into_iter()
-        )))
-        .is_err());
-
-        assert!(to_matchtime(&Value::Object(Map::from_iter(
-            BTreeMap::from([("m".into(), "ploup".into()), ("t".into(), 12345678.into()),])
-                .into_iter()
-        )))
-        .is_err());
-
-        assert!(to_matchtime(&Value::Object(Map::from_iter(
-            BTreeMap::from([
-                ("m".into(), ["plip", "ploup"].into()),
-                ("t".into(), [1234567].into()),
-            ])
-            .into_iter()
-        )))
-        .is_err());
-
-        assert!(to_timeset(&([""].into())).is_err());
-        assert!(to_timeset(&(["ploup"].into())).is_err());
-        assert!(to_timeset(&(true.into())).is_err());
-        assert!(to_timeset(&(8.into())).is_err());
-        assert!(to_timeset(&(None::<String>.into())).is_err());
-    }
-
     #[test]
     fn test_to_timemap() {
-        let time1 = "2025-07-10T12:35:00.000+02:00";
-        let time1_t = DateTime::parse_from_rfc3339(time1)
-            .unwrap()
-            .with_timezone(&Local);
-        let time2 = "2026-08-11T12:36:01.000+02:00";
-        let time2_t = DateTime::parse_from_rfc3339(time2)
-            .unwrap()
-            .with_timezone(&Local);
+        let time1 = 1234567;
+        let time1_t = Time::from_nanos(time1);
+        let time2 = 123456789;
+        let time2_t = Time::from_nanos(time2);
 
         assert_eq!(
-            to_timemap(&Value::from_iter([(time2, 1)])),
+            to_timemap(&Value::from_iter([(time2.to_string(), 1)])),
             Ok(BTreeMap::from([(time2_t, 1)]))
         );
         assert_eq!(
-            to_timemap(&Value::from_iter([(time1, 4), (time2, 0)])),
-            Ok(BTreeMap::from([(time1_t, 4), (time2_t, 0)]))
+            to_timemap(&Value::from_iter([
+                (time1.to_string(), 4),
+                (time2.to_string(), 0)
+            ])),
+            Ok(BTreeMap::from([(time1_t.into(), 4), (time2_t.into(), 0)]))
        );
 
-        assert!(to_timemap(&Value::from_iter([("1", time2)])).is_err());
-        assert!(to_timemap(&Value::from_iter([(time2, time2)])).is_err());
+        assert!(to_timemap(&Value::from_iter([("1-1", time2)])).is_err());
+        // assert!(to_timemap(&Value::from_iter([(time2.to_string(), time2)])).is_err());
         assert!(to_timemap(&Value::from_iter([(time2)])).is_err());
         assert!(to_timemap(&Value::from_iter([(1)])).is_err());
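The two-step parser above deserves a worked example: new databases store times as a nanosecond count serialized into a string, while databases written by earlier versions stored chrono RFC 3339 datetimes, so loading tries the new format first and falls back to the old one. A self-contained sketch of the same arithmetic, using only `std` and `chrono` (treedb's `Time` wrapper is elided here):

```rust
use std::time::Duration;

use chrono::DateTime;

/// New on-disk format: nanoseconds since the epoch, as a decimal string.
fn parse_nanos(val: &str) -> Option<Duration> {
    let nanos: u128 = val.parse().ok()?;
    Some(Duration::new(
        (nanos / 1_000_000_000) as u64, // whole seconds
        (nanos % 1_000_000_000) as u32, // leftover nanoseconds
    ))
}

/// Old format: chrono's RFC 3339 serialization, kept for migration.
fn parse_rfc3339(val: &str) -> Option<Duration> {
    let t = DateTime::parse_from_rfc3339(val).ok()?;
    Some(Duration::new(t.timestamp() as u64, t.timestamp_subsec_nanos()))
}

fn main() {
    // "123456789" is 0.123456789s after the epoch, as in test_to_time above.
    assert_eq!(parse_nanos("123456789"), Some(Duration::new(0, 123_456_789)));
    // A pre-migration value still loads: 123.456s after the epoch.
    assert_eq!(
        parse_rfc3339("1970-01-01T00:02:03.456Z"),
        Some(Duration::new(123, 456_000_000))
    );
}
```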
|
|
@ -17,49 +17,93 @@ use std::{
|
||||||
time::Duration,
|
time::Duration,
|
||||||
};
|
};
|
||||||
|
|
||||||
use chrono::{Local, TimeDelta};
|
use serde::{Deserialize, Serialize, de::DeserializeOwned};
|
||||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use tokio::{
|
use tokio::{
|
||||||
fs::{rename, File},
|
fs::{File, rename},
|
||||||
sync::{mpsc, oneshot},
|
sync::{mpsc, oneshot},
|
||||||
time::{interval, MissedTickBehavior},
|
time::{MissedTickBehavior, interval},
|
||||||
};
|
};
|
||||||
|
use tokio_util::{sync::CancellationToken, task::task_tracker::TaskTrackerToken};
|
||||||
use crate::{
|
|
||||||
concepts::{Config, Time},
|
|
||||||
daemon::ShutdownToken,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub mod helpers;
|
|
||||||
|
|
||||||
// Database
|
// Database
|
||||||
|
|
||||||
use raw::{ReadDB, WriteDB};
|
use raw::{ReadDB, WriteDB};
|
||||||
|
use time::{Time, now};
|
||||||
|
|
||||||
|
pub mod helpers;
|
||||||
mod raw;
|
mod raw;
|
||||||
|
pub mod time;
|
||||||
|
|
||||||
|
/// Any order the Database can receive
|
||||||
|
enum Order {
|
||||||
|
Log(Entry),
|
||||||
|
OpenTree(OpenTree),
|
||||||
|
}
|
||||||
|
|
||||||
/// Entry sent from [`Tree`] to [`Database`]
|
/// Entry sent from [`Tree`] to [`Database`]
|
||||||
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
pub struct Entry {
|
struct Entry {
|
||||||
pub tree: String,
|
pub tree: String,
|
||||||
pub key: Value,
|
pub key: Value,
|
||||||
pub value: Option<Value>,
|
pub value: Option<Value>,
|
||||||
pub expiry: Time,
|
pub expiry: Time,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type LoadedDB = HashMap<String, HashMap<Value, Value>>;
|
/// Order to receive a tree from previous Database
|
||||||
|
pub struct OpenTree {
|
||||||
|
name: String,
|
||||||
|
resp: oneshot::Sender<Option<LoadedTree>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
type LoadedTree = HashMap<Value, Value>;
|
||||||
|
pub type LoadedDB = HashMap<String, LoadedTree>;
|
||||||
|
|
||||||
const DB_NAME: &str = "reaction.db";
|
const DB_NAME: &str = "reaction.db";
|
||||||
const DB_NEW_NAME: &str = "reaction.new.db";
|
const DB_NEW_NAME: &str = "reaction.new.db";
|
||||||
|
|
||||||
impl Config {
|
fn path_of(state_directory: &Path, name: &str) -> PathBuf {
|
||||||
fn path_of(&self, name: &str) -> PathBuf {
|
if state_directory.as_os_str().is_empty() {
|
||||||
if self.state_directory.is_empty() {
|
name.into()
|
||||||
name.into()
|
} else {
|
||||||
} else {
|
PathBuf::from(state_directory).join(name)
|
||||||
PathBuf::from(&self.state_directory).join(name)
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub type DatabaseErrorReceiver = oneshot::Receiver<Result<(), String>>;
|
||||||
|
|
||||||
|
/// Public-facing API for a treedb Database
|
||||||
|
pub struct Database {
|
||||||
|
entry_tx: Option<mpsc::Sender<Order>>,
|
||||||
|
error_rx: DatabaseErrorReceiver,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Database {
|
||||||
|
/// Open a new Database, whom task will start in the background.
|
||||||
|
/// You'll have to:
|
||||||
|
/// - drop all [`Tree`]s,
|
||||||
|
/// - call [`Self::quit`],
|
||||||
|
///
|
||||||
|
/// to have the Database properly quit.
|
||||||
|
///
|
||||||
|
/// You can wait for [`Self::quit`] returned channel to know how it went.
|
||||||
|
pub async fn open(
|
||||||
|
path_directory: &Path,
|
||||||
|
cancellation_token: CancellationToken,
|
||||||
|
task_tracker_token: TaskTrackerToken,
|
||||||
|
) -> Result<Database, IoError> {
|
||||||
|
let (manager, entry_tx) = DatabaseManager::open(path_directory).await?;
|
||||||
|
let error_rx = manager.manager(cancellation_token, task_tracker_token);
|
||||||
|
Ok(Self {
|
||||||
|
entry_tx: Some(entry_tx),
|
||||||
|
error_rx,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Permit to close DB's channel.
|
||||||
|
/// Without this function manually called, the DB can't close.
|
||||||
|
pub fn quit(self) -> DatabaseErrorReceiver {
|
||||||
|
self.error_rx
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -67,8 +111,9 @@ impl Config {
|
||||||
// This would make more sense, as actual garbage collection is time-based
|
// This would make more sense, as actual garbage collection is time-based
|
||||||
|
|
||||||
/// A [`Database`] logs all write operations on [`Tree`]s in a single file.
|
/// A [`Database`] logs all write operations on [`Tree`]s in a single file.
|
||||||
/// Logs are written asynchronously, so the write operations in RAM will never block.
|
/// Logs are written asynchronously, so the write operations in RAM will block only when the
|
||||||
pub struct Database {
|
/// underlying channel is full.
|
||||||
|
struct DatabaseManager {
|
||||||
/// Inner database
|
/// Inner database
|
||||||
write_db: WriteDB,
|
write_db: WriteDB,
|
||||||
/// [`Tree`]s loaded from disk
|
/// [`Tree`]s loaded from disk
|
||||||
|
|
@ -79,10 +124,7 @@ pub struct Database {
|
||||||
/// New database atomically replaces the old one when its writing is done.
|
/// New database atomically replaces the old one when its writing is done.
|
||||||
new_path: PathBuf,
|
new_path: PathBuf,
|
||||||
/// The receiver on [`Tree`] write operations
|
/// The receiver on [`Tree`] write operations
|
||||||
entry_rx: mpsc::Receiver<Entry>,
|
entry_rx: mpsc::Receiver<Order>,
|
||||||
/// The sender on [`Tree`] write operations.
|
|
||||||
/// Only used to clone new senders for new Trees.
|
|
||||||
entry_tx: Option<mpsc::Sender<Entry>>,
|
|
||||||
/// The interval at which the database must be flushed to kernel
|
/// The interval at which the database must be flushed to kernel
|
||||||
flush_every: Duration,
|
flush_every: Duration,
|
||||||
/// The maximum bytes that must be written until the database is rotated
|
/// The maximum bytes that must be written until the database is rotated
|
||||||
|
|
@ -91,29 +133,37 @@ pub struct Database {
|
||||||
bytes_written: usize,
|
bytes_written: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Database {
|
impl DatabaseManager {
|
||||||
pub async fn open(config: &Config) -> Result<Database, IoError> {
|
pub async fn open(
|
||||||
let path = config.path_of(DB_NAME);
|
path_directory: &Path,
|
||||||
let new_path = config.path_of(DB_NEW_NAME);
|
) -> Result<(DatabaseManager, mpsc::Sender<Order>), IoError> {
|
||||||
|
let path = path_of(path_directory, DB_NAME);
|
||||||
|
let new_path = path_of(path_directory, DB_NEW_NAME);
|
||||||
|
|
||||||
let (write_db, loaded_db) = rotate_db(&path, &new_path, true).await?;
|
let (write_db, loaded_db) = rotate_db(&path, &new_path, true).await?;
|
||||||
|
|
||||||
let (entry_tx, entry_rx) = mpsc::channel(1000);
|
let (entry_tx, entry_rx) = mpsc::channel(1000);
|
||||||
|
|
||||||
Ok(Database {
|
Ok((
|
||||||
write_db,
|
DatabaseManager {
|
||||||
loaded_db,
|
write_db,
|
||||||
path,
|
loaded_db,
|
||||||
new_path,
|
path,
|
||||||
entry_rx,
|
new_path,
|
||||||
entry_tx: Some(entry_tx),
|
entry_rx,
|
||||||
flush_every: Duration::from_secs(2),
|
flush_every: Duration::from_secs(2),
|
||||||
max_bytes: 20 * 1024 * 1024, // 20 MiB
|
max_bytes: 20 * 1024 * 1024, // 20 MiB
|
||||||
bytes_written: 0,
|
bytes_written: 0,
|
||||||
})
|
},
|
||||||
|
entry_tx,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn manager(mut self, shutdown: ShutdownToken) -> oneshot::Receiver<Result<(), String>> {
|
pub fn manager(
|
||||||
|
mut self,
|
||||||
|
cancellation_token: CancellationToken,
|
||||||
|
_task_tracker_token: TaskTrackerToken,
|
||||||
|
) -> oneshot::Receiver<Result<(), String>> {
|
||||||
let (error_tx, error_rx) = oneshot::channel();
|
let (error_tx, error_rx) = oneshot::channel();
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let mut interval = interval(self.flush_every);
|
let mut interval = interval(self.flush_every);
|
||||||
|
|
@ -123,28 +173,27 @@ impl Database {
|
||||||
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
|
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
|
||||||
let mut status = loop {
|
let mut status = loop {
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
entry = self.entry_rx.recv() => {
|
order = self.entry_rx.recv() => {
|
||||||
if let Err(err) = self.handle_entry(entry).await {
|
if let Err(err) = self.handle_order(order).await {
|
||||||
shutdown.ask_shutdown();
|
cancellation_token.cancel();
|
||||||
break err;
|
break err;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ = interval.tick() => {
|
_ = interval.tick() => {
|
||||||
if let Err(err) = self.flush().await {
|
if let Err(err) = self.flush().await {
|
||||||
shutdown.ask_shutdown();
|
cancellation_token.cancel();
|
||||||
break Some(err);
|
break Some(err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ = shutdown.wait() => break None
|
_ = cancellation_token.cancelled() => break None
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
// Finish consuming received entries when shutdown asked
|
// Finish consuming received entries when shutdown asked
|
||||||
if status.is_none() {
|
if status.is_none() {
|
||||||
self.entry_tx = None;
|
|
||||||
loop {
|
loop {
|
||||||
let entry = self.entry_rx.recv().await;
|
let order = self.entry_rx.recv().await;
|
||||||
if let Err(err) = self.handle_entry(entry).await {
|
if let Err(err) = self.handle_order(order).await {
|
||||||
status = err;
|
status = err;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
@ -168,33 +217,42 @@ impl Database {
|
||||||
error_rx
|
error_rx
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write a received entry. Return:
|
/// Executes an order. Returns:
|
||||||
/// - Err(Some) if there was an error,
|
/// - Err(Some) if there was an error,
|
||||||
/// - Err(None) is channel is closed,
|
/// - Err(None) is channel is closed,
|
||||||
/// - Ok(()) in general case.
|
/// - Ok(()) in general case.
|
||||||
async fn handle_entry(&mut self, entry: Option<Entry>) -> Result<(), Option<String>> {
|
async fn handle_order(&mut self, order: Option<Order>) -> Result<(), Option<String>> {
|
||||||
match entry {
|
match order {
|
||||||
Some(entry) => match self.write_db.write_entry(&entry).await {
|
Some(Order::Log(entry)) => self.handle_entry(entry).await.map_err(Option::Some),
|
||||||
Ok(bytes_written) => {
|
Some(Order::OpenTree(open_tree)) => {
|
||||||
self.bytes_written += bytes_written;
|
self.handle_open_tree(open_tree);
|
||||||
if self.bytes_written > self.max_bytes {
|
Ok(())
|
||||||
match self.rotate_db().await {
|
}
|
||||||
Ok(_) => {
|
|
||||||
self.bytes_written = 0;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
Err(err) => Err(Some(format!("while rotating database: {err}"))),
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(err) => Err(Some(format!("while writing entry to database: {err}"))),
|
|
||||||
},
|
|
||||||
None => Err(None),
|
None => Err(None),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Write a received entry.
|
||||||
|
async fn handle_entry(&mut self, entry: Entry) -> Result<(), String> {
|
||||||
|
match self.write_db.write_entry(&entry).await {
|
||||||
|
Ok(bytes_written) => {
|
||||||
|
self.bytes_written += bytes_written;
|
||||||
|
if self.bytes_written > self.max_bytes {
|
||||||
|
match self.rotate_db().await {
|
||||||
|
Ok(_) => {
|
||||||
|
self.bytes_written = 0;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Err(err) => Err(format!("while rotating database: {err}")),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(err) => Err(format!("while writing entry to database: {err}")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Flush inner database.
|
/// Flush inner database.
|
||||||
async fn flush(&mut self) -> Result<(), String> {
|
async fn flush(&mut self) -> Result<(), String> {
|
||||||
self.write_db
|
self.write_db
|
||||||
|
|
@ -228,7 +286,7 @@ async fn rotate_db(
|
||||||
// No need to rotate the database when it is new,
|
// No need to rotate the database when it is new,
|
||||||
// we return here
|
// we return here
|
||||||
(true, ErrorKind::NotFound) => {
|
(true, ErrorKind::NotFound) => {
|
||||||
return Ok((WriteDB::new(File::create(path).await?), HashMap::default()))
|
return Ok((WriteDB::new(File::create(path).await?), HashMap::default()));
|
||||||
}
|
}
|
||||||
(_, _) => return Err(err),
|
(_, _) => return Err(err),
|
||||||
},
|
},
|
||||||
|
|
@ -268,11 +326,11 @@ pub struct Tree<K: KeyType, V: ValueType> {
|
||||||
/// This property permits the database rotation to be `O(n)` in time and `O(1)` in RAM space,
|
/// This property permits the database rotation to be `O(n)` in time and `O(1)` in RAM space,
|
||||||
/// `n` being the number of write operations from the last rotation plus the number of new
|
/// `n` being the number of write operations from the last rotation plus the number of new
|
||||||
/// operations.
|
/// operations.
|
||||||
entry_timeout: TimeDelta,
|
entry_timeout: Duration,
|
||||||
/// The inner BTreeMap
|
/// The inner BTreeMap
|
||||||
tree: BTreeMap<K, V>,
|
tree: BTreeMap<K, V>,
|
||||||
/// The sender that permits to asynchronously send write operations to database
|
/// The sender that permits to asynchronously send write operations to database
|
||||||
tx: mpsc::Sender<Entry>,
|
tx: mpsc::Sender<Order>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Database {
|
impl Database {
|
||||||
|
|
@@ -280,22 +338,33 @@ impl Database {
     /// Takes a closure (or regular function) that converts (Value, Value) JSON entries
     /// into (K, V) typed entries.
     /// Helpers for this closure can be found in the [`helpers`] module.
-    pub fn open_tree<K: KeyType, V: ValueType, F>(
+    pub async fn open_tree<K: KeyType, V: ValueType, F>(
         &mut self,
         name: String,
-        entry_timeout: TimeDelta,
+        entry_timeout: Duration,
         map_f: F,
     ) -> Result<Tree<K, V>, String>
     where
         F: Fn((Value, Value)) -> Result<(K, V), String>,
     {
-        // Get a clone of the channel sender
-        let tx = self
-            .entry_tx
-            .clone()
-            .ok_or("Database is closing".to_string())?;
+        // Request the tree
+        let (tx, rx) = oneshot::channel();
+        let entry_tx = match self.entry_tx.clone() {
+            None => return Err("Database is closing".to_string()),
+            Some(entry_tx) => {
+                entry_tx
+                    .send(Order::OpenTree(OpenTree {
+                        name: name.clone(),
+                        resp: tx,
+                    }))
+                    .await
+                    .map_err(|_| "Database did not answer")?;
+                // Get a clone of the channel sender
+                entry_tx.clone()
+            }
+        };
         // Load the tree from its JSON
-        let tree = if let Some(json_tree) = self.loaded_db.remove(&name) {
+        let tree = if let Some(json_tree) = rx.await.map_err(|_| "Database did not respond")? {
             json_tree
                 .into_iter()
                 .map(map_f)

@@ -307,15 +376,25 @@ impl Database {
             id: name,
             entry_timeout,
             tree,
-            tx,
+            tx: entry_tx,
         })
     }
+}
+
+impl DatabaseManager {
+    /// Creates a new Tree with the given name and entry timeout.
+    /// Takes a closure (or regular function) that converts (Value, Value) JSON entries
+    /// into (K, V) typed entries.
+    /// Helpers for this closure can be found in the [`helpers`] module.
+    pub fn handle_open_tree(&mut self, open_tree: OpenTree) {
+        let _ = open_tree.resp.send(self.loaded_db.remove(&open_tree.name));
+    }

     // TODO keep only tree names, and use it for next db rotation to remove associated entries
-    /// Drops Trees that have not been loaded already
-    pub fn drop_trees(&mut self) {
-        self.loaded_db = HashMap::default();
-    }
+    // Drops Trees that have not been loaded already
+    // pub fn drop_trees(&mut self) {
+    //     self.loaded_db = HashMap::default();
+    // }
 }

 // Gives access to all read-only functions
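The rewritten `open_tree` is a request/response exchange over channels: the caller sends an order carrying a `oneshot` sender, and the manager task answers on it. A runnable sketch of that pattern (simplified types, not the crate's real `Order`/`OpenTree` definitions; assumes tokio with the `macros`, `rt` and `sync` features):

```rust
use tokio::sync::{mpsc, oneshot};

// Minimal request/response over channels, as used by `open_tree`:
// the request carries a oneshot sender the manager answers on.
struct OpenTree {
    name: String,
    resp: oneshot::Sender<Option<String>>,
}

#[tokio::main]
async fn main() {
    let (order_tx, mut order_rx) = mpsc::channel::<OpenTree>(1);

    // Manager task: answers every OpenTree order on its oneshot channel.
    tokio::spawn(async move {
        while let Some(order) = order_rx.recv().await {
            let _ = order.resp.send(Some(format!("tree `{}` loaded", order.name)));
        }
    });

    // Caller side, mirroring the shape of the new `open_tree`.
    let (tx, rx) = oneshot::channel();
    order_tx
        .send(OpenTree { name: "bans".into(), resp: tx })
        .await
        .expect("manager is gone");
    let answer = rx.await.expect("manager did not respond");
    println!("{}", answer.unwrap());
}
```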
@@ -330,45 +409,50 @@ impl<K: KeyType, V: ValueType> Deref for Tree<K, V> {
 // Reimplement write functions
 impl<K: KeyType, V: ValueType> Tree<K, V> {
     /// Log an [`Entry`] to the [`Database`]
-    fn log(&mut self, k: &K, v: Option<&V>) {
+    async fn log(&mut self, k: &K, v: Option<&V>) {
+        let now = now();
         let e = Entry {
             tree: self.id.clone(),
             key: serde_json::to_value(k).expect("could not serialize key"),
             value: v.map(|v| serde_json::to_value(v).expect("could not serialize value")),
-            expiry: Local::now() + self.entry_timeout,
+            expiry: now + self.entry_timeout,
         };
         let tx = self.tx.clone();
         // FIXME what if send fails?
-        tokio::spawn(async move {
-            let _ = tx.send(e).await;
-        });
+        let _ = tx.send(Order::Log(e)).await;
     }

     /// Asynchronously persisted version of [`BTreeMap::insert`]
-    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
-        self.log(&key, Some(&value));
+    pub async fn insert(&mut self, key: K, value: V) -> Option<V> {
+        self.log(&key, Some(&value)).await;
         self.tree.insert(key, value)
     }

     /// Asynchronously persisted version of [`BTreeMap::pop_first`]
-    pub fn pop_first(&mut self) -> Option<(K, V)> {
-        self.tree.pop_first().map(|(key, value)| {
-            self.log(&key, None);
-            (key, value)
-        })
+    pub async fn pop_first(&mut self) -> Option<(K, V)> {
+        match self.tree.pop_first() {
+            Some((key, value)) => {
+                self.log(&key, None).await;
+                Some((key, value))
+            }
+            None => None,
+        }
     }

     /// Asynchronously persisted version of [`BTreeMap::pop_last`]
-    pub fn pop_last(&mut self) -> Option<(K, V)> {
-        self.tree.pop_last().map(|(key, value)| {
-            self.log(&key, None);
-            (key, value)
-        })
+    pub async fn pop_last(&mut self) -> Option<(K, V)> {
+        match self.tree.pop_last() {
+            Some((key, value)) => {
+                self.log(&key, None).await;
+                Some((key, value))
+            }
+            None => None,
+        }
     }

     /// Asynchronously persisted version of [`BTreeMap::remove`]
-    pub fn remove(&mut self, key: &K) -> Option<V> {
-        self.log(key, None);
+    pub async fn remove(&mut self, key: &K) -> Option<V> {
+        self.log(key, None).await;
         self.tree.remove(key)
     }
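Note the design change in `log`: the send is now awaited instead of being fired off in a `tokio::spawn`, so a full channel suspends the writer rather than piling up unordered fire-and-forget tasks. A small illustrative sketch of that backpressure behavior (not the crate's code):

```rust
use tokio::sync::mpsc;

// Awaiting a send on a bounded channel applies backpressure and
// preserves write order, unlike spawning each send as its own task.
#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u32>(1);

    let producer = tokio::spawn(async move {
        for i in 0..3 {
            // Suspends here whenever the single-slot buffer is full.
            tx.send(i).await.expect("receiver dropped");
        }
    });

    // Entries arrive in write order, one at a time.
    while let Some(i) = rx.recv().await {
        println!("persisted entry {i}");
    }
    producer.await.unwrap();
}
```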
@@ -376,20 +460,49 @@ impl<K: KeyType, V: ValueType> Tree<K, V> {
     /// Returning None removes the item if it existed before.
     /// Asynchronously persisted.
     /// *API design borrowed from [`fjall::WriteTransaction::fetch_update`].*
-    pub fn fetch_update<F: FnMut(Option<V>) -> Option<V>>(
+    pub async fn fetch_update<F: FnMut(Option<V>) -> Option<V>>(
         &mut self,
         key: K,
         mut f: F,
     ) -> Option<V> {
         let old_value = self.get(&key).map(|v| v.to_owned());
         let new_value = f(old_value);
-        self.log(&key, new_value.as_ref());
+        self.log(&key, new_value.as_ref()).await;
         if let Some(new_value) = new_value {
             self.tree.insert(key, new_value)
         } else {
             self.tree.remove(&key)
         }
     }
+
+    #[cfg(any(test, feature = "test"))]
+    pub fn tree(&self) -> &BTreeMap<K, V> {
+        &self.tree
+    }
+}
+
+#[cfg(any(test, feature = "test"))]
+impl DatabaseManager {
+    pub fn set_loaded_db(&mut self, loaded_db: LoadedDB) {
+        self.loaded_db = loaded_db;
+    }
+}
+
+#[cfg(any(test, feature = "test"))]
+impl Database {
+    pub async fn from_dir(dir_path: &Path, loaded_db: Option<LoadedDB>) -> Result<Self, IoError> {
+        use tokio_util::task::TaskTracker;
+
+        let (mut manager, entry_tx) = DatabaseManager::open(dir_path).await?;
+        if let Some(loaded_db) = loaded_db {
+            manager.set_loaded_db(loaded_db)
+        }
+        let error_rx = manager.manager(CancellationToken::new(), TaskTracker::new().token());
+        Ok(Self {
+            entry_tx: Some(entry_tx),
+            error_rx,
+        })
+    }
 }

 #[cfg(test)]
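`fetch_update` deserves a usage illustration: the closure receives the old value and decides the new state, with `None` meaning removal. A sketch of the same semantics on a plain `BTreeMap` (the persistence side is omitted):

```rust
use std::collections::BTreeMap;

// Sketch of `fetch_update` semantics: the closure sees the old value;
// returning Some updates the key, returning None removes it.
fn fetch_update<K: Ord + Clone, V: Clone>(
    tree: &mut BTreeMap<K, V>,
    key: K,
    mut f: impl FnMut(Option<V>) -> Option<V>,
) -> Option<V> {
    let old = tree.get(&key).cloned();
    match f(old) {
        Some(new) => tree.insert(key, new),
        None => tree.remove(&key),
    }
}

fn main() {
    let mut tree = BTreeMap::new();
    // Count an occurrence: start at 1, then increment.
    fetch_update(&mut tree, "1.2.3.4", |old: Option<u32>| Some(old.unwrap_or(0) + 1));
    fetch_update(&mut tree, "1.2.3.4", |old| Some(old.unwrap_or(0) + 1));
    assert_eq!(tree.get("1.2.3.4"), Some(&2));
    // Returning None removes the entry.
    fetch_update(&mut tree, "1.2.3.4", |_| None);
    assert!(tree.is_empty());
}
```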
@@ -397,68 +510,21 @@ mod tests {

     use std::{
         collections::{BTreeMap, BTreeSet, HashMap},
-        io::Error as IoError,
-        path::Path,
+        time::Duration,
     };

-    use chrono::{Local, TimeDelta};
     use serde_json::Value;
     use tempfile::{NamedTempFile, TempDir};
-    use tokio::fs::{write, File};
+    use tokio::fs::File;

-    use crate::concepts::Config;
-
-    use super::{
-        helpers::*, raw::WriteDB, rotate_db, Database, Entry, KeyType, LoadedDB, Tree, ValueType,
-        DB_NAME,
-    };
-
-    impl Database {
-        pub async fn from_dir(dir_path: &Path) -> Result<Self, IoError> {
-            let config_path = dir_path.join("reaction.jsonnet");
-            write(
-                &config_path,
-                format!(
-                    "
-                    {{
-                        state_directory: {dir_path:?},
-                        patterns: {{ pattern: {{ regex: \"prout\" }} }},
-                        streams: {{ dummy: {{
-                            cmd: [\"dummy\"],
-                            filters: {{ dummy: {{
-                                regex: [\"dummy\"],
-                                actions: {{ dummy: {{
-                                    cmd: [\"dummy\"]
-                                }} }}
-                            }} }}
-                        }} }}
-                    }}
-                    "
-                ),
-            )
-            .await?;
-
-            let config = Config::from_path(&config_path).unwrap();
-            Database::open(&config).await
-        }
-
-        pub fn set_loaded_db(&mut self, loaded_db: LoadedDB) {
-            self.loaded_db = loaded_db;
-        }
-    }
-
-    impl<K: KeyType, V: ValueType> Tree<K, V> {
-        pub fn tree(&self) -> &BTreeMap<K, V> {
-            &self.tree
-        }
-    }
+    use super::{DB_NAME, Database, Entry, Time, helpers::*, now, raw::WriteDB, rotate_db};

     #[tokio::test]
     async fn test_rotate_db() {
-        let now = Local::now();
+        let now = now();

-        let expired = now - TimeDelta::seconds(2);
-        let valid = now + TimeDelta::seconds(2);
+        let expired = now - Time::from_secs(2);
+        let valid = now + Time::from_secs(2);

         let entries = [
             Entry {
@@ -556,15 +622,16 @@ mod tests {

     #[tokio::test]
     async fn test_open_tree() {
-        let now = Local::now();
-        let now2 = now + TimeDelta::milliseconds(2);
-        let now3 = now + TimeDelta::milliseconds(3);
+        let now = now();

-        let now_ms = now.to_rfc3339();
-        let now2_ms = now2.to_rfc3339();
-        let now3_ms = now3.to_rfc3339();
+        let now2 = now + Time::from_millis(2);
+        let now3 = now + Time::from_millis(3);

-        let valid = now + TimeDelta::seconds(2);
+        // let now_ms = now.as_nanos().to_string();
+        // let now2_ms = now2.as_nanos().to_string();
+        // let now3_ms = now3.as_nanos().to_string();
+
+        let valid = now + Time::from_secs(2);

         let ip127 = vec!["127.0.0.1".to_string()];
         let ip1 = vec!["1.1.1.1".to_string()];

@@ -572,44 +639,50 @@ mod tests {
         let entries = [
             Entry {
                 tree: "time-match".into(),
-                key: now_ms.clone().into(),
+                key: now.as_nanos().to_string().into(),
                 value: Some(ip127.clone().into()),
                 expiry: valid,
             },
             Entry {
                 tree: "time-match".into(),
-                key: now2_ms.clone().into(),
+                key: now2.as_nanos().to_string().into(),
                 value: Some(ip127.clone().into()),
                 expiry: valid,
             },
             Entry {
                 tree: "time-match".into(),
-                key: now3_ms.clone().into(),
+                key: now3.as_nanos().to_string().into(),
                 value: Some(ip127.clone().into()),
                 expiry: valid,
             },
             Entry {
                 tree: "time-match".into(),
-                key: now2_ms.clone().into(),
+                key: now2.as_nanos().to_string().into(),
                 value: Some(ip127.clone().into()),
                 expiry: valid,
             },
             Entry {
                 tree: "match-timeset".into(),
                 key: ip127.clone().into(),
-                value: Some([Value::String(now_ms)].into()),
+                value: Some([Value::String(now.as_nanos().to_string())].into()),
                 expiry: valid,
             },
             Entry {
                 tree: "match-timeset".into(),
                 key: ip1.clone().into(),
-                value: Some([Value::String(now2_ms.clone())].into()),
+                value: Some([Value::String(now2.as_nanos().to_string())].into()),
                 expiry: valid,
             },
             Entry {
                 tree: "match-timeset".into(),
                 key: ip1.clone().into(),
-                value: Some([Value::String(now2_ms.clone()), now3_ms.into()].into()),
+                value: Some(
+                    [
+                        Value::String(now2.as_nanos().to_string()),
+                        Value::String(now3.as_nanos().to_string()),
+                    ]
+                    .into(),
+                ),
                 expiry: valid,
             },
         ];

@@ -625,14 +698,15 @@ mod tests {
         write_db.close().await.unwrap();
         drop(write_db);

-        let mut database = Database::from_dir(dir_path).await.unwrap();
+        let mut database = Database::from_dir(dir_path, None).await.unwrap();

         let time_match = database
             .open_tree(
                 "time-match".into(),
-                TimeDelta::seconds(2),
+                Duration::from_secs(2),
                 |(key, value)| Ok((to_time(&key)?, to_match(&value)?)),
             )
+            .await
             .unwrap();
         assert_eq!(
             time_match.tree,

@@ -646,9 +720,10 @@ mod tests {
         let match_timeset = database
             .open_tree(
                 "match-timeset".into(),
-                TimeDelta::hours(2),
+                Duration::from_hours(2),
                 |(key, value)| Ok((to_match(&key)?, to_timeset(&value)?)),
             )
+            .await
             .unwrap();
         assert_eq!(
             match_timeset.tree,

@@ -661,9 +736,10 @@ mod tests {
         let unknown_tree = database
             .open_tree(
                 "unknown_tree".into(),
-                TimeDelta::hours(2),
+                Duration::from_hours(2),
                 |(key, value)| Ok((to_match(&key)?, to_timeset(&value)?)),
             )
+            .await
             .unwrap();
         assert_eq!(unknown_tree.tree, BTreeMap::default());
     }
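The test data above relies on replay semantics: entries are applied in log order, so the duplicated `now2` key in `time-match` keeps its last logged value. A minimal sketch of that last-write-wins replay (hypothetical keys, no real `Entry` type):

```rust
use std::collections::BTreeMap;

// Replay sketch: entries are applied in log order, so for a duplicated
// key the last logged value wins, and a `None` value deletes the key.
fn main() {
    let log = [
        ("key-a", Some("v1")),
        ("key-b", Some("v1")),
        ("key-a", Some("v2")), // overwrites the first key-a entry
        ("key-b", None),       // tombstone: removes key-b
    ];

    let mut tree = BTreeMap::new();
    for (key, value) in log {
        match value {
            Some(v) => {
                tree.insert(key, v);
            }
            None => {
                tree.remove(key);
            }
        }
    }
    assert_eq!(tree.get("key-a"), Some(&"v2"));
    assert!(!tree.contains_key("key-b"));
    println!("replayed tree: {tree:?}");
}
```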
@@ -1,6 +1,9 @@
-use std::{collections::HashMap, io::Error as IoError};
+use std::{
+    collections::HashMap,
+    io::Error as IoError,
+    time::{SystemTime, UNIX_EPOCH},
+};

-use chrono::{Local, TimeZone};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 use thiserror::Error;

@@ -10,6 +13,8 @@ use tokio::{
 };
 use tracing::error;

+use crate::time::Time;
+
 use super::{Entry, LoadedDB};

 const DB_TREE_ID: u64 = 0;

@@ -43,7 +48,7 @@ struct WriteEntry<'a> {
     #[serde(rename = "v")]
     pub value: &'a Option<Value>,
     #[serde(rename = "e")]
-    pub expiry: i64,
+    pub expiry: u64,
 }

 /// Entry in custom database format, just read from database

@@ -56,7 +61,7 @@ struct ReadEntry {
     #[serde(rename = "v")]
     pub value: Option<Value>,
     #[serde(rename = "e")]
-    pub expiry: i64,
+    pub expiry: u64,
 }

 /// Permits to write entries in a database.
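For reference, the `WriteEntry`/`ReadEntry` shape above maps to one JSON object per line with single-letter keys, and `e` is now a `u64` of milliseconds since the epoch. A sketch reproducing that record shape with serde (assumes `serde` with the `derive` feature; the sample values are made up):

```rust
use serde::Serialize;
use serde_json::Value;

// One JSON object per line, with single-letter field names.
#[derive(Serialize)]
struct Record {
    #[serde(rename = "t")]
    tree: u64,
    #[serde(rename = "k")]
    key: Value,
    #[serde(rename = "v")]
    value: Option<Value>,
    #[serde(rename = "e")]
    expiry: u64, // milliseconds since UNIX_EPOCH after this change
}

fn main() {
    let rec = Record {
        tree: 1,
        key: "key1".into(),
        value: Some("value1".into()),
        expiry: 1_700_000_000_000,
    };
    // Same line shape as asserted in `write_db_write_entry` below.
    println!("{}", serde_json::to_string(&rec).unwrap());
    // {"t":1,"k":"key1","v":"value1","e":1700000000000}
}
```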
@@ -109,7 +114,7 @@ impl WriteDB {
             tree: tree_id,
             key: &entry.key,
             value: &entry.value,
-            expiry: entry.expiry.timestamp_millis(),
+            expiry: entry.expiry.as_millis() as u64,
         })
         .await
         .map(|bytes_written| bytes_written + written)

@@ -176,12 +181,14 @@ impl ReadDB {
             Ok(Some(entry)) => {
                 // Add back in new DB
                 match write_db.write_entry(&entry).await {
                     Ok(_) => (),
                     Err(err) => match err {
                         SerdeOrIoError::IO(err) => return Err(err),
-                        SerdeOrIoError::Serde(err) => error!("serde should be able to serialize an entry just deserialized: {err}"),
-                    }
+                        SerdeOrIoError::Serde(err) => error!(
+                            "serde should be able to serialize an entry just deserialized: {err}"
+                        ),
+                    },
                 }
                 // Insert data in RAM
                 if load_db {
                     let map: &mut HashMap<Value, Value> =

@@ -199,7 +206,10 @@ impl ReadDB {
     }

     async fn next(&mut self) -> Result<Option<Entry>, DatabaseError> {
-        let now = Local::now().timestamp_millis();
+        let now = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_millis() as u64;
         // Loop until we get a non-special value
         let raw_entry = loop {
             self.buffer.clear();

@@ -239,7 +249,7 @@ impl ReadDB {
                 tree: tree.to_owned(),
                 key: raw_entry.key,
                 value: raw_entry.value,
-                expiry: Local.timestamp_millis_opt(raw_entry.expiry).unwrap(),
+                expiry: Time::from_millis(raw_entry.expiry),
             })),
             None => Err(DatabaseError::MissingKeyId(raw_entry.tree)),
         }

@@ -250,22 +260,22 @@ mod tests {
 mod tests {
     use std::collections::HashMap;

-    use chrono::{Local, TimeDelta, TimeZone};
     use serde_json::Value;
     use tempfile::NamedTempFile;
-    use tokio::fs::{read, write, File};
+    use tokio::fs::{File, read, write};

-    use crate::treedb::{
-        raw::{DatabaseError, ReadDB, WriteDB, DB_TREE_ID},
+    use crate::{
         Entry,
+        raw::{DB_TREE_ID, DatabaseError, ReadDB, WriteDB},
+        time::{Time, now},
     };

     #[tokio::test]
     async fn write_db_write_entry() {
-        let now = Local::now();
-        let expired = now - TimeDelta::seconds(2);
-        let expired_ts = expired.timestamp_millis();
-        // let valid = now + TimeDelta::seconds(2);
+        let now = now();
+        let expired = now - Time::from_secs(2);
+        let expired_ts = expired.as_millis();
+        // let valid = now + Time::from_secs(2);
         // let valid_ts = valid.timestamp_millis();

         let path = NamedTempFile::new().unwrap().into_temp_path();

@@ -289,21 +299,23 @@ mod tests {

         assert_eq!(
             contents,
-            format!("{{\"t\":0,\"k\":1,\"v\":\"yooo\",\"e\":0}}\n{{\"t\":1,\"k\":\"key1\",\"v\":\"value1\",\"e\":{expired_ts}}}\n")
+            format!(
+                "{{\"t\":0,\"k\":1,\"v\":\"yooo\",\"e\":0}}\n{{\"t\":1,\"k\":\"key1\",\"v\":\"value1\",\"e\":{expired_ts}}}\n"
+            )
         );
     }

     #[tokio::test]
     async fn read_db_next() {
-        let now = Local::now();
+        let now = now();

-        let expired = now - TimeDelta::seconds(2);
-        let expired_ts = expired.timestamp_millis();
+        let expired = now - Time::from_secs(2);
+        let expired_ts = expired.as_millis();

-        let valid = now + TimeDelta::seconds(2);
-        let valid_ts = valid.timestamp_millis();
+        let valid = now + Time::from_secs(2);
+        let valid_ts = valid.as_millis();
         // Truncate to millisecond precision
-        let valid = Local.timestamp_millis_opt(valid_ts).unwrap();
+        let valid = Time::new(valid.as_secs(), valid.subsec_millis() * 1_000_000);

         let path = NamedTempFile::new().unwrap().into_temp_path();

@@ -343,13 +355,13 @@ mod tests {

     #[tokio::test]
     async fn read_db_read() {
-        let now = Local::now();
+        let now = now();

-        let expired = now - TimeDelta::seconds(2);
-        let expired_ts = expired.timestamp_millis();
+        let expired = now - Time::from_secs(2);
+        let expired_ts = expired.as_millis();

-        let valid = now + TimeDelta::seconds(2);
-        let valid_ts = valid.timestamp_millis();
+        let valid = now + Time::from_secs(2);
+        let valid_ts = valid.as_millis();

         let read_path = NamedTempFile::new().unwrap().into_temp_path();
         let write_path = NamedTempFile::new().unwrap().into_temp_path();

@@ -422,13 +434,13 @@ mod tests {
     #[tokio::test]
     async fn write_then_read_1000() {
         // Generate entries
-        let now = Local::now();
+        let now = now();
         let entries: Vec<_> = (0..1000)
             .map(|i| Entry {
                 tree: format!("tree{}", i % 4),
                 key: format!("key{}", i % 10).into(),
                 value: Some(format!("value{}", i % 10).into()),
-                expiry: now + TimeDelta::seconds((i % 4) - 1),
+                expiry: now + Time::from_secs(i % 4) - Time::from_secs(1),
             })
             .collect();

@@ -438,7 +450,7 @@ mod tests {
                 tree: format!("tree{}", i % 4),
                 key: format!("key{}", i % 10).into(),
                 value: None,
-                expiry: now + TimeDelta::seconds(i % 4),
+                expiry: now + Time::from_secs(i % 4),
             })
             .collect();
crates/treedb/src/time.rs (new file, 117 lines)

@@ -0,0 +1,117 @@
+use std::{
+    fmt,
+    ops::{Add, Deref, Sub},
+    time::{Duration, SystemTime, UNIX_EPOCH},
+};
+
+use serde::{Deserialize, Serialize};
+
+/// [`std::time::Duration`] since [`std::time::UNIX_EPOCH`]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Time(Duration);
+impl Deref for Time {
+    type Target = Duration;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+impl From<Duration> for Time {
+    fn from(value: Duration) -> Self {
+        Time(value)
+    }
+}
+impl Into<Duration> for Time {
+    fn into(self) -> Duration {
+        self.0
+    }
+}
+impl Add<Duration> for Time {
+    type Output = Time;
+    fn add(self, rhs: Duration) -> Self::Output {
+        Time(self.0 + rhs)
+    }
+}
+impl Add<Time> for Time {
+    type Output = Time;
+    fn add(self, rhs: Time) -> Self::Output {
+        Time(self.0 + rhs.0)
+    }
+}
+impl Sub<Duration> for Time {
+    type Output = Time;
+    fn sub(self, rhs: Duration) -> Self::Output {
+        Time(self.0 - rhs)
+    }
+}
+impl Sub<Time> for Time {
+    type Output = Time;
+    fn sub(self, rhs: Time) -> Self::Output {
+        Time(self.0 - rhs.0)
+    }
+}
+
+impl Serialize for Time {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        self.as_nanos().to_string().serialize(serializer)
+    }
+}
+struct TimeVisitor;
+impl<'de> serde::de::Visitor<'de> for TimeVisitor {
+    type Value = Time;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        write!(formatter, "a string representing nanoseconds")
+    }
+
+    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
+    where
+        E: serde::de::Error,
+    {
+        match s.parse::<u128>() {
+            Ok(nanos) => Ok(Time(Duration::new(
+                (nanos / 1_000_000_000) as u64,
+                (nanos % 1_000_000_000) as u32,
+            ))),
+            Err(_) => Err(serde::de::Error::invalid_value(
+                serde::de::Unexpected::Str(s),
+                &self,
+            )),
+        }
+    }
+}
+impl<'de> Deserialize<'de> for Time {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        deserializer.deserialize_str(TimeVisitor)
+    }
+}
+
+impl Time {
+    pub fn new(secs: u64, nanos: u32) -> Time {
+        Time(Duration::new(secs, nanos))
+    }
+    pub fn from_hours(hours: u64) -> Time {
+        Time(Duration::from_hours(hours))
+    }
+    pub fn from_mins(mins: u64) -> Time {
+        Time(Duration::from_mins(mins))
+    }
+    pub fn from_secs(secs: u64) -> Time {
+        Time(Duration::from_secs(secs))
+    }
+    pub fn from_millis(millis: u64) -> Time {
+        Time(Duration::from_millis(millis))
+    }
+    pub fn from_nanos(nanos: u64) -> Time {
+        Time(Duration::from_nanos(nanos))
+    }
+}
+
+pub fn now() -> Time {
+    Time(SystemTime::now().duration_since(UNIX_EPOCH).unwrap())
+}
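The `Serialize`/`Deserialize` pair above encodes a `Time` as a decimal string of nanoseconds. A dependency-free sketch of that round trip:

```rust
use std::time::Duration;

// Round-trip sketch for the nanosecond-string encoding Time uses:
// serialize as a decimal string of nanoseconds, parse it back.
fn main() {
    let t = Duration::new(2, 500_000_000); // 2.5 s since the epoch, say
    let encoded = t.as_nanos().to_string();
    assert_eq!(encoded, "2500000000");

    let nanos: u128 = encoded.parse().unwrap();
    let decoded = Duration::new(
        (nanos / 1_000_000_000) as u64,
        (nanos % 1_000_000_000) as u32,
    );
    assert_eq!(decoded, t);
    println!("round-trip ok: {encoded} ns");
}
```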
C helpers README (deleted file, 12 lines)

@@ -1,12 +0,0 @@
-# C helpers
-
-Those helpers permit to handle IPv4 & IPv6 at the same time, waiting for [#79](https://framagit.org/ppom/reaction/-/issues/79) to be addressed.
-
-Compilation:
-
-```bash
-# Produces nft46 binary
-gcc -o nft46 nft46.c
-# Produces ip46tables binary
-gcc -o ip46tables ip46tables.c
-```
ip46tables.c (deleted file, 91 lines)

@@ -1,91 +0,0 @@
-#include<ctype.h>
-#include<errno.h>
-#include<stdio.h>
-#include<stdlib.h>
-#include<string.h>
-#include<unistd.h>
-
-// If this programs
-// - receives an ipv4 address in its arguments:
-//   → it will executes iptables with the same arguments in place.
-//
-// - receives an ipv6 address in its arguments:
-//   → it will executes ip6tables with the same arguments in place.
-//
-// - doesn't receive an ipv4 or ipv6 address in its arguments:
-//   → it will executes both, with the same arguments in place.
-
-int isIPv4(char *tab) {
-    int i,len;
-    // IPv4 addresses are at least 7 chars long
-    len = strlen(tab);
-    if (len < 7 || !isdigit(tab[0]) || !isdigit(tab[len-1])) {
-        return 0;
-    }
-    // Each char must be a digit or a dot between 2 digits
-    for (i=1; i<len-1; i++) {
-        if (!isdigit(tab[i]) && !(tab[i] == '.' && isdigit(tab[i-1]) && isdigit(tab[i+1]))) {
-            return 0;
-        }
-    }
-    return 1;
-}
-
-int isIPv6(char *tab) {
-    int i,len, twodots = 0;
-    // IPv6 addresses are at least 3 chars long
-    len = strlen(tab);
-    if (len < 3) {
-        return 0;
-    }
-    // Each char must be a digit, :, a-f, or A-F
-    for (i=0; i<len; i++) {
-        if (!isdigit(tab[i]) && tab[i] != ':' && !(tab[i] >= 'a' && tab[i] <= 'f') && !(tab[i] >= 'A' && tab[i] <= 'F')) {
-            return 0;
-        }
-    }
-    return 1;
-}
-
-int guess_type(int len, char *tab[]) {
-    int i;
-    for (i=0; i<len; i++) {
-        if (isIPv4(tab[i])) {
-            return 4;
-        } else if (isIPv6(tab[i])) {
-            return 6;
-        }
-    }
-    return 0;
-}
-
-void exec(char *str, char **argv) {
-    argv[0] = str;
-    execvp(str, argv);
-    // returns only if fails
-    printf("ip46tables: exec failed %d\n", errno);
-}
-
-int main(int argc, char **argv) {
-    if (argc < 2) {
-        printf("ip46tables: At least one argument has to be given\n");
-        exit(1);
-    }
-    int type;
-    type = guess_type(argc, argv);
-    if (type == 4) {
-        exec("iptables", argv);
-    } else if (type == 6) {
-        exec("ip6tables", argv);
-    } else {
-        pid_t pid = fork();
-        if (pid == -1) {
-            printf("ip46tables: fork failed\n");
-            exit(1);
-        } else if (pid) {
-            exec("iptables", argv);
-        } else {
-            exec("ip6tables", argv);
-        }
-    }
-}
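Both deleted helpers share the same cheap address-family guess: character-class checks rather than full parsing. A sketch of that logic in Rust (mirroring `isIPv4`/`isIPv6` from `ip46tables.c` above; deliberately not a validating parser):

```rust
// Character-class address-family guess, as in the deleted C helpers.
fn is_ipv4(s: &str) -> bool {
    let b = s.as_bytes();
    let len = b.len();
    // IPv4 addresses are at least 7 chars and start/end with a digit
    if len < 7 || !b[0].is_ascii_digit() || !b[len - 1].is_ascii_digit() {
        return false;
    }
    // Every inner char is a digit, or a dot between two digits
    (1..len - 1).all(|i| {
        b[i].is_ascii_digit()
            || (b[i] == b'.' && b[i - 1].is_ascii_digit() && b[i + 1].is_ascii_digit())
    })
}

fn is_ipv6(s: &str) -> bool {
    // IPv6 addresses are at least 3 chars of hex digits and colons
    s.len() >= 3 && s.bytes().all(|c| c.is_ascii_hexdigit() || c == b':')
}

fn main() {
    assert!(is_ipv4("1.2.3.4") && !is_ipv4("a:b::c:d"));
    assert!(is_ipv6("a:b::c:d") && !is_ipv6("1.2.3.4"));
    println!("dispatch: 1.2.3.4 -> iptables, a:b::c:d -> ip6tables");
}
```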
nft46.c (deleted file, 97 lines)

@@ -1,97 +0,0 @@
-#include<ctype.h>
-#include<errno.h>
-#include<stdio.h>
-#include<stdlib.h>
-#include<string.h>
-#include<unistd.h>
-
-// nft46 'add element inet reaction ipvXbans { 1.2.3.4 }' → nft 'add element inet reaction ipv4bans { 1.2.3.4 }'
-// nft46 'add element inet reaction ipvXbans { a:b::c:d }' → nft 'add element inet reaction ipv6bans { a:b::c:d }'
-//
-// the character X is replaced by 4 or 6 depending on the address family of the specified IP
-//
-// Limitations:
-// - nft46 must receive exactly one argument
-// - only one IP must be given per command
-// - the IP must be between { braces }
-
-int isIPv4(char *tab, int len) {
-    int i;
-    // IPv4 addresses are at least 7 chars long
-    if (len < 7 || !isdigit(tab[0]) || !isdigit(tab[len-1])) {
-        return 0;
-    }
-    // Each char must be a digit or a dot between 2 digits
-    for (i=1; i<len-1; i++) {
-        if (!isdigit(tab[i]) && !(tab[i] == '.' && isdigit(tab[i-1]) && isdigit(tab[i+1]))) {
-            return 0;
-        }
-    }
-    return 1;
-}
-
-int isIPv6(char *tab, int len) {
-    int i;
-    // IPv6 addresses are at least 3 chars long
-    if (len < 3) {
-        return 0;
-    }
-    // Each char must be a digit, :, a-f, or A-F
-    for (i=0; i<len; i++) {
-        if (!isdigit(tab[i]) && tab[i] != ':' && tab[i] != '.' && !(tab[i] >= 'a' && tab[i] <= 'f') && !(tab[i] >= 'A' && tab[i] <= 'F')) {
-            return 0;
-        }
-    }
-    return 1;
-}
-
-int findchar(char *tab, char c, int i, int len) {
-    while (i < len && tab[i] != c) i++;
-    if (i == len) {
-        printf("nft46: one %c must be present", c);
-        exit(1);
-    }
-    return i;
-}
-
-void adapt_args(char *tab) {
-    int i, len, X, startIP, endIP, startedIP;
-    X = startIP = endIP = -1;
-    startedIP = 0;
-    len = strlen(tab);
-    i = 0;
-    X = i = findchar(tab, 'X', i, len);
-    startIP = i = findchar(tab, '{', i, len);
-    while (startIP + 1 <= (i = findchar(tab, ' ', i, len))) startIP = i + 1;
-    i = startIP;
-    endIP = i = findchar(tab, ' ', i, len) - 1;
-
-    if (isIPv4(tab+startIP, endIP-startIP+1)) {
-        tab[X] = '4';
-        return;
-    }
-
-    if (isIPv6(tab+startIP, endIP-startIP+1)) {
-        tab[X] = '6';
-        return;
-    }
-
-    printf("nft46: no IP address found\n");
-    exit(1);
-}
-
-void exec(char *str, char **argv) {
-    argv[0] = str;
-    execvp(str, argv);
-    // returns only if fails
-    printf("nft46: exec failed %d\n", errno);
-}
-
-int main(int argc, char **argv) {
-    if (argc != 2) {
-        printf("nft46: Exactly one argument must be given\n");
-        exit(1);
-    }
-    adapt_args(argv[1]);
-    exec("nft", argv);
-}
@@ -4,17 +4,21 @@ MANDIR = $(PREFIX)/share/man/man1
 SYSTEMDDIR ?= /etc/systemd

 install:
-	install -Dm755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR)
+	install -Dm755 reaction $(DESTDIR)$(BINDIR)
+	install -Dm755 reaction-plugin-virtual $(DESTDIR)$(BINDIR)
 	install -Dm644 reaction*.1 -t $(DESTDIR)$(MANDIR)/
 	install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction
 	install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish
 	install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction
 	install -Dm644 reaction.service $(SYSTEMDDIR)/system/reaction.service

+install-ipset:
+	install -Dm755 reaction-plugin-ipset $(DESTDIR)$(BINDIR)
+
 remove:
 	rm -f $(DESTDIR)$(BINDIR)/bin/reaction
-	rm -f $(DESTDIR)$(BINDIR)/bin/nft46
-	rm -f $(DESTDIR)$(BINDIR)/bin/ip46tables
+	rm -f $(DESTDIR)$(BINDIR)/bin/reaction-plugin-virtual
+	rm -f $(DESTDIR)$(BINDIR)/bin/reaction-plugin-ipset
 	rm -f $(DESTDIR)$(MANDIR)/reaction*.1
 	rm -f $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction
 	rm -f $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish
@@ -1,13 +1,13 @@
 # vim: ft=systemd
 [Unit]
-Description=A daemon that scans program outputs for repeated patterns, and takes action.
+Description=reaction daemon
 Documentation=https://reaction.ppom.me
 # Ensure reaction will insert its chain after docker has inserted theirs. Only useful when iptables & docker are used
 # After=docker.service

 # See `man systemd.exec` and `man systemd.service` for most options below
 [Service]
-ExecStart=/usr/bin/reaction start -c /etc/%i
+ExecStart=/usr/bin/reaction start -c /etc/reaction/

 # Ask systemd to create /var/lib/reaction (/var/lib/ is implicit)
 StateDirectory=reaction

@@ -15,6 +15,10 @@ StateDirectory=reaction
 RuntimeDirectory=reaction
 # Start reaction in its state directory
 WorkingDirectory=/var/lib/reaction
+# Let reaction kill its child processes first
+KillMode=mixed
+# Put reaction in its own slice so that plugins can be grouped within.
+Slice=system-reaction.slice

 [Install]
 WantedBy=multi-user.target
packaging/system-reaction.slice (new file, 1 line)

@@ -0,0 +1 @@
+[Slice]
plugins/reaction-plugin-cluster/Cargo.toml (new file, 23 lines)

@@ -0,0 +1,23 @@
+[package]
+name = "reaction-plugin-cluster"
+version = "0.1.0"
+edition = "2024"
+
+[dependencies]
+reaction-plugin.workspace = true
+
+chrono.workspace = true
+futures.workspace = true
+remoc.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+tokio.workspace = true
+tokio.features = ["rt-multi-thread"]
+treedb.workspace = true
+
+data-encoding = "2.9.0"
+iroh = { version = "0.95.1", default-features = false }
+rand = "0.9.2"
+
+[dev-dependencies]
+assert_fs.workspace = true
plugins/reaction-plugin-cluster/src/cluster.rs (new file, 165 lines)

@@ -0,0 +1,165 @@
+use std::{
+    collections::BTreeMap,
+    net::{SocketAddrV4, SocketAddrV6},
+    sync::Arc,
+    time::Duration,
+};
+
+use futures::future::join_all;
+use iroh::{
+    Endpoint,
+    endpoint::{ConnectOptions, TransportConfig},
+};
+use reaction_plugin::{Line, shutdown::ShutdownController};
+use remoc::rch::mpsc as remocMpsc;
+use tokio::sync::mpsc as tokioMpsc;
+use treedb::{Database, time::Time};
+
+use crate::{ActionInit, StreamInit, connection::ConnectionManager, endpoint::EndpointManager};
+
+pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()];
+
+pub type UtcLine = (Arc<String>, Time);
+
+pub fn transport_config() -> TransportConfig {
+    // FIXME higher timeouts and keep alive
+    let mut transport = TransportConfig::default();
+    transport
+        .max_idle_timeout(Some(Duration::from_millis(2000).try_into().unwrap()))
+        .keep_alive_interval(Some(Duration::from_millis(200)));
+    transport
+}
+
+pub fn connect_config() -> ConnectOptions {
+    ConnectOptions::new().with_transport_config(transport_config().into())
+}
+
+pub async fn bind(stream: &StreamInit) -> Result<Endpoint, String> {
+    let mut builder = Endpoint::builder()
+        .secret_key(stream.secret_key.clone())
+        .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect())
+        .relay_mode(iroh::RelayMode::Disabled)
+        .clear_discovery()
+        .transport_config(transport_config());
+
+    if let Some(ip) = stream.bind_ipv4 {
+        builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port));
+    }
+    if let Some(ip) = stream.bind_ipv6 {
+        builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0));
+    }
+
+    builder.bind().await.map_err(|err| {
+        format!(
+            "Could not create socket address for cluster {}: {err}",
+            stream.name
+        )
+    })
+}
+
+pub async fn cluster_tasks(
+    endpoint: Endpoint,
+    mut stream: StreamInit,
+    mut actions: Vec<ActionInit>,
+    db: &mut Database,
+    shutdown: ShutdownController,
+) -> Result<(), String> {
+    eprintln!("DEBUG cluster tasks starts running");
+
+    let (message_action2connection_txs, mut message_action2connection_rxs): (
+        Vec<tokioMpsc::Sender<UtcLine>>,
+        Vec<tokioMpsc::Receiver<UtcLine>>,
+    ) = (0..stream.nodes.len())
+        .map(|_| tokioMpsc::channel(1))
+        .unzip();
+
+    // Spawn action tasks
+    while let Some(mut action) = actions.pop() {
+        let message_action2connection_txs = message_action2connection_txs.clone();
+        let own_cluster_tx = stream.tx.clone();
+        tokio::spawn(async move {
+            action
+                .serve(message_action2connection_txs, own_cluster_tx)
+                .await
+        });
+    }
+
+    let endpoint = Arc::new(endpoint);
+
+    let mut connection_endpoint2connection_txs = BTreeMap::new();
+
+    // Spawn connection managers
+    while let Some((pk, endpoint_addr)) = stream.nodes.pop_first() {
+        let cluster_name = stream.name.clone();
+        let endpoint = endpoint.clone();
+        let message_action2connection_rx = message_action2connection_rxs.pop().unwrap();
+        let stream_tx = stream.tx.clone();
+        let shutdown = shutdown.clone();
+        let (connection_manager, connection_endpoint2connection_tx) = ConnectionManager::new(
+            cluster_name,
+            endpoint_addr,
+            endpoint,
+            stream.message_timeout,
+            message_action2connection_rx,
+            stream_tx,
+            db,
+            shutdown,
+        )
+        .await?;
+        tokio::spawn(async move { connection_manager.task().await });
+        connection_endpoint2connection_txs.insert(pk, connection_endpoint2connection_tx);
+    }
+
+    // Spawn connection accepter
+    EndpointManager::new(
+        endpoint.clone(),
+        stream.name.clone(),
+        connection_endpoint2connection_txs,
+        shutdown.clone(),
+    );
+
+    eprintln!("DEBUG cluster tasks finished running");
+    Ok(())
+}
+
+impl ActionInit {
+    // Receive messages from its reaction action and dispatch them to all connections and to the reaction stream
+    async fn serve(
+        &mut self,
+        nodes_tx: Vec<tokioMpsc::Sender<UtcLine>>,
+        own_stream_tx: remocMpsc::Sender<Line>,
+    ) {
+        while let Ok(Some(m)) = self.rx.recv().await {
+            eprintln!("DEBUG action: received a message to send to connections");
+            let line = self.send.line(m.match_);
+            if self.self_
+                && let Err(err) = own_stream_tx.send((line.clone(), m.time)).await
+            {
+                eprintln!("ERROR while queueing message to be sent to own cluster stream: {err}");
+            }
+
+            let line = (Arc::new(line), m.time.into());
+            for result in join_all(nodes_tx.iter().map(|tx| tx.send(line.clone()))).await {
+                if let Err(err) = result {
+                    eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}");
+                };
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use chrono::{DateTime, Local};
+
+    // As long as nodes communicate with UTC datetimes, them having different local timezones is not an issue!
+    #[test]
+    fn different_local_tz_is_ok() {
+        let dates: Vec<DateTime<Local>> = serde_json::from_str(
+            "[\"2025-11-02T17:47:21.716229569+01:00\",\"2025-11-02T18:47:21.716229569+02:00\"]",
+        )
+        .unwrap();
+
+        assert_eq!(dates[0].to_utc(), dates[1].to_utc());
+    }
+}
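In `ActionInit::serve` above, one action message fans out to a bounded channel per remote node, plus optionally the node's own stream. A runnable sketch of that fan-out (assumes the `futures` and `tokio` crates; the names here are illustrative, not the plugin's API):

```rust
use futures::future::join_all;
use tokio::sync::mpsc;

// Fan-out sketch: one message is cloned into a dedicated bounded
// channel per remote node, queued concurrently with join_all.
#[tokio::main]
async fn main() {
    let (txs, mut rxs): (Vec<_>, Vec<_>) = (0..3).map(|_| mpsc::channel::<String>(1)).unzip();

    let line = "ban 1.2.3.4".to_string();
    // Queue the same line towards every node concurrently.
    for result in join_all(txs.iter().map(|tx| tx.send(line.clone()))).await {
        result.expect("a node channel was closed");
    }

    for (i, rx) in rxs.iter_mut().enumerate() {
        println!("node {i} got: {}", rx.recv().await.unwrap());
    }
}
```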
668
plugins/reaction-plugin-cluster/src/connection.rs
Normal file
668
plugins/reaction-plugin-cluster/src/connection.rs
Normal file
|
|
@ -0,0 +1,668 @@
|
||||||
|
use std::{cmp::max, io::Error as IoError, sync::Arc, time::Duration};
|
||||||
|
|
||||||
|
use futures::FutureExt;
|
||||||
|
use iroh::{
|
||||||
|
Endpoint, EndpointAddr,
|
||||||
|
endpoint::{Connection, RecvStream, SendStream, VarInt},
|
||||||
|
};
|
||||||
|
use rand::random_range;
|
||||||
|
use reaction_plugin::{Line, shutdown::ShutdownController};
|
||||||
|
use tokio::{
|
||||||
|
io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter},
|
||||||
|
sync::mpsc,
|
||||||
|
time::sleep,
|
||||||
|
};
|
||||||
|
use treedb::{
|
||||||
|
Database, Tree,
|
||||||
|
helpers::{to_string, to_time},
|
||||||
|
time::{Time, now},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
cluster::{ALPN, UtcLine, connect_config},
|
||||||
|
key::Show,
|
||||||
|
};
|
||||||
|
|
||||||
|
const PROTOCOL_VERSION: u32 = 1;
|
||||||
|
|
||||||
|
const CLOSE_RECV: (u32, &[u8]) = (1, b"error receiving from your stream");
|
||||||
|
const CLOSE_CLOSED: (u32, &[u8]) = (2, b"you closed your stream");
|
||||||
|
const CLOSE_SEND: (u32, &[u8]) = (3, b"could not send a message to your channel so I quit");
|
||||||
|
|
||||||
|
type MaybeRemoteLine = Result<Option<(String, Time)>, IoError>;
|
||||||
|
|
||||||
|
enum Event {
|
||||||
|
LocalMessageReceived(Option<UtcLine>),
|
||||||
|
RemoteMessageReceived(MaybeRemoteLine),
|
||||||
|
ConnectionReceived(Option<ConnOrConn>),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct OwnConnection {
|
||||||
|
connection: Connection,
|
||||||
|
id: u64,
|
||||||
|
|
||||||
|
line_tx: BufWriter<SendStream>,
|
||||||
|
line_rx: BufReader<RecvStream>,
|
||||||
|
|
||||||
|
next_time_secs: Option<u64>,
|
||||||
|
next_time_nanos: Option<u32>,
|
||||||
|
next_len: Option<usize>,
|
||||||
|
next_line: Option<Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OwnConnection {
|
||||||
|
fn new(
|
||||||
|
connection: Connection,
|
||||||
|
id: u64,
|
||||||
|
line_tx: BufWriter<SendStream>,
|
||||||
|
line_rx: BufReader<RecvStream>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
connection,
|
||||||
|
id,
|
||||||
|
line_tx,
|
||||||
|
line_rx,
|
||||||
|
next_time_secs: None,
|
||||||
|
next_time_nanos: None,
|
||||||
|
next_len: None,
|
||||||
|
next_line: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a line to peer.
|
||||||
|
///
|
||||||
|
/// Time is a std::time::Duration since UNIX_EPOCH, which is defined as UTC
|
||||||
|
/// So it's safe to use between nodes using different timezones
|
||||||
|
async fn send_line(&mut self, line: &String, time: &Time) -> Result<(), std::io::Error> {
|
||||||
|
self.line_tx.write_u64(time.as_secs()).await?;
|
||||||
|
self.line_tx.write_u32(time.subsec_nanos()).await?;
|
||||||
|
self.line_tx.write_u32(line.len() as u32).await?;
|
||||||
|
self.line_tx.write_all(line.as_bytes()).await?;
|
||||||
|
self.line_tx.flush().await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cancel-safe function that returns next line from peer
|
||||||
|
/// Returns None if we don't have all data yet.
|
||||||
|
async fn recv_line(&mut self) -> MaybeRemoteLine {
|
||||||
|
if self.next_time_secs.is_none() {
|
||||||
|
self.next_time_secs = Some(self.line_rx.read_u64().await?);
|
||||||
|
}
|
||||||
|
if self.next_time_nanos.is_none() {
|
||||||
|
self.next_time_nanos = Some(self.line_rx.read_u32().await?);
|
||||||
|
}
|
||||||
|
if self.next_len.is_none() {
|
||||||
|
self.next_len = Some(self.line_rx.read_u32().await? as usize);
|
||||||
|
}
|
||||||
|
// Ok we have next_len.is_some()
|
||||||
|
let next_len = self.next_len.clone().unwrap();
|
||||||
|
|
||||||
|
if self.next_line.is_none() {
|
||||||
|
self.next_line = Some(Vec::with_capacity(next_len));
|
||||||
|
}
|
||||||
|
// Ok we have next_line.is_some()
|
||||||
|
let next_line = self.next_line.as_mut().unwrap();
|
||||||
|
|
||||||
|
let actual_len = next_line.len();
|
||||||
|
// Resize to wanted length
|
||||||
|
next_line.resize(next_len, 0);
|
||||||
|
|
||||||
|
// Read bytes
|
||||||
|
let bytes_read = self
|
||||||
|
.line_rx
|
||||||
|
.read(&mut next_line[actual_len..next_len])
|
||||||
|
.await?;
|
||||||
|
// Truncate possibly unread bytes
|
||||||
|
next_line.truncate(actual_len + bytes_read);
|
||||||
|
|
||||||
|
// Let's test if we read all bytes
|
||||||
|
if next_line.len() == next_len {
|
||||||
|
// Ok we have a full line
|
||||||
|
self.next_len.take();
|
||||||
|
let line = String::try_from(self.next_line.take().unwrap()).map_err(|err| {
|
||||||
|
std::io::Error::new(std::io::ErrorKind::InvalidData, err.to_string())
|
||||||
|
})?;
|
||||||
|
let time = Time::new(
|
||||||
|
self.next_time_secs.take().unwrap(),
|
||||||
|
self.next_time_nanos.take().unwrap(),
|
||||||
|
);
|
||||||
|
Ok(Some((line, time)))
|
||||||
|
} else {
|
||||||
|
// Ok we don't have a full line, will be next time!
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub enum ConnOrConn {
|
||||||
|
Connection(Connection),
|
||||||
|
OwnConnection(OwnConnection),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle a remote node.
|
||||||
|
/// Manage reception and sending of messages to this node.
|
||||||
|
/// Retry failed connections.
|
||||||
|
pub struct ConnectionManager {
|
||||||
|
/// Cluster's name (for logging)
|
||||||
|
cluster_name: String,
|
||||||
|
/// The remote node we're communicating with (for logging)
|
||||||
|
node_id: String,
|
||||||
|
/// Remote
|
||||||
|
remote: EndpointAddr,
|
||||||
|
/// Endpoint
|
||||||
|
endpoint: Arc<Endpoint>,
|
||||||
|
|
||||||
|
/// Cancel asking for a connection
|
||||||
|
cancel_ask_connection: Option<mpsc::Sender<()>>,
|
||||||
|
/// Create a delegated task to send ourselves a connection
|
||||||
|
connection_tx: mpsc::Sender<ConnOrConn>,
|
||||||
|
/// The EndpointManager or our delegated task sending us a connection (whether we asked for it or not)
|
||||||
|
connection_rx: mpsc::Receiver<ConnOrConn>,
|
||||||
|
/// Our own connection (when we have one)
|
||||||
|
connection: Option<OwnConnection>,
|
||||||
|
/// Last connexion ID, used to have a determinist way to choose between conflicting connections
|
||||||
|
last_connection_id: u64,
|
||||||
|
|
||||||
|
/// Max duration before we drop pending messages to a node we can't connect to.
|
||||||
|
message_timeout: Duration,
|
||||||
|
/// Message we receive from actions
|
||||||
|
message_rx: mpsc::Receiver<UtcLine>,
|
||||||
|
/// Our queue of messages to send
|
||||||
|
message_queue: Tree<Time, Arc<String>>,
|
||||||
|
|
||||||
|
/// Messages we send from remote nodes to our own stream
|
||||||
|
own_cluster_tx: remoc::rch::mpsc::Sender<Line>,
|
||||||
|
|
||||||
|
/// shutdown
|
||||||
|
shutdown: ShutdownController,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConnectionManager {
|
||||||
|
pub async fn new(
|
||||||
|
cluster_name: String,
|
||||||
|
remote: EndpointAddr,
|
||||||
|
endpoint: Arc<Endpoint>,
|
||||||
|
message_timeout: Duration,
|
||||||
|
message_rx: mpsc::Receiver<UtcLine>,
|
||||||
|
own_cluster_tx: remoc::rch::mpsc::Sender<Line>,
|
||||||
|
db: &mut Database,
|
||||||
|
shutdown: ShutdownController,
|
||||||
|
) -> Result<(Self, mpsc::Sender<ConnOrConn>), String> {
|
||||||
|
let node_id = remote.id.show();
|
||||||
|
|
||||||
|
let message_queue = db
|
||||||
|
.open_tree(
|
||||||
|
format!("message_queue_{}_{}", endpoint.id().show(), node_id),
|
||||||
|
message_timeout,
|
||||||
|
|(key, value)| Ok((to_time(&key)?, Arc::new(to_string(&value)?))),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let (connection_tx, connection_rx) = mpsc::channel(1);
|
||||||
|
Ok((
|
||||||
|
Self {
|
||||||
|
cluster_name,
|
||||||
|
node_id,
|
||||||
|
remote,
|
||||||
|
endpoint,
|
||||||
|
connection: None,
|
||||||
|
cancel_ask_connection: None,
|
||||||
|
connection_tx: connection_tx.clone(),
|
||||||
|
connection_rx,
|
||||||
|
last_connection_id: 0,
|
||||||
|
message_timeout,
|
||||||
|
message_rx,
|
||||||
|
message_queue,
|
||||||
|
own_cluster_tx,
|
||||||
|
shutdown,
|
||||||
|
},
|
||||||
|
connection_tx,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Main loop
|
||||||
|
pub async fn task(mut self) {
|
||||||
|
self.ask_connection();
|
||||||
|
loop {
|
||||||
|
let have_connection = self.connection.is_some();
|
||||||
|
let maybe_conn_rx = self
|
||||||
|
.connection
|
||||||
|
.as_mut()
|
||||||
|
.map(|conn| conn.recv_line().boxed())
|
||||||
|
// This Future will never be polled because of the if in select!
|
||||||
|
// It still needs to be present because the branch will be evaluated
|
||||||
|
// so we can't unwrap
|
||||||
|
.unwrap_or(false_recv().boxed());

            let event = tokio::select! {
                biased;
                // Quitting
                _ = self.shutdown.wait() => None,
                // Receive a connection from EndpointManager
                conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)),
                // Receive remote message when we have a connection
                msg = maybe_conn_rx, if have_connection => Some(Event::RemoteMessageReceived(msg)),
                // Receive a message from local Actions
                msg = self.message_rx.recv() => Some(Event::LocalMessageReceived(msg)),
            };

            match event {
                Some(event) => {
                    self.handle_event(event).await;
                    self.send_queue_messages().await;
                    self.drop_timeout_messages().await;
                }
                None => break,
            }
        }
    }

    async fn handle_event(&mut self, event: Event) {
        match event {
            Event::ConnectionReceived(connection) => {
                self.handle_connection(connection).await;
            }
            Event::LocalMessageReceived(utc_line) => {
                self.handle_local_message(utc_line).await;
            }
            Event::RemoteMessageReceived(message) => {
                self.handle_remote_message(message).await;
            }
        }
    }

    async fn send_queue_messages(&mut self) {
        while let Some(connection) = &mut self.connection
            && let Some((time, line)) = self
                .message_queue
                .first_key_value()
                .map(|(k, v)| (k.clone(), v.clone()))
        {
            if let Err(err) = connection.send_line(&line, &time).await {
                eprintln!(
                    "INFO cluster {}: connection with node {} failed: {err}",
                    self.cluster_name, self.node_id,
                );
                self.close_connection(CLOSE_SEND).await;
            } else {
                self.message_queue.remove(&time).await;
                eprintln!(
                    "DEBUG cluster {}: node {}: sent a local message to remote: {}",
                    self.cluster_name, self.node_id, line
                );
            }
        }
    }

    async fn drop_timeout_messages(&mut self) {
        let now = now();
        let mut count = 0;
        loop {
            // We have a next key and it reached timeout
            if let Some(next_key) = self.message_queue.first_key_value().map(|kv| kv.0.clone())
                && next_key + self.message_timeout < now
            {
                self.message_queue.remove(&next_key).await;
                count += 1;
            } else {
                break;
            }
        }
        if count > 0 {
            eprintln!(
                "DEBUG cluster {}: node {}: dropping {count} messages that reached timeout",
                self.cluster_name, self.node_id,
            )
        }
    }

    /// Bootstrap a new Connection.
    /// A `None` means the EndpointManager has quit, so we quit too.
    async fn handle_connection(&mut self, connection: Option<ConnOrConn>) {
        match connection {
            None => {
                eprintln!(
                    "DEBUG cluster {}: ConnectionManager {}: quitting because EndpointManager has quit",
                    self.cluster_name, self.node_id,
                );
                self.quit();
            }
            Some(connection) => {
                if let Some(cancel) = self.cancel_ask_connection.take() {
                    let _ = cancel.send(()).await;
                }

                let last_connection_id = self.last_connection_id;
                let mut insert_connection = |own_connection: OwnConnection| {
                    if self
                        .connection
                        .as_ref()
                        .is_none_or(|old_own| old_own.id < own_connection.id)
                    {
                        self.last_connection_id = own_connection.id;
                        self.connection = Some(own_connection);
                    } else {
                        eprintln!(
                            "WARN cluster {}: node {}: ignoring incoming connection, as we already have a valid connection with it and our connection id is greater",
                            self.cluster_name, self.node_id,
                        );
                    }
                };

                match connection {
                    ConnOrConn::Connection(connection) => {
                        match open_channels(
                            connection,
                            last_connection_id,
                            &self.cluster_name,
                            &self.node_id,
                        )
                        .await
                        {
                            Ok(own_connection) => insert_connection(own_connection),
                            Err(err) => {
                                eprintln!(
                                    "ERROR cluster {}: trying to initialize connection to node {}: {err}",
                                    self.cluster_name, self.node_id,
                                );
                                if self.connection.is_none() {
                                    self.ask_connection();
                                }
                            }
                        }
                    }
                    ConnOrConn::OwnConnection(own_connection) => insert_connection(own_connection),
                }
            }
        }
    }

    async fn handle_remote_message(&mut self, message: MaybeRemoteLine) {
        match message {
            Err(err) => {
                eprintln!(
                    "WARN cluster {}: node {}: connection {}: error receiving remote message: {err}",
                    self.cluster_name, self.node_id, self.last_connection_id
                );
                self.close_connection(CLOSE_RECV).await;
            }
            Ok(None) => {
                eprintln!(
                    "WARN cluster {}: node {} closed its stream",
                    self.cluster_name, self.node_id,
                );
                self.close_connection(CLOSE_CLOSED).await;
            }
            Ok(Some(line)) => {
                if let Err(err) = self
                    .own_cluster_tx
                    .send((line.0.clone(), line.1.into()))
                    .await
                {
                    eprintln!(
                        "ERROR cluster {}: could not send message to reaction stream: {err}",
                        self.cluster_name
                    );
                    eprintln!(
                        "INFO cluster {}: line that can't be sent: {}",
                        self.cluster_name, line.0
                    );
                    self.quit();
                } else {
                    eprintln!(
                        "DEBUG cluster {}: node {}: sent a remote message to local stream: {}",
                        self.cluster_name, self.node_id, line.0
                    );
                }
            }
        }
    }

    async fn handle_local_message(&mut self, message: Option<UtcLine>) {
        eprintln!(
            "DEBUG cluster {}: node {}: received a local message",
            self.cluster_name, self.node_id,
        );
        match message {
            None => {
                eprintln!(
                    "INFO cluster {}: no action remaining, quitting",
                    self.cluster_name
                );
                self.quit();
            }
            Some(message) => match &mut self.connection {
                Some(connection) => {
                    if let Err(err) = connection.send_line(&message.0, &message.1).await {
                        eprintln!(
                            "INFO cluster {}: connection with node {} failed: {err}",
                            self.cluster_name, self.node_id,
                        );
                        self.message_queue.insert(message.1, message.0).await;
                        self.close_connection(CLOSE_SEND).await;
                    } else {
                        eprintln!(
                            "DEBUG cluster {}: node {}: sent a local message to remote: {}",
                            self.cluster_name, self.node_id, message.0
                        );
                    }
                }
                None => {
                    eprintln!(
                        "DEBUG cluster {}: node {}: no connection, saving local message to send later: {}",
                        self.cluster_name, self.node_id, message.0
                    );
                    self.message_queue.insert(message.1, message.0).await;
                }
            },
        }
    }

    async fn close_connection(&mut self, code: (u32, &[u8])) {
        if let Some(connection) = self.connection.take() {
            connection
                .connection
                .close(VarInt::from_u32(code.0), code.1);
        }
        self.ask_connection();
    }

    fn ask_connection(&mut self) {
        // if self.node_id.starts_with('H') {
        let (tx, rx) = mpsc::channel(1);
        self.cancel_ask_connection = Some(tx);
        try_connect(
            self.cluster_name.clone(),
            self.remote.clone(),
            self.endpoint.clone(),
            self.last_connection_id,
            self.connection_tx.clone(),
            rx,
        );
    }

    fn quit(&mut self) {
        self.shutdown.ask_shutdown();
    }
}

/// Accept one stream and create one stream.
/// This way, there is no need to know if we created or accepted the connection.
async fn open_channels(
    connection: Connection,
    last_connexion_id: u64,
    cluster_name: &str,
    node_id: &str,
) -> Result<OwnConnection, IoError> {
    eprintln!(
        "DEBUG cluster {}: node {}: opening uni channel",
        cluster_name, node_id
    );
    let mut output = BufWriter::new(connection.open_uni().await?);

    let our_id = random_range(last_connexion_id + 1..last_connexion_id + 1_000_000);

    eprintln!(
        "DEBUG cluster {}: node {}: sending handshake in uni channel",
        cluster_name, node_id
    );
    output.write_u32(PROTOCOL_VERSION).await?;
    output.write_u64(our_id).await?;
    output.flush().await?;

    eprintln!(
        "DEBUG cluster {}: node {}: accepting uni channel",
        cluster_name, node_id
    );
    let mut input = BufReader::new(connection.accept_uni().await?);

    eprintln!(
        "DEBUG cluster {}: node {}: reading handshake from uni channel",
        cluster_name, node_id
    );
    let their_version = input.read_u32().await?;

    if their_version != PROTOCOL_VERSION {
        return Err(IoError::new(
            std::io::ErrorKind::InvalidData,
            format!(
                "incompatible version: {their_version}. We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version."
            ),
        ));
    }

    let their_id = input.read_u64().await?;
    // FIXME Do we need to test this? If so, this function should return their_id even when error in order to retry better next time
    // if their_id < last_connexion_id
    //   ERROR
    // else
    let chosen_id = max(our_id, their_id);
    eprintln!(
        "DEBUG cluster {}: node {}: version handshake complete: last id: {last_connexion_id}, our id: {our_id}, their id: {their_id}: chosen id: {chosen_id}",
        cluster_name, node_id
    );
    Ok(OwnConnection::new(connection, chosen_id, output, input))
}
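// Handshake wire format used above, one uni stream per direction: a 4-byte
// big-endian protocol version followed by an 8-byte big-endian random
// connection id. Both peers keep max(our_id, their_id), so two simultaneous
// dials converge on the same id without an extra round-trip.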

async fn false_recv() -> MaybeRemoteLine {
    Ok(None)
}

const START_TIMEOUT: Duration = Duration::from_millis(500);
const MAX_TIMEOUT: Duration = Duration::from_hours(1);
const TIMEOUT_FACTOR: f64 = 1.5;

fn with_random(d: Duration) -> Duration {
    let max_delta = d.as_micros() as f32 * 0.2;
    d + Duration::from_micros(rand::random_range(0.0..max_delta) as u64)
}

/// Compute the next wait Duration.
/// We multiply the Duration by [`TIMEOUT_FACTOR`] and cap it to [`MAX_TIMEOUT`].
fn next_delta(delta: Option<Duration>) -> Duration {
    with_random(match delta {
        None => START_TIMEOUT,
        Some(delta) => {
            // Multiply timeout by TIMEOUT_FACTOR
            let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64);
            // Cap to MAX_TIMEOUT
            if delta > MAX_TIMEOUT {
                MAX_TIMEOUT
            } else {
                delta
            }
        }
    })
}
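// Rough schedule this yields, before jitter: 500ms, 750ms, 1.125s, 1.69s, ...
// growing by 1.5x each retry and hitting the 1h cap after roughly 22 steps.
// with_random() then adds 0..20% uniform jitter on top, so peers that
// restarted at the same moment don't retry in lock-step.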

#[cfg(test)]
#[test]
fn test_with_random() {
    for d in [
        123, 1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890,
    ] {
        let rd = with_random(Duration::from_micros(d)).as_micros();
        assert!(rd as f32 >= d as f32, "{rd} < {d}");
        assert!(rd as f32 <= (d + 1) as f32 * 1.2, "{rd} > {d} * 1.2");
    }
}

fn try_connect(
    cluster_name: String,
    remote: EndpointAddr,
    endpoint: Arc<Endpoint>,
    last_connection_id: u64,
    connection_tx: mpsc::Sender<ConnOrConn>,
    mut order_stop: mpsc::Receiver<()>,
) {
    tokio::spawn(async move {
        let node_id = remote.id.show();
        // Until we have a connection or we're requested to stop
        let mut keep_trying = true;
        let mut delta = None;

        while keep_trying {
            delta = Some(next_delta(delta));

            keep_trying = tokio::select! {
                _ = sleep(delta.unwrap_or_default()) => true,
                _ = order_stop.recv() => false,
            };
            if keep_trying {
                eprintln!("DEBUG cluster {cluster_name}: node {node_id}: trying to connect...");
                let connect = tokio::select! {
                    // conn = endpoint.connect(remote.clone(), ALPN[0]) => Some(conn),
                    conn = endpoint.connect_with_opts(remote.clone(), ALPN[0], connect_config()) => Some(conn),
                    _ = order_stop.recv() => None,
                };
                if let Some(connect) = connect {
                    let res = match connect {
                        Ok(connecting) => match connecting.await {
                            Ok(connection) => {
                                eprintln!(
                                    "DEBUG cluster {cluster_name}: node {node_id}: created connection"
                                );
                                match open_channels(
                                    connection,
                                    last_connection_id,
                                    &cluster_name,
                                    &node_id,
                                )
                                .await
                                {
                                    Ok(own_connection) => {
                                        if let Err(err) = connection_tx
                                            .send(ConnOrConn::OwnConnection(own_connection))
                                            .await
                                        {
                                            eprintln!(
                                                "DEBUG cluster {cluster_name}: node {node_id}: quitting because ConnectionManager has quit: {err}"
                                            );
                                        }
                                        // successfully opened connection
                                        keep_trying = false;
                                        Ok(())
                                    }
                                    Err(err) => Err(err.to_string()),
                                }
                            }
                            Err(err) => Err(err.to_string()),
                        },
                        Err(err) => Err(err.to_string()),
                    };
                    if let Err(err) = res {
                        eprintln!(
                            "WARN cluster {cluster_name}: node {node_id}: while trying to connect: {err}"
                        );
                    }
                } else {
                    // received stop order
                    eprintln!(
                        "DEBUG cluster {cluster_name}: node {node_id}: stopped trying to connect to this node because we received a connection from it"
                    );
                    keep_trying = false;
                }
            }
        }
    });
}

128  plugins/reaction-plugin-cluster/src/endpoint.rs  Normal file
@@ -0,0 +1,128 @@
use std::collections::BTreeMap;
use std::sync::Arc;

use iroh::{Endpoint, PublicKey, endpoint::Incoming};
use reaction_plugin::shutdown::ShutdownController;
use tokio::sync::mpsc;

use crate::{connection::ConnOrConn, key::Show};

enum Break {
    Yes,
    No,
}

pub struct EndpointManager {
    /// The [`iroh::Endpoint`] to manage
    endpoint: Arc<Endpoint>,
    /// Cluster's name (for logging)
    cluster_name: String,
    /// Connection sender to the Connection Managers
    connections_tx: BTreeMap<PublicKey, mpsc::Sender<ConnOrConn>>,
    /// shutdown
    shutdown: ShutdownController,
}

impl EndpointManager {
    pub fn new(
        endpoint: Arc<Endpoint>,
        cluster_name: String,
        connections_tx: BTreeMap<PublicKey, mpsc::Sender<ConnOrConn>>,
        shutdown: ShutdownController,
    ) {
        tokio::spawn(async move {
            Self {
                endpoint,
                cluster_name,
                connections_tx,
                shutdown,
            }
            .task()
            .await
        });
    }

    async fn task(&mut self) {
        loop {
            // Uncomment this line and comment the select! for faster development in this function
            // let event = Event::TryConnect(self.endpoint_addr_rx.recv().await);
            let incoming = tokio::select! {
                incoming = self.endpoint.accept() => incoming,
                _ = self.shutdown.wait() => None,
            };

            match incoming {
                Some(incoming) => {
                    if let Break::Yes = self.handle_incoming(incoming).await {
                        break;
                    }
                }
                None => break,
            }
        }

        self.endpoint.close().await
    }

    async fn handle_incoming(&mut self, incoming: Incoming) -> Break {
        eprintln!(
            "DEBUG cluster {}: EndpointManager: receiving connection",
            self.cluster_name,
        );
        // FIXME a malicious actor could maybe prevent a node from connecting to
        // its cluster by sending lots of invalid slow connection requests?
        // This function could be moved to a new 'oneshot' task instead
        let remote_address = incoming.remote_address();
        let remote_address_validated = incoming.remote_address_validated();
        let connection = match incoming.await {
            Ok(connection) => connection,
            Err(err) => {
                if remote_address_validated {
                    eprintln!("INFO refused connection from {}: {err}", remote_address)
                } else {
                    eprintln!("INFO refused connection: {err}")
                }
                return Break::No;
            }
        };

        let remote_id = connection.remote_id();

        match self.connections_tx.get(&remote_id) {
            None => {
                eprintln!(
                    "WARN cluster {}: incoming connection from node '{}', ip: {} is not in our list, refusing incoming connection.",
                    self.cluster_name,
                    remote_id.show(),
                    remote_address
                );
                eprintln!(
                    "INFO cluster {}: {} {}",
                    self.cluster_name,
                    "maybe it's not from our cluster,",
                    "maybe this node's configuration has not yet been updated to add this new node."
                );
                return Break::No;
            }
            Some(tx) => {
                if tx.send(ConnOrConn::Connection(connection)).await.is_err() {
                    eprintln!(
                        "DEBUG cluster {}: EndpointManager: quitting because ConnectionManager has quit",
                        self.cluster_name,
                    );
                    self.shutdown.ask_shutdown();
                    return Break::Yes;
                }
                eprintln!(
                    "DEBUG cluster {}: EndpointManager: receiving connection from {}",
                    self.cluster_name,
                    remote_id.show(),
                );
            }
        }

        // TODO persist the incoming address, so that we don't forget this address

        Break::No
    }
}

188  plugins/reaction-plugin-cluster/src/key.rs  Normal file
@@ -0,0 +1,188 @@
use std::io;

use data_encoding::DecodeError;
use iroh::{PublicKey, SecretKey};
use tokio::{
    fs::{self, File},
    io::AsyncWriteExt,
};

pub fn secret_key_path(dir: &str, cluster_name: &str) -> String {
    format!("{dir}/secret_key_{cluster_name}.txt")
}

pub async fn secret_key(dir: &str, cluster_name: &str) -> Result<SecretKey, String> {
    let path = secret_key_path(dir, cluster_name);
    if let Some(key) = get_secret_key(&path).await? {
        Ok(key)
    } else {
        let key = SecretKey::generate(&mut rand::rng());
        set_secret_key(&path, &key).await?;
        Ok(key)
    }
}
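// The first call for a given (dir, cluster_name) generates a fresh key and
// persists it (mode 0600, see set_secret_key below); later calls read the
// same file back, so the node identity stays stable across restarts.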

async fn get_secret_key(path: &str) -> Result<Option<SecretKey>, String> {
    let key = match fs::read_to_string(path).await {
        Ok(key) => Ok(key),
        Err(err) => match err.kind() {
            io::ErrorKind::NotFound => return Ok(None),
            _ => Err(format!("can't read secret key file: {err}")),
        },
    }?;
    let bytes = match key_b64_to_bytes(&key) {
        Ok(key) => Ok(key),
        Err(err) => Err(format!(
            "invalid secret key read from file: {err}. Please remove the `{path}` file from plugin directory.",
        )),
    }?;
    Ok(Some(SecretKey::from_bytes(&bytes)))
}

async fn set_secret_key(path: &str, key: &SecretKey) -> Result<(), String> {
    let secret_key = key.show();
    File::options()
        .mode(0o600)
        .write(true)
        .create(true)
        .open(path)
        .await
        .map_err(|err| format!("can't open `{path}` in plugin directory: {err}"))?
        .write_all(secret_key.as_bytes())
        .await
        .map_err(|err| format!("can't write to `{path}` in plugin directory: {err}"))
}

pub fn key_b64_to_bytes(key: &str) -> Result<[u8; 32], DecodeError> {
    let vec = data_encoding::BASE64URL.decode(key.as_bytes())?;
    if vec.len() != 32 {
        return Err(DecodeError {
            position: vec.len(),
            kind: data_encoding::DecodeKind::Length,
        });
    }
    let mut bytes = [0u8; 32];
    bytes.copy_from_slice(&vec);
    Ok(bytes)
}
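// Note: BASE64URL is the padded URL-safe alphabet, so a valid 32-byte key
// always encodes to exactly 44 characters ending in '=' (see the fixtures
// in the tests below).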

pub fn key_bytes_to_b64(key: &[u8; 32]) -> String {
    data_encoding::BASE64URL.encode(key)
}

/// Implemented by PublicKey & SecretKey to display keys as base64 instead of hexadecimal.
/// Similar to Display/ToString
pub trait Show {
    fn show(&self) -> String;
}

impl Show for PublicKey {
    fn show(&self) -> String {
        key_bytes_to_b64(self.as_bytes())
    }
}

impl Show for SecretKey {
    fn show(&self) -> String {
        key_bytes_to_b64(&self.to_bytes())
    }
}

#[cfg(test)]
mod tests {
    use assert_fs::{
        TempDir,
        prelude::{FileWriteStr, PathChild},
    };
    use iroh::{PublicKey, SecretKey};
    use tokio::fs::read_to_string;

    use crate::key::{
        get_secret_key, key_b64_to_bytes, key_bytes_to_b64, secret_key_path, set_secret_key,
    };

    #[test]
    fn secret_key_encode_decode() {
        for (secret_key, public_key) in [
            (
                "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=",
                "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=",
            ),
            (
                "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=",
                "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=",
            ),
        ] {
            assert_eq!(
                secret_key,
                &key_bytes_to_b64(&key_b64_to_bytes(secret_key).unwrap())
            );
            assert_eq!(
                public_key,
                &key_bytes_to_b64(&key_b64_to_bytes(public_key).unwrap())
            );

            let secret_key_parsed = SecretKey::from_bytes(&key_b64_to_bytes(secret_key).unwrap());
            let public_key_parsed =
                PublicKey::from_bytes(&key_b64_to_bytes(public_key).unwrap()).unwrap();

            assert_eq!(secret_key_parsed.public(), public_key_parsed);
        }
    }

    #[tokio::test]
    async fn secret_key_get() {
        let tmp_dir = TempDir::new().unwrap();
        let tmp_dir_str = tmp_dir.to_str().unwrap();
        for (secret_key, cluster_name) in [
            ("g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=", "my_cluster"),
            ("5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=", "name"),
        ] {
            tmp_dir
                .child(&format!("secret_key_{cluster_name}.txt"))
                .write_str(secret_key)
                .unwrap();

            let secret_key_parsed = SecretKey::from_bytes(&key_b64_to_bytes(secret_key).unwrap());

            let path = secret_key_path(tmp_dir_str, cluster_name);
            let secret_key_from_file = get_secret_key(&path).await.unwrap();

            assert_eq!(
                secret_key_parsed.to_bytes(),
                secret_key_from_file.unwrap().to_bytes()
            )
        }

        assert_eq!(
            Ok(None),
            get_secret_key(&format!("{tmp_dir_str}/non_existent"))
                .await
                // Can't compare secret keys so we map to bytes
                // even if we don't want one
                .map(|opt| opt.map(|pk| pk.to_bytes()))
        );
        // Will fail if we're root, but who runs this as root??
        assert!(get_secret_key("/root/non_existent").await.is_err());
    }

    #[tokio::test]
    async fn secret_key_set() {
        let tmp_dir = TempDir::new().unwrap();
        let tmp_dir_str = tmp_dir.to_str().unwrap();

        let path = format!("{tmp_dir_str}/secret");
        let key = SecretKey::generate(&mut rand::rng());

        assert!(set_secret_key(&path, &key).await.is_ok());
        let read_file = read_to_string(&path).await;
        assert!(read_file.is_ok());
        assert_eq!(read_file.unwrap(), key_bytes_to_b64(&key.to_bytes()));
    }
}

273  plugins/reaction-plugin-cluster/src/main.rs  Normal file
@@ -0,0 +1,273 @@
use std::{
    collections::{BTreeMap, BTreeSet},
    net::{Ipv4Addr, Ipv6Addr, SocketAddr},
    path::PathBuf,
    time::Duration,
};

use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr};
use reaction_plugin::{
    ActionConfig, ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamConfig,
    StreamImpl, line::PatternLine, main_loop, shutdown::ShutdownController, time::parse_duration,
};
use remoc::{rch::mpsc, rtc};
use serde::{Deserialize, Serialize};
use treedb::Database;

use crate::key::Show;

mod cluster;
mod connection;
mod endpoint;
mod key;

#[cfg(test)]
mod tests;

#[tokio::main]
async fn main() {
    let plugin = Plugin::default();
    main_loop(plugin).await;
}

#[derive(Default)]
struct Plugin {
    init: BTreeMap<String, (StreamInit, Vec<ActionInit>)>,
    cluster_shutdown: ShutdownController,
}

/// Stream options as defined by the user
#[derive(Serialize, Deserialize)]
struct StreamOptions {
    /// The UDP port to open
    listen_port: u16,
    /// The IPv4 to bind to. Defaults to 0.0.0.0.
    /// Set to `null` to use IPv6 only.
    #[serde(default = "ipv4_unspecified")]
    bind_ipv4: Option<Ipv4Addr>,
    /// The IPv6 to bind to. Defaults to `::`.
    /// Set to `null` to use IPv4 only.
    #[serde(default = "ipv6_unspecified")]
    bind_ipv6: Option<Ipv6Addr>,
    /// Other nodes which are part of the cluster.
    nodes: Vec<NodeOption>,
    /// Max duration before we drop pending messages to a node we can't connect to.
    message_timeout: String,
}
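// A minimal, illustrative stream configuration for the struct above (the
// public key is one of the test fixtures used later in this diff; the
// address is a documentation IP):
//
// {
//     "listen_port": 2048,
//     "bind_ipv6": null,
//     "nodes": [{
//         "public_key": "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=",
//         "addresses": ["192.0.2.1:2048"]
//     }],
//     "message_timeout": "30m"
// }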

fn ipv4_unspecified() -> Option<Ipv4Addr> {
    Some(Ipv4Addr::UNSPECIFIED)
}
fn ipv6_unspecified() -> Option<Ipv6Addr> {
    Some(Ipv6Addr::UNSPECIFIED)
}

#[derive(Serialize, Deserialize)]
struct NodeOption {
    public_key: String,
    #[serde(default)]
    addresses: Vec<SocketAddr>,
}

/// Stream information before start
struct StreamInit {
    name: String,
    listen_port: u16,
    bind_ipv4: Option<Ipv4Addr>,
    bind_ipv6: Option<Ipv6Addr>,
    secret_key: SecretKey,
    message_timeout: Duration,
    nodes: BTreeMap<PublicKey, EndpointAddr>,
    tx: mpsc::Sender<Line>,
}

#[derive(Serialize, Deserialize)]
struct ActionOptions {
    /// The line to send to the corresponding cluster, example: "ban \<ip\>"
    send: String,
    /// The name of the corresponding cluster, example: "my_cluster_stream"
    to: String,
    /// Whether the stream of this node also receives the line
    #[serde(default, rename = "self")]
    self_: bool,
}
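// Illustrative action configuration for the struct above:
//
// { "send": "ban <ip>", "to": "my_cluster_stream", "self": true }
//
// With "self": true the rendered line is also delivered to this node's own
// cluster stream, not only to the remote nodes.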

struct ActionInit {
    name: String,
    send: PatternLine,
    self_: bool,
    rx: mpsc::Receiver<Exec>,
}

impl PluginInfo for Plugin {
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::from(["cluster".into()]),
            actions: BTreeSet::from(["cluster_send".into()]),
        })
    }
    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        let mut ret_streams = Vec::with_capacity(streams.len());
        let mut ret_actions = Vec::with_capacity(actions.len());

        for StreamConfig {
            stream_name,
            stream_type,
            config,
        } in streams
        {
            if &stream_type != "cluster" {
                return Err("This plugin can't handle other stream types than cluster".into());
            }

            let options: StreamOptions = serde_json::from_value(config.into())
                .map_err(|err| format!("invalid options: {err}"))?;

            let mut nodes = BTreeMap::default();

            let message_timeout = parse_duration(&options.message_timeout)
                .map_err(|err| format!("invalid message_timeout: {err}"))?;

            if options.bind_ipv4.is_none() && options.bind_ipv6.is_none() {
                Err(
                    "At least one of bind_ipv4 and bind_ipv6 must be enabled: leave at least one of them unset, or set at least one of them to an IP.",
                )?;
            }

            if options.nodes.is_empty() {
                Err("At least one remote node has to be configured for a cluster")?;
            }

            for node in options.nodes.into_iter() {
                let bytes = key::key_b64_to_bytes(&node.public_key)
                    .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?;

                let public_key = PublicKey::from_bytes(&bytes)
                    .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?;

                nodes.insert(
                    public_key,
                    EndpointAddr {
                        id: public_key,
                        addrs: node
                            .addresses
                            .into_iter()
                            .map(TransportAddr::Ip)
                            .collect(),
                    },
                );
            }

            let secret_key = key::secret_key(".", &stream_name).await?;
            eprintln!(
                "INFO public key of this node for cluster {stream_name}: {}",
                secret_key.public().show()
            );

            let (tx, rx) = mpsc::channel(1);

            let stream = StreamInit {
                name: stream_name.clone(),
                listen_port: options.listen_port,
                bind_ipv4: options.bind_ipv4,
                bind_ipv6: options.bind_ipv6,
                secret_key,
                message_timeout,
                nodes,
                tx,
            };

            if self.init.insert(stream_name, (stream, vec![])).is_some() {
                return Err("this virtual stream has already been initialized".into());
            }

            ret_streams.push(StreamImpl {
                stream: rx,
                standalone: true,
            })
        }

        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "cluster_send" {
                return Err(
                    "This plugin can't handle other action types than 'cluster_send'".into(),
                );
            }

            let options: ActionOptions = serde_json::from_value(config.into())
                .map_err(|err| format!("invalid options: {err}"))?;

            let (tx, rx) = mpsc::channel(1);

            let init_action = ActionInit {
                name: format!("{}.{}.{}", stream_name, filter_name, action_name),
                send: PatternLine::new(options.send, patterns),
                self_: options.self_,
                rx,
            };

            match self.init.get_mut(&options.to) {
                Some((_, actions)) => actions.push(init_action),
                None => {
                    return Err(format!(
                        "ERROR action '{}' sends 'to' unknown stream '{}'",
                        init_action.name, options.to
                    )
                    .into());
                }
            }

            ret_actions.push(ActionImpl { tx })
        }

        Ok((ret_streams, ret_actions))
    }

    async fn start(&mut self) -> RemoteResult<()> {
        self.cluster_shutdown.delegate().handle_quit_signals()?;

        let mut db = {
            let path = PathBuf::from(".");
            let (cancellation_token, task_tracker_token) = self.cluster_shutdown.token().split();
            Database::open(&path, cancellation_token, task_tracker_token)
                .await
                .map_err(|err| format!("Can't open database: {err}"))?
        };

        while let Some((_, (stream, actions))) = self.init.pop_first() {
            let endpoint = cluster::bind(&stream).await?;
            cluster::cluster_tasks(
                endpoint,
                stream,
                actions,
                &mut db,
                self.cluster_shutdown.clone(),
            )
            .await?;
        }
        // Free containers
        self.init = Default::default();
        eprintln!("DEBUG started");

        Ok(())
    }

    async fn close(self) -> RemoteResult<()> {
        self.cluster_shutdown.ask_shutdown();
        self.cluster_shutdown.wait_all_task_shutdown().await;
        Ok(())
    }
}

293  plugins/reaction-plugin-cluster/src/tests/conf.rs  Normal file
@@ -0,0 +1,293 @@
use std::env::set_current_dir;

use assert_fs::TempDir;
use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig};
use serde_json::json;

use crate::{Plugin, tests::insert_secret_key};

use super::{PUBLIC_KEY_A, TEST_MUTEX, stream_ok};

#[tokio::test]
async fn conf_stream() {
    // Minimal node configuration
    let nodes = json!([{
        "public_key": PUBLIC_KEY_A,
    }]);

    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "clust".into(),
                    config: stream_ok().into(),
                }],
                vec![]
            )
            .await
            .is_err()
    );

    for (json, is_ok) in [
        (
            json!({
                "listen_port": 2048,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok as fn(&_) -> bool,
        ),
        (
            // invalid time
            json!({
                "listen_port": 2048,
                "nodes": nodes,
                "message_timeout": "30pv",
            }),
            Result::is_err,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv4": "0.0.0.0",
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv6": "::",
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv4": "0.0.0.0",
                "bind_ipv6": "::",
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv4": null,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv6": null,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            // No bind
            json!({
                "listen_port": 2048,
                "bind_ipv4": null,
                "bind_ipv6": null,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_err,
        ),
        (json!({}), Result::is_err),
    ] {
        assert!(is_ok(
            &Plugin::default()
                .load_config(
                    vec![StreamConfig {
                        stream_name: "stream".into(),
                        stream_type: "cluster".into(),
                        config: json.into(),
                    }],
                    vec![]
                )
                .await
        ));
    }
}

#[tokio::test]
async fn conf_action() {
    let patterns = vec!["p1".into(), "p2".into()];

    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "cluster".into(),
                    config: stream_ok().into(),
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "cluster_sen".into(),
                    config: json!({
                        "send": "<p1>",
                        "to": "stream",
                    })
                    .into(),
                    patterns: patterns.clone(),
                }]
            )
            .await
            .is_err()
    );

    for (json, is_ok) in [
        (
            json!({
                "send": "<p1>",
                "to": "stream",
            }),
            true,
        ),
        (
            json!({
                "send": "<p1>",
                "to": "stream",
                "self": true,
            }),
            true,
        ),
        (
            json!({
                "send": "<p1>",
                "to": "stream",
                "self": false,
            }),
            true,
        ),
        (
            // missing to
            json!({
                "send": "<p1>",
            }),
            false,
        ),
        (
            // missing send
            json!({
                "to": "stream",
            }),
            false,
        ),
        (
            // invalid self
            json!({
                "send": "<p1>",
                "to": "stream",
                "self": "true",
            }),
            false,
        ),
        (
            // missing conf
            json!({}),
            false,
        ),
    ] {
        let ret = Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "cluster".into(),
                    config: stream_ok().into(),
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "cluster_send".into(),
                    config: json.clone().into(),
                    patterns: patterns.clone(),
                }],
            )
            .await;

        assert!(
            ret.is_ok() == is_ok,
            "is_ok: {is_ok}, ret: {:?}, action conf: {json:?}",
            ret.map(|_| ())
        );
    }
}

#[tokio::test]
async fn conf_send() {
    let _lock = TEST_MUTEX.lock();
    let dir = TempDir::new().unwrap();
    set_current_dir(&dir).unwrap();
    insert_secret_key().await;

    // No action is ok
    let res = Plugin::default()
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "cluster".into(),
                config: stream_ok().into(),
            }],
            vec![],
        )
        .await;
    assert!(res.is_ok(), "{:?}", res.map(|_| ()));

    // An action is ok
    let res = Plugin::default()
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "cluster".into(),
                config: stream_ok().into(),
            }],
            vec![ActionConfig {
                stream_name: "stream".into(),
                filter_name: "filter".into(),
                action_name: "action".into(),
                action_type: "cluster_send".into(),
                config: json!({ "send": "message", "to": "stream" }).into(),
                patterns: vec![],
            }],
        )
        .await;
    assert!(res.is_ok(), "{:?}", res.map(|_| ()));

    // Invalid to: option
    let res = Plugin::default()
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "cluster".into(),
                config: stream_ok().into(),
            }],
            vec![ActionConfig {
                stream_name: "stream".into(),
                filter_name: "filter".into(),
                action_name: "action".into(),
                action_type: "cluster_send".into(),
                config: json!({ "send": "message", "to": "stream1" }).into(),
                patterns: vec![],
            }],
        )
        .await;
    assert!(res.is_err(), "{:?}", res.map(|_| ()));
}

319  plugins/reaction-plugin-cluster/src/tests/e2e.rs  Normal file
@@ -0,0 +1,319 @@
use std::{env::set_current_dir, time::Duration};

use assert_fs::TempDir;
use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig};
use serde_json::json;
use tokio::{fs, time::timeout};
use treedb::time::now;

use crate::{
    Plugin,
    key::secret_key_path,
    tests::{PUBLIC_KEY_A, PUBLIC_KEY_B, SECRET_KEY_A, SECRET_KEY_B, TEST_MUTEX},
};

#[derive(Clone)]
struct TestNode {
    public_key: &'static str,
    private_key: &'static str,
    port: u16,
}

const POOL: [TestNode; 15] = [
    TestNode {
        public_key: PUBLIC_KEY_A,
        private_key: SECRET_KEY_A,
        port: 2055,
    },
    TestNode {
        public_key: PUBLIC_KEY_B,
        private_key: SECRET_KEY_B,
        port: 2056,
    },
    TestNode {
        public_key: "ZjEPlIdGikV_sPIAUzO3RFUidlERJUhJ9XwNAlieuvU=",
        private_key: "SCbd8Ids3Dg9MwzyMNV1KFcUtsyRbeCp7GDmu-xXBSs=",
        port: 2057,
    },
    TestNode {
        public_key: "2FUpABLl9I6bU9a2XtWKMLDzwHfrVcNEG6K8Ix6sxWQ=",
        private_key: "F0W8nIlVmuFVpelwYH4PDaBDM0COYOyXDmBEmnHyo5s=",
        port: 2058,
    },
    TestNode {
        public_key: "qR4JDI_yyPWUBrmBbQjqfFbGP14v9dEaQVPHPOjId1o=",
        private_key: "S5pxTafNXPd_9TMT4_ERuPXlZ882UmggAHrf8Yntfqg=",
        port: 2059,
    },
    TestNode {
        public_key: "NjkPBwDO4IEOBjkcxufYtVXspJNQZ0qF6GamRq2TOB4=",
        private_key: "zM_lXiFuwTkmPuuXqIghW_J0uwq0a53L_yhM57uy_R8=",
        port: 2060,
    },
    TestNode {
        public_key: "_mgTzrlE8b_zvka3LgfD5qH2h_d3S0hcDU1WzIL6C74=",
        private_key: "6Obq7fxOXK-u-P3QB5FJvNnwXdKwP1FsVJ0555o7DXs=",
        port: 2061,
    },
    TestNode {
        public_key: "FLKxCSSjjzxH0ZWTpQ8xXcSIRutXUhIDhZimjamxO2s=",
        private_key: "pBPcJ32bt4xGZIGZDLDtj0eedg7p5DENjAwA-wM-1vk=",
        port: 2062,
    },
    TestNode {
        public_key: "yYBWzhzXO4isdPW2SzI-Sv3mcy3dUl6Kl0oFN6YpuzE=",
        private_key: "nC8F6prLAY9-86EZlfXwpOjQeghlPKf3PtT-zXsJZsA=",
        port: 2063,
    },
    TestNode {
        public_key: "QLbNxlLEUt0tieD9BX9of663gCm9WjKeqch0BIFJ3CE=",
        private_key: "JL4bKNHJMaMX_ElnaDHc6Ql74HZbovcswNOrY6fN1sU=",
        port: 2064,
    },
    TestNode {
        public_key: "2cmAmcaEFW-9val6WMoHSfTW25IxiQHes7Jwy6NqLLc=",
        private_key: "TCvfDLHLQ5RxfAs7_2Th2u1XF48ygxTLAAsUzVPBn_o=",
        port: 2065,
    },
    TestNode {
        public_key: "PfKYILyGmu0C6GFUOLw4MSLxN6gtkj0XUdvQW50A2xA=",
        private_key: "LaQgDWsXpwSQlZZXd8UEllrgpeXw9biSye4zcjLclU0=",
        port: 2066,
    },
    TestNode {
        public_key: "OQMXwPl90gr-2y-f5qZIZuVG4WEae5cc8JOB39LTNYE=",
        private_key: "blcigXzk0oeQ8J1jwYFiYHJ-pMiUqbUM4SJBlxA0MiI=",
        port: 2067,
    },
    TestNode {
        public_key: "DHpkBgnQUfpC7s4-mTfpn1_PN4dzj7hCCMF6GwO3Bus=",
        private_key: "sw7-2gPOswznF2OJHJdbfyJxdjS-P5O0lie6SdOL_08=",
        port: 2068,
    },
    TestNode {
        public_key: "odjjaYd6lL1DG8N9AXHW9LGsrKIb5IlW0KZz-rgxfXA=",
        private_key: "6JU6YHRBM_rJkuQmMaGaio_PZiyzZlTIU0qE8AHPGSE=",
        port: 2069,
    },
];

async fn stream_action(
    name: &str,
    index: usize,
    nodes: &[TestNode],
) -> (StreamConfig, ActionConfig) {
    let stream_name = format!("stream_{name}");
    let this_node = &nodes[index];
    let other_nodes: Vec<_> = nodes
        .iter()
        .filter(|node| node.public_key != this_node.public_key)
        .map(|node| {
            json!({
                "public_key": node.public_key,
                "addresses": [format!("[::1]:{}", node.port)]
            })
        })
        .collect();

    fs::write(secret_key_path(".", &stream_name), this_node.private_key)
        .await
        .unwrap();

    (
        StreamConfig {
            stream_name: stream_name.clone(),
            stream_type: "cluster".into(),
            config: json!({
                "message_timeout": "30s",
                "listen_port": this_node.port,
                "nodes": other_nodes,
            })
            .into(),
        },
        ActionConfig {
            stream_name: "stream".into(),
            filter_name: "filter".into(),
            action_name: "action".into(),
            action_type: "cluster_send".into(),
            config: json!({
                "send": format!("from {name}: <test>"),
                "to": stream_name,
            })
            .into(),
            patterns: vec!["test".into()],
        },
    )
}

#[tokio::test]
async fn two_nodes_simultaneous_startup() {
    for separate_plugin in [true /*, false */] {
        let _lock = TEST_MUTEX.lock();
        let dir = TempDir::new().unwrap();
        set_current_dir(&dir).unwrap();

        let ((mut stream_a, action_a), (mut stream_b, action_b)) = if separate_plugin {
            let mut plugin_a = Plugin::default();
            let (sa, aa) = stream_action("a", 0, &POOL[0..2]).await;
            let (mut streams_a, mut actions_a) =
                plugin_a.load_config(vec![sa], vec![aa]).await.unwrap();
            plugin_a.start().await.unwrap();

            let mut plugin_b = Plugin::default();
            let (sb, ab) = stream_action("b", 1, &POOL[0..2]).await;
            let (mut streams_b, mut actions_b) =
                plugin_b.load_config(vec![sb], vec![ab]).await.unwrap();
            plugin_b.start().await.unwrap();
            (
                (streams_a.remove(0), actions_a.remove(0)),
                (streams_b.remove(0), actions_b.remove(0)),
            )
        } else {
            let mut plugin = Plugin::default();
            let a = stream_action("a", 0, &POOL[0..2]).await;
            let b = stream_action("b", 1, &POOL[0..2]).await;
            let (mut streams, mut actions) = plugin
                .load_config(vec![a.0, b.0], vec![a.1, b.1])
                .await
                .unwrap();
            plugin.start().await.unwrap();
            (
                // Pop from the front twice: after the first remove(0), the
                // second stream/action has shifted down to index 0.
                (streams.remove(0), actions.remove(0)),
                (streams.remove(0), actions.remove(0)),
            )
        };

        for m in ["test1", "test2", "test3"] {
            let time = now().into();
            for (stream, action, from) in [
                (&mut stream_b, &action_a, "a"),
                (&mut stream_a, &action_b, "b"),
            ] {
                assert!(
                    action
                        .tx
                        .send(Exec {
                            match_: vec![m.into()],
                            time,
                        })
                        .await
                        .is_ok(),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}"
                );

                let received = timeout(Duration::from_millis(5000), stream.stream.recv()).await;

                assert!(
                    received.is_ok(),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}, did timeout"
                );
                let received = received.unwrap();
                assert!(
                    received.is_ok(),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}, remoc receive error"
                );
                let received = received.unwrap();
                assert_eq!(
                    received,
                    Some((format!("from {from}: {m}"), time)),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}"
                );
            }
        }
    }
}

#[tokio::test]
async fn n_nodes_simultaneous_startup() {
    let _lock = TEST_MUTEX.lock();

    // Ports can take some time to be really closed
    let mut port_delta = 0;

    for n in 3..=POOL.len() {
        println!("\nNODES: {n}\n");
        port_delta += n;
        // for n in 3..=3 {
        let dir = TempDir::new().unwrap();
        set_current_dir(&dir).unwrap();

        let mut plugins = Vec::with_capacity(n);
        let mut streams = Vec::with_capacity(n);
        let mut actions = Vec::with_capacity(n);
        for i in 0..n {
            let mut plugin = Plugin::default();
            let name = format!("n{i}");
            let (stream, action) = stream_action(
                &name,
                i,
                POOL[0..n]
                    .iter()
                    .cloned()
                    .map(|node| TestNode {
                        port: node.port + port_delta as u16,
                        ..node
                    })
                    .collect::<Vec<_>>()
                    .as_slice(),
            )
            .await;
            let (mut stream, mut action) = plugin
                .load_config(vec![stream], vec![action])
                .await
                .unwrap();
            plugin.start().await.unwrap();
            plugins.push(plugin);
            streams.push(stream.pop().unwrap());
            actions.push((action.pop().unwrap(), name));
        }

        for m in ["test1", "test2", "test3", "test4", "test5"] {
            let time = now().into();
            for (i, (action, from)) in actions.iter().enumerate() {
                assert!(
                    action
                        .tx
                        .send(Exec {
                            match_: vec![m.into()],
                            time,
                        })
                        .await
                        .is_ok(),
                    "n nodes: {n}, n°action: {i}, message: {m}, from: {from}"
                );

                for (j, stream) in streams.iter_mut().enumerate().filter(|(j, _)| *j != i) {
                    let received = timeout(Duration::from_millis(5000), stream.stream.recv()).await;

                    assert!(
                        received.is_ok(),
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}, did timeout"
                    );
                    let received = received.unwrap();
                    assert!(
                        received.is_ok(),
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}, remoc receive error"
                    );
                    let received = received.unwrap();
                    assert_eq!(
                        received,
                        Some((format!("from {from}: {m}"), time)),
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}"
                    );
                    println!(
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}"
                    );
                }
            }
        }

        for plugin in plugins {
            plugin.close().await.unwrap();
        }
    }
}

// TODO test:
// with inexisting nodes
// different startup times
// stopping & restarting a node mid exchange

40  plugins/reaction-plugin-cluster/src/tests/mod.rs  Normal file
@@ -0,0 +1,40 @@
use std::sync::{LazyLock, Mutex};

use serde_json::json;
use tokio::fs::write;

mod conf;
mod e2e;
mod self_;

const SECRET_KEY_A: &str = "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=";
const PUBLIC_KEY_A: &str = "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=";

const SECRET_KEY_B: &str = "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=";
const PUBLIC_KEY_B: &str = "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=";

// Tests that spawn a database in current directory must be run one at a time
static TEST_MUTEX: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));

fn stream_ok_port(port: u16) -> serde_json::Value {
    json!({
        "listen_port": port,
        "nodes": [{
            "public_key": PUBLIC_KEY_A,
        }],
        "message_timeout": "30m",
    })
}

fn stream_ok() -> serde_json::Value {
    stream_ok_port(2048)
}

async fn insert_secret_key() {
    write(
        "./secret_key_stream.txt",
        b"pBPcJ32bt4xGZIGZDLDtj0eedg7p5DENjAwA-wM-1vk=",
    )
    .await
    .unwrap();
}

78  plugins/reaction-plugin-cluster/src/tests/self_.rs  Normal file
@@ -0,0 +1,78 @@
use std::{env::set_current_dir, time::Duration};

use assert_fs::TempDir;
use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig};
use serde_json::json;
use tokio::time::timeout;
use treedb::time::now;

use crate::{Plugin, tests::insert_secret_key};

use super::{TEST_MUTEX, stream_ok_port};

#[tokio::test]
async fn run_with_self() {
    let _lock = TEST_MUTEX.lock();
    let dir = TempDir::new().unwrap();
    set_current_dir(&dir).unwrap();
    insert_secret_key().await;

    for self_ in [true, false] {
        let mut plugin = Plugin::default();
        let (mut streams, mut actions) = plugin
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "cluster".into(),
                    config: stream_ok_port(2052).into(),
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "cluster_send".into(),
                    config: json!({
                        "send": "message <test>",
                        "to": "stream",
                        "self": self_,
                    })
                    .into(),
                    patterns: vec!["test".into()],
                }],
            )
            .await
            .unwrap();

        let mut stream = streams.pop().unwrap();
        let action = actions.pop().unwrap();
        assert!(stream.standalone);
        assert!(plugin.start().await.is_ok());

        for m in ["test1", "test2", "test3", " a a a aa a a"] {
            let time = now().into();
            assert!(
                action
                    .tx
                    .send(Exec {
                        match_: vec![m.into()],
                        time,
                    })
                    .await
                    .is_ok()
            );
            if self_ {
                assert_eq!(
                    stream.stream.recv().await.unwrap().unwrap(),
                    (format!("message {m}"), time),
                );
            } else {
                // Don't receive anything
                assert!(
                    timeout(Duration::from_millis(100), stream.stream.recv())
                        .await
                        .is_err()
                );
            }
        }
    }
}

26  plugins/reaction-plugin-ipset/Cargo.toml  Normal file
@@ -0,0 +1,26 @@
[package]
name = "reaction-plugin-ipset"
description = "ipset plugin for reaction"
version = "1.0.0"
edition = "2024"
authors = ["ppom <reaction@ppom.me>"]
license = "AGPL-3.0"
homepage = "https://reaction.ppom.me"
repository = "https://framagit.org/ppom/reaction"
keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"]
default-run = "reaction-plugin-ipset"

[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
remoc.workspace = true
reaction-plugin.path = "../reaction-plugin"
serde.workspace = true
serde_json.workspace = true
ipset = "0.9.0"

[package.metadata.deb]
section = "net"
assets = [
    [ "target/release/reaction-plugin-ipset", "/usr/bin/reaction-plugin-ipset", "755" ],
]
depends = ["libipset-dev", "reaction"]
plugins/reaction-plugin-ipset/src/action.rs (new file, +419)
@@ -0,0 +1,419 @@
use std::{fmt::Debug, u32, usize};

use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration};
use remoc::rch::mpsc as remocMpsc;
use serde::{Deserialize, Serialize};

use crate::ipset::{CreateSet, IpSet, Order, SetChain, Version};

#[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
pub enum IpVersion {
    #[default]
    #[serde(rename = "ip")]
    Ip,
    #[serde(rename = "ipv4")]
    Ipv4,
    #[serde(rename = "ipv6")]
    Ipv6,
}
impl Debug for IpVersion {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                IpVersion::Ipv4 => "ipv4",
                IpVersion::Ipv6 => "ipv6",
                IpVersion::Ip => "ip",
            }
        )
    }
}

#[derive(Default, Serialize, Deserialize)]
pub enum AddDel {
    #[default]
    #[serde(alias = "add")]
    Add,
    #[serde(alias = "del")]
    Del,
}

/// User-facing action options
#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ActionOptions {
    /// The set that should be used by this action
    pub set: String,
    /// The pattern name of the IP.
    /// Defaults to "ip"
    #[serde(default = "serde_ip")]
    pub pattern: String,
    #[serde(skip)]
    ip_index: usize,
    /// Whether the action is to "add" or "del" the ip from the set
    #[serde(default)]
    action: AddDel,

    #[serde(flatten)]
    pub set_options: SetOptions,
}

fn serde_ip() -> String {
    "ip".into()
}

impl ActionOptions {
    pub fn set_ip_index(&mut self, patterns: Vec<String>) -> Result<(), ()> {
        self.ip_index = patterns
            .into_iter()
            .enumerate()
            .filter(|(_, name)| name == &self.pattern)
            .next()
            .ok_or(())?
            .0;
        Ok(())
    }
}
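// Example (a sketch, pattern names borrowed from tests.rs): with filter
// patterns ["name", "ip", "ip2"] and the default pattern "ip",
// `set_ip_index` stores index 1, and `handle_exec` below reads the IP
// from exec.match_[1]:
//
//     let mut opts: ActionOptions =
//         serde_json::from_value(serde_json::json!({ "set": "test" })).unwrap();
//     opts.set_ip_index(vec!["name".into(), "ip".into(), "ip2".into()])
//         .unwrap(); // the default pattern "ip" is found at index 1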

/// Merged set options
#[derive(Default, Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
pub struct SetOptions {
    /// The IP type.
    /// Defaults to `ip`.
    /// If `ipv4`: creates an IPv4 set with this name
    /// If `ipv6`: creates an IPv6 set with this name
    /// If `ip`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6'
    /// *Merged set-wise*.
    #[serde(default)]
    version: Option<IpVersion>,
    /// Chains where the IP set should be inserted.
    /// Defaults to `["INPUT", "FORWARD"]`
    /// *Merged set-wise*.
    #[serde(default)]
    chains: Option<Vec<String>>,
    /// Optional timeout, letting linux/netfilter handle set removal instead of reaction.
    /// Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action.
    /// Same syntax as after and retryperiod in reaction.
    /// *Merged set-wise*.
    #[serde(skip_serializing_if = "Option::is_none")]
    timeout: Option<String>,
    #[serde(skip)]
    timeout_u32: Option<u32>,
    /// Target that iptables should use when the IP is encountered.
    /// Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain.
    /// *Merged set-wise*.
    #[serde(default)]
    target: Option<String>,
}

impl SetOptions {
    pub fn merge(&mut self, options: &SetOptions) -> Result<(), String> {
        // merge two Option<T> and fail if there is a conflict
        fn inner_merge<T: Eq + Clone + std::fmt::Debug>(
            a: &mut Option<T>,
            b: &Option<T>,
            name: &str,
        ) -> Result<(), String> {
            match (&a, &b) {
                (Some(aa), Some(bb)) => {
                    if aa != bb {
                        return Err(format!(
                            "Conflicting options for {name}: `{aa:?}` and `{bb:?}`"
                        ));
                    }
                }
                (None, Some(_)) => {
                    *a = b.clone();
                }
                _ => (),
            };
            Ok(())
        }

        inner_merge(&mut self.version, &options.version, "version")?;
        inner_merge(&mut self.timeout, &options.timeout, "timeout")?;
        inner_merge(&mut self.chains, &options.chains, "chains")?;
        inner_merge(&mut self.target, &options.target, "target")?;

        if let Some(timeout) = &self.timeout {
            let duration = parse_duration(timeout)
                .map_err(|err| format!("failed to parse timeout: {}", err))?
                .as_secs();
            if duration > u32::MAX as u64 {
                return Err(format!(
                    "timeout is limited to {} seconds (approx {} days)",
                    u32::MAX,
                    49_000
                ));
            }
            self.timeout_u32 = Some(duration as u32);
        }

        Ok(())
    }
}
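// Example (a sketch): `merge` is applied per set name in main.rs; every
// action referring to the same set contributes its flattened SetOptions.
// Missing fields are filled in, identical values are accepted, and two
// different concrete values are rejected:
//
//     let mut a: SetOptions =
//         serde_json::from_value(json!({ "chains": ["INPUT"] })).unwrap();
//     let b: SetOptions =
//         serde_json::from_value(json!({ "timeout": "3h" })).unwrap();
//     a.merge(&b).unwrap(); // disjoint fields merge; timeout parsed to seconds
//     let c: SetOptions =
//         serde_json::from_value(json!({ "chains": ["FORWARD"] })).unwrap();
//     assert!(a.merge(&c).is_err()); // same field, different value: conflict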

pub struct Set {
    sets: SetNames,
    chains: Vec<String>,
    timeout: Option<u32>,
    target: String,
}

impl Set {
    pub fn from(name: String, options: SetOptions) -> Self {
        Self {
            sets: SetNames::new(name, options.version),
            timeout: options.timeout_u32,
            target: options.target.unwrap_or("DROP".into()),
            chains: options
                .chains
                .unwrap_or(vec!["INPUT".into(), "FORWARD".into()]),
        }
    }

    pub async fn init(&self, ipset: &mut IpSet) -> Result<(), (usize, String)> {
        for (set, version) in [
            (&self.sets.ipv4, Version::IPv4),
            (&self.sets.ipv6, Version::IPv6),
        ] {
            if let Some(set) = set {
                // create set
                ipset
                    .order(Order::CreateSet(CreateSet {
                        name: set.clone(),
                        version,
                        timeout: self.timeout,
                    }))
                    .await
                    .map_err(|err| (0, err.to_string()))?;
                // insert set in chains
                for (i, chain) in self.chains.iter().enumerate() {
                    ipset
                        .order(Order::InsertSet(SetChain {
                            set: set.clone(),
                            chain: chain.clone(),
                            target: self.target.clone(),
                        }))
                        .await
                        .map_err(|err| (i + 1, err.to_string()))?;
                }
            }
        }
        Ok(())
    }

    pub async fn destroy(&self, ipset: &mut IpSet, until: Option<usize>) {
        for set in [&self.sets.ipv4, &self.sets.ipv6] {
            if let Some(set) = set {
                // `until` is the failed step from `init` (0 = set creation,
                // i + 1 = insertion into chain i); saturate so a failure at
                // step 0 removes nothing instead of underflowing
                for chain in self
                    .chains
                    .iter()
                    .take(until.map(|until| until.saturating_sub(1)).unwrap_or(usize::MAX))
                {
                    let _ = ipset
                        .order(Order::RemoveSet(SetChain {
                            set: set.clone(),
                            chain: chain.clone(),
                            target: self.target.clone(),
                        }))
                        .await;
                }
                if until.is_none_or(|until| until != 0) {
                    let _ = ipset.order(Order::DestroySet(set.clone())).await;
                }
            }
        }
    }
}

pub struct SetNames {
    pub ipv4: Option<String>,
    pub ipv6: Option<String>,
}

impl SetNames {
    pub fn new(name: String, version: Option<IpVersion>) -> Self {
        Self {
            ipv4: match version {
                Some(IpVersion::Ipv4) => Some(name.clone()),
                Some(IpVersion::Ipv6) => None,
                None | Some(IpVersion::Ip) => Some(format!("{}v4", name)),
            },
            ipv6: match version {
                Some(IpVersion::Ipv4) => None,
                Some(IpVersion::Ipv6) => Some(name),
                None | Some(IpVersion::Ip) => Some(format!("{}v6", name)),
            },
        }
    }
}
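// Example (a sketch): with the default `ip` version one configured name fans
// out to two kernel sets; an explicit version keeps the name as-is:
//
//     let both = SetNames::new("blocked".into(), None);
//     assert_eq!(both.ipv4.as_deref(), Some("blockedv4"));
//     assert_eq!(both.ipv6.as_deref(), Some("blockedv6"));
//     let only6 = SetNames::new("blocked".into(), Some(IpVersion::Ipv6));
//     assert_eq!((only6.ipv4, only6.ipv6.as_deref()), (None, Some("blocked")));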

pub struct Action {
    ipset: IpSet,
    rx: remocMpsc::Receiver<Exec>,
    shutdown: ShutdownToken,
    sets: SetNames,
    // index of pattern ip in match vec
    ip_index: usize,
    action: AddDel,
}

impl Action {
    pub fn new(
        ipset: IpSet,
        shutdown: ShutdownToken,
        rx: remocMpsc::Receiver<Exec>,
        options: ActionOptions,
    ) -> Result<Self, String> {
        Ok(Action {
            ipset,
            rx,
            shutdown,
            sets: SetNames::new(options.set, options.set_options.version),
            ip_index: options.ip_index,
            action: options.action,
        })
    }

    pub async fn serve(mut self) {
        loop {
            let event = tokio::select! {
                exec = self.rx.recv() => Some(exec),
                _ = self.shutdown.wait() => None,
            };
            match event {
                // shutdown asked
                None => break,
                // channel closed
                Some(Ok(None)) => break,
                // error from channel
                Some(Err(err)) => {
                    eprintln!("ERROR {err}");
                    break;
                }
                // ok
                Some(Ok(Some(exec))) => {
                    if let Err(err) = self.handle_exec(exec).await {
                        eprintln!("ERROR {err}");
                        break;
                    }
                }
            }
        }
        // eprintln!("DEBUG Asking for shutdown");
        // self.shutdown.ask_shutdown();
    }

    async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> {
        // safeguard against Vec::remove's panic
        if exec.match_.len() <= self.ip_index {
            return Err(format!(
                "match received from reaction is smaller than expected. looking for index {} but size is {}. this is a bug!",
                self.ip_index,
                exec.match_.len()
            ));
        }
        let ip = exec.match_.remove(self.ip_index);
        // select set
        let set = match (&self.sets.ipv4, &self.sets.ipv6) {
            (None, None) => return Err(format!("action is neither IPv4 nor IPv6, this is a bug!")),
            (None, Some(set)) => set,
            (Some(set), None) => set,
            (Some(set4), Some(set6)) => {
                if ip.contains(':') {
                    set6
                } else {
                    set4
                }
            }
        };
        // add/remove ip to set
        self.ipset
            .order(match self.action {
                AddDel::Add => Order::Add(set.clone(), ip),
                AddDel::Del => Order::Del(set.clone(), ip),
            })
            .await?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use crate::action::{IpVersion, SetOptions};

    #[tokio::test]
    async fn set_options_merge() {
        let s1 = SetOptions {
            version: None,
            chains: None,
            timeout: None,
            timeout_u32: None,
            target: None,
        };
        let s2 = SetOptions {
            version: Some(IpVersion::Ipv4),
            chains: Some(vec!["INPUT".into()]),
            timeout: Some("3h".into()),
            timeout_u32: Some(3 * 3600),
            target: Some("DROP".into()),
        };
        assert_ne!(s1, s2);
        assert_eq!(s1, SetOptions::default());

        {
            // s2 can be merged in s1
            let mut s1 = s1.clone();
            assert!(s1.merge(&s2).is_ok());
            assert_eq!(s1, s2);
        }

        {
            // s1 can be merged in s2
            let mut s2 = s2.clone();
            assert!(s2.merge(&s1).is_ok());
        }

        {
            // s1 can be merged in itself
            let mut s3 = s1.clone();
            assert!(s3.merge(&s1).is_ok());
            assert_eq!(s1, s3);
        }

        {
            // s2 can be merged in itself
            let mut s3 = s2.clone();
            assert!(s3.merge(&s2).is_ok());
            assert_eq!(s2, s3);
        }

        for s3 in [
            SetOptions {
                version: Some(IpVersion::Ipv6),
                ..Default::default()
            },
            SetOptions {
                chains: Some(vec!["damn".into()]),
                ..Default::default()
            },
            SetOptions {
                timeout: Some("30min".into()),
                ..Default::default()
            },
            SetOptions {
                target: Some("log-refuse".into()),
                ..Default::default()
            },
        ] {
            // none with some is ok
            assert!(s3.clone().merge(&s1).is_ok(), "s3: {s3:?}");
            assert!(s1.clone().merge(&s3).is_ok(), "s3: {s3:?}");
            // different some is ko
            assert!(s3.clone().merge(&s2).is_err(), "s3: {s3:?}");
            assert!(s2.clone().merge(&s3).is_err(), "s3: {s3:?}");
        }
    }
}
plugins/reaction-plugin-ipset/src/ipset.rs (new file, +248)
@@ -0,0 +1,248 @@
use std::{collections::BTreeMap, fmt::Display, net::Ipv4Addr, process::Command, thread};

use ipset::{
    Session,
    types::{HashNet, NetDataType, Parse},
};
use tokio::sync::{mpsc, oneshot};

#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub enum Version {
    IPv4,
    IPv6,
}
impl Display for Version {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Version::IPv4 => "IPv4",
            Version::IPv6 => "IPv6",
        })
    }
}

#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct CreateSet {
    pub name: String,
    pub version: Version,
    pub timeout: Option<u32>,
}

#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct SetChain {
    pub set: String,
    pub chain: String,
    pub target: String,
}

#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub enum Order {
    CreateSet(CreateSet),
    DestroySet(String),
    InsertSet(SetChain),
    RemoveSet(SetChain),
    Add(String, String),
    Del(String, String),
}

#[derive(Clone)]
pub struct IpSet {
    tx: mpsc::Sender<OrderType>,
}

impl Default for IpSet {
    fn default() -> Self {
        let (tx, rx) = mpsc::channel(1);
        thread::spawn(move || IPsetManager::default().serve(rx));
        Self { tx }
    }
}

impl IpSet {
    pub async fn order(&mut self, order: Order) -> Result<(), IpSetError> {
        let (tx, rx) = oneshot::channel();
        self.tx
            .send((order, tx))
            .await
            .map_err(|err| IpSetError::Thread(format!("ipset thread has quit: {err}")))?;
        rx.await
            .map_err(|err| IpSetError::Thread(format!("ipset thread didn't respond: {err}")))?
            .map_err(IpSetError::IpSet)
    }
}
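// Usage sketch (set name and address illustrative), from a function returning
// Result<(), IpSetError>: `Default` spawns the blocking manager thread below,
// and `order` ships one Order over the mpsc channel and awaits the oneshot
// reply:
//
//     let mut ipset = IpSet::default();
//     ipset
//         .order(Order::CreateSet(CreateSet {
//             name: "blockedv4".into(),
//             version: Version::IPv4,
//             timeout: None,
//         }))
//         .await?;
//     ipset
//         .order(Order::Add("blockedv4".into(), "192.0.2.1".into()))
//         .await?;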

pub enum IpSetError {
    Thread(String),
    IpSet(()),
}
impl Display for IpSetError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                IpSetError::Thread(err) => err,
                IpSetError::IpSet(()) => "ipset error",
            }
        )
    }
}
impl From<IpSetError> for String {
    fn from(value: IpSetError) -> Self {
        match value {
            IpSetError::Thread(err) => err,
            IpSetError::IpSet(()) => "ipset error".to_string(),
        }
    }
}

pub type OrderType = (Order, oneshot::Sender<Result<(), ()>>);

struct Set {
    session: Session<HashNet>,
    version: Version,
}

#[derive(Default)]
struct IPsetManager {
    // IPset sessions
    sessions: BTreeMap<String, Set>,
}

impl IPsetManager {
    fn serve(mut self, mut rx: mpsc::Receiver<OrderType>) {
        loop {
            match rx.blocking_recv() {
                None => break,
                Some((order, response)) => {
                    let result = self.handle_order(order);
                    let _ = response.send(result);
                }
            }
        }
    }

    fn handle_order(&mut self, order: Order) -> Result<(), ()> {
        match order {
            Order::CreateSet(CreateSet {
                name,
                version,
                timeout,
            }) => {
                eprintln!("INFO creating {version} set {name}");
                let mut session: Session<HashNet> = Session::new(name.clone());
                session
                    .create(|builder| {
                        let builder = if let Some(timeout) = timeout {
                            builder.with_timeout(timeout)?
                        } else {
                            builder
                        };
                        builder.with_ipv6(version == Version::IPv6)?.build()
                    })
                    .map_err(|err| eprintln!("ERROR Could not create set {name}: {err}"))?;

                self.sessions.insert(name, Set { session, version });
            }
            Order::DestroySet(set) => {
                if let Some(mut session) = self.sessions.remove(&set) {
                    eprintln!("INFO destroying {} set {set}", session.version);
                    session
                        .session
                        .destroy()
                        .map_err(|err| eprintln!("ERROR Could not destroy set {set}: {err}"))?;
                }
            }

            Order::InsertSet(options) => self.insert_remove_set(options, true)?,
            Order::RemoveSet(options) => self.insert_remove_set(options, false)?,

            Order::Add(set, ip) => self.insert_remove_ip(set, ip, true)?,
            Order::Del(set, ip) => self.insert_remove_ip(set, ip, false)?,
        };
        Ok(())
    }

    fn insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), ()> {
        self._insert_remove_ip(set, ip, insert)
            .map_err(|err| eprintln!("ERROR {err}"))
    }
    fn _insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), String> {
        let session = self.sessions.get_mut(&set).ok_or(format!(
            "No set handled by this plugin with this name: {set}. This likely is a bug."
        ))?;

        let mut net_data = NetDataType::new(Ipv4Addr::LOCALHOST, 0);
        net_data
            .parse(&ip)
            .map_err(|err| format!("`{ip}` is not recognized as an IP: {err}"))?;

        if insert {
            session.session.add(net_data, &[])
        } else {
            session.session.del(net_data)
        }
        .map_err(|err| {
            format!(
                "Could not {} `{ip}` in set {set}: {err}",
                if insert { "add" } else { "del" }
            )
        })?;

        Ok(())
    }

    fn insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), ()> {
        self._insert_remove_set(options, insert)
            .map_err(|err| eprintln!("ERROR {err}"))
    }
    fn _insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), String> {
        let SetChain { set, chain, target } = options;

        let version = self
            .sessions
            .get(&set)
            .ok_or(format!(
                "No set managed by this plugin with this name: {set}"
            ))?
            .version;

        let (verb, verbing, from) = if insert {
            ("insert", "inserting", "in")
        } else {
            ("remove", "removing", "from")
        };

        eprintln!("INFO {verbing} {version} set {set} {from} chain {chain}");

        let command = match version {
            Version::IPv4 => "iptables",
            Version::IPv6 => "ip6tables",
        };

        let mut child = Command::new(command)
            .args([
                "-w",
                if insert { "-I" } else { "-D" },
                &chain,
                "-m",
                "set",
                "--match-set",
                &set,
                "src",
                "-j",
                &target,
            ])
            .spawn()
            .map_err(|err| format!("Could not {verb} ipset {set} {from} chain {chain}: Could not execute {command}: {err}"))?;

        let exit = child
            .wait()
            .map_err(|err| format!("Could not {verb} ipset {set} {from} chain {chain}: {err}"))?;

        if exit.success() {
            Ok(())
        } else {
            Err(format!(
                "Could not {verb} ipset: exit code {}",
                exit.code()
                    .map(|c| c.to_string())
                    .unwrap_or_else(|| "<unknown>".to_string())
            ))
        }
    }
}
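For reference, with the default options the `Command` spawned above is equivalent to running `iptables -w -I INPUT -m set --match-set <set> src -j DROP` (or `ip6tables` for an IPv6 set); `-w` makes iptables wait for the xtables lock instead of failing while another process holds it.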
plugins/reaction-plugin-ipset/src/main.rs (new file, +159)
@@ -0,0 +1,159 @@
use std::collections::{BTreeMap, BTreeSet};

use reaction_plugin::{
    ActionConfig, ActionImpl, Hello, Manifest, PluginInfo, RemoteError, RemoteResult, StreamConfig,
    StreamImpl,
    shutdown::{ShutdownController, ShutdownToken},
};
use remoc::rtc;

use crate::{
    action::{Action, ActionOptions, Set, SetOptions},
    ipset::IpSet,
};

#[cfg(test)]
mod tests;

mod action;
mod ipset;

#[tokio::main]
async fn main() {
    let plugin = Plugin::default();
    reaction_plugin::main_loop(plugin).await;
}

#[derive(Default)]
struct Plugin {
    ipset: IpSet,
    sets: Vec<Set>,
    actions: Vec<Action>,
    shutdown: ShutdownController,
}

impl PluginInfo for Plugin {
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::default(),
            actions: BTreeSet::from(["ipset".into()]),
        })
    }

    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        if !streams.is_empty() {
            return Err("This plugin can't handle any stream type".into());
        }

        let mut ret_actions = Vec::with_capacity(actions.len());
        let mut set_options: BTreeMap<String, SetOptions> = BTreeMap::new();

        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "ipset" {
                return Err("This plugin can't handle other action types than ipset".into());
            }

            let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| {
                format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}")
            })?;

            options.set_ip_index(patterns).map_err(|_|
                format!(
                    "No pattern with name {} in filter {stream_name}.{filter_name}. Try setting the option `pattern` to your pattern name of type 'ip'",
                    &options.pattern
                )
            )?;

            // Merge options set-wise
            set_options
                .entry(options.set.clone())
                .or_default()
                .merge(&options.set_options)
                .map_err(|err| format!("ipset {}: {err}", options.set))?;

            let (tx, rx) = remoc::rch::mpsc::channel(1);
            self.actions.push(Action::new(
                self.ipset.clone(),
                self.shutdown.token(),
                rx,
                options,
            )?);

            ret_actions.push(ActionImpl { tx });
        }

        // Build all sets from the merged options
        while let Some((name, options)) = set_options.pop_first() {
            self.sets.push(Set::from(name, options));
        }

        Ok((vec![], ret_actions))
    }

    async fn start(&mut self) -> RemoteResult<()> {
        self.shutdown.delegate().handle_quit_signals()?;

        let mut first_error = None;
        for (i, set) in self.sets.iter().enumerate() {
            // Keep the first error and stop initializing further sets
            if let Err((failed_step, err)) = set.init(&mut self.ipset).await {
                first_error = Some((i, failed_step, RemoteError::Plugin(err)));
                break;
            }
        }
        // Destroy already-initialized sets if an error occurred
        if let Some((last_set, failed_step, err)) = first_error {
            eprintln!("DEBUG last_set: {last_set} failed_step: {failed_step} err: {err}");
            for (curr_set, set) in self.sets.iter().enumerate().take(last_set + 1) {
                let until = if last_set == curr_set {
                    Some(failed_step)
                } else {
                    None
                };
                set.destroy(&mut self.ipset, until).await;
            }
            return Err(err);
        }

        // Launch a task that will destroy the sets on shutdown
        tokio::spawn(destroy_sets_at_shutdown(
            self.ipset.clone(),
            std::mem::take(&mut self.sets),
            self.shutdown.token(),
        ));

        // Launch all actions
        while let Some(action) = self.actions.pop() {
            tokio::spawn(async move { action.serve().await });
        }
        self.actions = Default::default();

        Ok(())
    }

    async fn close(self) -> RemoteResult<()> {
        self.shutdown.ask_shutdown();
        self.shutdown.wait_all_task_shutdown().await;
        Ok(())
    }
}

async fn destroy_sets_at_shutdown(mut ipset: IpSet, sets: Vec<Set>, shutdown: ShutdownToken) {
    shutdown.wait().await;
    for set in sets {
        set.destroy(&mut ipset, None).await;
    }
}
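A worked example of the rollback in `start` above (hypothetical failure, names invented for illustration): with three merged sets s0, s1, s2, suppose `s2.init` fails while inserting its second chain and returns `Err((2, err))`. Then `first_error = Some((2, 2, err))`, s0 and s1 are destroyed completely (`until = None`), and s2 is destroyed with `until = Some(2)`, which removes only its first chain rule before destroying the set, leaving netfilter as it was before `start` ran.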
plugins/reaction-plugin-ipset/src/tests.rs (new file, +253)
@@ -0,0 +1,253 @@
use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig, Value};
use serde_json::json;

use crate::Plugin;

#[tokio::test]
async fn conf_stream() {
    // No stream is supported by ipset
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "ipset".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_err()
    );

    // Nothing is ok
    assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok());
}

#[tokio::test]
async fn conf_action_standalone() {
    let p = vec!["name".into(), "ip".into(), "ip2".into()];
    let p_noip = vec!["name".into(), "ip2".into()];

    for (is_ok, conf, patterns) in [
        // minimal set
        (true, json!({ "set": "test" }), &p),
        // missing set key
        (false, json!({}), &p),
        (false, json!({ "version": "ipv4" }), &p),
        // unknown key
        (false, json!({ "set": "test", "unknown": "yes" }), &p),
        (false, json!({ "set": "test", "ip_index": 1 }), &p),
        (false, json!({ "set": "test", "timeout_u32": 1 }), &p),
        // pattern //
        (true, json!({ "set": "test" }), &p),
        (true, json!({ "set": "test", "pattern": "ip" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p_noip),
        // unknown pattern "ip"
        (false, json!({ "set": "test" }), &p_noip),
        (false, json!({ "set": "test", "pattern": "ip" }), &p_noip),
        // unknown pattern
        (false, json!({ "set": "test", "pattern": "unknown" }), &p),
        (false, json!({ "set": "test", "pattern": "uwu" }), &p_noip),
        // bad type
        (false, json!({ "set": "test", "pattern": 0 }), &p_noip),
        (false, json!({ "set": "test", "pattern": true }), &p_noip),
        // action //
        (true, json!({ "set": "test", "action": "add" }), &p),
        (true, json!({ "set": "test", "action": "del" }), &p),
        // unknown action
        (false, json!({ "set": "test", "action": "create" }), &p),
        (false, json!({ "set": "test", "action": "insert" }), &p),
        (false, json!({ "set": "test", "action": "delete" }), &p),
        (false, json!({ "set": "test", "action": "destroy" }), &p),
        // bad type
        (false, json!({ "set": "test", "action": true }), &p),
        (false, json!({ "set": "test", "action": 1 }), &p),
        // ip version //
        // ok
        (true, json!({ "set": "test", "version": "ipv4" }), &p),
        (true, json!({ "set": "test", "version": "ipv6" }), &p),
        (true, json!({ "set": "test", "version": "ip" }), &p),
        // unknown version
        (false, json!({ "set": "test", "version": 4 }), &p),
        (false, json!({ "set": "test", "version": 6 }), &p),
        (false, json!({ "set": "test", "version": 46 }), &p),
        (false, json!({ "set": "test", "version": "5" }), &p),
        (false, json!({ "set": "test", "version": "ipv5" }), &p),
        (false, json!({ "set": "test", "version": "4" }), &p),
        (false, json!({ "set": "test", "version": "6" }), &p),
        (false, json!({ "set": "test", "version": "46" }), &p),
        // bad type
        (false, json!({ "set": "test", "version": true }), &p),
        // chains //
        // everything is fine really
        (true, json!({ "set": "test", "chains": [] }), &p),
        (true, json!({ "set": "test", "chains": ["INPUT"] }), &p),
        (true, json!({ "set": "test", "chains": ["FORWARD"] }), &p),
        (
            true,
            json!({ "set": "test", "chains": ["custom_chain"] }),
            &p,
        ),
        (
            true,
            json!({ "set": "test", "chains": ["INPUT", "FORWARD"] }),
            &p,
        ),
        (
            true,
            json!({
                "set": "test",
                "chains": ["INPUT", "FORWARD", "my_iptables_chain"]
            }),
            &p,
        ),
        // timeout //
        (true, json!({ "set": "test", "timeout": "1m" }), &p),
        (true, json!({ "set": "test", "timeout": "3 days" }), &p),
        // bad
        (false, json!({ "set": "test", "timeout": "3 dayz" }), &p),
        (false, json!({ "set": "test", "timeout": 12 }), &p),
        // target //
        // anything is fine too
        (true, json!({ "set": "test", "target": "DROP" }), &p),
        (true, json!({ "set": "test", "target": "ACCEPT" }), &p),
        (true, json!({ "set": "test", "target": "RETURN" }), &p),
        (true, json!({ "set": "test", "target": "custom_chain" }), &p),
        // bad
        (false, json!({ "set": "test", "target": 11 }), &p),
        (false, json!({ "set": "test", "target": ["DROP"] }), &p),
    ] {
        let res = Plugin::default()
            .load_config(
                vec![],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "ipset".into(),
                    config: conf.clone().into(),
                    patterns: patterns.clone(),
                }],
            )
            .await;

        assert!(
            res.is_ok() == is_ok,
            "conf: {:?}, must be ok: {is_ok}, result: {:?}",
            conf,
            // empty Result::Ok because ActionImpl is not Debug
            res.map(|_| ())
        );
    }
}

// TODO
#[tokio::test]
async fn conf_action_merge() {
    let mut plugin = Plugin::default();

    let set1 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action1".into(),
        action_type: "ipset".into(),
        config: json!({
            "set": "test",
            "target": "DROP",
            "chains": ["INPUT"],
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };

    let set2 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "ipset".into(),
        config: json!({
            "set": "test",
            "target": "DROP",
            "version": "ip",
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };

    let set3 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "ipset".into(),
        config: json!({
            "set": "test",
            "action": "del",
        })
        .into(),
        patterns: vec!["ip".into()],
    };

    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1.clone(),
                // Same set, adding options, no conflict
                set2.clone(),
                // Same set, no new options, no conflict
                set3.clone(),
                // Unrelated set, so no conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "ipset".into(),
                    config: json!({
                        "set": "test2",
                        "target": "target1",
                        "version": "ipv6",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;

    assert!(res.is_ok(), "res: {:?}", res.map(|_| ()));

    // Another set with conflict is not ok
    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1,
                // Same set, adding options, no conflict
                set2,
                // Same set, no new options, no conflict
                set3,
                // Another set with conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "ipset".into(),
                    config: json!({
                        "set": "test",
                        "target": "target2",
                        "action": "del",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;
    assert!(res.is_err(), "res: {:?}", res.map(|_| ()));
}
plugins/reaction-plugin-nftables/Cargo.toml (new file, +13)
@@ -0,0 +1,13 @@
[package]
name = "reaction-plugin-nftables"
version = "0.1.0"
edition = "2024"

[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
remoc.workspace = true
reaction-plugin.path = "../reaction-plugin"
serde.workspace = true
serde_json.workspace = true
nftables = { version = "0.6.3", features = ["tokio"] }
libnftables1-sys = { version = "0.1.1" }
plugins/reaction-plugin-nftables/src/action.rs (new file, +493)
@@ -0,0 +1,493 @@
use std::{
    borrow::Cow,
    collections::HashSet,
    fmt::{Debug, Display},
    u32,
};

use nftables::{
    batch::Batch,
    expr::Expression,
    schema::{Element, NfListObject, Rule, SetFlag, SetType, SetTypeValue},
    stmt::Statement,
    types::{NfFamily, NfHook},
};
use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration};
use remoc::rch::mpsc as remocMpsc;
use serde::{Deserialize, Serialize};

use crate::{helpers::Version, nft::NftClient};

#[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
pub enum IpVersion {
    #[default]
    #[serde(rename = "ip")]
    Ip,
    #[serde(rename = "ipv4")]
    Ipv4,
    #[serde(rename = "ipv6")]
    Ipv6,
}
impl Debug for IpVersion {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                IpVersion::Ipv4 => "ipv4",
                IpVersion::Ipv6 => "ipv6",
                IpVersion::Ip => "ip",
            }
        )
    }
}

#[derive(Default, Debug, Serialize, Deserialize)]
pub enum AddDel {
    #[default]
    #[serde(alias = "add")]
    Add,
    #[serde(alias = "delete")]
    Delete,
}

/// User-facing action options
#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ActionOptions {
    /// The set that should be used by this action
    pub set: String,
    /// The pattern name of the IP.
    /// Defaults to "ip"
    #[serde(default = "serde_ip")]
    pub pattern: String,
    #[serde(skip)]
    ip_index: usize,
    /// Whether the action is to "add" or "delete" the ip from the set
    #[serde(default)]
    action: AddDel,

    #[serde(flatten)]
    pub set_options: SetOptions,
}

fn serde_ip() -> String {
    "ip".into()
}

impl ActionOptions {
    pub fn set_ip_index(&mut self, patterns: Vec<String>) -> Result<(), ()> {
        self.ip_index = patterns
            .into_iter()
            .enumerate()
            .filter(|(_, name)| name == &self.pattern)
            .next()
            .ok_or(())?
            .0;
        Ok(())
    }
}

/// Merged set options
#[derive(Default, Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
pub struct SetOptions {
    /// The IP type.
    /// Defaults to `ip`.
    /// If `ipv4`: creates an IPv4 set with this name
    /// If `ipv6`: creates an IPv6 set with this name
    /// If `ip`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6'
    /// *Merged set-wise*.
    #[serde(default)]
    version: Option<IpVersion>,
    /// Netfilter hooks whose chains the set's rule should be inserted in.
    /// Defaults to `["input", "forward"]`
    /// *Merged set-wise*.
    #[serde(default)]
    hooks: Option<Vec<RHook>>,
    /// Optional timeout, letting linux/netfilter handle set removal instead of reaction.
    /// Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action.
    /// Same syntax as after and retryperiod in reaction.
    /// *Merged set-wise*.
    #[serde(skip_serializing_if = "Option::is_none")]
    timeout: Option<String>,
    #[serde(skip)]
    timeout_u32: Option<u32>,
    /// Verdict statement to apply when the IP is encountered.
    /// Defaults to drop, but can also be accept, continue or return.
    /// *Merged set-wise*.
    #[serde(default)]
    target: Option<RStatement>,
}

impl SetOptions {
    pub fn merge(&mut self, options: &SetOptions) -> Result<(), String> {
        // merge two Option<T> and fail if there is a conflict
        fn inner_merge<T: Eq + Clone + std::fmt::Debug>(
            a: &mut Option<T>,
            b: &Option<T>,
            name: &str,
        ) -> Result<(), String> {
            match (&a, &b) {
                (Some(aa), Some(bb)) => {
                    if aa != bb {
                        return Err(format!(
                            "Conflicting options for {name}: `{aa:?}` and `{bb:?}`"
                        ));
                    }
                }
                (None, Some(_)) => {
                    *a = b.clone();
                }
                _ => (),
            };
            Ok(())
        }

        inner_merge(&mut self.version, &options.version, "version")?;
        inner_merge(&mut self.timeout, &options.timeout, "timeout")?;
        inner_merge(&mut self.hooks, &options.hooks, "hooks")?;
        inner_merge(&mut self.target, &options.target, "target")?;

        if let Some(timeout) = &self.timeout {
            let duration = parse_duration(timeout)
                .map_err(|err| format!("failed to parse timeout: {}", err))?
                .as_secs();
            if duration > u32::MAX as u64 {
                return Err(format!(
                    "timeout is limited to {} seconds (approx {} days)",
                    u32::MAX,
                    49_000
                ));
            }
            self.timeout_u32 = Some(duration as u32);
        }

        Ok(())
    }
}

#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RHook {
    Ingress,
    Prerouting,
    Forward,
    Input,
    Output,
    Postrouting,
    Egress,
}

impl RHook {
    pub fn as_str(&self) -> &'static str {
        match self {
            RHook::Ingress => "ingress",
            RHook::Prerouting => "prerouting",
            RHook::Forward => "forward",
            RHook::Input => "input",
            RHook::Output => "output",
            RHook::Postrouting => "postrouting",
            RHook::Egress => "egress",
        }
    }
}

impl Display for RHook {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl From<&RHook> for NfHook {
    fn from(value: &RHook) -> Self {
        match value {
            RHook::Ingress => Self::Ingress,
            RHook::Prerouting => Self::Prerouting,
            RHook::Forward => Self::Forward,
            RHook::Input => Self::Input,
            RHook::Output => Self::Output,
            RHook::Postrouting => Self::Postrouting,
            RHook::Egress => Self::Egress,
        }
    }
}

#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RStatement {
    Accept,
    Drop,
    Continue,
    Return,
}

pub struct Set {
    pub sets: SetNames,
    pub hooks: Vec<RHook>,
    pub timeout: Option<u32>,
    pub target: RStatement,
}

impl Set {
    pub fn from(name: String, options: SetOptions) -> Self {
        Self {
            sets: SetNames::new(name, options.version),
            timeout: options.timeout_u32,
            target: options.target.unwrap_or(RStatement::Drop),
            hooks: options.hooks.unwrap_or(vec![RHook::Input, RHook::Forward]),
        }
    }

    pub fn init<'a>(&self, batch: &mut Batch<'a>) -> Result<(), String> {
        for (set, version) in [
            (&self.sets.ipv4, Version::IPv4),
            (&self.sets.ipv6, Version::IPv6),
        ] {
            if let Some(set) = set {
                let family = NfFamily::INet;
                let table = Cow::from("reaction");

                // create set
                batch.add(NfListObject::<'a>::Set(Box::new(nftables::schema::Set::<
                    'a,
                > {
                    family,
                    table: table.to_owned(),
                    name: Cow::Owned(set.to_owned()),
                    // TODO Try a set which is both ipv4 and ipv6?
                    set_type: SetTypeValue::Single(match version {
                        Version::IPv4 => SetType::Ipv4Addr,
                        Version::IPv6 => SetType::Ipv6Addr,
                    }),
                    flags: Some({
                        let mut flags = HashSet::from([SetFlag::Interval]);
                        if self.timeout.is_some() {
                            flags.insert(SetFlag::Timeout);
                        }
                        flags
                    }),
                    timeout: self.timeout,
                    ..Default::default()
                })));
                // insert set in chains
                let expr = vec![match self.target {
                    RStatement::Accept => Statement::Accept(None),
                    RStatement::Drop => Statement::Drop(None),
                    RStatement::Continue => Statement::Continue(None),
                    RStatement::Return => Statement::Return(None),
                }];
                for hook in &self.hooks {
                    batch.add(NfListObject::Rule(Rule {
                        family,
                        table: table.to_owned(),
                        chain: Cow::from(hook.to_string()),
                        expr: Cow::Owned(expr.clone()),
                        ..Default::default()
                    }));
                }
            }
        }
        Ok(())
    }
}

pub struct SetNames {
    pub ipv4: Option<String>,
    pub ipv6: Option<String>,
}

impl SetNames {
    pub fn new(name: String, version: Option<IpVersion>) -> Self {
        Self {
            ipv4: match version {
                Some(IpVersion::Ipv4) => Some(name.clone()),
                Some(IpVersion::Ipv6) => None,
                None | Some(IpVersion::Ip) => Some(format!("{}v4", name)),
            },
            ipv6: match version {
                Some(IpVersion::Ipv4) => None,
                Some(IpVersion::Ipv6) => Some(name),
                None | Some(IpVersion::Ip) => Some(format!("{}v6", name)),
            },
        }
    }
}

pub struct Action {
    nft: NftClient,
    rx: remocMpsc::Receiver<Exec>,
    shutdown: ShutdownToken,
    sets: SetNames,
    // index of pattern ip in match vec
    ip_index: usize,
    action: AddDel,
}

impl Action {
    pub fn new(
        nft: NftClient,
        shutdown: ShutdownToken,
        rx: remocMpsc::Receiver<Exec>,
        options: ActionOptions,
    ) -> Result<Self, String> {
        Ok(Action {
            nft,
            rx,
            shutdown,
            sets: SetNames::new(options.set, options.set_options.version),
            ip_index: options.ip_index,
            action: options.action,
        })
    }

    pub async fn serve(mut self) {
        loop {
            let event = tokio::select! {
                exec = self.rx.recv() => Some(exec),
                _ = self.shutdown.wait() => None,
            };
            match event {
                // shutdown asked
                None => break,
                // channel closed
                Some(Ok(None)) => break,
                // error from channel
                Some(Err(err)) => {
                    eprintln!("ERROR {err}");
                    break;
                }
                // ok
                Some(Ok(Some(exec))) => {
                    if let Err(err) = self.handle_exec(exec).await {
                        eprintln!("ERROR {err}");
                        break;
                    }
                }
            }
        }
        // eprintln!("DEBUG Asking for shutdown");
        // self.shutdown.ask_shutdown();
    }

    async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> {
        // safeguard against Vec::remove's panic
        if exec.match_.len() <= self.ip_index {
            return Err(format!(
                "match received from reaction is smaller than expected. looking for index {} but size is {}. this is a bug!",
                self.ip_index,
                exec.match_.len()
            ));
        }
        let ip = exec.match_.remove(self.ip_index);
        // select set
        let set = match (&self.sets.ipv4, &self.sets.ipv6) {
            (None, None) => return Err(format!("action is neither IPv4 nor IPv6, this is a bug!")),
            (None, Some(set)) => set,
            (Some(set), None) => set,
            (Some(set4), Some(set6)) => {
                if ip.contains(':') {
                    set6
                } else {
                    set4
                }
            }
        };
        // add/remove ip to set
        let element = NfListObject::Element(Element {
            family: NfFamily::INet,
            table: Cow::from("reaction"),
            name: Cow::from(set),
            elem: Cow::from(vec![Expression::String(Cow::from(ip.clone()))]),
        });
        let mut batch = Batch::new();
        match self.action {
            AddDel::Add => batch.add(element),
            AddDel::Delete => batch.delete(element),
        };
        match self.nft.send(batch).await {
            Ok(ok) => {
                eprintln!("DEBUG action ok {:?} {ip}: {ok}", self.action);
                Ok(())
            }
            Err(err) => Err(format!("action ko {:?} {ip}: {err}", self.action)),
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::action::{IpVersion, RHook, RStatement, SetOptions};

    #[tokio::test]
    async fn set_options_merge() {
        let s1 = SetOptions {
            version: None,
            hooks: None,
            timeout: None,
            timeout_u32: None,
            target: None,
        };
        let s2 = SetOptions {
            version: Some(IpVersion::Ipv4),
            hooks: Some(vec![RHook::Input]),
            timeout: Some("3h".into()),
            timeout_u32: Some(3 * 3600),
            target: Some(RStatement::Drop),
        };
        assert_ne!(s1, s2);
        assert_eq!(s1, SetOptions::default());

        {
            // s2 can be merged in s1
            let mut s1 = s1.clone();
            assert!(s1.merge(&s2).is_ok());
            assert_eq!(s1, s2);
        }

        {
            // s1 can be merged in s2
            let mut s2 = s2.clone();
            assert!(s2.merge(&s1).is_ok());
        }

        {
            // s1 can be merged in itself
            let mut s3 = s1.clone();
            assert!(s3.merge(&s1).is_ok());
            assert_eq!(s1, s3);
        }

        {
            // s2 can be merged in itself
            let mut s3 = s2.clone();
            assert!(s3.merge(&s2).is_ok());
            assert_eq!(s2, s3);
        }

        for s3 in [
            SetOptions {
                version: Some(IpVersion::Ipv6),
                ..Default::default()
            },
            SetOptions {
                hooks: Some(vec![RHook::Output]),
                ..Default::default()
            },
            SetOptions {
                timeout: Some("30min".into()),
                ..Default::default()
            },
            SetOptions {
                target: Some(RStatement::Continue),
                ..Default::default()
            },
        ] {
            // none with some is ok
            assert!(s3.clone().merge(&s1).is_ok(), "s3: {s3:?}");
            assert!(s1.clone().merge(&s3).is_ok(), "s3: {s3:?}");
            // different some is ko
            assert!(s3.clone().merge(&s2).is_err(), "s3: {s3:?}");
            assert!(s2.clone().merge(&s3).is_err(), "s3: {s3:?}");
        }
    }
}
plugins/reaction-plugin-nftables/src/helpers.rs (new file, +15)
@@ -0,0 +1,15 @@
use std::fmt::Display;

#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub enum Version {
    IPv4,
    IPv6,
}
impl Display for Version {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Version::IPv4 => "IPv4",
            Version::IPv6 => "IPv6",
        })
    }
}
plugins/reaction-plugin-nftables/src/main.rs (new file, +176)
@@ -0,0 +1,176 @@
use std::{
    borrow::Cow,
    collections::{BTreeMap, BTreeSet},
};

use nftables::{
    batch::Batch,
    schema::{Chain, NfListObject, Table},
    types::{NfChainType, NfFamily},
};
use reaction_plugin::{
    ActionConfig, ActionImpl, Hello, Manifest, PluginInfo, RemoteResult, StreamConfig, StreamImpl,
    shutdown::ShutdownController,
};
use remoc::rtc;

use crate::{
    action::{Action, ActionOptions, Set, SetOptions},
    nft::NftClient,
};

#[cfg(test)]
mod tests;

mod action;
pub mod helpers;
mod nft;

#[tokio::main]
async fn main() {
    let plugin = Plugin::default();
    reaction_plugin::main_loop(plugin).await;
}

#[derive(Default)]
struct Plugin {
    nft: NftClient,
    sets: Vec<Set>,
    actions: Vec<Action>,
    shutdown: ShutdownController,
}

impl PluginInfo for Plugin {
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::default(),
            actions: BTreeSet::from(["nftables".into()]),
        })
    }

    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        if !streams.is_empty() {
            return Err("This plugin can't handle any stream type".into());
        }

        let mut ret_actions = Vec::with_capacity(actions.len());
        let mut set_options: BTreeMap<String, SetOptions> = BTreeMap::new();

        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "nftables" {
                return Err("This plugin can't handle other action types than nftables".into());
            }

            let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| {
                format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}")
            })?;

            options.set_ip_index(patterns).map_err(|_|
                format!(
                    "No pattern with name {} in filter {stream_name}.{filter_name}. Try setting the option `pattern` to your pattern name of type 'ip'",
                    &options.pattern
                )
            )?;

            // Merge options set-wise
            set_options
                .entry(options.set.clone())
                .or_default()
                .merge(&options.set_options)
                .map_err(|err| format!("set {}: {err}", options.set))?;

            let (tx, rx) = remoc::rch::mpsc::channel(1);
            self.actions.push(Action::new(
                self.nft.clone(),
                self.shutdown.token(),
                rx,
                options,
            )?);

            ret_actions.push(ActionImpl { tx });
        }

        // Build all sets from the merged options
        while let Some((name, options)) = set_options.pop_first() {
            self.sets.push(Set::from(name, options));
        }

        Ok((vec![], ret_actions))
    }

    async fn start(&mut self) -> RemoteResult<()> {
        self.shutdown.delegate().handle_quit_signals()?;

        let mut batch = Batch::new();
        batch.add(reaction_table());

        // Create a chain for each registered netfilter hook
        for hook in self
            .sets
            .iter()
            .flat_map(|set| &set.hooks)
            .collect::<BTreeSet<_>>()
        {
            batch.add(NfListObject::Chain(Chain {
                family: NfFamily::INet,
                table: Cow::Borrowed("reaction"),
                name: Cow::from(hook.as_str()),
                _type: Some(NfChainType::Filter),
                hook: Some(hook.into()),
                prio: Some(0),
                ..Default::default()
            }));
        }
|
|
||||||
|
for set in &self.sets {
|
||||||
|
set.init(&mut batch)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO apply batch
|
||||||
|
self.nft.send(batch).await?;
|
||||||
|
|
||||||
|
// Launch a task that will destroy the table on shutdown
|
||||||
|
{
|
||||||
|
let token = self.shutdown.token();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
token.wait().await;
|
||||||
|
Batch::new().delete(reaction_table());
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Launch all actions
|
||||||
|
while let Some(action) = self.actions.pop() {
|
||||||
|
tokio::spawn(async move { action.serve().await });
|
||||||
|
}
|
||||||
|
self.actions = Default::default();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn close(self) -> RemoteResult<()> {
|
||||||
|
self.shutdown.ask_shutdown();
|
||||||
|
self.shutdown.wait_all_task_shutdown().await;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn reaction_table() -> NfListObject<'static> {
|
||||||
|
NfListObject::Table(Table {
|
||||||
|
family: NfFamily::INet,
|
||||||
|
name: Cow::Borrowed("reaction"),
|
||||||
|
handle: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
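For orientation, the batch built in `start` should produce roughly this ruleset for a set registered on the `input` hook (a hand-written sketch in `nft` syntax; the plugin actually emits the equivalent JSON objects through libnftables, and the set contents come from `Set::init`, which is defined in `action.rs`, not shown here):

table inet reaction {
    chain input {
        type filter hook input priority 0;
    }
}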
plugins/reaction-plugin-nftables/src/nft.rs (new file)
@@ -0,0 +1,81 @@
use std::{
    ffi::{CStr, CString},
    thread,
};

use libnftables1_sys::Nftables;
use nftables::batch::Batch;
use tokio::sync::{mpsc, oneshot};

/// A client with a dedicated server thread to libnftables.
/// Calling [`Default::default()`] spawns a new server thread.
/// Cloning just creates a new client to the same server thread.
#[derive(Clone)]
pub struct NftClient {
    tx: mpsc::Sender<NftCommand>,
}

impl Default for NftClient {
    fn default() -> Self {
        let (tx, mut rx) = mpsc::channel(10);

        thread::spawn(move || {
            let mut conn = Nftables::new();

            while let Some(NftCommand { json, ret }) = rx.blocking_recv() {
                let (rc, output, error) = conn.run_cmd(json.as_ptr());
                let res = match rc {
                    0 => to_rust_string(output)
                        .ok_or_else(|| "unknown ok (rc = 0 but no output buffer)".into()),
                    _ => to_rust_string(error)
                        .map(|err| format!("error (rc = {rc}: {err})"))
                        .ok_or_else(|| format!("unknown error (rc = {rc} but no error buffer)")),
                };
                let _ = ret.send(res);
            }
        });

        NftClient { tx }
    }
}

impl NftClient {
    /// Send a batch to nftables.
    pub async fn send(&self, batch: Batch<'_>) -> Result<String, String> {
        // convert JSON to CString
        let mut json = serde_json::to_vec(&batch.to_nftables())
            .map_err(|err| format!("couldn't build json to send to nftables: {err}"))?;
        json.push('\0' as u8);
        let json = CString::from_vec_with_nul(json)
            .map_err(|err| format!("invalid json with null char: {err}"))?;

        // Send command
        let (tx, rx) = oneshot::channel();
        let command = NftCommand { json, ret: tx };
        self.tx
            .send(command)
            .await
            .map_err(|err| format!("nftables thread has quit, can't send command: {err}"))?;

        // Wait for result
        rx.await
            .map_err(|_| format!("nftables thread has quit, no response for command"))?
    }
}

struct NftCommand {
    json: CString,
    ret: oneshot::Sender<Result<String, String>>,
}

fn to_rust_string(c_ptr: *const i8) -> Option<String> {
    if c_ptr.is_null() {
        None
    } else {
        Some(
            unsafe { CStr::from_ptr(c_ptr) }
                .to_string_lossy()
                .into_owned(),
        )
    }
}
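A minimal usage sketch of `NftClient` from inside the plugin crate, assuming a Tokio runtime (this call site is hypothetical, not part of the diff):

use nftables::batch::Batch;

async fn apply_empty_batch(nft: &NftClient) {
    // An empty batch is a no-op for the kernel, but it still exercises the
    // full round trip through the dedicated libnftables thread.
    let batch = Batch::new();
    match nft.send(batch).await {
        Ok(output) => println!("nftables output: {output}"),
        Err(err) => eprintln!("ERROR {err}"),
    }
}

The design choice here is that libnftables is synchronous C, so one blocking thread owns the `Nftables` handle and async callers talk to it over an mpsc channel with a oneshot reply per command.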
plugins/reaction-plugin-nftables/src/tests.rs (new file)
@@ -0,0 +1,247 @@
use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig, Value};
use serde_json::json;

use crate::Plugin;

#[tokio::test]
async fn conf_stream() {
    // No stream is supported by nftables
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "nftables".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_err()
    );

    // Empty config is ok
    assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok());
}

#[tokio::test]
async fn conf_action_standalone() {
    let p = vec!["name".into(), "ip".into(), "ip2".into()];
    let p_noip = vec!["name".into(), "ip2".into()];

    for (is_ok, conf, patterns) in [
        // minimal set
        (true, json!({ "set": "test" }), &p),
        // missing set key
        (false, json!({}), &p),
        (false, json!({ "version": "ipv4" }), &p),
        // unknown key
        (false, json!({ "set": "test", "unknown": "yes" }), &p),
        (false, json!({ "set": "test", "ip_index": 1 }), &p),
        (false, json!({ "set": "test", "timeout_u32": 1 }), &p),
        // pattern //
        (true, json!({ "set": "test" }), &p),
        (true, json!({ "set": "test", "pattern": "ip" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p_noip),
        // unknown pattern "ip"
        (false, json!({ "set": "test" }), &p_noip),
        (false, json!({ "set": "test", "pattern": "ip" }), &p_noip),
        // unknown pattern
        (false, json!({ "set": "test", "pattern": "unknown" }), &p),
        (false, json!({ "set": "test", "pattern": "uwu" }), &p_noip),
        // bad type
        (false, json!({ "set": "test", "pattern": 0 }), &p_noip),
        (false, json!({ "set": "test", "pattern": true }), &p_noip),
        // action //
        (true, json!({ "set": "test", "action": "add" }), &p),
        (true, json!({ "set": "test", "action": "delete" }), &p),
        // unknown action
        (false, json!({ "set": "test", "action": "create" }), &p),
        (false, json!({ "set": "test", "action": "insert" }), &p),
        (false, json!({ "set": "test", "action": "del" }), &p),
        (false, json!({ "set": "test", "action": "destroy" }), &p),
        // bad type
        (false, json!({ "set": "test", "action": true }), &p),
        (false, json!({ "set": "test", "action": 1 }), &p),
        // ip version //
        // ok
        (true, json!({ "set": "test", "version": "ipv4" }), &p),
        (true, json!({ "set": "test", "version": "ipv6" }), &p),
        (true, json!({ "set": "test", "version": "ip" }), &p),
        // unknown version
        (false, json!({ "set": "test", "version": 4 }), &p),
        (false, json!({ "set": "test", "version": 6 }), &p),
        (false, json!({ "set": "test", "version": 46 }), &p),
        (false, json!({ "set": "test", "version": "5" }), &p),
        (false, json!({ "set": "test", "version": "ipv5" }), &p),
        (false, json!({ "set": "test", "version": "4" }), &p),
        (false, json!({ "set": "test", "version": "6" }), &p),
        (false, json!({ "set": "test", "version": "46" }), &p),
        // bad type
        (false, json!({ "set": "test", "version": true }), &p),
        // hooks //
        // everything is fine really
        (true, json!({ "set": "test", "hooks": [] }), &p),
        (
            true,
            json!({ "set": "test", "hooks": ["input", "forward", "ingress", "prerouting", "output", "postrouting", "egress"] }),
            &p,
        ),
        (false, json!({ "set": "test", "hooks": ["INPUT"] }), &p),
        (false, json!({ "set": "test", "hooks": ["FORWARD"] }), &p),
        (
            false,
            json!({ "set": "test", "hooks": ["unknown_hook"] }),
            &p,
        ),
        // timeout //
        (true, json!({ "set": "test", "timeout": "1m" }), &p),
        (true, json!({ "set": "test", "timeout": "3 days" }), &p),
        // bad
        (false, json!({ "set": "test", "timeout": "3 dayz"}), &p),
        (false, json!({ "set": "test", "timeout": 12 }), &p),
        // target //
        // anything is fine too
        (true, json!({ "set": "test", "target": "drop" }), &p),
        (true, json!({ "set": "test", "target": "accept" }), &p),
        (true, json!({ "set": "test", "target": "return" }), &p),
        (true, json!({ "set": "test", "target": "continue" }), &p),
        // bad
        (false, json!({ "set": "test", "target": "custom" }), &p),
        (false, json!({ "set": "test", "target": "DROP" }), &p),
        (false, json!({ "set": "test", "target": 11 }), &p),
        (false, json!({ "set": "test", "target": ["DROP"] }), &p),
    ] {
        let res = Plugin::default()
            .load_config(
                vec![],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "nftables".into(),
                    config: conf.clone().into(),
                    patterns: patterns.clone(),
                }],
            )
            .await;

        assert!(
            res.is_ok() == is_ok,
            "conf: {:?}, must be ok: {is_ok}, result: {:?}",
            conf,
            // empty Result::Ok because ActionImpl is not Debug
            res.map(|_| ())
        );
    }
}

// TODO
#[tokio::test]
async fn conf_action_merge() {
    let mut plugin = Plugin::default();

    let set1 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action1".into(),
        action_type: "nftables".into(),
        config: json!({
            "set": "test",
            "target": "drop",
            "hooks": ["input"],
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };

    let set2 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "nftables".into(),
        config: json!({
            "set": "test",
            "target": "drop",
            "version": "ip",
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };

    let set3 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "nftables".into(),
        config: json!({
            "set": "test",
            "action": "delete",
        })
        .into(),
        patterns: vec!["ip".into()],
    };

    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1.clone(),
                // Same set, adding options, no conflict
                set2.clone(),
                // Same set, no new options, no conflict
                set3.clone(),
                // Unrelated set, so no conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "nftables".into(),
                    config: json!({
                        "set": "test2",
                        "target": "return",
                        "version": "ipv6",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;

    assert!(res.is_ok(), "res: {:?}", res.map(|_| ()));

    // Another set with conflict is not ok
    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1,
                // Same set, adding options, no conflict
                set2,
                // Same set, no new options, no conflict
                set3,
                // Another set with conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "nftables".into(),
                    config: json!({
                        "set": "test",
                        "target": "target2",
                        "action": "del",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;
    assert!(res.is_err(), "res: {:?}", res.map(|_| ()));
}
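Put together, the options exercised by these tests correspond to a user configuration along the following lines (a sketch in the jsonnet style used by reaction's docs; stream, filter and set names are illustrative, and the tests suggest only `set` is required, with `pattern` defaulting to "ip"):

{
  streams: {
    ssh: {
      // ...
      filters: {
        failed_login: {
          // ... a regex capturing an <ip> pattern ...
          actions: {
            ban: {
              type: "nftables",
              options: {
                set: "reaction-ssh",          // required
                action: "add",                // "add" or "delete"
                version: "ipv4",              // "ipv4", "ipv6" or "ip"
                hooks: ["input", "forward"],
                timeout: "1m",
                target: "drop",               // drop / accept / return / continue
                pattern: "ip",                // filter pattern holding the address
              },
            },
          },
        },
      },
    },
  },
}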
plugins/reaction-plugin-virtual/Cargo.toml (new file)
@@ -0,0 +1,11 @@
[package]
name = "reaction-plugin-virtual"
version = "1.0.0"
edition = "2024"

[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
remoc.workspace = true
reaction-plugin.path = "../reaction-plugin"
serde.workspace = true
serde_json.workspace = true
plugins/reaction-plugin-virtual/src/main.rs (new file)
@@ -0,0 +1,179 @@
use std::collections::{BTreeMap, BTreeSet};

use reaction_plugin::{
    ActionConfig, ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamConfig,
    StreamImpl, Value, line::PatternLine,
};
use remoc::{rch::mpsc, rtc};
use serde::{Deserialize, Serialize};

#[cfg(test)]
mod tests;

#[tokio::main]
async fn main() {
    let plugin = Plugin::default();
    reaction_plugin::main_loop(plugin).await;
}

#[derive(Default)]
struct Plugin {}

impl PluginInfo for Plugin {
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::from(["virtual".into()]),
            actions: BTreeSet::from(["virtual".into()]),
        })
    }

    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        let mut ret_streams = Vec::with_capacity(streams.len());
        let mut ret_actions = Vec::with_capacity(actions.len());

        let mut local_streams = BTreeMap::new();

        for StreamConfig {
            stream_name,
            stream_type,
            config,
        } in streams
        {
            if stream_type != "virtual" {
                return Err("This plugin can't handle other stream types than virtual".into());
            }

            let (virtual_stream, receiver) = VirtualStream::new(config)?;

            if let Some(_) = local_streams.insert(stream_name, virtual_stream) {
                return Err("this virtual stream has already been initialized".into());
            }

            ret_streams.push(StreamImpl {
                stream: receiver,
                standalone: false,
            });
        }

        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "virtual" {
                return Err("This plugin can't handle other action types than virtual".into());
            }

            let (mut virtual_action, tx) = VirtualAction::new(
                stream_name,
                filter_name,
                action_name,
                config,
                patterns,
                &local_streams,
            )?;

            tokio::spawn(async move { virtual_action.serve().await });

            ret_actions.push(ActionImpl { tx });
        }

        Ok((ret_streams, ret_actions))
    }

    async fn start(&mut self) -> RemoteResult<()> {
        Ok(())
    }

    async fn close(self) -> RemoteResult<()> {
        Ok(())
    }
}

#[derive(Clone)]
struct VirtualStream {
    tx: mpsc::Sender<Line>,
}

impl VirtualStream {
    fn new(config: Value) -> Result<(Self, mpsc::Receiver<Line>), String> {
        const CONFIG_ERROR: &'static str = "streams of type virtual take no options";
        match config {
            Value::Null => (),
            Value::Object(map) => {
                if map.len() != 0 {
                    return Err(CONFIG_ERROR.into());
                }
            }
            _ => return Err(CONFIG_ERROR.into()),
        }

        let (tx, rx) = mpsc::channel(1);
        Ok((Self { tx }, rx))
    }
}

#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
struct ActionOptions {
    /// The line to send to the corresponding virtual stream, example: "ban \<ip\>"
    send: String,
    /// The name of the corresponding virtual stream, example: "my_stream"
    to: String,
}

struct VirtualAction {
    rx: mpsc::Receiver<Exec>,
    send: PatternLine,
    to: VirtualStream,
}

impl VirtualAction {
    fn new(
        stream_name: String,
        filter_name: String,
        action_name: String,
        config: Value,
        patterns: Vec<String>,
        streams: &BTreeMap<String, VirtualStream>,
    ) -> Result<(Self, mpsc::Sender<Exec>), String> {
        let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| {
            format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}")
        })?;

        let send = PatternLine::new(options.send, patterns);

        let stream = streams.get(&options.to).ok_or_else(|| {
            format!(
                "action {}.{}.{}: send \"{}\" matches no stream name",
                stream_name, filter_name, action_name, options.to
            )
        })?;

        let (tx, rx) = mpsc::channel(1);
        Ok((
            Self {
                rx,
                send: send,
                to: stream.clone(),
            },
            tx,
        ))
    }

    async fn serve(&mut self) {
        while let Ok(Some(exec)) = self.rx.recv().await {
            let line = self.send.line(exec.match_);
            self.to.tx.send((line, exec.time)).await.unwrap();
        }
    }
}
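The two halves connect through user configuration along these lines (a sketch in the jsonnet style used by reaction's docs, matching the `ActionOptions { send, to }` struct above; the stream and filter names are illustrative):

{
  streams: {
    // A virtual stream takes no options; it only receives lines
    // produced by virtual actions.
    my_virtual_stream: {
      type: "virtual",
      // filters: ...
    },
    ssh: {
      // ...
      filters: {
        failed_login: {
          // ...
          actions: {
            forward: {
              type: "virtual",
              options: {
                send: "bad password from <ip>",
                to: "my_virtual_stream",
              },
            },
          },
        },
      },
    },
  },
}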
plugins/reaction-plugin-virtual/src/tests.rs (new file)
@@ -0,0 +1,322 @@
use std::time::{SystemTime, UNIX_EPOCH};

use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig, Value};
use serde_json::json;

use crate::Plugin;

#[tokio::test]
async fn conf_stream() {
    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtu".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_err()
    );

    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_ok()
    );

    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: json!({}).into(),
                }],
                vec![]
            )
            .await
            .is_ok()
    );

    // Invalid conf: must be empty
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: json!({"key": "value" }).into(),
                }],
                vec![]
            )
            .await
            .is_err()
    );
}

#[tokio::test]
async fn conf_action() {
    let streams = vec![StreamConfig {
        stream_name: "stream".into(),
        stream_type: "virtual".into(),
        config: Value::Null,
    }];

    let valid_conf = json!({ "send": "message", "to": "stream" });

    let missing_send_conf = json!({ "to": "stream" });
    let missing_to_conf = json!({ "send": "stream" });
    let extra_attr_conf = json!({ "send": "message", "send2": "message", "to": "stream" });

    let patterns = Vec::default();

    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                streams.clone(),
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtu".into(),
                    config: Value::Null,
                    patterns: patterns.clone(),
                }]
            )
            .await
            .is_err()
    );
    assert!(
        Plugin::default()
            .load_config(
                streams.clone(),
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: valid_conf.into(),
                    patterns: patterns.clone()
                }]
            )
            .await
            .is_ok()
    );

    for conf in [missing_send_conf, missing_to_conf, extra_attr_conf] {
        assert!(
            Plugin::default()
                .load_config(
                    streams.clone(),
                    vec![ActionConfig {
                        stream_name: "stream".into(),
                        filter_name: "filter".into(),
                        action_name: "action".into(),
                        action_type: "virtual".into(),
                        config: conf.clone().into(),
                        patterns: patterns.clone()
                    }]
                )
                .await
                .is_err(),
            "conf: {:?}",
            conf
        );
    }
}

#[tokio::test]
async fn conf_send() {
    // Valid to: option
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: Value::Null,
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "message", "to": "stream" }).into(),
                    patterns: vec![],
                }]
            )
            .await
            .is_ok(),
    );

    // Invalid to: option
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: Value::Null,
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "message", "to": "stream1" }).into(),
                    patterns: vec![],
                }]
            )
            .await
            .is_err(),
    );
}

// Let's allow empty streams for now.
// I guess it can be useful to have manual only actions.
//
// #[tokio::test]
// async fn conf_empty_stream() {
//     assert!(
//         Plugin::default()
//             .load_config(
//                 vec![StreamConfig {
//                     stream_name: "stream".into(),
//                     stream_type: "virtual".into(),
//                     config: Value::Null,
//                 }],
//                 vec![],
//             )
//             .await
//             .is_err(),
//     );
// }

#[tokio::test]
async fn run_simple() {
    let mut plugin = Plugin::default();
    let (mut streams, mut actions) = plugin
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "virtual".into(),
                config: Value::Null,
            }],
            vec![ActionConfig {
                stream_name: "stream".into(),
                filter_name: "filter".into(),
                action_name: "action".into(),
                action_type: "virtual".into(),
                config: json!({ "send": "message <test>", "to": "stream" }).into(),
                patterns: vec!["test".into()],
            }],
        )
        .await
        .unwrap();

    let mut stream = streams.pop().unwrap();
    let action = actions.pop().unwrap();
    assert!(!stream.standalone);

    for m in ["test1", "test2", "test3", " a a a aa a a"] {
        let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        assert!(
            action
                .tx
                .send(Exec {
                    match_: vec![m.into()],
                    time,
                })
                .await
                .is_ok()
        );
        assert_eq!(
            stream.stream.recv().await.unwrap().unwrap(),
            (format!("message {m}"), time),
        );
    }
}

#[tokio::test]
async fn run_two_actions() {
    let mut plugin = Plugin::default();
    let (mut streams, mut actions) = plugin
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "virtual".into(),
                config: Value::Null,
            }],
            vec![
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "send <a>", "to": "stream" }).into(),
                    patterns: vec!["a".into(), "b".into()],
                },
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "<b> send", "to": "stream" }).into(),
                    patterns: vec!["a".into(), "b".into()],
                },
            ],
        )
        .await
        .unwrap();

    let mut stream = streams.pop().unwrap();
    assert!(!stream.standalone);

    let action2 = actions.pop().unwrap();
    let action1 = actions.pop().unwrap();

    let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();

    assert!(
        action1
            .tx
            .send(Exec {
                match_: vec!["aa".into(), "bb".into()],
                time,
            })
            .await
            .is_ok(),
    );
    assert_eq!(
        stream.stream.recv().await.unwrap().unwrap(),
        ("send aa".into(), time),
    );

    assert!(
        action2
            .tx
            .send(Exec {
                match_: vec!["aa".into(), "bb".into()],
                time,
            })
            .await
            .is_ok(),
    );
    assert_eq!(
        stream.stream.recv().await.unwrap().unwrap(),
        ("bb send".into(), time),
    );
}
plugins/reaction-plugin/Cargo.toml (new file)
@@ -0,0 +1,20 @@
[package]
name = "reaction-plugin"
version = "1.0.0"
edition = "2024"
authors = ["ppom <reaction@ppom.me>"]
license = "AGPL-3.0"
homepage = "https://reaction.ppom.me"
repository = "https://framagit.org/ppom/reaction"
keywords = ["security", "sysadmin", "logs", "monitoring", "plugin"]
categories = ["security"]
description = "Plugin interface for reaction, a daemon that scans logs and takes action (alternative to fail2ban)"

[dependencies]
remoc.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tokio.features = ["io-std", "signal"]
tokio-util.workspace = true
tokio-util.features = ["rt"]
599
plugins/reaction-plugin/src/lib.rs
Normal file
599
plugins/reaction-plugin/src/lib.rs
Normal file
|
|
@ -0,0 +1,599 @@
|
||||||
|
//! This crate defines the API between reaction's core and plugins.
|
||||||
|
//!
|
||||||
|
//! Plugins must be written in Rust, for now.
|
||||||
|
//!
|
||||||
|
//! This documentation assumes the reader has some knowledge of Rust.
|
||||||
|
//! However, if you find that something is unclear, don't hesitate to
|
||||||
|
//! [ask for help](https://framagit.org/ppom/reaction/#help), even if you're new to Rust.
|
||||||
|
//!
|
||||||
|
//! To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides
|
||||||
|
//! the entrypoint for a plugin.
|
||||||
|
//! It permits to define `0` to `n` custom stream and action types.
|
||||||
|
//!
|
||||||
|
//! ## Note on reaction-plugin API stability
|
||||||
|
//!
|
||||||
|
//! This is the v1 of reaction's plugin interface.
|
||||||
|
//! It's quite efficient and complete, but it has the big drawback of being Rust-only and [`tokio`]-only.
|
||||||
|
//!
|
||||||
|
//! In the future, I'd like to define a language-agnostic interface, which will be a major breaking change in the API.
|
||||||
|
//! However, I'll try my best to reduce the necessary code changes for plugins that use this v1.
|
||||||
|
//!
|
||||||
|
//! ## Naming & calling conventions
|
||||||
|
//!
|
||||||
|
//! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`.
|
||||||
|
//! It will be invoked with one positional argument "serve".
|
||||||
|
//! ```bash
|
||||||
|
//! reaction-plugin-$NAME serve
|
||||||
|
//! ```
|
||||||
|
//! This can be useful if you want to provide CLI functionnality to your users,
|
||||||
|
//! so you can distinguish between a human user and reaction.
|
||||||
|
//!
|
||||||
|
//! ### State directory
|
||||||
|
//!
|
||||||
|
//! It will be executed in its own directory, in which it should have write access.
|
||||||
|
//! The directory is `$reaction_state_directory/plugin_data/$NAME`.
|
||||||
|
//! reaction's [state_directory](https://reaction.ppom.me/reference.html#state_directory)
|
||||||
|
//! defaults to its working directory, which is `/var/lib/reaction` in most setups.
|
||||||
|
//!
|
||||||
|
//! So your plugin directory should most often be `/var/lib/reaction/plugin_data/$NAME`,
|
||||||
|
//! but the plugin shouldn't expect that and use the current working directory instead.
|
||||||
|
//!
|
||||||
|
//! ## Communication
|
||||||
|
//!
|
||||||
|
//! Communication between the plugin and reaction is based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait
|
||||||
|
//! calls over a single transport channel.
|
||||||
|
//! The channels read and write channels are stdin and stdout, so you shouldn't use them for something else.
|
||||||
|
//!
|
||||||
|
//! [`remoc`] builds upon [`tokio`], so you'll need to use tokio too.
|
||||||
|
//!
|
||||||
|
//! ### Errors
|
||||||
|
//!
|
||||||
|
//! Errors during:
|
||||||
|
//! - config loading in [`PluginInfo::load_config`]
|
||||||
|
//! - startup in [`PluginInfo::start`]
|
||||||
|
//!
|
||||||
|
//! should be returned to reaction by the function's return value, permitting reaction to abort startup.
|
||||||
|
//!
|
||||||
|
//! During normal runtime, after the plugin has loaded its config and started, and before reaction is quitting, there is no *rusty* way to send errors to reaction.
|
||||||
|
//! Then errors can be printed to stderr.
|
||||||
|
//! They'll be captured line by line and re-printed by reaction, with the plugin name prepended.
|
||||||
|
//!
|
||||||
|
//! A line can start with `DEBUG `, `INFO `, `WARN `, `ERROR `.
|
||||||
|
//! If it starts with none of the above, the line is assumed to be an error.
|
||||||
|
//!
|
||||||
|
//! Example:
|
||||||
|
//! Those lines:
|
||||||
|
//! ```log
|
||||||
|
//! WARN This is an official warning from the plugin
|
||||||
|
//! Freeeee errrooooorrr
|
||||||
|
//! ```
|
||||||
|
//! Will become:
|
||||||
|
//! ```log
|
||||||
|
//! WARN plugin test: This is an official warning from the plugin
|
||||||
|
//! ERROR plugin test: Freeeee errrooooorrr
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Plugins should not exit when there is an error: reaction quits only when told to do so,
|
||||||
|
//! or if all its streams exit, and won't retry starting a failing plugin or stream.
|
||||||
|
//! Please only exit if you're in a 100% failing state.
|
||||||
|
//! It's considered better to continue operating in a degraded state than exiting.
|
||||||
|
//!
|
||||||
|
//! ## Getting started
|
||||||
|
//!
|
||||||
|
//! If you don't have Rust already installed, follow their [*Getting Started* documentation](https://rust-lang.org/learn/get-started/)
|
||||||
|
//! to get rust build tools and learn about editor support.
|
||||||
|
//!
|
||||||
|
//! Then create a new repository with cargo:
|
||||||
|
//!
|
||||||
|
//! ```bash
|
||||||
|
//! cargo new reaction-plugin-$NAME
|
||||||
|
//! cd reaction-plugin-$NAME
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Add required dependencies:
|
||||||
|
//!
|
||||||
|
//! ```bash
|
||||||
|
//! cargo add reaction-plugin tokio
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Replace `src/main.rs` with those contents:
|
||||||
|
//!
|
||||||
|
//! ```ignore
|
||||||
|
//! use reaction_plugin::PluginInfo;
|
||||||
|
//!
|
||||||
|
//! #[tokio::main]
|
||||||
|
//! async fn main() {
|
||||||
|
//! let plugin = MyPlugin::default();
|
||||||
|
//! reaction_plugin::main_loop(plugin).await;
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! #[derive(Default)]
|
||||||
|
//! struct MyPlugin {}
|
||||||
|
//!
|
||||||
|
//! impl PluginInfo for MyPlugin {
|
||||||
|
//! // ...
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Your IDE should now propose to implement missing members of the [`PluginInfo`] trait.
|
||||||
|
//! Your journey starts!
|
||||||
|
//!
|
||||||
|
//! ## Examples
|
||||||
|
//!
|
||||||
|
//! Core plugins can be found here: <https://framagit.org/ppom/reaction/-/tree/main/plugins>.
|
||||||
|
//!
|
||||||
|
//! - The "virtual" plugin is the simplest and can serve as a good complete example that links custom stream types and custom action types.
|
||||||
|
//! - The "ipset" plugin is a good example of an action-only plugin.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, BTreeSet},
|
||||||
|
env::args,
|
||||||
|
error::Error,
|
||||||
|
fmt::Display,
|
||||||
|
process::exit,
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
|
||||||
|
use remoc::{
|
||||||
|
Connect, rch,
|
||||||
|
rtc::{self, Server},
|
||||||
|
};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::{Number, Value as JValue};
|
||||||
|
use tokio::io::{stdin, stdout};
|
||||||
|
|
||||||
|
pub mod line;
|
||||||
|
pub mod shutdown;
|
||||||
|
pub mod time;
|
||||||
|
|
||||||
|
/// The only trait that **must** be implemented by a plugin.
|
||||||
|
/// It provides lists of stream, filter and action types implemented by a dynamic plugin.
|
||||||
|
#[rtc::remote]
|
||||||
|
pub trait PluginInfo {
|
||||||
|
/// Return the manifest of the plugin.
|
||||||
|
/// This should not be dynamic, and return always the same manifest.
|
||||||
|
///
|
||||||
|
/// Example implementation:
|
||||||
|
/// ```
|
||||||
|
/// Ok(Manifest {
|
||||||
|
/// hello: Hello::new(),
|
||||||
|
/// streams: BTreeSet::from(["mystreamtype".into()]),
|
||||||
|
/// actions: BTreeSet::from(["myactiontype".into()]),
|
||||||
|
/// })
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// First function called.
|
||||||
|
async fn manifest(&mut self) -> Result<Manifest, rtc::CallError>;
|
||||||
|
|
||||||
|
/// Load all plugin stream and action configurations.
|
||||||
|
/// Must error if config is invalid.
|
||||||
|
///
|
||||||
|
/// The plugin should not start running mutable commands here:
|
||||||
|
/// It should be ok to quit without cleanup for now.
|
||||||
|
///
|
||||||
|
/// Each [`StreamConfig`] from the `streams` arg should result in a corresponding [`StreamImpl`] returned, in the same order.
|
||||||
|
/// Each [`ActionConfig`] from the `actions` arg should result in a corresponding [`ActionImpl`] returned, in the same order.
|
||||||
|
///
|
||||||
|
/// Function called after [`PluginInfo::manifest`].
|
||||||
|
async fn load_config(
|
||||||
|
&mut self,
|
||||||
|
streams: Vec<StreamConfig>,
|
||||||
|
actions: Vec<ActionConfig>,
|
||||||
|
) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)>;
|
||||||
|
|
||||||
|
/// Notify the plugin that setup is finished, permitting a last occasion to report an error that'll make reaction exit.
|
||||||
|
/// All initialization (opening remote connections, starting streams, etc) should happen here.
|
||||||
|
///
|
||||||
|
/// Function called after [`PluginInfo::load_config`].
|
||||||
|
async fn start(&mut self) -> RemoteResult<()>;
|
||||||
|
|
||||||
|
/// Notify the plugin that reaction is quitting and that the plugin should quit too.
|
||||||
|
/// A few seconds later, the plugin will receive SIGTERM.
|
||||||
|
/// A few seconds later, the plugin will receive SIGKILL.
|
||||||
|
///
|
||||||
|
/// Function called after [`PluginInfo::start`], when reaction is quitting.
|
||||||
|
async fn close(mut self) -> RemoteResult<()>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The config for one Stream of a type advertised by this plugin.
|
||||||
|
///
|
||||||
|
/// For example this user config:
|
||||||
|
/// ```jsonnet
|
||||||
|
/// {
|
||||||
|
/// streams: {
|
||||||
|
/// mystream: {
|
||||||
|
/// type: "mystreamtype",
|
||||||
|
/// options: {
|
||||||
|
/// key: "value",
|
||||||
|
/// num: 3,
|
||||||
|
/// },
|
||||||
|
/// // filters: ...
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// would result in the following `StreamConfig`:
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// StreamConfig {
|
||||||
|
/// stream_name: "mystream",
|
||||||
|
/// stream_type: "mystreamtype",
|
||||||
|
/// config: Value::Object(BTreeMap::from([
|
||||||
|
/// ("key", Value::String("value")),
|
||||||
|
/// ("num", Value::Integer(3)),
|
||||||
|
/// ])),
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Don't hesitate to take advantage of [`serde_json::from_value`], to deserialize the [`Value`] into a Rust struct:
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// #[derive(Deserialize)]
|
||||||
|
/// struct MyStreamOptions {
|
||||||
|
/// key: String,
|
||||||
|
/// num: i64,
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// fn validate_config(stream_config: Value) -> Result<MyStreamOptions, serde_json::Error> {
|
||||||
|
/// serde_json::from_value(stream_config.into())
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Serialize, Deserialize, Clone)]
|
||||||
|
pub struct StreamConfig {
|
||||||
|
pub stream_name: String,
|
||||||
|
pub stream_type: String,
|
||||||
|
pub config: Value,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The config for one Stream of a type advertised by this plugin.
|
||||||
|
///
|
||||||
|
/// For example this user config:
|
||||||
|
/// ```jsonnet
|
||||||
|
/// {
|
||||||
|
/// streams: {
|
||||||
|
/// mystream: {
|
||||||
|
/// // ...
|
||||||
|
/// filters: {
|
||||||
|
/// myfilter: {
|
||||||
|
/// // ...
|
||||||
|
/// actions: {
|
||||||
|
/// myaction: {
|
||||||
|
/// type: "myactiontype",
|
||||||
|
/// options: {
|
||||||
|
/// boolean: true,
|
||||||
|
/// array: ["item"],
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// would result in the following `ActionConfig`:
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// ActionConfig {
|
||||||
|
/// action_name: "myaction",
|
||||||
|
/// action_type: "myactiontype",
|
||||||
|
/// config: Value::Object(BTreeMap::from([
|
||||||
|
/// ("boolean", Value::Boolean(true)),
|
||||||
|
/// ("array", Value::Array([Value::String("item")])),
|
||||||
|
/// ])),
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Don't hesitate to take advantage of [`serde_json::from_value`], to deserialize the [`Value`] into a Rust struct:
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// #[derive(Deserialize)]
|
||||||
|
/// struct MyActionOptions {
|
||||||
|
/// boolean: bool,
|
||||||
|
/// array: Vec<String>,
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// fn validate_config(action_config: Value) -> Result<MyActionOptions, serde_json::Error> {
|
||||||
|
/// serde_json::from_value(action_config.into())
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Serialize, Deserialize, Clone)]
|
||||||
|
pub struct ActionConfig {
|
||||||
|
pub stream_name: String,
|
||||||
|
pub filter_name: String,
|
||||||
|
pub action_name: String,
|
||||||
|
pub action_type: String,
|
||||||
|
pub config: Value,
|
||||||
|
pub patterns: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Mandatory announcement of a plugin's protocol version, stream and action types.
|
||||||
|
#[derive(Serialize, Deserialize)]
|
||||||
|
pub struct Manifest {
|
||||||
|
// Protocol version.
|
||||||
|
// Just use the [`Hello::new`] constructor that uses this crate's current version.
|
||||||
|
pub hello: Hello,
|
||||||
|
/// Stream types that should be made available to reaction users
|
||||||
|
///
|
||||||
|
/// ```jsonnet
|
||||||
|
/// {
|
||||||
|
/// streams: {
|
||||||
|
/// my_stream: {
|
||||||
|
/// type: "..."
|
||||||
|
/// # ↑ all those exposed types
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
pub streams: BTreeSet<String>,
|
||||||
|
/// Action types that should be made available to reaction users
|
||||||
|
///
|
||||||
|
/// ```jsonnet
|
||||||
|
/// {
|
||||||
|
/// streams: {
|
||||||
|
/// mystream: {
|
||||||
|
/// filters: {
|
||||||
|
/// myfilter: {
|
||||||
|
/// actions: {
|
||||||
|
/// myaction: {
|
||||||
|
/// type: "myactiontype",
|
||||||
|
/// # ↑ all those exposed types
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// },
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
pub actions: BTreeSet<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Default, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
|
pub struct Hello {
|
||||||
|
/// Major version of the protocol
|
||||||
|
/// Increment means breaking change
|
||||||
|
pub version_major: u32,
|
||||||
|
/// Minor version of the protocol
|
||||||
|
/// Increment means reaction core can handle older version plugins
|
||||||
|
pub version_minor: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Hello {
|
||||||
|
/// Constructor that fills a [`Hello`] struct with [`crate`]'s version.
|
||||||
|
/// You should use this in your plugin [`Manifest`].
|
||||||
|
pub fn new() -> Hello {
|
||||||
|
Hello {
|
||||||
|
version_major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
|
||||||
|
version_minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Used by the reaction daemon. Permits to check compatibility between two versions.
|
||||||
|
/// Major versions must be the same between the daemon and plugin.
|
||||||
|
/// Minor version of the daemon must be greater than or equal minor version of the plugin.
|
||||||
|
pub fn is_compatible(server: &Hello, plugin: &Hello) -> std::result::Result<(), String> {
|
||||||
|
if server.version_major == plugin.version_major
|
||||||
|
&& server.version_minor >= plugin.version_minor
|
||||||
|
{
|
||||||
|
Ok(())
|
||||||
|
} else if plugin.version_major > server.version_major
|
||||||
|
|| (plugin.version_major == server.version_major
|
||||||
|
&& plugin.version_minor > server.version_minor)
|
||||||
|
{
|
||||||
|
Err("consider upgrading reaction".into())
|
||||||
|
} else {
|
||||||
|
Err("consider upgrading the plugin".into())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A clone of [`serde_json::Value`].
|
||||||
|
/// Implements From & Into [`serde_json::Value`].
|
||||||
|
#[derive(Serialize, Deserialize, Clone)]
|
||||||
|
pub enum Value {
|
||||||
|
Null,
|
||||||
|
Bool(bool),
|
||||||
|
Integer(i64),
|
||||||
|
Float(f64),
|
||||||
|
String(String),
|
||||||
|
Array(Vec<Value>),
|
||||||
|
Object(BTreeMap<String, Value>),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<JValue> for Value {
|
||||||
|
fn from(value: serde_json::Value) -> Self {
|
||||||
|
match value {
|
||||||
|
JValue::Null => Value::Null,
|
||||||
|
JValue::Bool(b) => Value::Bool(b),
|
||||||
|
JValue::Number(number) => {
|
||||||
|
if let Some(number) = number.as_i64() {
|
||||||
|
Value::Integer(number)
|
||||||
|
} else if let Some(number) = number.as_f64() {
|
||||||
|
Value::Float(number)
|
||||||
|
} else {
|
||||||
|
Value::Null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
JValue::String(s) => Value::String(s.into()),
|
||||||
|
JValue::Array(v) => Value::Array(v.into_iter().map(|e| e.into()).collect()),
|
||||||
|
JValue::Object(m) => Value::Object(m.into_iter().map(|(k, v)| (k, v.into())).collect()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Into<JValue> for Value {
|
||||||
|
fn into(self) -> JValue {
|
||||||
|
match self {
|
||||||
|
Value::Null => JValue::Null,
|
||||||
|
Value::Bool(v) => JValue::Bool(v),
|
||||||
|
Value::Integer(v) => JValue::Number(v.into()),
|
||||||
|
Value::Float(v) => JValue::Number(Number::from_f64(v).unwrap()),
|
||||||
|
Value::String(v) => JValue::String(v),
|
||||||
|
Value::Array(v) => JValue::Array(v.into_iter().map(|e| e.into()).collect()),
|
||||||
|
Value::Object(m) => JValue::Object(m.into_iter().map(|(k, v)| (k, v.into())).collect()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Represents a Stream handled by a plugin on reaction core's side.
|
||||||
|
///
|
||||||
|
/// During [`PluginInfo::load_config`], the plugin should create a [`remoc::rch::mpsc::channel`] of [`Line`].
|
||||||
|
/// It will keep the sending side for itself and put the receiving side in a [`StreamImpl`].
|
||||||
|
///
|
||||||
|
/// The plugin should start sending [`Line`]s in the channel only after [`PluginInfo::start`] has been called by reaction core.
|
||||||
|
#[derive(Debug, Serialize, Deserialize)]
|
||||||
|
pub struct StreamImpl {
|
||||||
|
pub stream: rch::mpsc::Receiver<Line>,
|
||||||
|
/// Whether this stream works standalone, or if it needs other streams or actions to be fed.
|
||||||
|
/// Defaults to true.
|
||||||
|
/// When `false`, reaction will exit if it's the last one standing.
|
||||||
|
#[serde(default = "_true")]
|
||||||
|
pub standalone: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn _true() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Messages passed from the [`StreamImpl`] of a plugin to reaction core
|
||||||
|
pub type Line = (String, Duration);
|
||||||
|
|
||||||
|
// // Filters
|
||||||
|
// // For now, plugins can't handle custom filter implementations.
|
||||||
|
// #[derive(Serialize, Deserialize)]
|
||||||
|
// pub struct FilterImpl {
|
||||||
|
// pub stream: rch::lr::Sender<Exec>,
|
||||||
|
// }
|
||||||
|
// #[derive(Serialize, Deserialize)]
|
||||||
|
// pub struct Match {
|
||||||
|
// pub match_: String,
|
||||||
|
// pub result: rch::oneshot::Sender<bool>,
|
||||||
|
// }
|
||||||
|
|
||||||
|
/// Represents an Action handled by a plugin on reaction core's side.
|
||||||
|
///
|
||||||
|
/// During [`PluginInfo::load_config`], the plugin should create a [`remoc::rch::mpsc::channel`] of [`Exec`].
|
||||||
|
/// It will keep the receiving side for itself and put the sending side in a [`ActionImpl`].
|
||||||
|
///
|
||||||
|
/// The plugin will start receiving [`Exec`]s in the channel from reaction only after [`PluginInfo::start`] has been called by reaction core.
|
||||||
|
#[derive(Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ActionImpl {
|
||||||
|
pub tx: rch::mpsc::Sender<Exec>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A [trigger](https://reaction.ppom.me/reference.html#trigger) of the Action, sent by reaction core to the plugin.
|
||||||
|
///
|
||||||
|
/// The plugin should perform the configured action for each received [`Exec`].
|
||||||
|
///
|
||||||
|
/// Any error during its execution should be logged to stderr, see [`crate#Errors`] for error handling recommandations.
|
||||||
|
#[derive(Serialize, Deserialize)]
pub struct Exec {
    pub match_: Vec<String>,
    pub time: Duration,
}

/// The main loop for a plugin.
///
/// Bootstraps the communication with reaction core on the process' stdin and stdout,
/// then holds the connection and maintains the plugin in a server state.
///
/// Your main function should only create a struct that implements [`PluginInfo`]
/// and then call [`main_loop`]:
/// ```ignore
/// #[tokio::main]
/// async fn main() {
///     let plugin = MyPlugin::default();
///     reaction_plugin::main_loop(plugin).await;
/// }
/// ```
pub async fn main_loop<T: PluginInfo + Send + Sync + 'static>(plugin_info: T) {
    // First check that we're called by reaction
    let mut args = args();
    // skip 0th argument
    let _skip = args.next();
    if args.next().is_none_or(|arg| arg != "serve") {
        eprintln!("This plugin is not meant to be called as-is.");
        eprintln!(
            "reaction daemon starts plugins itself and communicates with them on stdin, stdout and stderr."
        );
        eprintln!("See the doc on plugin configuration: https://reaction.ppom.me/plugins/");
        exit(1);
    } else {
        let (conn, mut tx, _rx): (
            _,
            remoc::rch::base::Sender<PluginInfoClient>,
            remoc::rch::base::Receiver<()>,
        ) = Connect::io(remoc::Cfg::default(), stdin(), stdout())
            .await
            .unwrap();

        let (server, client) = PluginInfoServer::new(plugin_info, 1);

        let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), conn);
        let mut exit_code = 0;
        if let Err(err) = res1 {
            eprintln!("ERROR could not send plugin info to reaction: {err}");
            exit_code = 1;
        }
        if let Err(err) = res2 {
            eprintln!("ERROR could not launch plugin service for reaction: {err}");
            exit_code = 2;
        }
        if let Err(err) = res3 {
            eprintln!("ERROR connection error with reaction: {err}");
            exit_code = 3;
        }
        exit(exit_code);
    }
}

// Errors

pub type RemoteResult<T> = Result<T, RemoteError>;

/// reaction-plugin's Error type.
#[derive(Debug, Serialize, Deserialize)]
pub enum RemoteError {
    /// A connection error that originates from [`remoc`], the crate used for communication on the plugin's `stdin`/`stdout`.
    ///
    /// You should not instantiate this type of error yourself.
    Remoc(rtc::CallError),
    /// A free String for application-specific errors.
    ///
    /// You should only instantiate this type of error yourself for errors that you encounter at startup and shutdown.
    ///
    /// Otherwise, any error during the plugin's runtime should be logged to stderr; see [`crate#Errors`] for error handling recommendations.
    Plugin(String),
}

impl Display for RemoteError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            RemoteError::Remoc(call_error) => write!(f, "communication error: {call_error}"),
            RemoteError::Plugin(err) => write!(f, "{err}"),
        }
    }
}

impl Error for RemoteError {}

impl From<String> for RemoteError {
    fn from(value: String) -> Self {
        Self::Plugin(value)
    }
}

impl From<&str> for RemoteError {
    fn from(value: &str) -> Self {
        Self::Plugin(value.into())
    }
}

impl From<rtc::CallError> for RemoteError {
    fn from(value: rtc::CallError) -> Self {
        Self::Remoc(value)
    }
}
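The `From` impls above exist so a plugin can bubble its own startup errors up with `?` and plain strings. A minimal hedged sketch of what that looks like in practice (the `open_state_file` helper is hypothetical; only `RemoteError`/`RemoteResult` come from this crate, which appears to re-export them at its root):

```rust
use reaction_plugin::{RemoteError, RemoteResult};

// Hypothetical helper: a `String` (or `&str`) error converts into
// `RemoteError::Plugin` via the `From` impls defined above.
fn open_state_file(path: &str) -> RemoteResult<std::fs::File> {
    std::fs::File::open(path)
        .map_err(|err| RemoteError::from(format!("could not open {path}: {err}")))
}
```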
plugins/reaction-plugin/src/line.rs (new file, 237 lines)
@ -0,0 +1,237 @@
//! Helper module for using templated lines (e.g. `bad password for <ip>`), like in Stream's and Action's `cmd`.
//!
//! Corresponding reaction core settings:
//! - [Stream's `cmd`](https://reaction.ppom.me/reference.html#cmd)
//! - [Action's `cmd`](https://reaction.ppom.me/reference.html#cmd-1)
//!
#[derive(Debug, PartialEq, Eq)]
enum SendItem {
    Index(usize),
    Str(String),
}

impl SendItem {
    fn min_size(&self) -> usize {
        match self {
            Self::Index(_) => 0,
            Self::Str(s) => s.len(),
        }
    }
}

/// Helper struct that transforms a template line with patterns into an instantiated line from a match.
///
/// Useful when you let the user reconstruct lines from an action, like in reaction's native actions and in the virtual plugin:
/// ```yaml
/// actions:
///   native:
///     cmd: ["iptables", "...", "<ip>"]
///
///   virtual:
///     type: virtual
///     options:
///       send: "<ip>: bad password on user <user>"
///       to: "my_virtual_stream"
/// ```
///
/// Usage example:
/// ```
/// # use reaction_plugin::line::PatternLine;
/// #
/// let template = "<ip>: bad password on user <user>".to_string();
/// let patterns = vec!["ip".to_string(), "user".to_string()];
/// let pattern_line = PatternLine::new(template, patterns);
///
/// assert_eq!(
///     pattern_line.line(vec!["1.2.3.4".to_string(), "root".to_string()]),
///     "1.2.3.4: bad password on user root".to_string(),
/// );
/// ```
///
/// You can find full examples in those plugins:
/// `reaction-plugin-virtual`,
/// `reaction-plugin-cluster`.
///
#[derive(Debug)]
pub struct PatternLine {
    line: Vec<SendItem>,
    min_size: usize,
}

impl PatternLine {
    /// Constructs a [`PatternLine`] from a template line and the list of patterns of the underlying [Filter](https://reaction.ppom.me/reference.html#filter).
    ///
    /// This list of patterns comes from [`super::ActionConfig`].
    pub fn new(template: String, patterns: Vec<String>) -> Self {
        let line = Self::_from(patterns, Vec::from([SendItem::Str(template)]));
        Self {
            min_size: line.iter().map(SendItem::min_size).sum(),
            line,
        }
    }
    fn _from(mut patterns: Vec<String>, acc: Vec<SendItem>) -> Vec<SendItem> {
        match patterns.pop() {
            None => acc,
            Some(pattern) => {
                let enclosed_pattern = format!("<{pattern}>");
                let acc = acc
                    .into_iter()
                    .flat_map(|item| match &item {
                        SendItem::Index(_) => vec![item],
                        SendItem::Str(str) => match str.find(&enclosed_pattern) {
                            Some(i) => {
                                let pattern_index = patterns.len();
                                let mut ret = vec![];

                                let (left, mid) = str.split_at(i);
                                if !left.is_empty() {
                                    ret.push(SendItem::Str(left.into()))
                                }

                                ret.push(SendItem::Index(pattern_index));

                                if mid.len() > enclosed_pattern.len() {
                                    let (_, right) = mid.split_at(enclosed_pattern.len());
                                    ret.push(SendItem::Str(right.into()))
                                }

                                ret
                            }
                            None => vec![item],
                        },
                    })
                    .collect();
                Self::_from(patterns, acc)
            }
        }
    }

    pub fn line(&self, match_: Vec<String>) -> String {
        let mut res = String::with_capacity(self.min_size);
        for item in &self.line {
            match item {
                SendItem::Index(i) => {
                    if let Some(element) = match_.get(*i) {
                        res.push_str(element);
                    }
                }
                SendItem::Str(str) => res.push_str(str),
            }
        }
        res
    }
}

#[cfg(test)]
mod tests {
    use crate::line::{PatternLine, SendItem};

    #[test]
    fn line_0_pattern() {
        let msg = "my message".to_string();
        let line = PatternLine::new(msg.clone(), vec![]);
        assert_eq!(line.line, vec![SendItem::Str(msg.clone())]);
        assert_eq!(line.min_size, msg.len());
        assert_eq!(line.line(vec![]), msg.clone());
    }

    #[test]
    fn line_1_pattern() {
        let patterns = vec![
            "ignored".into(),
            "oh".into(),
            "ignored".into(),
            "my".into(),
            "test".into(),
        ];

        let matches = vec!["yay", "oh", "my", "test", "<oh>", "<my>", "<test>"];

        let tests = [
            (
                "<oh> my test",
                1,
                vec![SendItem::Index(1), SendItem::Str(" my test".into())],
                vec![
                    ("yay", "yay my test"),
                    ("oh", "oh my test"),
                    ("my", "my my test"),
                    ("test", "test my test"),
                    ("<oh>", "<oh> my test"),
                    ("<my>", "<my> my test"),
                    ("<test>", "<test> my test"),
                ],
            ),
            (
                "oh <my> test",
                3,
                vec![
                    SendItem::Str("oh ".into()),
                    SendItem::Index(3),
                    SendItem::Str(" test".into()),
                ],
                vec![
                    ("yay", "oh yay test"),
                    ("oh", "oh oh test"),
                    ("my", "oh my test"),
                    ("test", "oh test test"),
                    ("<oh>", "oh <oh> test"),
                    ("<my>", "oh <my> test"),
                    ("<test>", "oh <test> test"),
                ],
            ),
            (
                "oh my <test>",
                4,
                vec![SendItem::Str("oh my ".into()), SendItem::Index(4)],
                vec![
                    ("yay", "oh my yay"),
                    ("oh", "oh my oh"),
                    ("my", "oh my my"),
                    ("test", "oh my test"),
                    ("<oh>", "oh my <oh>"),
                    ("<my>", "oh my <my>"),
                    ("<test>", "oh my <test>"),
                ],
            ),
        ];

        for (msg, index, expected_pl, lines) in tests {
            let pattern_line = PatternLine::new(msg.to_string(), patterns.clone());
            assert_eq!(pattern_line.line, expected_pl);

            for (match_element, line) in lines {
                for match_default in &matches {
                    let mut match_ = vec![
                        match_default.to_string(),
                        match_default.to_string(),
                        match_default.to_string(),
                        match_default.to_string(),
                        match_default.to_string(),
                    ];
                    match_[index] = match_element.to_string();
                    assert_eq!(
                        pattern_line.line(match_.clone()),
                        line,
                        "match: {match_:?}, pattern_line: {pattern_line:?}"
                    );
                }
            }
        }
    }

    #[test]
    fn line_2_pattern() {
        let pattern_line = PatternLine::new("<a> ; <b>".into(), vec!["a".into(), "b".into()]);

        let matches = ["a", "b", "ab", "<a>", "<b>"];
        for a in &matches {
            for b in &matches {
                assert_eq!(
                    pattern_line.line(vec![a.to_string(), b.to_string()]),
                    format!("{a} ; {b}"),
                );
            }
        }
    }
}
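One detail worth knowing when writing templates: `_from` scans each pattern once and splits at its first occurrence, so a later occurrence of the same `<pattern>` stays literal. A small hedged sketch using the same public API as the doctest above:

```rust
use reaction_plugin::line::PatternLine;

// Only the first `<ip>` is substituted; the second one is left verbatim.
let pl = PatternLine::new("<ip> then <ip>".to_string(), vec!["ip".to_string()]);
assert_eq!(pl.line(vec!["1.2.3.4".to_string()]), "1.2.3.4 then <ip>");
```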
plugins/reaction-plugin/src/shutdown.rs (new file, 162 lines)
@ -0,0 +1,162 @@
//! Helper module that provides structures to ease the quitting process when running multiple tokio tasks.
//!
//! It defines a [`ShutdownController`] that keeps track of ongoing tasks, asks them to shut down, and waits for all of them to quit.
//!
//! You can have it as an attribute of your plugin struct.
//! ```
//! struct MyPlugin {
//!     shutdown: ShutdownController
//! }
//! ```
//!
//! You can then give a [`ShutdownToken`] to other tasks when creating them:
//!
//! ```
//! impl PluginInfo for MyPlugin {
//!     async fn start(&mut self) -> RemoteResult<()> {
//!         let token = self.shutdown.token();
//!
//!         tokio::spawn(async move {
//!             token.wait().await;
//!             eprintln!("DEBUG shutdown asked to quit, now quitting")
//!         })
//!     }
//! }
//! ```
//!
//! On closing, calling [`ShutdownController::ask_shutdown`] will inform all tasks waiting on [`ShutdownToken::wait`] that it's time to leave.
//! Then we can wait for [`ShutdownController::wait_all_task_shutdown`] to complete.
//!
//! ```
//! impl PluginInfo for MyPlugin {
//!     async fn close(self) -> RemoteResult<()> {
//!         self.shutdown.ask_shutdown();
//!         self.shutdown.wait_all_task_shutdown().await;
//!         Ok(())
//!     }
//! }
//! ```
//!
//! [`ShutdownDelegate::handle_quit_signals`] handles SIGHUP, SIGINT and SIGTERM by gracefully shutting down tasks.

use tokio::signal::unix::{SignalKind, signal};
use tokio_util::{
    sync::{CancellationToken, WaitForCancellationFuture},
    task::task_tracker::{TaskTracker, TaskTrackerToken},
};

/// Keeps track of ongoing tasks, asks them to shut down, and waits for all of them to quit.
/// Thin wrapper around [`tokio_util::sync::CancellationToken`] and [`tokio_util::task::task_tracker::TaskTracker`].
#[derive(Default, Clone)]
pub struct ShutdownController {
    shutdown_notifyer: CancellationToken,
    task_tracker: TaskTracker,
}

impl ShutdownController {
    pub fn new() -> Self {
        Self::default()
    }

    /// Asks all tasks to quit.
    pub fn ask_shutdown(&self) {
        self.shutdown_notifyer.cancel();
        self.task_tracker.close();
    }

    /// Waits for all tasks to quit.
    /// This may return even without [`ShutdownController::ask_shutdown`] having been called
    /// first, if all tasks quit by themselves.
    pub async fn wait_all_task_shutdown(self) {
        self.task_tracker.close();
        self.task_tracker.wait().await;
    }

    /// Returns a new shutdown token, to be held by a task.
    pub fn token(&self) -> ShutdownToken {
        ShutdownToken::new(self.shutdown_notifyer.clone(), self.task_tracker.token())
    }

    /// Returns a [`ShutdownDelegate`], which is able to ask for shutdown
    /// without counting as a task that needs to be awaited.
    pub fn delegate(&self) -> ShutdownDelegate {
        ShutdownDelegate(self.shutdown_notifyer.clone())
    }

    /// Returns a future that resolves only when a shutdown request happened.
    pub fn wait(&self) -> WaitForCancellationFuture<'_> {
        self.shutdown_notifyer.cancelled()
    }
}

/// Permits asking for shutdown without counting as a task that needs to be awaited.
pub struct ShutdownDelegate(CancellationToken);

impl ShutdownDelegate {
    /// Asks all tasks to quit.
    pub fn ask_shutdown(&self) {
        self.0.cancel();
    }

    /// Ensures [`Self::ask_shutdown`] is called whenever we receive SIGHUP,
    /// SIGTERM or SIGINT. Spawns a task that consumes self.
    pub fn handle_quit_signals(self) -> Result<(), String> {
        let err_str = |err| format!("could not register signal: {err}");

        let mut sighup = signal(SignalKind::hangup()).map_err(err_str)?;
        let mut sigint = signal(SignalKind::interrupt()).map_err(err_str)?;
        let mut sigterm = signal(SignalKind::terminate()).map_err(err_str)?;

        tokio::spawn(async move {
            let signal = tokio::select! {
                _ = sighup.recv() => "SIGHUP",
                _ = sigint.recv() => "SIGINT",
                _ = sigterm.recv() => "SIGTERM",
            };
            eprintln!("received {signal}, closing...");
            self.ask_shutdown();
        });
        Ok(())
    }
}

/// Created by a [`ShutdownController`].
/// Serves two purposes:
///
/// - Wait for a shutdown request to happen with [`Self::wait`]
/// - Keep track of the current task. While this token is held,
///   [`ShutdownController::wait_all_task_shutdown`] will block.
#[derive(Clone)]
pub struct ShutdownToken {
    shutdown_notifyer: CancellationToken,
    _task_tracker_token: TaskTrackerToken,
}

impl ShutdownToken {
    fn new(shutdown_notifyer: CancellationToken, _task_tracker_token: TaskTrackerToken) -> Self {
        Self {
            shutdown_notifyer,
            _task_tracker_token,
        }
    }

    /// Returns the underlying [`CancellationToken`] and [`TaskTrackerToken`], consuming self.
    pub fn split(self) -> (CancellationToken, TaskTrackerToken) {
        (self.shutdown_notifyer, self._task_tracker_token)
    }

    /// Returns a future that resolves only when a shutdown request happened.
    pub fn wait(&self) -> WaitForCancellationFuture<'_> {
        self.shutdown_notifyer.cancelled()
    }

    /// Returns true if a shutdown request happened.
    pub fn is_shutdown(&self) -> bool {
        self.shutdown_notifyer.is_cancelled()
    }

    /// Asks all tasks to quit.
    pub fn ask_shutdown(&self) {
        self.shutdown_notifyer.cancel();
    }
}
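Putting the three pieces together, a hedged end-to-end sketch (assumes a tokio runtime and the same `tokio`/`tokio-util` dependencies this module already uses):

```rust
#[tokio::main]
async fn main() {
    let shutdown = ShutdownController::new();

    // SIGHUP/SIGINT/SIGTERM now trigger ask_shutdown() for us.
    shutdown.delegate().handle_quit_signals().expect("signal setup");

    // The task holds a ShutdownToken, so wait_all_task_shutdown() below
    // blocks until this task has actually finished and dropped its token.
    let token = shutdown.token();
    tokio::spawn(async move {
        token.wait().await;
        eprintln!("worker: shutdown requested, cleaning up");
    });

    shutdown.wait_all_task_shutdown().await;
}
```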
plugins/reaction-plugin/src/time.rs
@ -1,7 +1,13 @@
-use chrono::TimeDelta;
+//! This module provides [`parse_duration`], which parses durations in reaction's format (ie. `6h`, `3 days`)
+//!
+//! Like in those reaction core settings:
+//! - [Filters' `retryperiod`](https://reaction.ppom.me/reference.html#retryperiod)
+//! - [Actions' `after`](https://reaction.ppom.me/reference.html#after).
+
+use std::time::Duration;

 /// Parses the &str argument as a Duration
-/// Returns Ok(TimeDelta) if successful, or Err(String).
+/// Returns Ok(Duration) if successful, or Err(String).
 ///
 /// Format is defined as follows: `<integer> <unit>`
 /// - whitespace between the integer and unit is optional
@ -12,7 +18,7 @@ use chrono::TimeDelta;
 /// - `m` / `min` / `mins` / `minute` / `minutes`
 /// - `h` / `hour` / `hours`
 /// - `d` / `day` / `days`
-pub fn parse_duration(d: &str) -> Result<TimeDelta, String> {
+pub fn parse_duration(d: &str) -> Result<Duration, String> {
     let d_trimmed = d.trim();
     let chars = d_trimmed.as_bytes();
     let mut value = 0;
@ -24,14 +30,14 @@ pub fn parse_duration(d: &str) -> Result<TimeDelta, String> {
     if i == 0 {
         return Err(format!("duration '{}' doesn't start with digits", d));
     }
-    let ok_as = |func: fn(i64) -> TimeDelta| -> Result<_, String> { Ok(func(value as i64)) };
+    let ok_as = |func: fn(u64) -> Duration| -> Result<_, String> { Ok(func(value as u64)) };

     match d_trimmed[i..].trim() {
-        "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(TimeDelta::milliseconds),
-        "s" | "sec" | "secs" | "second" | "seconds" => ok_as(TimeDelta::seconds),
-        "m" | "min" | "mins" | "minute" | "minutes" => ok_as(TimeDelta::minutes),
-        "h" | "hour" | "hours" => ok_as(TimeDelta::hours),
-        "d" | "day" | "days" => ok_as(TimeDelta::days),
+        "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(Duration::from_millis),
+        "s" | "sec" | "secs" | "second" | "seconds" => ok_as(Duration::from_secs),
+        "m" | "min" | "mins" | "minute" | "minutes" => ok_as(Duration::from_mins),
+        "h" | "hour" | "hours" => ok_as(Duration::from_hours),
+        "d" | "day" | "days" => ok_as(|d: u64| Duration::from_hours(d * 24)),
         unit => Err(format!(
             "unit {} not recognised. must be one of s/sec/seconds, m/min/minutes, h/hours, d/days",
             unit
@ -42,8 +48,6 @@ pub fn parse_duration(d: &str) -> Result<TimeDelta, String> {
 #[cfg(test)]
 mod tests {

-    use chrono::TimeDelta;
-
     use super::*;

     #[test]
@ -53,13 +57,13 @@ mod tests {

     #[test]
     fn parse_duration_test() {
-        assert_eq!(parse_duration("1s"), Ok(TimeDelta::seconds(1)));
-        assert_eq!(parse_duration("12s"), Ok(TimeDelta::seconds(12)));
-        assert_eq!(parse_duration(" 12 secs "), Ok(TimeDelta::seconds(12)));
-        assert_eq!(parse_duration("2m"), Ok(TimeDelta::minutes(2)));
-        assert_eq!(parse_duration("6 hours"), Ok(TimeDelta::hours(6)));
-        assert_eq!(parse_duration("1d"), Ok(TimeDelta::days(1)));
-        assert_eq!(parse_duration("365d"), Ok(TimeDelta::days(365)));
+        assert_eq!(parse_duration("1s"), Ok(Duration::from_secs(1)));
+        assert_eq!(parse_duration("12s"), Ok(Duration::from_secs(12)));
+        assert_eq!(parse_duration(" 12 secs "), Ok(Duration::from_secs(12)));
+        assert_eq!(parse_duration("2m"), Ok(Duration::from_mins(2)));
+        assert_eq!(parse_duration("6 hours"), Ok(Duration::from_hours(6)));
+        assert_eq!(parse_duration("1d"), Ok(Duration::from_hours(1 * 24)));
+        assert_eq!(parse_duration("365d"), Ok(Duration::from_hours(365 * 24)));

         assert!(parse_duration("d 3").is_err());
         assert!(parse_duration("d3").is_err());
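A quick sanity check of the format described in the doc comment above; a hedged sketch that mirrors the module's own tests (assumes `parse_duration` and `std::time::Duration` are in scope):

```rust
// Whitespace between the integer and the unit is optional,
// and "d" is translated to 24 hours.
assert_eq!(parse_duration("90 s"), Ok(Duration::from_secs(90)));
assert_eq!(parse_duration("2d"), Ok(Duration::from_secs(2 * 24 * 3600)));
// Unknown units are rejected with a descriptive error.
assert!(parse_duration("2 fortnights").is_err());
```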
release.py
@ -1,11 +1,11 @@
 #!/usr/bin/env nix-shell
-#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign cargo-cross rustup cargo-deb
+#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign docker cargo-deb
 import argparse
 import http.client
 import json
 import os
-import subprocess
 import shutil
+import subprocess
 import sys
 import tempfile
@ -56,14 +56,14 @@ def main():
         print("exiting.")
         sys.exit(1)

+    # Minisign password
+    cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True)
+    minisign_password = cmd.stdout
+
     if args.publish:
         # Git push
         run_command(["git", "push", "--tags"])

-        # Minisign password
-        cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True)
-        minisign_password = cmd.stdout
-
         # Create directory
         run_command(
             [
@ -86,8 +86,11 @@ def main():
             pass

     architectures = {
-        "x86_64-unknown-linux-musl": "amd64",
-        "aarch64-unknown-linux-musl": "arm64",
+        "x86_64-unknown-linux-gnu": "amd64",
+        # I would like to build for those targets instead:
+        # "x86_64-unknown-linux-musl": "amd64",
+        # "aarch64-unknown-linux-musl": "arm64",
+        # "arm-unknown-linux-gnueabihf": "armhf",
     }

     all_files = []
@ -99,9 +102,8 @@ def main():

 You'll need to install minisign to check the authenticity of the package.

-After installing reaction, create your configuration file at
-`/etc/reaction.json`, `/etc/reaction.jsonnet` or `/etc/reaction.yml`.
-You can also provide a directory containing multiple configuration files in the previous formats.
+After installing reaction, create your configuration file(s) in JSON, YAML or JSONnet in the
+`/etc/reaction/` directory.

 See <https://reaction.ppom.me> for documentation.

 Reload systemd:
@ -111,40 +113,63 @@ $ sudo systemctl daemon-reload

 Then enable and start reaction with this command
 ```bash
-# replace `reaction.jsonnet` with the name of your configuration file in /etc/
-$ sudo systemctl enable --now reaction@reaction.jsonnet.service
+# write first your configuration file(s) in /etc/reaction/
+$ sudo systemctl enable --now reaction.service
 ```
 """.strip(),
     ]

-    for (architecture_rs, architecture_pretty) in architectures.items():
+    for architecture_rs, architecture_pretty in architectures.items():
         # Cargo clean
-        run_command(["cargo", "clean"])
+        # run_command(["cargo", "clean"])

-        # Install toolchain
-        run_command(
-            [
-                "rustup",
-                "toolchain",
-                "install",
-                f"stable-{architecture_rs}",
-                "--force-non-host",  # I know, I know!
-                "--profile",
-                "minimal",
-            ]
-        )
-
-        # Build
-        run_command(["cross", "build", "--release", "--target", architecture_rs])
+        # Build docker image
+        run_command(["docker", "pull", "rust:bookworm"])
+        run_command(["docker", "build", "-t", "rust:reaction", "."])
+
+        binaries = [
+            # Binaries
+            "reaction",
+            "reaction-plugin-virtual",
+            "reaction-plugin-ipset",
+        ]
+
+        # Build
+        run_command(
+            [
+                "docker",
+                "run",
+                "--rm",
+                "-u", str(os.getuid()),
+                "-v", ".:/reaction",
+                "rust:reaction",
+                "sh", "-c",
+                " && ".join([
+                    f"cargo build --release --target {architecture_rs} --package {binary}"
+                    for binary in binaries
+                ])
+            ]
+        )

         # Build .deb
-        cmd = run_command(
-            ["cargo-deb", f"--target={architecture_rs}", "--no-build", "--no-strip"]
-        )
+        debs = [
+            "reaction",
+            "reaction-plugin-ipset",
+        ]
+        for deb in debs:
+            cmd = run_command(
+                [
+                    "cargo-deb",
+                    "--target", architecture_rs,
+                    "--package", deb,
+                    "--no-build",
+                    "--no-strip"
+                ]
+            )

         deb_dir = os.path.join("./target", architecture_rs, "debian")
-        deb_name = [f for f in os.listdir(deb_dir) if f.endswith(".deb")][0]
-        deb_path = os.path.join(deb_dir, deb_name)
+        deb_names = [f for f in os.listdir(deb_dir) if f.endswith(".deb")]
+        deb_paths = [os.path.join(deb_dir, deb_name) for deb_name in deb_names]

         # Archive
         files_path = os.path.join("./target", architecture_rs, "release")
@ -158,11 +183,7 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service
         except FileExistsError:
             pass

-        files = [
-            # Binaries
-            "reaction",
-            "nft46",
-            "ip46tables",
+        files = binaries + [
             # Shell completion
             "reaction.bash",
             "reaction.fish",
@ -188,16 +209,17 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service

         os.chdir(root_dir)

-        if args.publish:
-            # Sign
-            run_command(
-                ["minisign", "-Sm", deb_path, tar_path],
-                text=True,
-                input=minisign_password,
-            )
-            deb_sig = f"{deb_path}.minisig"
-            tar_sig = f"{tar_path}.minisig"
+        # Sign
+        run_command(
+            ["minisign", "-Sm", tar_path] + deb_paths,
+            text=True,
+            input=minisign_password,
+        )
+        deb_sig_paths = [f"{deb_path}.minisig" for deb_path in deb_paths]
+        deb_sig_names = [f"{deb_name}.minisig" for deb_name in deb_names]
+        tar_sig = f"{tar_path}.minisig"

+        if args.publish:
             # Push
             run_command(
                 [
@ -205,18 +227,25 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service
                     "-az",  # "-e", "ssh -J pica01",
                     tar_path,
                     tar_sig,
-                    deb_path,
-                    deb_sig,
+                ]
+                + deb_paths
+                + deb_sig_paths
+                + [
                     f"akesi:/var/www/static/reaction/releases/{tag}/",
                 ]
             )
+        else:
+            # Copy
+            run_command(["cp", tar_path, tar_sig] + deb_paths + deb_sig_paths + [local_dir])

-        all_files.extend([tar_path, tar_sig, deb_path, deb_sig])
+        all_files.extend([tar_path, tar_sig])
+        all_files.extend(deb_paths)
+        all_files.extend(deb_sig_paths)

         # Instructions
         instructions.append(
             f"""
 ## Tar installation ({architecture_pretty} linux)

 ```bash
@ -224,30 +253,42 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{tar_name} \\
 -O https://static.ppom.me/reaction/releases/{tag}/{tar_name}.minisig \\
 && minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {tar_name} \\
 && rm {tar_name}.minisig \\
-&& cd {tar_name} \\
+&& tar xvf {tar_name} \\
+&& cd {pkg_name} \\
 && sudo make install
 ```
+
+If you want to install the ipset plugin as well:
+```bash
+sudo apt install -y libipset-dev && sudo make install-ipset
+```
 """.strip()
         )

         instructions.append(
             f"""
 ## Debian installation ({architecture_pretty} linux)

 ```bash
-curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\
-    -O https://static.ppom.me/reaction/releases/{tag}/{deb_name}.minisig \\
-&& minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {deb_name} \\
-&& rm {deb_name}.minisig \\
-&& sudo apt install ./{deb_name}
+curl \\
+{"\n".join([
+    f"    -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\"
+    for deb_name in deb_names + deb_sig_names
+])}
+{"\n".join([
+    f"    && minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {deb_name} \\"
+    for deb_name in deb_names
+])}
+&& rm {" ".join(deb_sig_names)} \\
+&& sudo apt install {" ".join([f"./{deb_name}" for deb_name in deb_names])}
 ```
+
+*You can also use [this third-party package repository](https://packages.azlux.fr).*
 """.strip()
         )
-        else:
-            # Copy
-            run_command(["cp", tar_path, deb_path, local_dir])

     if not args.publish:
+        print("\n\n".join(instructions))
         return

     # Release
shell.nix (new file, 14 lines)
@ -0,0 +1,14 @@
# This shell.nix for NixOS users is only needed when building reaction-plugin-ipset
with import <nixpkgs> {};
pkgs.mkShell {
  name = "libipset";
  buildInputs = [
    ipset
    nftables
    clang
  ];
  src = null;
  shellHook = ''
    export LIBCLANG_PATH="$(clang -print-file-name=libclang.so)"
  '';
}
src/concepts/action.rs
@ -1,23 +1,23 @@
-use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc};
+use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc, time::Duration};

-use chrono::TimeDelta;
+use reaction_plugin::{ActionConfig, time::parse_duration};
 use serde::{Deserialize, Serialize};
+use serde_json::Value;
 use tokio::process::Command;

-use super::{parse_duration::*, PatternType};
-use super::{Match, Pattern};
+use super::{Match, Pattern, PatternType};

 #[derive(Clone, Debug, Default, Deserialize, Serialize)]
 #[serde(deny_unknown_fields)]
 pub struct Action {
+    #[serde(default)]
     pub cmd: Vec<String>,

     // TODO one shot time deserialization
     #[serde(skip_serializing_if = "Option::is_none")]
     pub after: Option<String>,
     #[serde(skip)]
-    pub after_duration: Option<TimeDelta>,
+    pub after_duration: Option<Duration>,

     #[serde(
         rename = "onexit",
@ -41,6 +41,12 @@ pub struct Action {
     pub filter_name: String,
     #[serde(skip)]
     pub stream_name: String,
+
+    // Plugin-specific
+    #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")]
+    pub action_type: Option<String>,
+    #[serde(default, skip_serializing_if = "Value::is_null")]
+    pub options: Value,
 }

 fn set_false() -> bool {
@ -52,6 +58,12 @@ fn is_false(b: &bool) -> bool {
 }

 impl Action {
+    pub fn is_plugin(&self) -> bool {
+        self.action_type
+            .as_ref()
+            .is_some_and(|action_type| action_type != "cmd")
+    }
+
     pub fn setup(
         &mut self,
         stream_name: &str,
@ -82,11 +94,18 @@ impl Action {
             return Err("character '.' is not allowed in filter name".into());
         }

-        if self.cmd.is_empty() {
-            return Err("cmd is empty".into());
-        }
-        if self.cmd[0].is_empty() {
-            return Err("cmd's first item is empty".into());
+        if !self.is_plugin() {
+            if self.cmd.is_empty() {
+                return Err("cmd is empty".into());
+            }
+            if self.cmd[0].is_empty() {
+                return Err("cmd's first item is empty".into());
+            }
+            if !self.options.is_null() {
+                return Err("can't define options without a plugin type".into());
+            }
+        } else if !self.cmd.is_empty() {
+            return Err("can't define a cmd and a plugin type".into());
         }

         if let Some(after) = &self.after {
@ -138,6 +157,24 @@ impl Action {
         cmd.args(&computed_command[1..]);
         cmd
     }
+
+    pub fn to_action_config(&self) -> Result<ActionConfig, String> {
+        Ok(ActionConfig {
+            stream_name: self.stream_name.clone(),
+            filter_name: self.filter_name.clone(),
+            action_name: self.name.clone(),
+            action_type: self
+                .action_type
+                .clone()
+                .ok_or_else(|| format!("action {} doesn't load a plugin. this is a bug!", self))?,
+            config: self.options.clone().into(),
+            patterns: self
+                .patterns
+                .iter()
+                .map(|pattern| pattern.name.clone())
+                .collect(),
+        })
+    }
 }

 impl PartialEq for Action {
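The new `setup` checks above make `cmd` and the plugin `type` mutually exclusive. A hedged illustration of the rule (field values are illustrative; `Action` and `is_plugin` are the items defined in this file, and `Action` derives `Default`):

```rust
// A native action: `cmd` set, no plugin `type`, no `options`.
let native = Action {
    cmd: vec!["iptables".into(), "-A".into()],
    ..Action::default()
};
assert!(!native.is_plugin());

// A plugin action: `type` set, so `cmd` must stay empty and
// `options` may carry plugin-specific configuration.
let plugin = Action {
    action_type: Some("ipset".into()),
    ..Action::default()
};
assert!(plugin.is_plugin());
```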
src/concepts/config.rs
@ -1,5 +1,5 @@
 use std::{
-    collections::{btree_map::Entry, BTreeMap},
+    collections::{BTreeMap, btree_map::Entry},
     fs::File,
     io,
     path::Path,
@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize};
 use thiserror::Error;
 use tracing::{debug, error, info, warn};

-use super::{Pattern, Stream};
+use super::{Pattern, Plugin, Stream, merge_attrs};

 pub type Patterns = BTreeMap<String, Arc<Pattern>>;

@ -24,6 +24,9 @@ pub struct Config {
     #[serde(default = "dot", skip_serializing_if = "String::is_empty")]
     pub state_directory: String,

+    #[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
+    pub plugins: BTreeMap<String, Plugin>,
+
     #[serde(default)]
     pub patterns: Patterns,

@ -46,13 +49,30 @@ fn dot() -> String {

 impl Config {
     fn merge(&mut self, mut other: Config) -> Result<(), String> {
+        for (key, plugin) in other.plugins.into_iter() {
+            match self.plugins.entry(key) {
+                Entry::Vacant(e) => {
+                    e.insert(plugin);
+                }
+                Entry::Occupied(e) => {
+                    return Err(format!(
+                        "plugin {} is already defined. plugin definitions can't be spread across multiple files.",
+                        e.key()
+                    ));
+                }
+            }
+        }
+
         for (key, pattern) in other.patterns.into_iter() {
             match self.patterns.entry(key) {
                 Entry::Vacant(e) => {
                     e.insert(pattern);
                 }
                 Entry::Occupied(e) => {
-                    return Err(format!("pattern {} is already defined. pattern definitions can't be spread accross multiple files.", e.key()));
+                    return Err(format!(
+                        "pattern {} is already defined. pattern definitions can't be spread across multiple files.",
+                        e.key()
+                    ));
                 }
             }
         }
@ -73,25 +93,19 @@ impl Config {
         self.start.append(&mut other.start);
         self.stop.append(&mut other.stop);

-        if !(self.state_directory == dot()
-            || other.state_directory == dot()
-            || self.state_directory == other.state_directory)
-        {
-            return Err("state_directory have conflicting definitions".into());
-        }
-        if self.state_directory == dot() {
-            self.state_directory = other.state_directory;
-        }
+        self.state_directory = merge_attrs(
+            self.state_directory.clone(),
+            other.state_directory,
+            ".".into(),
+            "state_directory",
+        )?;

-        if !(self.concurrency == num_cpus::get()
-            || other.concurrency == num_cpus::get()
-            || self.concurrency == other.concurrency)
-        {
-            return Err("concurrency have conflicting definitions".into());
-        }
-        if self.concurrency == num_cpus::get() {
-            self.concurrency = other.concurrency;
-        }
+        self.concurrency = merge_attrs(
+            self.concurrency,
+            other.concurrency,
+            num_cpus::get(),
+            "concurrency",
+        )?;

         Ok(())
     }
@ -104,6 +118,10 @@ impl Config {
         // Nullify this useless field
         self._definitions = serde_json::Value::Null;

+        for (key, value) in &mut self.plugins {
+            value.setup(key)?;
+        }
+
         if self.patterns.is_empty() {
             return Err("no patterns configured".into());
         }
@ -327,7 +345,7 @@ enum ConfigError {
 mod jsonnet {
     use std::path::Path;

-    use jrsonnet_evaluator::{error::LocError, EvaluationState, FileImportResolver};
+    use jrsonnet_evaluator::{EvaluationState, FileImportResolver, error::LocError};

     use super::ConfigError;

src/concepts/filter.rs
@ -4,14 +4,14 @@ use std::{
     fmt::Display,
     hash::Hash,
     sync::Arc,
+    time::Duration,
 };

-use chrono::TimeDelta;
+use reaction_plugin::time::parse_duration;
 use regex::Regex;
 use serde::{Deserialize, Serialize};

-use super::{parse_duration, PatternType};
-use super::{Action, Match, Pattern, Patterns};
+use super::{Action, Match, Pattern, PatternType, Patterns};

 #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
 pub enum Duplicate {
@ -30,7 +30,7 @@ pub enum Duplicate {
 #[serde(deny_unknown_fields)]
 pub struct Filter {
     #[serde(skip)]
-    pub longuest_action_duration: TimeDelta,
+    pub longuest_action_duration: Duration,
     #[serde(skip)]
     pub has_ip: bool,
@ -47,7 +47,7 @@ pub struct Filter {
     #[serde(rename = "retryperiod", skip_serializing_if = "Option::is_none")]
     pub retry_period: Option<String>,
     #[serde(skip)]
-    pub retry_duration: Option<TimeDelta>,
+    pub retry_duration: Option<Duration>,

     #[serde(default)]
     pub duplicate: Duplicate,
@ -58,6 +58,11 @@ pub struct Filter {
     pub name: String,
     #[serde(skip)]
     pub stream_name: String,
+    // // Plugin-specific
+    // #[serde(default, rename = "type")]
+    // pub filter_type: Option<String>,
+    // #[serde(default = "null_value")]
+    // pub options: Value,
 }

 impl Filter {
@ -121,7 +126,7 @@ impl Filter {
         if let Some(retry_period) = &self.retry_period {
             self.retry_duration = Some(
                 parse_duration(retry_period)
-                    .map_err(|err| format!("failed to parse retry time: {}", err))?,
+                    .map_err(|err| format!("failed to parse retry period: {}", err))?,
             );
             self.retry_period = None;
         }
@ -155,9 +160,9 @@ impl Filter {
             }
         } else if !first && new_patterns.contains(pattern) {
             return Err(format!(
                 "pattern {} is present in the first regex but is not present in a following regex. all regexes should contain the same set of patterns",
                 &pattern.name_with_braces()
             ));
         }
         regex_buf = regex_buf.replacen(pattern.name_with_braces(), &pattern.regex, 1);
     }
@ -182,10 +187,12 @@ impl Filter {
             .any(|action| action.ipv4only || action.ipv6only);

         self.longuest_action_duration =
-            self.actions.values().fold(TimeDelta::seconds(0), |acc, v| {
-                v.after_duration
-                    .map_or(acc, |v| if v > acc { v } else { acc })
-            });
+            self.actions
+                .values()
+                .fold(Duration::from_secs(0), |acc, v| {
+                    v.after_duration
+                        .map_or(acc, |v| if v > acc { v } else { acc })
+                });

         Ok(())
     }
@ -198,12 +205,12 @@ impl Filter {
         for pattern in self.patterns.as_ref() {
             // if the pattern is in an optional part of the regex,
             // there may be no captured group for it.
-            if let Some(match_) = matches.name(&pattern.name) {
-                if !pattern.is_ignore(match_.as_str()) {
-                    let mut match_ = match_.as_str().to_string();
-                    pattern.normalize(&mut match_);
-                    result.push(match_);
-                }
-            }
+            if let Some(match_) = matches.name(&pattern.name)
+                && !pattern.is_ignore(match_.as_str())
+            {
+                let mut match_ = match_.as_str().to_string();
+                pattern.normalize(&mut match_);
+                result.push(match_);
+            }
         }
         if result.len() == self.patterns.len() {
@ -404,10 +411,10 @@ impl Filter {
 #[cfg(test)]
 pub mod tests {
     use crate::concepts::action::tests::{ok_action, ok_action_with_after};
+    use crate::concepts::pattern::PatternIp;
     use crate::concepts::pattern::tests::{
         boubou_pattern_with_ignore, default_pattern, number_pattern, ok_pattern_with_ignore,
     };
-    use crate::concepts::pattern::PatternIp;

     use super::*;

@ -475,14 +482,14 @@ pub mod tests {
         let name = "name".to_string();
         let empty_patterns = Patterns::new();
         let minute_str = "1m".to_string();
-        let minute = TimeDelta::seconds(60);
-        let two_minutes = TimeDelta::seconds(60 * 2);
+        let minute = Duration::from_secs(60);
+        let two_minutes = Duration::from_secs(60 * 2);
         let two_minutes_str = "2m".to_string();

         // duration 0
         filter = ok_filter();
         filter.setup(&name, &name, &empty_patterns).unwrap();
-        assert_eq!(filter.longuest_action_duration, TimeDelta::default());
+        assert_eq!(filter.longuest_action_duration, Duration::default());

         let minute_action = ok_action_with_after(minute_str.clone(), &minute_str);

@ -703,24 +710,32 @@ pub mod tests {
             Ok(vec!("b".into()))
         );
         // Doesn't match
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())]))
+                .is_err()
+        );
         // Ignored match
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())]))
+                .is_err()
+        );
         // Bad pattern
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())]))
+                .is_err()
+        );
         // Bad number of patterns
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([
-                (pattern.clone(), "b".into()),
-                (boubou.clone(), "bou".into()),
-            ]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([
+                    (pattern.clone(), "b".into()),
+                    (boubou.clone(), "bou".into()),
+                ]))
+                .is_err()
+        );
         // Bad number of patterns
         assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err());

@ -748,34 +763,42 @@ pub mod tests {
             Ok(vec!("bou".into(), "b".into()))
         );
         // Doesn't match
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([
-                (pattern.clone(), "abc".into()),
-                (boubou.clone(), "bou".into()),
-            ]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([
+                    (pattern.clone(), "abc".into()),
+                    (boubou.clone(), "bou".into()),
+                ]))
+                .is_err()
+        );
         // Ignored match
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([
-                (pattern.clone(), "b".into()),
-                (boubou.clone(), "boubou".into()),
-            ]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([
+                    (pattern.clone(), "b".into()),
+                    (boubou.clone(), "boubou".into()),
+                ]))
+                .is_err()
+        );
         // Bad pattern
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([
-                (pattern.clone(), "b".into()),
-                (number_pattern.clone(), "1".into()),
-            ]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([
+                    (pattern.clone(), "b".into()),
+                    (number_pattern.clone(), "1".into()),
+                ]))
+                .is_err()
+        );
         // Bad number of patterns
-        assert!(filter
-            .get_match_from_patterns(BTreeMap::from([
-                (pattern.clone(), "b".into()),
-                (boubou.clone(), "bou".into()),
-                (number_pattern.clone(), "1".into()),
-            ]))
-            .is_err());
+        assert!(
+            filter
+                .get_match_from_patterns(BTreeMap::from([
+                    (pattern.clone(), "b".into()),
+                    (boubou.clone(), "bou".into()),
+                    (number_pattern.clone(), "1".into()),
+                ]))
+                .is_err()
+        );
         // Bad number of patterns
         assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err());
src/concepts/mod.rs
@ -1,21 +1,22 @@
 mod action;
 mod config;
 mod filter;
-mod parse_duration;
 mod pattern;
+mod plugin;
 mod stream;

+use std::fmt::Debug;
+
+use serde::{Deserialize, Serialize};
+
 pub use action::Action;
 pub use config::{Config, Patterns};
 pub use filter::{Duplicate, Filter};
-use parse_duration::parse_duration;
 pub use pattern::{Pattern, PatternType};
-use serde::{Deserialize, Serialize};
+pub use plugin::Plugin;
 pub use stream::Stream;
+pub use treedb::time::{Time, now};

-use chrono::{DateTime, Local};
-
-pub type Time = DateTime<Local>;
 pub type Match = Vec<String>;

 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
@ -24,5 +25,66 @@ pub struct MatchTime {
     pub t: Time,
 }

+fn merge_attrs<A: Default + Debug + PartialEq + Eq + Clone>(
+    this: A,
+    other: A,
+    default: A,
+    name: &str,
+) -> Result<A, String> {
+    if !(this == default || other == default || this == other) {
+        return Err(format!(
+            "'{name}' has conflicting definitions: '{this:?}', '{other:?}'"
+        ));
+    }
+    if this == default {
+        return Ok(other);
+    }
+    Ok(this)
+}
+
 #[cfg(test)]
 pub use filter::tests as filter_tests;
+
+#[cfg(test)]
+mod tests {
+    use crate::concepts::merge_attrs;
+
+    #[test]
+    fn test_merge_attrs() {
+        assert_eq!(merge_attrs(None::<String>, None, None, "t"), Ok(None));
+        assert_eq!(
+            merge_attrs(Some("coucou"), None, None, "t"),
+            Ok(Some("coucou"))
+        );
+        assert_eq!(
+            merge_attrs(None, Some("coucou"), None, "t"),
+            Ok(Some("coucou"))
+        );
+        assert_eq!(
+            merge_attrs(Some("coucou"), Some("coucou"), None, "t"),
+            Ok(Some("coucou"))
+        );
+        assert_eq!(
+            merge_attrs(Some("coucou"), Some("hello"), None, "t"),
+            Err("'t' has conflicting definitions: 'Some(\"coucou\")', 'Some(\"hello\")'".into())
+        );
+
+        assert_eq!(merge_attrs("", "", "", "t"), Ok(""));
+        assert_eq!(merge_attrs("coucou", "", "", "t"), Ok("coucou"));
+        assert_eq!(merge_attrs("", "coucou", "", "t"), Ok("coucou"));
+        assert_eq!(merge_attrs("coucou", "coucou", "", "t"), Ok("coucou"));
+        assert_eq!(
+            merge_attrs("coucou", "hello", "", "t"),
+            Err("'t' has conflicting definitions: '\"coucou\"', '\"hello\"'".into())
+        );
+
+        assert_eq!(merge_attrs(0, 0, 0, "t"), Ok(0));
+        assert_eq!(merge_attrs(5, 0, 0, "t"), Ok(5));
+        assert_eq!(merge_attrs(0, 5, 0, "t"), Ok(5));
+        assert_eq!(merge_attrs(5, 5, 0, "t"), Ok(5));
+        assert_eq!(
+            merge_attrs(5, 6, 0, "t"),
+            Err("'t' has conflicting definitions: '5', '6'".into())
+        );
+    }
+}

src/concepts/pattern.rs
@ -189,7 +189,9 @@ impl PatternIp {
         let cidr_normalized = Cidr::from_str(cidr)?;
         let cidr_normalized_string = cidr_normalized.to_string();
         if &cidr_normalized_string != cidr {
-            warn!("CIDR {cidr} should be rewritten in its normalized form: {cidr_normalized_string}");
+            warn!(
+                "CIDR {cidr} should be rewritten in its normalized form: {cidr_normalized_string}"
+            );
         }
         self.ignore_cidr_normalized.push(cidr_normalized);
     }
@ -289,12 +291,11 @@ impl PatternIp {
 mod patternip_tests {
     use std::net::{Ipv4Addr, Ipv6Addr};

-    use chrono::Local;
     use tokio::{fs::read_to_string, task::JoinSet};

     use crate::{
-        concepts::{Action, Duplicate, Filter, Pattern},
-        daemon::{tests::TestBed, React},
+        concepts::{Action, Duplicate, Filter, Pattern, now},
+        daemon::{React, tests::TestBed},
     };

     use super::{Cidr, PatternIp, PatternType};
@ -708,9 +709,9 @@ mod patternip_tests {
             Duplicate::Ignore,
             &bed.ip_patterns,
         );
-        let bed = bed.part2(filter, Local::now(), None).await;
+        let bed = bed.part2(filter, now(), None).await;
         assert_eq!(
-            bed.manager.handle_line(&line, Local::now()),
+            bed.manager.handle_line(&line, now()).await,
             React::Trigger,
             "line: {line}"
         );
218
src/concepts/plugin.rs
Normal file
218
src/concepts/plugin.rs
Normal file
|
|
@ -0,0 +1,218 @@
|
||||||
|
use std::{collections::BTreeMap, io::Error, path, process::Stdio};
|
||||||
|
|
||||||
|
#[cfg(target_os = "macos")]
|
||||||
|
use std::os::darwin::fs::MetadataExt;
|
||||||
|
#[cfg(target_os = "freebsd")]
|
||||||
|
use std::os::freebsd::fs::MetadataExt;
|
||||||
|
#[cfg(target_os = "illumos")]
|
||||||
|
use std::os::illumos::fs::MetadataExt;
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use std::os::linux::fs::MetadataExt;
|
||||||
|
#[cfg(target_os = "netbsd")]
|
||||||
|
use std::os::netbsd::fs::MetadataExt;
|
||||||
|
#[cfg(target_os = "openbsd")]
|
||||||
|
use std::os::openbsd::fs::MetadataExt;
|
||||||
|
#[cfg(target_os = "solaris")]
|
||||||
|
use std::os::solaris::fs::MetadataExt;
|
||||||
|
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::{
|
||||||
|
fs,
|
||||||
|
process::{Child, Command},
|
||||||
|
};
|
||||||
|
use tracing::{debug, warn};
|
||||||
|
|
||||||
|
// TODO commented options block execution of program,
|
||||||
|
// while developping in my home directory.
|
||||||
|
// Some options may still be useful in production environments.
|
||||||
|
fn systemd_default_options(working_directory: &str) -> BTreeMap<String, Vec<String>> {
|
||||||
|
BTreeMap::from(
|
||||||
|
[
|
||||||
|
// reaction slice (does nothing if inexistent)
|
||||||
|
("Slice", vec!["reaction.slice"]),
|
||||||
|
// Started in its own directory
|
||||||
|
("WorkingDirectory", vec![working_directory]),
|
||||||
|
// No file access except own directory
|
||||||
|
("ReadWritePaths", vec![working_directory]),
|
||||||
|
("ReadOnlyPaths", vec!["/"]),
|
||||||
|
("InaccessiblePaths", vec!["/boot", "/etc"]),
|
||||||
|
// Protect special filesystems
|
||||||
|
("PrivateDevices", vec!["true"]),
|
||||||
|
("PrivateMounts", vec!["true"]),
|
||||||
|
("PrivateTmp", vec!["true"]),
|
||||||
|
// ("PrivateUsers", vec!["true"]),
|
||||||
|
("ProcSubset", vec!["pid"]),
|
||||||
|
("ProtectClock", vec!["true"]),
|
||||||
|
("ProtectControlGroups", vec!["true"]),
|
||||||
|
#[cfg(not(debug_assertions))]
|
||||||
|
("ProtectHome", vec!["true"]),
|
||||||
|
("ProtectHostname", vec!["true"]),
|
||||||
|
("ProtectKernelLogs", vec!["true"]),
|
||||||
|
("ProtectKernelModules", vec!["true"]),
|
||||||
|
("ProtectKernelTunables", vec!["true"]),
|
||||||
|
("ProtectProc", vec!["invisible"]),
|
||||||
|
("ProtectSystem", vec!["strict"]),
|
||||||
|
// Various Protections
|
||||||
|
("LockPersonality", vec!["true"]),
|
||||||
|
("NoNewPrivileges", vec!["true"]),
|
||||||
|
("AmbientCapabilities", vec![""]),
|
||||||
|
("CapabilityBoundingSet", vec![""]),
|
||||||
|
// Isolate File
|
||||||
|
("RemoveIPC", vec!["true"]),
|
||||||
|
("RestrictNamespaces", vec!["true"]),
|
||||||
|
("RestrictSUIDSGID", vec!["true"]),
|
||||||
|
("SystemCallArchitectures", vec!["native"]),
|
||||||
|
(
|
||||||
|
"SystemCallFilter",
|
||||||
|
vec!["@system-service", "~@privileged", "~@resources", "~@setuid"],
|
||||||
|
),
|
||||||
|
// User
|
||||||
|
// FIXME Setting another user doesn't work, because of stdio pipe permission errors
|
||||||
|
// ("DynamicUser", vec!["true"]),
|
||||||
|
// ("User", vec!["reaction-plugin-test"]),
|
||||||
|
// Too restrictive
|
||||||
|
// ("NoExecPaths", vec!["/"]),
|
||||||
|
// ("RestrictAddressFamilies", vec![""]),
|
||||||
|
]
|
||||||
|
.map(|(k, v)| (k.into(), v.into_iter().map(|v| v.into()).collect())),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||||
|
#[cfg_attr(test, derive(Default))]
|
||||||
|
#[serde(deny_unknown_fields)]
|
||||||
|
pub struct Plugin {
|
||||||
|
#[serde(skip)]
|
||||||
|
pub name: String,
|
||||||
|
|
||||||
|
pub path: String,
|
||||||
|
/// Check that plugin file owner is root
|
||||||
|
#[serde(default = "_true")]
|
||||||
|
pub check_root: bool,
|
||||||
|
/// Enable systemd containerization
|
||||||
|
#[serde(default = "_true")]
|
||||||
|
pub systemd: bool,
|
||||||
|
/// Options for `run0`
|
||||||
|
#[serde(default)]
|
||||||
|
pub systemd_options: BTreeMap<String, Vec<String>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn _true() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE
|
||||||
|
// `run0` can be used for security customisation.
|
||||||
|
// with the --pipe option, raw stdio fd are transmitted to the underlying command, so there is no overhead.
|
||||||
|
|
||||||
|
impl Plugin {
|
||||||
|
pub fn setup(&mut self, name: &str) -> Result<(), String> {
|
||||||
|
self.name = name.to_string();
|
||||||
|
|
||||||
|
if self.path.is_empty() {
|
||||||
|
return Err("can't specify empty plugin path".into());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only when testing, make relative paths absolute
|
||||||
|
#[cfg(debug_assertions)]
|
||||||
|
if !self.path.starts_with("/") {
|
||||||
|
self.path = format!(
|
||||||
|
"{}/{}",
|
||||||
|
std::env::current_dir()
|
||||||
|
.map_err(|err| format!("error on working directory: {err}"))?
|
||||||
|
.to_string_lossy(),
|
||||||
|
self.path
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disallow relative paths
|
||||||
|
if !self.path.starts_with("/") {
|
||||||
|
return Err(format!("plugin paths must be absolute: {}", self.path));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Override default options with user-defined options, when defined.
|
||||||
|
pub fn systemd_setup(&self, working_directory: &str) -> BTreeMap<String, Vec<String>> {
|
||||||
|
let mut new_options = systemd_default_options(working_directory);
|
||||||
|
for (option, value) in self.systemd_options.iter() {
|
||||||
|
new_options.insert(option.clone(), value.clone());
|
||||||
|
}
|
||||||
|
new_options
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn launch(&self, state_directory: &str) -> Result<Child, std::io::Error> {
|
||||||
|
// owner check
|
||||||
|
if self.check_root {
|
||||||
|
let path = self.path.clone();
|
||||||
|
let stat = fs::metadata(path).await?;
|
||||||
|
|
||||||
|
if stat.st_uid() != 0 {
|
||||||
|
return Err(Error::other("plugin file is not owned by root"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let self_uid = if self.systemd {
|
||||||
|
Some(
|
||||||
|
// Well well we want to check if we're root
|
||||||
|
#[allow(unsafe_code)]
|
||||||
|
unsafe {
|
||||||
|
nix::libc::geteuid()
|
||||||
|
},
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create plugin working directory (also state directory)
|
||||||
|
let plugin_working_directory = format!("{state_directory}/plugin_data/{}", self.name);
|
||||||
|
fs::create_dir_all(&plugin_working_directory).await?;
|
||||||
|
|
||||||
|
let mut command = if self_uid.is_some_and(|self_uid| self_uid == 0) {
|
||||||
|
let mut command = Command::new("run0");
|
||||||
|
// --pipe gives direct, non-emulated stdio access, for better performance.
|
||||||
|
command.arg("--pipe");
|
||||||
|
// run the command inside the same slice as reaction
|
||||||
|
command.arg("--slice-inherit");
|
||||||
|
|
||||||
|
// Make path absolute for systemd
|
||||||
|
let full_workdir = path::absolute(&plugin_working_directory)?;
|
||||||
|
let full_workdir = full_workdir.to_str().ok_or_else(|| {
|
||||||
|
std::io::Error::new(
|
||||||
|
std::io::ErrorKind::InvalidFilename,
|
||||||
|
format!(
|
||||||
|
"Could not absolutize plugin working directory {plugin_working_directory}"
|
||||||
|
),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
let merged_systemd_options = self.systemd_setup(full_workdir);
|
||||||
|
// run0 options
|
||||||
|
for (option, values) in merged_systemd_options.iter() {
|
||||||
|
for value in values.iter() {
|
||||||
|
command.arg("--property").arg(format!("{option}={value}"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
command.arg(&self.path);
|
||||||
|
command
|
||||||
|
} else {
|
||||||
|
if self.systemd {
|
||||||
|
warn!("Disabling systemd because reaction does not run as root");
|
||||||
|
}
|
||||||
|
let mut command = Command::new(&self.path);
|
||||||
|
command.current_dir(plugin_working_directory);
|
||||||
|
command
|
||||||
|
};
|
||||||
|
command.arg("serve");
|
||||||
|
debug!(
|
||||||
|
"plugin {}: running command: {:?}",
|
||||||
|
self.name,
|
||||||
|
command.as_std()
|
||||||
|
);
|
||||||
|
command
|
||||||
|
.stdin(Stdio::piped())
|
||||||
|
.stdout(Stdio::piped())
|
||||||
|
.stderr(Stdio::piped())
|
||||||
|
.env("RUST_BACKTRACE", "1")
|
||||||
|
.spawn()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
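When reaction itself runs as root, the branch above builds an invocation of the shape run0 --pipe --slice-inherit --property KEY=VALUE ... /path/to/plugin serve, one --property flag per option value. The ownership check is plain std functionality; here is a minimal synchronous sketch of the same test, Linux-only and outside tokio (check_root_owner and the example path are hypothetical):

use std::io;
use std::os::linux::fs::MetadataExt;

// Returns Ok(()) only when the file at `path` is owned by uid 0 (root).
fn check_root_owner(path: &str) -> io::Result<()> {
    let meta = std::fs::metadata(path)?;
    if meta.st_uid() != 0 {
        return Err(io::Error::other("plugin file is not owned by root"));
    }
    Ok(())
}

Usage: check_root_owner("/usr/lib/reaction/plugins/example")?;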
@@ -1,8 +1,11 @@
 use std::{cmp::Ordering, collections::BTreeMap, hash::Hash};
 
+use reaction_plugin::StreamConfig;
+use regex::RegexSet;
 use serde::{Deserialize, Serialize};
+use serde_json::Value;
 
-use super::{Filter, Patterns};
+use super::{Filter, Patterns, merge_attrs};
 
 #[derive(Clone, Debug, Deserialize, Serialize)]
 #[cfg_attr(test, derive(Default))]
@@ -10,11 +13,23 @@ use super::{Filter, Patterns};
 pub struct Stream {
     #[serde(default)]
     pub cmd: Vec<String>,
 
     #[serde(default)]
     pub filters: BTreeMap<String, Filter>,
 
     #[serde(skip)]
     pub name: String,
 
+    #[serde(skip)]
+    pub compiled_regex_set: RegexSet,
+    #[serde(skip)]
+    pub regex_index_to_filter_name: Vec<String>,
+
+    // Plugin-specific
+    #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")]
+    pub stream_type: Option<String>,
+    #[serde(default, skip_serializing_if = "Value::is_null")]
+    pub options: Value,
 }
 
 impl Stream {
@@ -23,23 +38,27 @@ impl Stream {
     }
 
     pub fn merge(&mut self, other: Stream) -> Result<(), String> {
-        if !(self.cmd.is_empty() || other.cmd.is_empty() || self.cmd == other.cmd) {
-            return Err("cmd has conflicting definitions".into());
-        }
-
-        if self.cmd.is_empty() {
-            self.cmd = other.cmd;
-        }
+        self.cmd = merge_attrs(self.cmd.clone(), other.cmd, Vec::default(), "cmd")?;
+        self.stream_type = merge_attrs(self.stream_type.clone(), other.stream_type, None, "type")?;
 
         for (key, filter) in other.filters.into_iter() {
             if self.filters.insert(key.clone(), filter).is_some() {
-                return Err(format!("filter {} is already defined. filter definitions can't be spread across multiple files.", key));
+                return Err(format!(
+                    "filter {} is already defined. filter definitions can't be spread across multiple files.",
+                    key
+                ));
             }
         }
 
         Ok(())
     }
 
+    pub fn is_plugin(&self) -> bool {
+        self.stream_type
+            .as_ref()
+            .is_some_and(|stream_type| stream_type != "cmd")
+    }
+
     pub fn setup(&mut self, name: &str, patterns: &Patterns) -> Result<(), String> {
         self._setup(name, patterns)
             .map_err(|msg| format!("stream {}: {}", name, msg))
@@ -55,11 +74,18 @@ impl Stream {
             return Err("character '.' is not allowed in stream name".into());
         }
 
-        if self.cmd.is_empty() {
-            return Err("cmd is empty".into());
-        }
-        if self.cmd[0].is_empty() {
-            return Err("cmd's first item is empty".into());
+        if !self.is_plugin() {
+            if self.cmd.is_empty() {
+                return Err("cmd is empty".into());
+            }
+            if self.cmd[0].is_empty() {
+                return Err("cmd's first item is empty".into());
+            }
+            if !self.options.is_null() {
+                return Err("can't define options without a plugin type".into());
+            }
+        } else if !self.cmd.is_empty() {
+            return Err("can't define cmd and a plugin type".into());
         }
 
         if self.filters.is_empty() {
@@ -70,8 +96,33 @@ impl Stream {
             filter.setup(name, key, patterns)?;
         }
 
+        let all_regexes: BTreeMap<_, _> = self
+            .filters
+            .values()
+            .flat_map(|filter| {
+                filter
+                    .regex
+                    .iter()
+                    .map(|regex| (regex, filter.name.clone()))
+            })
+            .collect();
+
+        self.compiled_regex_set = RegexSet::new(all_regexes.keys())
+            .map_err(|err| format!("too many regexes on the filters of this stream: {err}"))?;
+        self.regex_index_to_filter_name = all_regexes.into_values().collect();
+
         Ok(())
     }
 
+    pub fn to_stream_config(&self) -> Result<StreamConfig, String> {
+        Ok(StreamConfig {
+            stream_name: self.name.clone(),
+            stream_type: self.stream_type.clone().ok_or_else(|| {
+                format!("stream {} doesn't load a plugin. this is a bug!", self.name)
+            })?,
+            config: self.options.clone().into(),
+        })
+    }
 }
 
 impl PartialEq for Stream {
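The setup above compiles every filter regex into one RegexSet and keeps a parallel Vec mapping each pattern index back to its filter; because both come from the same BTreeMap, keys() and into_values() enumerate in the same order, so the indices line up. A self-contained sketch of that lookup (matching_filters, the patterns, and the filter names are hypothetical; RegexSet is the real regex crate API):

use regex::RegexSet;

// For each line, return the names of the filters whose regexes matched.
fn matching_filters(line: &str) -> Vec<String> {
    let patterns = ["failed login", "invalid user"];
    let index_to_filter = ["auth".to_string(), "auth".to_string()];
    let set = RegexSet::new(patterns).expect("patterns are valid regexes");
    set.matches(line)
        .iter()
        .map(|i| index_to_filter[i].clone())
        .collect()
}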
@@ -102,19 +153,12 @@ mod tests {
     use super::*;
     use crate::concepts::filter::tests::ok_filter;
 
-    fn default_stream() -> Stream {
-        Stream {
-            cmd: Vec::new(),
-            name: "".into(),
-            filters: BTreeMap::new(),
-        }
-    }
-
     fn ok_stream() -> Stream {
-        let mut stream = default_stream();
-        stream.cmd = vec!["command".into()];
-        stream.filters.insert("name".into(), ok_filter());
-        stream
+        Stream {
+            cmd: vec!["command".into()],
+            filters: BTreeMap::from([("name".into(), ok_filter())]),
+            ..Default::default()
+        }
     }
 
     #[test]

@@ -3,26 +3,23 @@ pub mod tests;
 
 mod state;
 
-use std::{
-    collections::BTreeMap,
-    process::Stdio,
-    sync::{Arc, Mutex, MutexGuard},
-};
+use std::{collections::BTreeMap, process::Stdio, sync::Arc};
 
+use chrono::TimeZone;
+use reaction_plugin::{ActionImpl, shutdown::ShutdownToken};
 use regex::Regex;
-use tokio::sync::Semaphore;
+use tokio::sync::{Mutex, MutexGuard, Semaphore};
 use tracing::{error, info};
 
 use crate::{
     concepts::{Action, Duplicate, Filter, Match, Pattern, Time},
+    daemon::plugin::Plugins,
     protocol::{Order, PatternStatus},
-    treedb::Database,
 };
+use treedb::Database;
 
 use state::State;
 
-use super::shutdown::ShutdownToken;
-
 /// Responsible for handling all runtime logic dedicated to a [`Filter`].
 /// Notably handles incoming lines from [`super::stream::stream_manager`]
 /// and orders from the [`super::socket::socket_manager`]
@@ -34,6 +31,8 @@ pub struct FilterManager {
     exec_limit: Option<Arc<Semaphore>>,
     /// Permits running pending actions on shutdown
     shutdown: ShutdownToken,
+    /// Action Plugins
+    action_plugins: BTreeMap<&'static String, ActionImpl>,
     /// Inner state.
     /// Protected by a [`Mutex`], permitting FilterManager to be cloned
     /// and concurrently owned by its stream manager, the socket manager,
@@ -54,26 +53,36 @@ pub enum React {
 
 #[allow(clippy::unwrap_used)]
 impl FilterManager {
-    pub fn new(
+    pub async fn new(
         filter: &'static Filter,
         exec_limit: Option<Arc<Semaphore>>,
         shutdown: ShutdownToken,
         db: &mut Database,
+        plugins: &mut Plugins,
         now: Time,
     ) -> Result<Self, String> {
+        let mut action_plugins = BTreeMap::default();
+        for (action_name, action) in filter.actions.iter().filter(|action| action.1.is_plugin()) {
+            action_plugins.insert(
+                action_name,
+                plugins.get_action_impl(action.to_string()).ok_or_else(|| {
+                    format!("action {action} doesn't load a plugin. this is a bug!")
+                })?,
+            );
+        }
         let this = Self {
             filter,
             exec_limit,
             shutdown,
-            state: Arc::new(Mutex::new(State::new(filter, db, now)?)),
+            action_plugins,
+            state: Arc::new(Mutex::new(State::new(filter, db, now).await?)),
         };
-        this.clear_past_triggers_and_schedule_future_actions(now);
         Ok(this)
     }
 
-    pub fn handle_line(&self, line: &str, now: Time) -> React {
+    pub async fn handle_line(&self, line: &str, now: Time) -> React {
         if let Some(match_) = self.filter.get_match(line) {
-            if self.handle_match(match_, now) {
+            if self.handle_match(match_, now).await {
                React::Trigger
             } else {
                 React::Match
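A recurring theme in this diff is the switch from std::sync::Mutex to tokio::sync::Mutex: the async lock can be held across .await points, which a std guard must never be. A minimal illustration of the clone-and-share pattern FilterManager uses (Shared and bump are hypothetical names):

use std::sync::Arc;
use tokio::sync::Mutex;

#[derive(Clone)]
struct Shared {
    state: Arc<Mutex<u64>>,
}

impl Shared {
    // Locking is itself an await point; the guard may live across others.
    async fn bump(&self) -> u64 {
        let mut state = self.state.lock().await;
        *state += 1;
        *state
    }
}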
@@ -83,10 +92,10 @@ impl FilterManager {
         }
     }
 
-    fn handle_match(&self, m: Match, now: Time) -> bool {
+    async fn handle_match(&self, m: Match, now: Time) -> bool {
         #[allow(clippy::unwrap_used)] // propagating panics is ok
-        let mut state = self.state.lock().unwrap();
-        state.clear_past_matches(now);
+        let mut state = self.state.lock().await;
+        state.clear_past_matches(now).await;
 
         // if Duplicate::Ignore and already triggered, skip
         if state.triggers.contains_key(&m) && Duplicate::Ignore == self.filter.duplicate {
@@ -98,31 +107,33 @@ impl FilterManager {
         let trigger = match self.filter.retry {
             None => true,
             Some(retry) => {
-                state.add_match(m.clone(), now);
+                state.add_match(m.clone(), now).await;
                 // Number of stored times for this match >= configured retry for this filter
-                state.get_times(&m) >= retry as usize
+                state.get_times(&m).await >= retry as usize
             }
         };
 
         if trigger {
-            state.remove_match(&m);
+            state.remove_match(&m).await;
             let actions_left = if Duplicate::Extend == self.filter.duplicate {
                 // Get number of actions left from last trigger
                 state
                     .remove_trigger(&m)
+                    .await
                     // Only one entry in the map because Duplicate::Extend
-                    .and_then(|map| map.first_key_value().map(|(_, n)| n.clone()))
+                    .and_then(|map| map.first_key_value().map(|(_, n)| *n))
             } else {
                 None
             };
-            state.add_trigger(m.clone(), now, actions_left);
-            self.schedule_exec(m, now, now, &mut state, false, actions_left);
+            state.add_trigger(m.clone(), now, actions_left).await;
+            self.schedule_exec(m, now, now, &mut state, false, actions_left)
+                .await;
         }
 
         trigger
     }
 
-    pub fn handle_trigger(
+    pub async fn handle_trigger(
         &self,
         patterns: BTreeMap<Arc<Pattern>, String>,
         now: Time,
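Distilled, the retry rule above reads: with no retry configured, every match triggers immediately; with retry = N, a match triggers once at least N occurrences are stored inside the retention window. A hypothetical reduction of just that decision (should_trigger is not a crate function):

use std::collections::BTreeSet;

fn should_trigger(times: &BTreeSet<u64>, retry: Option<u16>) -> bool {
    match retry {
        None => true,                                 // no retry: always trigger
        Some(retry) => times.len() >= retry as usize, // enough stored occurrences
    }
}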
@@ -130,15 +141,16 @@ impl FilterManager {
         let match_ = self.filter.get_match_from_patterns(patterns)?;
 
         #[allow(clippy::unwrap_used)] // propagating panics is ok
-        let mut state = self.state.lock().unwrap();
-        state.remove_match(&match_);
-        state.add_trigger(match_.clone(), now, None);
-        self.schedule_exec(match_, now, now, &mut state, false, None);
+        let mut state = self.state.lock().await;
+        state.remove_match(&match_).await;
+        state.add_trigger(match_.clone(), now, None).await;
+        self.schedule_exec(match_, now, now, &mut state, false, None)
+            .await;
 
         Ok(())
     }
 
-    pub fn handle_order(
+    pub async fn handle_order(
         &self,
         patterns: &BTreeMap<Arc<Pattern>, Regex>,
         order: Order,
@@ -155,7 +167,7 @@ impl FilterManager {
         };
 
         #[allow(clippy::unwrap_used)] // propagating panics is ok
-        let mut state = self.state.lock().unwrap();
+        let mut state = self.state.lock().await;
 
         let mut cs: BTreeMap<_, _> = {
             let cloned_matches = state
@@ -167,27 +179,26 @@ impl FilterManager {
                 .cloned()
                 .collect::<Vec<_>>();
 
-            cloned_matches
-                .into_iter()
-                .map(|match_| {
-                    // mutable State required here
-                    if let Order::Flush = order {
-                        state.remove_match(&match_);
-                    }
-                    let matches = state
-                        .matches
-                        .get(&match_)
-                        .map(|times| times.len())
-                        .unwrap_or(0);
-                    (
-                        match_,
-                        PatternStatus {
-                            matches,
-                            ..Default::default()
-                        },
-                    )
-                })
-                .collect()
+            let mut cs = BTreeMap::new();
+            for match_ in cloned_matches {
+                // mutable State required here
+                if let Order::Flush = order {
+                    state.remove_match(&match_).await;
+                }
+                let matches = state
+                    .matches
+                    .get(&match_)
+                    .map(|times| times.len())
+                    .unwrap_or(0);
+                cs.insert(
+                    match_,
+                    PatternStatus {
+                        matches,
+                        ..Default::default()
+                    },
+                );
+            }
+            cs
         };
 
         let cloned_triggers = state
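The iterator chain became a plain loop here because remove_match is now async, and .await cannot appear inside a closure handed to map. A stand-alone sketch of the constraint (lookup and collect_all are hypothetical):

async fn lookup(key: u32) -> u32 {
    key * 2
}

async fn collect_all(keys: Vec<u32>) -> Vec<u32> {
    let mut out = Vec::new();
    for key in keys {
        // allowed in a plain loop, not inside map(|k| ...)
        out.push(lookup(key).await);
    }
    out
}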
@@ -203,7 +214,7 @@ impl FilterManager {
             let map = state.triggers.get(&m).unwrap().clone();
 
             if let Order::Flush = order {
-                state.remove_trigger(&m);
+                state.remove_trigger(&m).await;
             }
 
             for (t, remaining) in map {
@@ -213,21 +224,27 @@ impl FilterManager {
             for action in self.filter.filtered_actions_from_match(&m) {
                 let action_time = t + action.after_duration.unwrap_or_default();
                 if action_time > now {
+                    // Pretty print time
+                    let time = chrono::Local
+                        .timestamp_opt(
+                            action_time.as_secs() as i64,
+                            action_time.subsec_nanos(),
+                        )
+                        .unwrap()
+                        .to_rfc3339()
+                        .chars()
+                        .take(19)
+                        .collect();
                     // Insert action
                     pattern_status
                         .actions
                         .entry(action.name.clone())
                         .or_default()
-                        .push(action_time.to_rfc3339().chars().take(19).collect());
+                        .push(time);
 
                     // Execute the action early
                     if let Order::Flush = order {
-                        exec_now(
-                            &self.exec_limit,
-                            self.shutdown.clone(),
-                            action,
-                            m.clone(),
-                        );
+                        self.exec_now(action, m.clone(), t);
                     }
                 }
             }
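The new pretty-printing step converts the Time's seconds and nanoseconds through chrono and keeps the first 19 characters of the RFC 3339 form, i.e. "YYYY-MM-DDTHH:MM:SS" without the offset. The same conversion in isolation (pretty is a hypothetical name; Local.timestamp_opt and to_rfc3339 are real chrono APIs):

use chrono::{Local, TimeZone};

fn pretty(secs: i64, nanos: u32) -> String {
    Local
        .timestamp_opt(secs, nanos)
        .unwrap() // fine for in-range timestamps
        .to_rfc3339()
        .chars()
        .take(19)
        .collect()
}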
@@ -241,12 +258,12 @@ impl FilterManager {
     /// Schedule execution for a given Match.
     /// We check first if the trigger is still here
     /// because pending actions can be flushed.
-    fn schedule_exec(
+    async fn schedule_exec(
         &self,
         m: Match,
         t: Time,
         now: Time,
-        state: &mut MutexGuard<State>,
+        state: &mut MutexGuard<'_, State>,
         startup: bool,
         actions_left: Option<u64>,
     ) {
@@ -258,7 +275,9 @@ impl FilterManager {
             .filter(|action| !startup || !action.oneshot)
             // skip actions that have already run
             .skip(match actions_left {
-                Some(actions_left) => self.filter.actions.len() - actions_left as usize,
+                Some(actions_left) => {
+                    self.filter.filtered_actions_from_match(&m).len() - actions_left as usize
+                }
                 None => 0,
             });
@@ -268,29 +287,26 @@ impl FilterManager {
             let m = m.clone();
 
             if exec_time <= now {
-                if state.decrement_trigger(&m, t, false) {
-                    exec_now(&self.exec_limit, self.shutdown.clone(), action, m);
+                if state.decrement_trigger(&m, t, false).await {
+                    self.exec_now(action, m, t);
                 }
             } else {
                 let this = self.clone();
+                let action_impl = self.action_plugins.get(&action.name).cloned();
                 tokio::spawn(async move {
-                    let dur = (exec_time - now)
-                        .to_std()
-                        // Could cause an error if t + after < now
-                        // In this case, 0 is fine
-                        .unwrap_or_default();
+                    let dur = exec_time - now;
                     // Wait either for end of sleep
                     // or reaction exiting
                     let exiting = tokio::select! {
-                        _ = tokio::time::sleep(dur) => false,
+                        _ = tokio::time::sleep(dur.into()) => false,
                         _ = this.shutdown.wait() => true,
                     };
                     // Exec action if the trigger hasn't already been flushed
                     if !exiting || action.on_exit {
                         #[allow(clippy::unwrap_used)] // propagating panics is ok
-                        let mut state = this.state.lock().unwrap();
-                        if state.decrement_trigger(&m, t, exiting) {
-                            exec_now(&this.exec_limit, this.shutdown, action, m);
+                        let mut state = this.state.lock().await;
+                        if state.decrement_trigger(&m, t, exiting).await {
+                            exec_now(&this.exec_limit, this.shutdown, action, action_impl, m, t);
                         }
                     }
                 });
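The select! above races the action's delay against process shutdown, so a pending after: action neither blocks exit nor fires late. The same shape with tokio primitives only (wait_or_shutdown is hypothetical; Notify stands in for the crate's ShutdownToken):

use std::time::Duration;
use tokio::sync::Notify;

// Returns true when shutdown won the race, false when the timer elapsed.
async fn wait_or_shutdown(dur: Duration, shutdown: &Notify) -> bool {
    tokio::select! {
        _ = tokio::time::sleep(dur) => false,
        _ = shutdown.notified() => true,
    }
}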
@@ -298,7 +314,8 @@ impl FilterManager {
         }
     }
 
-    fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) {
+    /// Clear past triggers and schedule future actions
+    pub async fn start(&self, now: Time) {
         let longuest_action_duration = self.filter.longuest_action_duration;
         let number_of_actions = self
             .filter
@@ -309,7 +326,7 @@ impl FilterManager {
             .count() as u64;
 
         #[allow(clippy::unwrap_used)] // propagating panics is ok
-        let mut state = self.state.lock().unwrap();
+        let mut state = self.state.lock().await;
 
         let cloned_triggers = state
             .triggers
@@ -327,7 +344,7 @@ impl FilterManager {
                 .collect();
 
             if map.is_empty() {
-                state.triggers.remove(&m);
+                state.triggers.remove(&m).await;
             } else {
                 // Filter duplicates
                 // unwrap is fine because map is not empty (see if)
@@ -339,45 +356,85 @@ impl FilterManager {
                     // No filtering
                     Duplicate::Rerun => map,
                 };
-                state.triggers.insert(m.clone(), map.clone());
+                state.triggers.insert(m.clone(), map.clone()).await;
                 for (t, _) in map {
                     // Schedule the upcoming times
-                    self.schedule_exec(m.clone(), t, now, &mut state, true, None);
+                    self.schedule_exec(m.clone(), t, now, &mut state, true, None)
+                        .await;
                 }
             }
         }
     }
+
+    fn exec_now(&self, action: &'static Action, m: Match, t: Time) {
+        let action_impl = self.action_plugins.get(&action.name).cloned();
+        exec_now(
+            &self.exec_limit,
+            self.shutdown.clone(),
+            action,
+            action_impl,
+            m,
+            t,
+        )
+    }
 }
 
 fn exec_now(
     exec_limit: &Option<Arc<Semaphore>>,
     shutdown: ShutdownToken,
     action: &'static Action,
+    action_impl: Option<ActionImpl>,
     m: Match,
+    t: Time,
 ) {
     let exec_limit = exec_limit.clone();
     tokio::spawn(async move {
         // Move ShutdownToken into the task
         let _shutdown = shutdown;
-        // Wait for semaphore's permission, if it is Some
-        let _permit = match exec_limit {
-            #[allow(clippy::unwrap_used)] // We know the semaphore is not closed
-            Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()),
-            None => None,
-        };
 
-        // Construct command
-        let mut command = action.exec(&m);
-
-        info!("{}: run [{:?}]", &action, command.as_std());
-        if let Err(err) = command
-            .stdin(Stdio::null())
-            .stderr(Stdio::null())
-            .stdout(Stdio::piped())
-            .status()
-            .await
-        {
-            error!("{}: run [{:?}], code {}", &action, command.as_std(), err);
+        match action_impl {
+            Some(action_impl) => {
+                info!(
+                    "{action}: run {} {:?}",
+                    action.action_type.clone().unwrap_or_default(),
+                    &m,
+                );
+
+                // Sending action
+                if let Err(err) = action_impl
+                    .tx
+                    .send(reaction_plugin::Exec {
+                        match_: m,
+                        time: t.into(),
+                    })
+                    .await
+                {
+                    error!("{action}: communication with plugin failed: {err}");
+                    return;
+                }
+            }
+            None => {
+                // Wait for semaphore's permission, if it is Some
+                let _permit = match exec_limit {
+                    #[allow(clippy::unwrap_used)] // We know the semaphore is not closed
+                    Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()),
+                    None => None,
+                };
+
+                // Construct command
+                let mut command = action.exec(&m);
+
+                info!("{action}: run [{:?}]", command.as_std());
+                if let Err(err) = command
+                    .stdin(Stdio::null())
+                    .stderr(Stdio::null())
+                    .stdout(Stdio::piped())
+                    .status()
+                    .await
+                {
+                    error!("{action}: run [{:?}], code {err}", command.as_std());
+                }
+            }
        }
     });
 }
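For non-plugin actions, exec_now still throttles concurrent commands through the optional semaphore: None means unlimited, Some caps parallelism at the permit count. A minimal sketch of that mechanism alone (run_limited is hypothetical; acquire_owned is the real tokio API):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn run_limited(exec_limit: Option<Arc<Semaphore>>) {
    let _permit = match exec_limit {
        // acquire_owned ties the permit's lifetime to this task
        Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()),
        None => None,
    };
    // ... run the action command here; the permit is released on drop ...
}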
@@ -1,12 +1,9 @@
 use std::collections::{BTreeMap, BTreeSet};
 
-use crate::{
-    concepts::{Filter, Match, MatchTime, Time},
-    treedb::{
-        helpers::{to_match, to_matchtime, to_time, to_timemap, to_u64},
-        Database, Tree,
-    },
-};
+use serde_json::Value;
+use treedb::{Database, Tree, helpers::*};
+
+use crate::concepts::{Filter, Match, MatchTime, Time};
 
 pub fn filter_ordered_times_db_name(filter: &Filter) -> String {
     format!(
@@ -41,12 +38,12 @@ pub struct State {
     /// I'm pretty confident that Time will always be unique, because it has enough precision.
     /// See this code that gives different times, even in a minimal loop:
     /// ```rust
-    /// use chrono::{Local};
+    /// use reaction::concepts::now;
     ///
     /// let mut res = vec![];
     /// for _ in 0..10 {
-    ///     let now = Local::now();
-    ///     res.push(format!("Now: {now}"));
+    ///     let now = now();
+    ///     res.push(format!("Now: {}", now.as_nanos()));
     /// }
     /// for s in res {
     ///     println!("{s}");
@@ -59,33 +56,45 @@ pub struct State {
 }
 
 impl State {
-    pub fn new(filter: &'static Filter, db: &mut Database, now: Time) -> Result<Self, String> {
-        let ordered_times = db.open_tree(
-            filter_ordered_times_db_name(filter),
-            filter.retry_duration.unwrap_or_default(),
-            |(key, value)| Ok((to_time(&key)?, to_match(&value)?)),
-        )?;
-        let mut triggers = db.open_tree(
-            filter_triggers_db_name(filter),
-            filter.longuest_action_duration,
-            |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)),
-        )?;
+    pub async fn new(
+        filter: &'static Filter,
+        db: &mut Database,
+        now: Time,
+    ) -> Result<Self, String> {
+        let ordered_times = db
+            .open_tree(
+                filter_ordered_times_db_name(filter),
+                filter.retry_duration.unwrap_or_default(),
+                |(key, value)| Ok((to_time(&key)?, to_match(&value)?)),
+            )
+            .await?;
+        let mut triggers = db
+            .open_tree(
+                filter_triggers_db_name(filter),
+                filter.longuest_action_duration,
+                |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)),
+            )
+            .await?;
         if triggers.is_empty() {
-            let old_triggers = db.open_tree(
-                filter_triggers_old_db_name(filter),
-                filter.longuest_action_duration,
-                |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)),
-            )?;
+            let old_triggers = db
+                .open_tree(
+                    filter_triggers_old_db_name(filter),
+                    filter.longuest_action_duration,
+                    |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)),
+                )
+                .await?;
             for (mt, n) in old_triggers.iter() {
-                triggers.fetch_update(mt.m.clone(), |map| {
-                    Some(match map {
-                        None => [(mt.t, *n)].into(),
-                        Some(mut map) => {
-                            map.insert(mt.t, *n);
-                            map
-                        }
-                    })
-                });
+                triggers
+                    .fetch_update(mt.m.clone(), |map| {
+                        Some(match map {
+                            None => [(mt.t, *n)].into(),
+                            Some(mut map) => {
+                                map.insert(mt.t, *n);
+                                map
+                            }
+                        })
+                    })
+                    .await;
             }
         }
         let mut this = Self {
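The fetch_update calls in this and the following hunks follow a read-modify-write pattern: the closure receives the current value for the key (or None) and returns the value to store (or None to delete). A hypothetical synchronous equivalent on a plain BTreeMap — treedb's real method additionally persists the result and is async:

use std::collections::BTreeMap;

fn fetch_update<K: Ord, V>(
    map: &mut BTreeMap<K, V>,
    key: K,
    f: impl FnOnce(Option<V>) -> Option<V>,
) {
    let current = map.remove(&key);
    if let Some(next) = f(current) {
        map.insert(key, next);
    }
}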
@@ -95,52 +104,54 @@ impl State {
             ordered_times,
             triggers,
         };
-        this.clear_past_matches(now);
-        this.load_matches_from_ordered_times();
+        this.clear_past_matches(now).await;
+        this.load_matches_from_ordered_times().await;
         Ok(this)
     }
 
-    pub fn add_match(&mut self, m: Match, t: Time) {
+    pub async fn add_match(&mut self, m: Match, t: Time) {
         let set = self.matches.entry(m.clone()).or_default();
         set.insert(t);
-        self.ordered_times.insert(t, m);
+        self.ordered_times.insert(t, m).await;
     }
 
-    pub fn add_trigger(&mut self, m: Match, t: Time, action_count: Option<u64>) {
+    pub async fn add_trigger(&mut self, m: Match, t: Time, action_count: Option<u64>) {
         // We record triggered filters only when there is an action with an `after` directive
         if self.has_after {
             // Add the (Match, Time) to the triggers map
             let n = action_count
                 .unwrap_or_else(|| self.filter.filtered_actions_from_match(&m).len() as u64);
-            self.triggers.fetch_update(m, |map| {
-                Some(match map {
-                    None => [(t, n)].into(),
-                    Some(mut value) => {
-                        value.insert(t, n);
-                        value
-                    }
-                })
-            });
+            self.triggers
+                .fetch_update(m, |map| {
+                    Some(match map {
+                        None => [(t, n)].into(),
+                        Some(mut value) => {
+                            value.insert(t, n);
+                            value
+                        }
+                    })
+                })
+                .await;
         }
     }
 
     // Completely remove a Match from the matches
-    pub fn remove_match(&mut self, m: &Match) {
+    pub async fn remove_match(&mut self, m: &Match) {
         if let Some(set) = self.matches.get(m) {
             for t in set {
-                self.ordered_times.remove(t);
+                self.ordered_times.remove(t).await;
             }
             self.matches.remove(m);
         }
     }
 
     /// Completely remove a Match from the triggers
-    pub fn remove_trigger(&mut self, m: &Match) -> Option<BTreeMap<Time, u64>> {
-        self.triggers.remove(m)
+    pub async fn remove_trigger(&mut self, m: &Match) -> Option<BTreeMap<Time, u64>> {
+        self.triggers.remove(m).await
     }
 
     /// Returns whether we should still execute an action for this (Match, Time) trigger
-    pub fn decrement_trigger(&mut self, m: &Match, t: Time, exiting: bool) -> bool {
+    pub async fn decrement_trigger(&mut self, m: &Match, t: Time, exiting: bool) -> bool {
         // We record triggered filters only when there is an action with an `after` directive
         if self.has_after {
             let mut exec_needed = false;
@@ -154,16 +165,14 @@ impl State {
                 exec_needed = true;
                 if count <= 1 {
                     if !exiting {
-                        self.triggers.fetch_update(mt.m, |map| {
-                            map.and_then(|mut map| {
-                                map.remove(&mt.t);
-                                if map.is_empty() {
-                                    None
-                                } else {
-                                    Some(map)
-                                }
-                            })
-                        });
+                        self.triggers
+                            .fetch_update(mt.m, |map| {
+                                map.and_then(|mut map| {
+                                    map.remove(&mt.t);
+                                    if map.is_empty() { None } else { Some(map) }
+                                })
+                            })
+                            .await;
                     }
                     // else don't do anything
                     // Because that will remove the entry in the DB, and make
@@ -172,12 +181,14 @@ impl State {
                     // - The current for action counting, not persisted
                    // - Another like ordered_times, Tree<Time, Match>, persisted
                 } else {
-                    self.triggers.fetch_update(mt.m, |map| {
-                        map.map(|mut map| {
-                            map.insert(mt.t, count - 1);
-                            map
-                        })
-                    });
+                    self.triggers
+                        .fetch_update(mt.m, |map| {
+                            map.map(|mut map| {
+                                map.insert(mt.t, count - 1);
+                                map
+                            })
+                        })
+                        .await;
                 }
             }
             exec_needed
@@ -186,7 +197,7 @@ impl State {
         }
     }
 
-    pub fn clear_past_matches(&mut self, now: Time) {
+    pub async fn clear_past_matches(&mut self, now: Time) {
         let retry_duration = self.filter.retry_duration.unwrap_or_default();
         while self
             .ordered_times
@@ -199,7 +210,7 @@ impl State {
                 let (t, m) = self.ordered_times.first_key_value().unwrap();
                 (*t, m.clone())
             };
-            self.ordered_times.remove(&t);
+            self.ordered_times.remove(&t).await;
             if let Some(set) = self.matches.get(&m) {
                 let mut set = set.clone();
                 set.remove(&t);
@@ -212,14 +223,14 @@ impl State {
             }
         }
     }
 
-    pub fn get_times(&self, m: &Match) -> usize {
+    pub async fn get_times(&self, m: &Match) -> usize {
         match self.matches.get(m) {
            Some(vec) => vec.len(),
            None => 0,
        }
    }
 
-    fn load_matches_from_ordered_times(&mut self) {
+    async fn load_matches_from_ordered_times(&mut self) {
         for (t, m) in self.ordered_times.iter() {
             let set = self.matches.entry(m.clone()).or_default();
             set.insert(*t);
@@ -227,19 +238,30 @@ impl State {
         }
     }
 }
 
+/// Tries to convert a [`Value`] into a [`MatchTime`]
+pub fn to_matchtime(val: &Value) -> Result<MatchTime, String> {
+    let map = val.as_object().ok_or("not an object")?;
+    Ok(MatchTime {
+        m: to_match(map.get("m").ok_or("no m in object")?)?,
+        t: to_time(map.get("t").ok_or("no t in object")?)?,
+    })
+}
+
 #[cfg(test)]
 mod tests {
     use std::collections::{BTreeMap, HashMap};
 
-    use chrono::{DateTime, Local, TimeDelta};
-    use serde_json::json;
+    use serde_json::{Map, Value, json};
 
     use crate::{
-        concepts::{filter_tests::ok_filter, Action, Duplicate, Filter, Pattern},
-        daemon::filter::state::State,
+        concepts::{
+            Action, Duplicate, Filter, MatchTime, Pattern, Time, filter_tests::ok_filter, now,
+        },
         tests::TempDatabase,
     };
 
+    use super::{State, to_matchtime};
+
     // Tests `new`, `clear_past_matches` and `load_matches_from_ordered_times`
     #[tokio::test]
     async fn state_new() {
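to_matchtime follows the usual shape for hand-decoding a serde_json::Value: assert it is an object, then fail with a readable message for each missing key. The skeleton in isolation (get_field and demo are hypothetical; the key names and value formats of the real converter are defined by treedb's helpers):

use serde_json::{Value, json};

fn get_field<'a>(val: &'a Value, key: &str) -> Result<&'a Value, String> {
    val.as_object()
        .ok_or("not an object")?
        .get(key)
        .ok_or_else(|| format!("no {key} in object"))
}

fn demo() -> Result<(), String> {
    let val = json!({ "m": ["one"], "t": 123 });
    let m = get_field(&val, "m")?;
    let t = get_field(&val, "t")?;
    println!("m={m} t={t}");
    Ok(())
}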
@ -267,15 +289,16 @@ mod tests {
|
||||||
&patterns,
|
&patterns,
|
||||||
);
|
);
|
||||||
|
|
||||||
let now = DateTime::parse_from_rfc3339("2025-07-10T12:35:00.000+00:00")
|
let now = Time::from_secs(1234567);
|
||||||
.unwrap()
|
// DateTime::parse_from_rfc3339("2025-07-10T12:35:00.000+00:00")
|
||||||
.with_timezone(&Local);
|
// .unwrap()
|
||||||
let now_plus_1m = now + TimeDelta::minutes(1);
|
// .with_timezone(&Local);
|
||||||
let now_plus_1m01 = now_plus_1m + TimeDelta::seconds(1);
|
let now_plus_1m = now + Time::from_mins(1);
|
||||||
let now_less_1m = now - TimeDelta::minutes(1);
|
let now_plus_1m01 = now_plus_1m + Time::from_secs(1);
|
||||||
let now_less_1s = now - TimeDelta::seconds(1);
|
let now_less_1m = now - Time::from_mins(1);
|
||||||
let now_less_4s = now - TimeDelta::seconds(4);
|
let now_less_1s = now - Time::from_secs(1);
|
||||||
let now_less_5s = now - TimeDelta::seconds(5);
|
let now_less_4s = now - Time::from_secs(4);
|
||||||
|
let now_less_5s = now - Time::from_secs(5);
|
||||||
|
|
||||||
let triggers = [
|
let triggers = [
|
||||||
// format v1
|
// format v1
|
||||||
|
|
@ -314,35 +337,35 @@ mod tests {
|
||||||
json!(["one"]),
|
json!(["one"]),
|
||||||
json!({
|
json!({
|
||||||
// Will stay
|
// Will stay
|
||||||
now_plus_1m.to_rfc3339(): 1,
|
now_plus_1m.as_nanos().to_string(): 1,
|
||||||
now_less_1s.to_rfc3339(): 1,
|
now_less_1s.as_nanos().to_string(): 1,
|
||||||
// Will not get cleaned because it's FilterManager's task
|
// Will not get cleaned because it's FilterManager's task
|
||||||
now_less_5s.to_rfc3339(): 1,
|
now_less_5s.as_nanos().to_string(): 1,
|
||||||
}),
|
}),
|
||||||
)]),
|
)]),
|
||||||
),
|
),
|
||||||
];
|
];
|
||||||
|
|
||||||
for trigger_db in triggers {
|
for trigger_db in triggers {
|
||||||
let mut db = TempDatabase::default().await;
|
let mut db = TempDatabase::from_loaded_db(HashMap::from([
|
||||||
db.set_loaded_db(HashMap::from([
|
|
||||||
(
|
(
|
||||||
"filter_ordered_times_s1.f1".into(),
|
"filter_ordered_times_s1.f1".into(),
|
||||||
HashMap::from([
|
HashMap::from([
|
||||||
// Will stay
|
// Will stay
|
||||||
(now_plus_1m.to_rfc3339().into(), ["one"].into()),
|
(now_plus_1m.as_nanos().to_string().into(), ["one"].into()),
|
||||||
(now_plus_1m01.to_rfc3339().into(), ["one"].into()),
|
(now_plus_1m01.as_nanos().to_string().into(), ["one"].into()),
|
||||||
(now_less_1s.to_rfc3339().into(), ["two"].into()), // stays because retry: 2s
|
(now_less_1s.as_nanos().to_string().into(), ["two"].into()), // stays because retry: 2s
|
||||||
// Will get cleaned
|
// Will get cleaned
|
||||||
(now_less_4s.to_rfc3339().into(), ["two"].into()),
|
(now_less_4s.as_nanos().to_string().into(), ["two"].into()),
|
||||||
(now_less_5s.to_rfc3339().into(), ["three"].into()),
|
(now_less_5s.as_nanos().to_string().into(), ["three"].into()),
|
||||||
(now_less_1m.to_rfc3339().into(), ["two"].into()),
|
(now_less_1m.as_nanos().to_string().into(), ["two"].into()),
|
||||||
]),
|
]),
|
||||||
),
|
),
|
||||||
trigger_db,
|
trigger_db,
|
||||||
]));
|
]))
|
||||||
|
.await;
|
||||||
|
|
||||||
let state = State::new(filter, &mut db, now).unwrap();
|
let state = State::new(filter, &mut db, now).await.unwrap();
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
state.ordered_times.tree(),
|
state.ordered_times.tree(),
|
||||||
|
|
@ -379,20 +402,18 @@ mod tests {
|
||||||
|
|
||||||
let one = vec!["one".into()];
|
let one = vec!["one".into()];
|
||||||
|
|
||||||
let now = DateTime::parse_from_rfc3339("2025-07-10T12:35:00.000+00:00")
|
let now = Time::from_secs(1234567);
|
||||||
.unwrap()
|
let now_less_1s = now - Time::from_secs(1);
|
||||||
.with_timezone(&Local);
|
let now_less_4s = now - Time::from_secs(4);
|
||||||
let now_less_1s = now - TimeDelta::seconds(1);
|
|
||||||
let now_less_4s = now - TimeDelta::seconds(4);
|
|
||||||
|
|
||||||
let mut db = TempDatabase::default().await;
|
let mut db = TempDatabase::default().await;
|
||||||
let mut state = State::new(filter, &mut db, now).unwrap();
|
let mut state = State::new(filter, &mut db, now).await.unwrap();
|
||||||
|
|
||||||
assert!(state.ordered_times.tree().is_empty());
|
assert!(state.ordered_times.tree().is_empty());
|
||||||
assert!(state.matches.is_empty());
|
assert!(state.matches.is_empty());
|
||||||
|
|
||||||
// Add non-previously added match
|
// Add non-previously added match
|
||||||
state.add_match(one.clone(), now_less_1s);
|
state.add_match(one.clone(), now_less_1s).await;
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
state.ordered_times.tree(),
|
state.ordered_times.tree(),
|
||||||
&BTreeMap::from([(now_less_1s, one.clone()),])
|
&BTreeMap::from([(now_less_1s, one.clone()),])
|
||||||
|
|
@ -403,7 +424,7 @@ mod tests {
|
||||||
);
|
);
|
||||||
|
|
||||||
// Add previously added match
|
// Add previously added match
|
||||||
state.add_match(one.clone(), now_less_4s);
|
state.add_match(one.clone(), now_less_4s).await;
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
state.ordered_times.tree(),
|
state.ordered_times.tree(),
|
||||||
&BTreeMap::from([(now_less_1s, one.clone()), (now_less_4s, one.clone())])
|
&BTreeMap::from([(now_less_1s, one.clone()), (now_less_4s, one.clone())])
|
||||||
|
|
@ -414,7 +435,7 @@ mod tests {
|
||||||
);
|
);
|
||||||
|
|
||||||
// Remove added match
|
// Remove added match
|
||||||
state.remove_match(&one);
|
state.remove_match(&one).await;
|
||||||
assert!(state.ordered_times.tree().is_empty());
|
assert!(state.ordered_times.tree().is_empty());
|
||||||
assert!(state.matches.is_empty());
|
assert!(state.matches.is_empty());
|
||||||
}
|
}
|
||||||
|
|
@ -424,20 +445,20 @@ mod tests {
|
||||||
let filter = Box::leak(Box::new(ok_filter()));
|
let filter = Box::leak(Box::new(ok_filter()));
|
||||||
|
|
||||||
let one = vec!["one".into()];
|
let one = vec!["one".into()];
|
||||||
let now = Local::now();
|
let now = now();
|
||||||
|
|
||||||
let mut db = TempDatabase::default().await;
|
let mut db = TempDatabase::default().await;
|
||||||
let mut state = State::new(filter, &mut db, now).unwrap();
|
let mut state = State::new(filter, &mut db, now).await.unwrap();
|
||||||
|
|
||||||
assert!(state.triggers.tree().is_empty());
|
assert!(state.triggers.tree().is_empty());
|
||||||
|
|
||||||
// Add unique trigger
|
// Add unique trigger
|
||||||
state.add_trigger(one.clone(), now, None);
|
state.add_trigger(one.clone(), now, None).await;
|
||||||
// Nothing is really added
|
// Nothing is really added
|
||||||
assert!(state.triggers.tree().is_empty());
|
assert!(state.triggers.tree().is_empty());
|
||||||
|
|
||||||
// Will be called immediately after, it returns true
|
// Will be called immediately after, it returns true
|
||||||
assert!(state.decrement_trigger(&one, now, false));
|
assert!(state.decrement_trigger(&one, now, false).await);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
|
|
@@ -477,108 +498,158 @@ mod tests {
         );

         let one = vec!["one".into()];
-        let now = Local::now();
-        let now_plus_1s = now + TimeDelta::seconds(1);
+        let now = now();
+        let now_plus_1s = now + Time::from_secs(1);

         let mut db = TempDatabase::default().await;
-        let mut state = State::new(filter, &mut db, now).unwrap();
+        let mut state = State::new(filter, &mut db, now).await.unwrap();

         assert!(state.triggers.tree().is_empty());

         // Add unique trigger
-        state.add_trigger(one.clone(), now, None);
+        state.add_trigger(one.clone(), now, None).await;
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 3)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 2)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 1)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert!(state.triggers.tree().is_empty());
         // Decrement → false
-        assert!(!state.decrement_trigger(&one, now, false));
+        assert!(!state.decrement_trigger(&one, now, false).await);

         // Add unique trigger (but decrement exiting-like)
-        state.add_trigger(one.clone(), now, None);
+        state.add_trigger(one.clone(), now, None).await;
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 3)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, true));
+        assert!(state.decrement_trigger(&one, now, true).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 2)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, true));
+        assert!(state.decrement_trigger(&one, now, true).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 1)].into())])
         );
         // Decrement but exiting → true, does nothing
-        assert!(state.decrement_trigger(&one, now, true));
+        assert!(state.decrement_trigger(&one, now, true).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now, 1)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert!(state.triggers.tree().is_empty());
         // Decrement → false
-        assert!(!state.decrement_trigger(&one, now, false));
+        assert!(!state.decrement_trigger(&one, now, false).await);

         // Add trigger with neighbour
-        state.add_trigger(one.clone(), now, None);
-        state.add_trigger(one.clone(), now_plus_1s, None);
+        state.add_trigger(one.clone(), now, None).await;
+        state.add_trigger(one.clone(), now_plus_1s, None).await;
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 2)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 1)].into())])
         );
         // Decrement → true
-        assert!(state.decrement_trigger(&one, now, false));
+        assert!(state.decrement_trigger(&one, now, false).await);
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now_plus_1s, 3)].into())])
         );
         // Decrement → false
-        assert!(!state.decrement_trigger(&one, now, false));
+        assert!(!state.decrement_trigger(&one, now, false).await);
         // Remove neighbour
-        state.remove_trigger(&one);
+        state.remove_trigger(&one).await;
         assert!(state.triggers.tree().is_empty());

         // Add two neighbour triggers
-        state.add_trigger(one.clone(), now, None);
-        state.add_trigger(one.clone(), now_plus_1s, None);
+        state.add_trigger(one.clone(), now, None).await;
+        state.add_trigger(one.clone(), now_plus_1s, None).await;
         assert_eq!(
             state.triggers.tree(),
             &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())])
         );
         // Remove them
-        state.remove_trigger(&one);
+        state.remove_trigger(&one).await;
         assert!(state.triggers.tree().is_empty());
     }

+    #[test]
+    fn test_to_matchtime() {
+        assert_eq!(
+            to_matchtime(&Value::Object(Map::from_iter(
+                BTreeMap::from([
+                    ("m".into(), ["plip", "ploup"].into()),
+                    ("t".into(), "12345678".into()),
+                ])
+                .into_iter()
+            ))),
+            Ok(MatchTime {
+                m: vec!["plip".into(), "ploup".into()],
+                t: Time::from_nanos(12345678),
+            })
+        );
+
+        assert!(
+            to_matchtime(&Value::Object(Map::from_iter(
+                BTreeMap::from([("m".into(), ["plip", "ploup"].into()),]).into_iter()
+            )))
+            .is_err()
+        );
+
+        assert!(
+            to_matchtime(&Value::Object(Map::from_iter(
+                BTreeMap::from([("t".into(), 12345678.into()),]).into_iter()
+            )))
+            .is_err()
+        );
+
+        assert!(
+            to_matchtime(&Value::Object(Map::from_iter(
+                BTreeMap::from([("m".into(), "ploup".into()), ("t".into(), 12345678.into()),])
+                    .into_iter()
+            )))
+            .is_err()
+        );
+
+        assert!(
+            to_matchtime(&Value::Object(Map::from_iter(
+                BTreeMap::from([
+                    ("m".into(), ["plip", "ploup"].into()),
+                    ("t".into(), [1234567].into()),
+                ])
+                .into_iter()
+            )))
+            .is_err()
+        );
+    }
 }
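
Two migrations run through the whole hunk above: chrono's `Local::now()` and `TimeDelta` give way to the crate's own `now()` and `Time` helpers, and every `State` mutation gains `.await`. A minimal sketch of what such an async, test-exercised state API looks like — `State` and `Time` here are reduced stand-ins, not the crate's real definitions:

    use std::collections::BTreeMap;

    // Stand-in for the crate's Time: plain nanoseconds since the epoch.
    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    pub struct Time(u64);

    impl Time {
        pub fn from_secs(s: u64) -> Self {
            Time(s * 1_000_000_000)
        }
    }

    #[derive(Default)]
    pub struct State {
        triggers: BTreeMap<Vec<String>, BTreeMap<Time, u32>>,
    }

    impl State {
        // Async because, in the real crate, every mutation also reaches the
        // database; callers must .await it, and tests need #[tokio::test].
        pub async fn add_trigger(&mut self, key: Vec<String>, at: Time) {
            self.triggers.entry(key).or_default().insert(at, 3);
        }
    }

    #[tokio::test]
    async fn add_is_visible() {
        let mut state = State::default();
        state.add_trigger(vec!["one".into()], Time::from_secs(1)).await;
        assert!(!state.triggers.is_empty());
    }
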
@@ -5,18 +5,18 @@ use std::{
     time::Duration,
 };

-use chrono::{Local, TimeDelta};
+use reaction_plugin::shutdown::ShutdownController;
 use serde_json::json;
 use tempfile::TempPath;
 use tokio::sync::Semaphore;

 use super::{
-    state::{filter_ordered_times_db_name, filter_triggers_db_name},
     FilterManager, React,
+    state::{filter_ordered_times_db_name, filter_triggers_db_name},
 };
 use crate::{
-    concepts::{Action, Duplicate, Filter, Pattern, Patterns, Time},
-    daemon::shutdown::ShutdownController,
+    concepts::{Action, Duplicate, Filter, Pattern, Patterns, Time, now},
+    daemon::plugin::Plugins,
     tests::TempDatabase,
 };

@@ -89,7 +89,7 @@ impl TestBed {
         };
         let controller = ShutdownController::new();
         let semaphore = Arc::new(Semaphore::new(1));
-        TestBed2 {
+        let test_bed2 = TestBed2 {
             _out_path: self._out_path,
             out_file: self.out_file,
             az_patterns: self.az_patterns,

@@ -100,11 +100,15 @@ impl TestBed {
                 Some(semaphore.clone()),
                 controller.token(),
                 &mut db,
+                &mut Plugins::default(),
                 now,
             )
+            .await
             .unwrap(),
             semaphore,
-        }
+        };
+        test_bed2.manager.start(now).await;
+        test_bed2
     }
 }

@@ -119,8 +123,8 @@ pub struct TestBed2 {
 }

 impl TestBed2 {
-    pub fn assert_empty_trees(&self) {
-        let state = self.manager.state.lock().unwrap();
+    pub async fn assert_empty_trees(&self) {
+        let state = self.manager.state.lock().await;
         assert!(state.matches.is_empty(), "matches must be empty");
         assert!(
             state.ordered_times.is_empty(),
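
The recurring `state.lock().unwrap()` → `state.lock().await` edit is the signature of swapping `std::sync::Mutex` for `tokio::sync::Mutex`: the async lock returns a future instead of a poisoning `Result`, so waiting yields to the runtime and the `unwrap()` disappears. A minimal sketch, assuming a simplified shared state:

    use std::sync::Arc;
    use tokio::sync::Mutex; // async mutex: lock() yields a future, not a Result

    struct Manager {
        state: Arc<Mutex<Vec<String>>>,
    }

    impl Manager {
        async fn assert_empty(&self) {
            // No poisoning, so no unwrap(); the guard is obtained by awaiting,
            // which lets other tasks run while this one waits for the lock.
            let state = self.state.lock().await;
            assert!(state.is_empty());
        }
    }
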
@@ -170,21 +174,25 @@ async fn three_matches_then_action_then_delayed_action() {
         &bed.az_patterns,
     );

-    let bed = bed.part2(filter, Local::now(), None).await;
+    let now = now();
+    let bed = bed.part2(filter, now, None).await;

     let now = bed.now;
-    let now1s = bed.now + TimeDelta::seconds(1);
-    let now2s = bed.now + TimeDelta::seconds(2);
+    let now1s = bed.now + Time::from_secs(1);
+    let now2s = bed.now + Time::from_secs(2);

     // No match
-    assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch);
-    bed.assert_empty_trees();
+    assert_eq!(
+        bed.manager.handle_line("test 131", now).await,
+        React::NoMatch
+    );
+    bed.assert_empty_trees().await;

     // First match
     let one = vec!["one".to_string()];
-    assert_eq!(bed.manager.handle_line("test one", now), React::Match);
+    assert_eq!(bed.manager.handle_line("test one", now).await, React::Match);
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert_eq!(
             state.matches,
             BTreeMap::from([(one.clone(), BTreeSet::from([now]))]),

@@ -199,9 +207,12 @@ async fn three_matches_then_action_then_delayed_action() {
     }

     // Second match
-    assert_eq!(bed.manager.handle_line("test one", now1s), React::Match);
+    assert_eq!(
+        bed.manager.handle_line("test one", now1s).await,
+        React::Match
+    );
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert_eq!(
             state.matches,
             BTreeMap::from([(one.clone(), BTreeSet::from([now, now1s]))]),

@@ -217,9 +228,12 @@ async fn three_matches_then_action_then_delayed_action() {

     // Third match, exec
     let _block = bed.semaphore.acquire().await.unwrap();
-    assert_eq!(bed.manager.handle_line("test one", now2s), React::Trigger);
+    assert_eq!(
+        bed.manager.handle_line("test one", now2s).await,
+        React::Trigger
+    );
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert!(
             state.matches.is_empty(),
             "matches are emptied after trigger"

@@ -241,7 +255,7 @@ async fn three_matches_then_action_then_delayed_action() {
     tokio::time::sleep(Duration::from_millis(40)).await;
     // Check first action
     assert_eq!(
-        bed.manager.state.lock().unwrap().triggers.tree(),
+        bed.manager.state.lock().await.triggers.tree(),
         &BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]),
         "triggers still contain the triggered match with 1 action left"
     );

@@ -252,10 +266,10 @@ async fn three_matches_then_action_then_delayed_action() {
     );

     // Now the second action executes
-    tokio::time::sleep(Duration::from_millis(100)).await;
+    tokio::time::sleep(Duration::from_millis(140)).await;
     // Check second action
     assert!(
-        bed.manager.state.lock().unwrap().triggers.is_empty(),
+        bed.manager.state.lock().await.triggers.is_empty(),
         "triggers are empty again"
     );
     assert_eq!(

@@ -264,7 +278,7 @@ async fn three_matches_then_action_then_delayed_action() {
         "the output file contains the result of the 2 actions"
     );

-    bed.assert_empty_trees();
+    bed.assert_empty_trees().await;
 }

@@ -292,16 +306,22 @@ async fn one_match_one_action() {
         &bed.az_patterns,
     );

-    let bed = bed.part2(filter, Local::now(), None).await;
-    let now = bed.now;
+    let now = now();
+    let bed = bed.part2(filter, now, None).await;

     // No match
-    assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch);
-    bed.assert_empty_trees();
+    assert_eq!(
+        bed.manager.handle_line("test 131", now).await,
+        React::NoMatch
+    );
+    bed.assert_empty_trees().await;

     // match
-    assert_eq!(bed.manager.handle_line("test one", now), React::Trigger);
-    bed.assert_empty_trees();
+    assert_eq!(
+        bed.manager.handle_line("test one", now).await,
+        React::Trigger
+    );
+    bed.assert_empty_trees().await;

     // the action executes
     tokio::time::sleep(Duration::from_millis(40)).await;

@@ -311,7 +331,7 @@ async fn one_match_one_action() {
         "the output file contains the result of the first action"
     );

-    bed.assert_empty_trees();
+    bed.assert_empty_trees().await;
 }

@@ -339,18 +359,24 @@ async fn one_match_one_delayed_action() {
         &bed.az_patterns,
     );

-    let bed = bed.part2(filter, Local::now(), None).await;
-    let now = bed.now;
+    let now = now();
+    let bed = bed.part2(filter, now, None).await;

     // No match
-    assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch);
-    bed.assert_empty_trees();
+    assert_eq!(
+        bed.manager.handle_line("test 131", now).await,
+        React::NoMatch
+    );
+    bed.assert_empty_trees().await;

     // Match
     let one = vec!["one".to_string()];
-    assert_eq!(bed.manager.handle_line("test one", now), React::Trigger);
+    assert_eq!(
+        bed.manager.handle_line("test one", now).await,
+        React::Trigger
+    );
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert!(state.matches.is_empty(), "matches stay empty");
         assert!(state.ordered_times.is_empty(), "ordered_times stay empty");
         assert_eq!(

@@ -368,7 +394,7 @@ async fn one_match_one_delayed_action() {
     // The action executes
     tokio::time::sleep(Duration::from_millis(140)).await;
     assert!(
-        bed.manager.state.lock().unwrap().triggers.is_empty(),
+        bed.manager.state.lock().await.triggers.is_empty(),
         "triggers are empty again"
     );
     assert_eq!(

@@ -377,7 +403,7 @@ async fn one_match_one_delayed_action() {
         "the output file contains the result of the action"
     );

-    bed.assert_empty_trees();
+    bed.assert_empty_trees().await;
 }

@@ -405,23 +431,22 @@ async fn one_db_match_one_runtime_match_one_action() {
         &bed.az_patterns,
     );

-    let mut db = TempDatabase::default().await;
-
     // Pre-add match
-    let now = Local::now();
+    let now = now();
     let one = vec!["one".to_string()];
-    let now1s = now - TimeDelta::seconds(1);
+    let now1s = now - Time::from_secs(1);

-    db.set_loaded_db(HashMap::from([(
+    let db = TempDatabase::from_loaded_db(HashMap::from([(
         filter_ordered_times_db_name(filter),
-        HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]),
-    )]));
+        HashMap::from([(now1s.as_nanos().to_string().into(), one.clone().into())]),
+    )]))
+    .await;

     // Finish setup
     let bed = bed.part2(filter, now, Some(db)).await;

     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert_eq!(
             state.matches,
             BTreeMap::from([(one.clone(), BTreeSet::from([now1s]))]),
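
Everywhere a database fixture is seeded, the key changes from `now1s.to_rfc3339()` to `now1s.as_nanos().to_string()`: persisted timestamps move from RFC 3339 text to integer nanoseconds since the Unix epoch, which round-trip exactly and need no date parser. A minimal sketch of the new-style key, using std::time as a stand-in for the crate's `Time` (the helper name here is illustrative):

    use std::time::{SystemTime, UNIX_EPOCH};

    // Old key: RFC 3339 text, e.g. "2024-05-01T12:00:00.000000000+02:00".
    // New key: nanoseconds since the epoch, e.g. "1714557600000000000".
    fn key_as_nanos(t: SystemTime) -> String {
        t.duration_since(UNIX_EPOCH)
            .expect("time after epoch")
            .as_nanos()
            .to_string()
    }

    fn main() {
        let k = key_as_nanos(SystemTime::now());
        println!("ordered_times key: {k}");
    }
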
@@ -436,8 +461,11 @@ async fn one_db_match_one_runtime_match_one_action() {
     }

     // match
-    assert_eq!(bed.manager.handle_line("test one", now), React::Trigger);
-    bed.assert_empty_trees();
+    assert_eq!(
+        bed.manager.handle_line("test one", now).await,
+        React::Trigger
+    );
+    bed.assert_empty_trees().await;
     // the action executes
     tokio::time::sleep(Duration::from_millis(40)).await;
     assert_eq!(

@@ -472,21 +500,20 @@ async fn one_outdated_db_match() {
         &bed.az_patterns,
     );

-    let mut db = TempDatabase::default().await;
-
     // Pre-add match
-    let now = Local::now();
+    let now = now();
     let one = vec!["one".to_string()];
-    let now1s = now - TimeDelta::milliseconds(1001);
+    let now1s = now - Time::from_millis(1001);

-    db.set_loaded_db(HashMap::from([(
+    let db = TempDatabase::from_loaded_db(HashMap::from([(
         filter_ordered_times_db_name(filter),
-        HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]),
-    )]));
+        HashMap::from([(now1s.as_nanos().to_string().into(), one.clone().into())]),
+    )]))
+    .await;

     // Finish setup
     let bed = bed.part2(filter, now, Some(db)).await;
-    bed.assert_empty_trees();
+    bed.assert_empty_trees().await;
 }

@@ -531,7 +558,7 @@ async fn trigger_unmatched_pattern() {
         &bed.az_patterns,
     );

-    let now = Local::now();
+    let now = now();
     let one = vec!["one".to_string()];
     let bed = bed.part2(filter, now, None).await;

@@ -545,6 +572,7 @@ async fn trigger_unmatched_pattern() {
             .collect(),
             now,
         )
+        .await
         .unwrap();

     // the action executes

@@ -552,7 +580,7 @@ async fn trigger_unmatched_pattern() {

     // No matches, one action registered
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert!(state.matches.is_empty());
         assert!(state.ordered_times.is_empty());
         assert_eq!(

@@ -604,15 +632,15 @@ async fn trigger_matched_pattern() {
         &bed.az_patterns,
     );

-    let now = Local::now();
-    let now1s = now - TimeDelta::milliseconds(10);
+    let now = now();
+    let now1s = now - Time::from_millis(10);
     let one = vec!["one".to_string()];

-    let mut db = TempDatabase::default().await;
-    db.set_loaded_db(HashMap::from([(
+    let db = TempDatabase::from_loaded_db(HashMap::from([(
         filter_ordered_times_db_name(filter),
-        HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]),
-    )]));
+        HashMap::from([(now1s.as_nanos().to_string().into(), one.clone().into())]),
+    )]))
+    .await;
     let bed = bed.part2(filter, now, Some(db)).await;

     bed.manager

@@ -625,6 +653,7 @@ async fn trigger_matched_pattern() {
             .collect(),
             now,
         )
+        .await
         .unwrap();

     // the action executes

@@ -632,7 +661,7 @@ async fn trigger_matched_pattern() {

     // No matches, one action registered
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert!(state.matches.is_empty());
         assert!(state.ordered_times.is_empty());
         assert_eq!(

@@ -684,22 +713,22 @@ async fn trigger_deduplication_on_start() {
         &bed.az_patterns,
     );

-    let now = Local::now();
-    let now1s = now - TimeDelta::milliseconds(1000);
-    let now2s = now - TimeDelta::milliseconds(1030);
+    let now = now();
+    let now1s = now - Time::from_millis(1000);
+    let now2s = now - Time::from_millis(1030);
     let one = vec!["one".to_string()];

-    let mut db = TempDatabase::default().await;
-    db.set_loaded_db(HashMap::from([(
+    let db = TempDatabase::from_loaded_db(HashMap::from([(
         filter_triggers_db_name(filter),
         HashMap::from([(
             one.clone().into(),
             json!({
-                now1s.to_rfc3339(): 1,
-                now2s.to_rfc3339(): 1,
+                now1s.as_nanos().to_string(): 1,
+                now2s.as_nanos().to_string(): 1,
             }),
         )]),
-    )]));
+    )]))
+    .await;
     let bed = bed.part2(filter, now, Some(db)).await;

     // the action executes
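
The seeded `filter_triggers_db_name` table shows the persisted trigger layout: the key is the captured pattern tuple and the value is a JSON object mapping each trigger time (as a nanosecond string) to the number of actions still pending for it, which is what deduplication on start replays. A sketch of that shape, with made-up timestamps:

    use serde_json::json;

    fn main() {
        // One entry per captured pattern, holding trigger-time → pending
        // action count, exactly as the test above seeds it.
        let key = vec!["one".to_string()];
        let value = json!({
            "1714557600000000000": 1,
            "1714557599970000000": 1,
        });
        println!("{key:?} => {value}");
    }
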
@@ -707,7 +736,7 @@ async fn trigger_deduplication_on_start() {

     // No matches, one or two action·s registered
     {
-        let state = bed.manager.state.lock().unwrap();
+        let state = bed.manager.state.lock().await;
         assert!(state.matches.is_empty());
         assert!(state.ordered_times.is_empty());
         assert_eq!(

@@ -770,15 +799,17 @@ async fn multiple_triggers() {
         &bed.az_patterns,
     );

-    let bed = bed.part2(filter, Local::now(), None).await;
+    let now = now();
+    let bed = bed.part2(filter, now, None).await;

     assert_eq!(
-        bed.manager.handle_line("test one", Local::now()),
+        bed.manager.handle_line("test one", now).await,
         React::Match,
         "Duplicate: {dup:?}"
     );
+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager.handle_line("test one", Local::now()),
+        bed.manager.handle_line("test one", now).await,
         React::Trigger,
         "Duplicate: {dup:?}"
     );

@@ -794,8 +825,9 @@ async fn multiple_triggers() {

     tokio::time::sleep(Duration::from_millis(50)).await;

+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager.handle_line("test one", Local::now()),
+        bed.manager.handle_line("test one", now).await,
         match dup {
             Duplicate::Ignore => React::Match,
             _ => React::Match,

@@ -803,8 +835,9 @@ async fn multiple_triggers() {
         "Duplicate: {dup:?}"
     );

+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager.handle_line("test one", Local::now()),
+        bed.manager.handle_line("test one", now).await,
         match dup {
             Duplicate::Ignore => React::Match,
             _ => React::Trigger,

@@ -935,10 +968,12 @@ async fn extend_trigger_multiple_after_actions() {
         &bed.az_patterns,
     );

-    let bed = bed.part2(filter, Local::now(), None).await;
+    let now = now();
+    let bed = bed.part2(filter, now, None).await;

+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager.handle_line("test one", Local::now()),
+        bed.manager.handle_line("test one", now).await,
         React::Trigger,
     );

@@ -951,8 +986,9 @@ async fn extend_trigger_multiple_after_actions() {
         "Sleep: {second_match_duration:?}"
     );

+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager.handle_line("test one", Local::now()),
+        bed.manager.handle_line("test one", now).await,
         React::Trigger,
     );

@@ -1016,10 +1052,12 @@ async fn ip_specific() {
         &bed.ip_patterns,
     );

-    let bed = bed.part2(filter, Local::now(), None).await;
+    let now = now();
+    let bed = bed.part2(filter, now, None).await;

+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager.handle_line("test 1.2.3.4", Local::now()),
+        bed.manager.handle_line("test 1.2.3.4", now).await,
         React::Trigger,
     );

@@ -1033,9 +1071,9 @@ async fn ip_specific() {

     bed.reset_out_file().await;

+    let now = crate::concepts::now();
     assert_eq!(
-        bed.manager
-            .handle_line("test 1:2:3:4:5:6:7:8", Local::now()),
+        bed.manager.handle_line("test 1:2:3:4:5:6:7:8", now).await,
         React::Trigger,
     );
@@ -3,23 +3,25 @@ use std::{
     error::Error,
     path::PathBuf,
     sync::{
-        atomic::{AtomicBool, Ordering},
         Arc,
+        atomic::{AtomicBool, Ordering},
     },
 };

-use chrono::Local;
+use futures::future::join_all;
+use reaction_plugin::shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken};
 use tokio::{
     select,
-    signal::unix::{signal, SignalKind},
+    signal::unix::{SignalKind, signal},
     sync::Semaphore,
 };
-use tracing::{debug, info};
+use tracing::{debug, error, info};
+use treedb::Database;

-use crate::{concepts::Config, treedb::Database};
+use crate::concepts::{Config, now};
 use filter::FilterManager;
 pub use filter::React;
-pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken};
+use plugin::Plugins;
 use socket::Socket;
 use stream::StreamManager;

@@ -27,30 +29,95 @@ use stream::StreamManager;
 pub use filter::tests;

 mod filter;
-mod shutdown;
+mod plugin;
 mod socket;
 mod stream;
+mod utils;

-pub async fn daemon(
-    config_path: PathBuf,
-    socket: PathBuf,
-) -> Result<(), Box<dyn Error + Send + Sync>> {
-    let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?));
+pub async fn daemon(config_path: PathBuf, socket: PathBuf) -> i32 {
+    // Load config or quit
+    let config: &'static Config = Box::leak(Box::new(match Config::from_path(&config_path) {
+        Ok(config) => config,
+        Err(err) => {
+            error!("{err}");
+            return 1;
+        }
+    }));

     // Cancellation Token
     let shutdown = ShutdownController::new();

-    // Open Database
-    let mut db = Database::open(config).await?;
-    // Open Socket
-    let socket = Socket::open(socket).await?;
+    // Cancel when we receive a quit signal
+    let signal_received = Arc::new(AtomicBool::new(false));
+    if let Err(err) = handle_signals(shutdown.delegate(), signal_received.clone()) {
+        error!("{err}");
+        return 1;

-    // reaction won't abort on startup anymore, we can run start commands
-    if !config.start() {
-        return Err("a start command failed, exiting.".into());
     }

+    let mut db = None;
+    let mut config_started = false;
+    let mut daemon_err = false;
+
+    // Start the real daemon 👹
+    if let Err(err) = daemon_start(
+        config,
+        socket,
+        shutdown.token(),
+        &mut db,
+        &mut config_started,
+    )
+    .await
+    {
+        error!("{err}");
+        daemon_err = true;
+    }
+
+    // Release last db's sender
+    let mut db_status = None;
+    if let Some(db) = db {
+        db_status = Some(db.quit());
+    }
+
+    debug!("Asking for all tasks to quit...");
+    shutdown.ask_shutdown();
+
+    debug!("Waiting for all tasks to quit...");
+    shutdown.wait_all_task_shutdown().await;
+
+    let mut stop_ok = true;
+    if config_started {
+        stop_ok = config.stop();
+    }
+
+    if daemon_err || !stop_ok {
+        return 1;
+    } else if let Some(mut db_status) = db_status
+        && let Ok(Err(err)) = db_status.try_recv()
+    {
+        error!("database error: {}", err);
+        return 1;
+    } else if !signal_received.load(Ordering::SeqCst) {
+        error!("quitting because all streams finished");
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+async fn daemon_start(
+    config: &'static Config,
+    socket: PathBuf,
+    shutdown: ShutdownToken,
+    db: &mut Option<Database>,
+    config_started: &mut bool,
+) -> Result<(), Box<dyn Error + Send + Sync>> {
+    let mut plugins = Plugins::new(config, shutdown.clone()).await?;
+
+    // Open Database
+    let (cancellation, task_tracker) = shutdown.clone().split();
+    let path = PathBuf::from(config.state_directory.clone());
+    *db = Some(Database::open(&path, cancellation, task_tracker).await?);
+
     let (state, stream_managers) = {
         // Semaphore limiting action execution concurrency
         let exec_limit = match config.concurrency {
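
daemon() is now a thin exit-code wrapper around a fallible daemon_start(): the database handle and the "start commands ran" flag travel through &mut out-parameters, so even when daemon_start bails out early with `?`, the caller still owns whatever was already set up and can release the database, run stop commands, and pick the right exit code. The pattern in isolation — every name below is a stand-in:

    async fn run(db: &mut Option<String>, started: &mut bool) -> Result<(), String> {
        *db = Some("open database handle".to_string());
        *started = true;
        // Any later `?` would return here, but the caller keeps what we stored.
        Err("something failed after startup".to_string())
    }

    #[tokio::main]
    async fn main() {
        let mut db = None;
        let mut started = false;
        let failed = run(&mut db, &mut started).await.is_err();
        // Cleanup runs regardless of how `run` exited.
        if let Some(db) = db {
            drop(db); // release the handle
        }
        if started { /* run stop commands here */ }
        std::process::exit(if failed { 1 } else { 0 });
    }
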
@@ -59,67 +126,57 @@ pub async fn daemon(
         };

         // Filter managers
-        let now = Local::now();
+        let now = now();
         let mut state = HashMap::new();
         let mut stream_managers = Vec::new();
         for stream in config.streams.values() {
             let mut filter_managers = HashMap::new();
             for filter in stream.filters.values() {
-                let manager =
-                    FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now)?;
+                let manager = FilterManager::new(
+                    filter,
+                    exec_limit.clone(),
+                    shutdown.clone(),
+                    db.as_mut().unwrap(),
+                    &mut plugins,
+                    now,
+                )
+                .await?;
                 filter_managers.insert(filter, manager);
             }
             state.insert(stream, filter_managers.clone());

-            stream_managers.push(StreamManager::new(
-                stream,
-                filter_managers,
-                shutdown.token(),
-            )?);
+            stream_managers.push(
+                StreamManager::new(stream, filter_managers, shutdown.clone(), &mut plugins).await?,
+            );
         }
         (state, stream_managers)
     };

-    // Run database task
-    let mut db_status_rx = db.manager(shutdown.token());
+    // Open socket and run task
+    let socket = Socket::open(socket).await?;
+    socket.manager(config, state, shutdown.clone());

-    // Run socket task
-    socket.manager(config, state, shutdown.token());
+    // all core systems started, we can run start commands
+    *config_started = true;
+    if !config.start() {
+        return Err("a start command failed, exiting.".into());
+    }
+
+    // Finish plugin setup
+    plugins.start().await?;
+    plugins.manager();

     // Start Stream managers
-    let mut stream_task_handles = Vec::new();
-    for stream_manager in stream_managers {
-        stream_task_handles.push(tokio::spawn(async move { stream_manager.start().await }));
-    }
-
-    // Close streams when we receive a quit signal
-    let signal_received = Arc::new(AtomicBool::new(false));
-    handle_signals(shutdown.delegate(), signal_received.clone())?;
+    let stream_task_handles = stream_managers.into_iter().filter_map(|stream_manager| {
+        let standalone = stream_manager.is_standalone();
+        let handle = tokio::spawn(async move { stream_manager.start().await });
+        // Only wait for standalone streams
+        if standalone { Some(handle) } else { None }
+    });

     // Wait for all streams to quit
-    for task_handle in stream_task_handles {
-        let _ = task_handle.await;
-    }
-
-    debug!("Asking for all tasks to quit...");
-    shutdown.ask_shutdown();
-
-    debug!("Waiting for all tasks to quit...");
-    shutdown.wait_shutdown().await;
-
-    let db_status = db_status_rx.try_recv();
-
-    let stop_ok = config.stop();
-
-    if let Ok(Err(err)) = db_status {
-        Err(format!("database error: {}", err).into())
-    } else if !signal_received.load(Ordering::SeqCst) {
-        Err("quitting because all streams finished".into())
-    } else if !stop_ok {
-        Err("while executing stop command".into())
-    } else {
-        Ok(())
-    }
+    join_all(stream_task_handles).await;
+    Ok(())
 }

 fn handle_signals(
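
The stream-spawning loop becomes a filter_map: every manager is still spawned, but only standalone streams contribute a JoinHandle, so plugin-backed streams no longer keep the daemon alive once everything else is done. Note that filter_map is lazy, so the spawns actually happen when join_all collects the iterator. A reduced sketch of the idiom (the standalone split is mirrored with a plain bool):

    use futures::future::join_all;

    #[tokio::main]
    async fn main() {
        let tasks = vec![("standalone", true), ("plugin-backed", false)];
        // Spawn everything, but keep handles only for tasks we must outlive.
        let handles = tasks.into_iter().filter_map(|(name, standalone)| {
            let handle = tokio::spawn(async move {
                println!("{name} running");
            });
            if standalone { Some(handle) } else { None }
        });
        // filter_map is lazy; join_all drives it and awaits the kept handles.
        join_all(handles).await;
    }
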
src/daemon/plugin/mod.rs (new file, 405 lines)
@@ -0,0 +1,405 @@
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    fmt::Display,
+    io,
+    ops::{Deref, DerefMut},
+    process::ExitStatus,
+    time::Duration,
+};
+
+use futures::{StreamExt, future::join_all};
+use reaction_plugin::{
+    ActionConfig, ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamConfig, StreamImpl,
+};
+use remoc::Connect;
+use tokio::{
+    process::{Child, ChildStderr},
+    time::timeout,
+};
+use tracing::{error, info};
+
+use crate::{
+    concepts::{Action, Config, Plugin, Stream},
+    daemon::{ShutdownToken, stream::reader_to_stream, utils::kill_child},
+};
+
+pub struct PluginManager {
+    child: Child,
+    shutdown: ShutdownToken,
+    plugin: &'static Plugin,
+    plugin_info: PluginInfoClient,
+    streams: BTreeSet<String>,
+    actions: BTreeSet<String>,
+}
+
+impl Deref for PluginManager {
+    type Target = PluginInfoClient;
+    fn deref(&self) -> &Self::Target {
+        &self.plugin_info
+    }
+}
+impl DerefMut for PluginManager {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.plugin_info
+    }
+}
+
+impl PluginManager {
+    async fn new(
+        plugin: &'static Plugin,
+        state_directory: &str,
+        shutdown: ShutdownToken,
+    ) -> Result<Self, String> {
+        let mut child = plugin
+            .launch(state_directory)
+            .await
+            .map_err(|err| systemd_error(plugin, "could not launch plugin", err))?;
+
+        {
+            let stderr = child.stderr.take().unwrap();
+            // let shutdown = shutdown.clone();
+            tokio::spawn(async move { handle_stderr(stderr, plugin.name.clone()).await });
+        }
+
+        let stdin = child.stdin.take().unwrap();
+        let stdout = child.stdout.take().unwrap();
+
+        let (conn, _tx, mut rx): (
+            _,
+            remoc::rch::base::Sender<()>,
+            remoc::rch::base::Receiver<PluginInfoClient>,
+        ) = Connect::io(remoc::Cfg::default(), stdout, stdin)
+            .await
+            .map_err(|err| {
+                systemd_error(plugin, "could not init communication with plugin", err)
+            })?;
+
+        tokio::spawn(conn);
+
+        let mut plugin_info = rx
+            .recv()
+            .await
+            .map_err(|err| format!("could not retrieve initial information from plugin: {err}"))?
+            .ok_or("could not retrieve initial information from plugin: no data")?;
+
+        let manifest = plugin_info
+            .manifest()
+            .await
+            .map_err(|err| format!("error while getting plugin {} manifest: {err}", plugin.name))?;
+
+        let my_hello = Hello::new();
+
+        if let Err(hint) = Hello::is_compatible(&my_hello, &manifest.hello) {
+            return Err(format!(
+                "reaction can't handle plugin {} with incompatible version {}.{}: current version: {}.{}. {}",
+                plugin.name,
+                manifest.hello.version_major,
+                manifest.hello.version_minor,
+                my_hello.version_major,
+                my_hello.version_minor,
+                hint
+            ));
+        }
+
+        Ok(Self {
+            child,
+            shutdown,
+            plugin,
+            plugin_info,
+            streams: manifest.streams,
+            actions: manifest.actions,
+        })
+    }
+
+    async fn handle_child(mut self) {
+        const PLUGIN_STOP_GRACE_TIME: u64 = 15;
+
+        // wait either for the child process to exit on its own or for the shutdown signal
+        tokio::select! {
+            status = self.child.wait() => {
+                self.print_exit(status);
+                return;
+            }
+            _ = self.shutdown.wait() => {}
+        }
+
+        match timeout(
+            Duration::from_secs(PLUGIN_STOP_GRACE_TIME),
+            self.plugin_info.close(),
+        )
+        .await
+        {
+            Ok(Ok(())) => (),
+            Ok(Err(err)) => {
+                error!("plugin {}: {err}", self.plugin.name);
+            }
+            // got timeout
+            Err(_) => {
+                error!(
+                    "plugin {} did not respond to close request in time, killing",
+                    self.plugin.name
+                );
+                kill_child(self.child, format!("plugin {}", self.plugin.name), 5).await;
+            }
+        }
+    }
+
+    fn print_exit(&self, status: io::Result<ExitStatus>) {
+        match status {
+            Ok(status) => match status.code() {
+                Some(code) => {
+                    error!(
+                        "plugin {}: process exited. exit code: {}",
+                        self.plugin.name, code
+                    );
+                }
+                None => {
+                    error!("plugin {}: process exited.", self.plugin.name);
+                }
+            },
+            Err(err) => {
+                error!("plugin {}: process exited. {err}", self.plugin.name);
+            }
+        }
+    }
+}
+
+fn systemd_error(plugin: &Plugin, message: &str, err: impl Display) -> String {
+    if plugin.systemd {
+        format!(
+            "{message}: {err}. \
+            `plugins.{0}.systemd` is set to true, so this may be an issue with systemd's run0. \
+            please make sure `sudo run0 ls /` returns the same thing as `sudo ls /` as a test. \
+            if run0 can't be found or doesn't output anything, set `plugins.{0}.systemd` to false.",
+            plugin.name,
+        )
+    } else {
+        format!("{message}: {err}")
+    }
+}
+
+async fn handle_stderr(stderr: ChildStderr, plugin_name: String) {
+    // read lines until shutdown
+    let lines = reader_to_stream(stderr);
+    tokio::pin!(lines);
+    loop {
+        match lines.next().await {
+            Some(Ok(line)) => {
+                // sad: I can't factorize this because the tracing::event! macro
+                // requires its log level to be a constant.
+                if line.starts_with("DEBUG ") {
+                    tracing::debug!("plugin {plugin_name}: {}", line.split_at(6).1)
+                } else if line.starts_with("INFO ") {
+                    tracing::info!("plugin {plugin_name}: {}", line.split_at(5).1)
+                } else if line.starts_with("WARN ") {
+                    tracing::warn!("plugin {plugin_name}: {}", line.split_at(5).1)
+                } else if line.starts_with("ERROR ") {
+                    tracing::error!("plugin {plugin_name}: {}", line.split_at(6).1)
+                } else {
+                    // If there is no log level, we suppose it's an error (may be a panic or something)
+                    tracing::error!("plugin {plugin_name}: {}", line)
+                }
+            }
+            Some(Err(err)) => {
+                tracing::error!("while trying to read plugin {plugin_name} stderr: {err}");
+                break;
+            }
+            None => break,
+        }
+    }
+}
+
+#[derive(Default)]
+pub struct Plugins {
+    /// Loaded plugins
+    plugins: BTreeMap<String, PluginManager>,
+    /// stream_type to plugin name
+    stream_to_plugin: BTreeMap<String, String>,
+    /// action_type to plugin name
+    action_to_plugin: BTreeMap<String, String>,
+    /// plugin name to config list
+    plugin_to_confs: BTreeMap<String, (Vec<&'static Stream>, Vec<&'static Action>)>,
+    /// stream name to impl
+    stream_to_impl: BTreeMap<String, StreamImpl>,
+    /// action name to impl
+    action_to_impl: BTreeMap<String, ActionImpl>,
+}
+
+impl Plugins {
+    pub async fn new(config: &'static Config, shutdown: ShutdownToken) -> Result<Self, String> {
+        let mut this = Self::default();
+
+        for plugin in config.plugins.values() {
+            let name = plugin.name.clone();
+            this.load_plugin(plugin, &config.state_directory, shutdown.clone())
+                .await
+                .map_err(|err| format!("plugin {name}: {err}]"))?;
+        }
+
+        this.aggregate_plugin_configs(config)?;
+
+        this.load_plugin_configs().await?;
+
+        Ok(this)
+    }
+
+    async fn load_plugin(
+        &mut self,
+        plugin: &'static Plugin,
+        state_directory: &str,
+        shutdown: ShutdownToken,
+    ) -> Result<(), String> {
+        let name = plugin.name.clone();
+        let manager = PluginManager::new(plugin, state_directory, shutdown).await?;
+
+        for stream in &manager.streams {
+            if let Some(name) = self.stream_to_plugin.insert(stream.clone(), name.clone()) {
+                return Err(format!(
+                    "plugin {name} already exposed a stream with type name '{stream}'",
+                ));
+            }
+        }
+
+        for action in &manager.actions {
+            if let Some(name) = self.action_to_plugin.insert(action.clone(), name.clone()) {
+                return Err(format!(
+                    "plugin {name} already exposed a action with type name '{action}'",
+                ));
+            }
+        }
+
+        self.plugins.insert(name, manager);
+        Ok(())
+    }
+
+    fn aggregate_plugin_configs(&mut self, config: &'static Config) -> Result<(), String> {
+        for stream in config.streams.values() {
+            if stream.is_plugin()
+                && let Some(stream_type) = &stream.stream_type
+            {
+                let plugin_name = self.stream_to_plugin.get(stream_type).ok_or_else(|| {
+                    display_plugin_exposed_types(&self.stream_to_plugin, "stream", stream_type)
+                })?;
+                let (streams, _) = self
+                    .plugin_to_confs
+                    .entry(plugin_name.to_owned())
+                    .or_default();
+                streams.push(stream);
+            }
+
+            for action in stream
+                .filters
+                .values()
+                .flat_map(|filter| filter.actions.values())
+            {
+                if action.is_plugin()
+                    && let Some(action_type) = &action.action_type
+                {
+                    let plugin_name = self.action_to_plugin.get(action_type).ok_or_else(|| {
+                        display_plugin_exposed_types(&self.action_to_plugin, "action", action_type)
+                    })?;
+                    let (_, actions) = self
+                        .plugin_to_confs
+                        .entry(plugin_name.to_owned())
+                        .or_default();
+                    actions.push(action);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    async fn load_plugin_configs(&mut self) -> Result<(), String> {
+        let plugin_to_confs = std::mem::take(&mut self.plugin_to_confs);
+        for (plugin_name, (streams, actions)) in plugin_to_confs {
+            let plugin = self
+                .plugins
+                .get_mut(&plugin_name)
+                .ok_or_else(|| format!("could not find plugin {plugin_name}. this is a bug!"))?;
+
+            let stream_names: Vec<String> =
+                streams.iter().map(|stream| stream.name.clone()).collect();
+            let action_names: Vec<String> =
+                actions.iter().map(|action| action.to_string()).collect();
+
+            let (stream_impls, action_impls) = plugin
+                .load_config(
+                    streams
+                        .into_iter()
+                        .map(Stream::to_stream_config)
+                        .collect::<Result<Vec<StreamConfig>, String>>()?,
+                    actions
+                        .into_iter()
+                        .map(Action::to_action_config)
+                        .collect::<Result<Vec<ActionConfig>, String>>()?,
+                )
+                .await
+                .map_err(|err| {
+                    format!("plugin {plugin_name} is not happy with your config: {err}")
+                })?;
+
+            self.stream_to_impl
+                .extend(stream_names.into_iter().zip(stream_impls));
+            self.action_to_impl
+                .extend(action_names.into_iter().zip(action_impls));
+        }
+
+        Ok(())
+    }
+
+    pub fn get_stream_impl(&mut self, stream_name: String) -> Option<StreamImpl> {
+        self.stream_to_impl.remove(&stream_name)
+    }
+
+    pub fn get_action_impl(&mut self, action_fullname: String) -> Option<ActionImpl> {
+        self.action_to_impl.remove(&action_fullname)
+    }
+
+    pub async fn start(&mut self) -> Result<(), String> {
+        // Finish setup of all plugins
+        join_all(
+            self.plugins
+                .values_mut()
+                .map(|plugin_manager| plugin_manager.start()),
+        )
+        .await
+        // Convert Vec<Result<Result>> into Result
+        .into_iter()
+        .zip(self.plugins.values())
+        .try_for_each(|(result, plugin_manager)| {
+            result.map_err(|err| {
+                format!(
+                    "plugin {}: {}",
+                    plugin_manager.plugin.name,
+                    err.to_string().replace('\n', " ")
+                )
+            })
+        })
+    }
+
+    pub fn manager(self) {
+        for plugin in self.plugins.into_values() {
+            tokio::spawn(async move {
+                plugin.handle_child().await;
+            });
+        }
+    }
+}
+
+fn display_plugin_exposed_types(
+    type_to_plugin: &BTreeMap<String, String>,
+    name: &str,
+    invalid: &str,
+) -> String {
+    let mut plugin_to_types: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
+    for (type_, plugin) in type_to_plugin {
+        plugin_to_types.entry(plugin).or_default().push(type_);
+    }
+    for (plugin, types) in plugin_to_types {
+        info!(
+            "Plugin {plugin} exposes those {name} types: '{}'",
+            types.join("', '")
+        );
+    }
+    format!("No plugin provides the {name} type: {invalid}")
+}
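
handle_stderr above replays a plugin's stderr through the daemon's own logger, matching on a textual level prefix because tracing's macros require a constant level. The same routing, condensed with strip_prefix (a small rewrite of the original's split_at calls, not the crate's code):

    fn route(plugin: &str, line: &str) {
        // Peel off the level prefix and re-log the rest under the same level.
        if let Some(rest) = line.strip_prefix("DEBUG ") {
            tracing::debug!("plugin {plugin}: {rest}")
        } else if let Some(rest) = line.strip_prefix("INFO ") {
            tracing::info!("plugin {plugin}: {rest}")
        } else if let Some(rest) = line.strip_prefix("WARN ") {
            tracing::warn!("plugin {plugin}: {rest}")
        } else if let Some(rest) = line.strip_prefix("ERROR ") {
            tracing::error!("plugin {plugin}: {rest}")
        } else {
            // No recognizable level: treat it as an error (could be a panic).
            tracing::error!("plugin {plugin}: {line}")
        }
    }
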
@@ -1,91 +0,0 @@
-use tokio::sync::mpsc;
-use tokio_util::sync::{CancellationToken, WaitForCancellationFuture};
-
-// Thanks to this article for inspiration
-// https://www.wcygan.io/post/tokio-graceful-shutdown/
-// Now TaskTracker exist, but I don't know what I'd gain for using it instead?
-// https://docs.rs/tokio-util/0.7.13/tokio_util/task/task_tracker/struct.TaskTracker.html
-
-/// Permits to keep track of ongoing tasks and ask them to shutdown.
-pub struct ShutdownController {
-    shutdown_notifyer: CancellationToken,
-    task_tracker: mpsc::Sender<()>,
-    task_waiter: mpsc::Receiver<()>,
-}
-
-impl ShutdownController {
-    #[allow(clippy::new_without_default)]
-    pub fn new() -> Self {
-        let (task_tracker, task_waiter) = mpsc::channel(1);
-        Self {
-            shutdown_notifyer: CancellationToken::new(),
-            task_tracker,
-            task_waiter,
-        }
-    }
-
-    /// Ask for all tasks to quit
-    pub fn ask_shutdown(&self) {
-        self.shutdown_notifyer.cancel();
-    }
-
-    /// Wait for all tasks to quit.
-    /// This task may return even without having called [`ShutdownController::ask_shutdown`]
-    /// first, if all tasks quit by themselves.
-    pub async fn wait_shutdown(mut self) {
-        drop(self.task_tracker);
-        self.task_waiter.recv().await;
-    }
-
-    /// Returns a new shutdown token, to be held by a task.
-    pub fn token(&self) -> ShutdownToken {
-        ShutdownToken::new(self.shutdown_notifyer.clone(), self.task_tracker.clone())
-    }
-
-    /// Returns a [`ShutdownDelegate`], which is able to ask for shutdown,
-    /// without counting as a task that needs to be awaited.
-    pub fn delegate(&self) -> ShutdownDelegate {
-        ShutdownDelegate(self.shutdown_notifyer.clone())
-    }
-}
-
-/// Permits to ask for shutdown, without counting as a task that needs to be awaited.
-pub struct ShutdownDelegate(CancellationToken);
-
-impl ShutdownDelegate {
-    /// Ask for all tasks to quit
-    pub fn ask_shutdown(&self) {
-        self.0.cancel();
-    }
-}
-
-/// Created by a [`ShutdownController`].
-/// Serves two purposes:
-///
-/// - Wait for a shutdown request to happen.
-/// - Keep track of the current task. While this token is held,
-///   the [`ShutdownController::wait_shutdown`] will block.
-#[derive(Clone)]
-pub struct ShutdownToken {
-    shutdown_notifyer: CancellationToken,
-    _task_tracker: mpsc::Sender<()>,
-}
-
-impl ShutdownToken {
-    fn new(shutdown_notifyer: CancellationToken, _task_tracker: mpsc::Sender<()>) -> Self {
-        Self {
-            shutdown_notifyer,
-            _task_tracker,
-        }
-    }
-
-    /// Returns a future that will resolve only when a shutdown request happened.
-    pub fn wait(&self) -> WaitForCancellationFuture<'_> {
-        self.shutdown_notifyer.cancelled()
-    }
-
-    /// Ask for all tasks to quit
-    pub fn ask_shutdown(&self) {
-        self.shutdown_notifyer.cancel();
-    }
-}
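
The module deleted above (its types now come from reaction_plugin::shutdown, per the import changes elsewhere in this diff) tracked task liveness with an mpsc channel rather than a counter: every ShutdownToken holds a clone of the Sender, the controller drops its own clone in wait_shutdown, and recv() returns None exactly when the last clone — and therefore the last task — is gone. The trick in isolation:

    use tokio::sync::mpsc;

    #[tokio::main]
    async fn main() {
        let (tracker, mut waiter) = mpsc::channel::<()>(1);

        for i in 0..3 {
            let tracker = tracker.clone(); // one Sender per task
            tokio::spawn(async move {
                println!("task {i} done");
                // `tracker` is dropped here when the task ends
            });
        }

        drop(tracker); // release the controller's own Sender
        // recv() yields None once every clone is dropped, i.e. all tasks ended.
        assert!(waiter.recv().await.is_none());
        println!("all tasks finished");
    }
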
@@ -4,8 +4,8 @@ use std::{
     sync::Arc,
 };

-use chrono::Local;
 use futures::{SinkExt, StreamExt};
+use reaction_plugin::shutdown::ShutdownToken;
 use regex::Regex;
 use tokio::{fs, net::UnixListener};
 use tokio_util::{
@@ -15,11 +15,11 @@ use tokio_util::{
 use tracing::{error, warn};

 use crate::{
-    concepts::{Config, Filter, Pattern, Stream},
+    concepts::{Config, Filter, Pattern, Stream, now},
     protocol::{ClientRequest, ClientStatus, DaemonResponse, Order},
 };

-use super::{filter::FilterManager, shutdown::ShutdownToken};
+use super::filter::FilterManager;

 async fn open_socket(path: PathBuf) -> Result<UnixListener, String> {
     macro_rules! err_str {

@@ -51,7 +51,7 @@ async fn open_socket(path: PathBuf) -> Result<UnixListener, String> {
     err_str!(UnixListener::bind(path))
 }

-fn handle_trigger_order(
+async fn handle_trigger_order(
     stream_name: Option<String>,
     filter_name: Option<String>,
     patterns: BTreeMap<Arc<Pattern>, String>,

@@ -99,55 +99,53 @@ fn handle_trigger_order(
         }
     };

-    let now = Local::now();
-    match filter_manager.handle_trigger(patterns, now) {
+    match filter_manager.handle_trigger(patterns, now()).await {
         Ok(()) => DaemonResponse::Ok(()),
         Err(err) => DaemonResponse::Err(err),
     }
 }

-fn handle_show_or_flush_order(
+async fn handle_show_or_flush_order(
     stream_name: Option<String>,
     filter_name: Option<String>,
     patterns: BTreeMap<Arc<Pattern>, Regex>,
     order: Order,
     shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>,
 ) -> DaemonResponse {
-    let now = Local::now();
-    let cs: ClientStatus = shared_state
+    let now = now();
+    let iter = shared_state
         .iter()
         // stream filtering
         .filter(|(stream, _)| {
             stream_name.is_none() || stream_name.clone().is_some_and(|name| name == stream.name)
-        })
-        .fold(BTreeMap::new(), |mut acc, (stream, filter_manager)| {
-            let inner_map = filter_manager
-                .iter()
-                // filter filtering
-                .filter(|(filter, _)| {
-                    filter_name.is_none()
-                        || filter_name.clone().is_some_and(|name| name == filter.name)
-                })
-                // pattern filtering
-                .filter(|(filter, _)| {
-                    patterns
-                        .iter()
-                        .all(|(pattern, _)| filter.patterns.get(pattern).is_some())
-                })
-                .map(|(filter, manager)| {
-                    (
-                        filter.name.to_owned(),
-                        manager.handle_order(&patterns, order, now),
-                    )
-                })
-                .collect();
-            acc.insert(stream.name.to_owned(), inner_map);
-            acc
         });
+    let mut cs = ClientStatus::new();
+    for (stream, filter_manager) in iter {
+        let iter = filter_manager
+            .iter()
+            // filter filtering
+            .filter(|(filter, _)| {
+                filter_name.is_none() || filter_name.clone().is_some_and(|name| name == filter.name)
+            })
+            // pattern filtering
+            .filter(|(filter, _)| {
+                patterns
+                    .iter()
+                    .all(|(pattern, _)| filter.patterns.get(pattern).is_some())
+            });
+        let mut inner_map = BTreeMap::new();
+        for (filter, manager) in iter {
+            inner_map.insert(
+                filter.name.to_owned(),
+                manager.handle_order(&patterns, order, now).await,
+            );
+        }
+        cs.insert(stream.name.to_owned(), inner_map);
+    }
     DaemonResponse::Order(cs)
 }

-fn answer_order(
+async fn answer_order(
     config: &'static Config,
     shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>,
     options: ClientRequest,
|
|
@ -182,7 +180,7 @@ fn answer_order(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Order::Trigger = options.order {
|
if let Order::Trigger = options.order {
|
||||||
handle_trigger_order(stream_name, filter_name, patterns, shared_state)
|
handle_trigger_order(stream_name, filter_name, patterns, shared_state).await
|
||||||
} else {
|
} else {
|
||||||
let patterns = match patterns
|
let patterns = match patterns
|
||||||
.into_iter()
|
.into_iter()
|
||||||
|
|
@ -206,6 +204,7 @@ fn answer_order(
|
||||||
options.order,
|
options.order,
|
||||||
shared_state,
|
shared_state,
|
||||||
)
|
)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -264,7 +263,7 @@ impl Socket {
|
||||||
serde_json::from_slice(&encoded_request)
|
serde_json::from_slice(&encoded_request)
|
||||||
);
|
);
|
||||||
// Process
|
// Process
|
||||||
let response = answer_order(config, &shared_state, request);
|
let response = answer_order(config, &shared_state, request).await;
|
||||||
// Encode
|
// Encode
|
||||||
let encoded_response =
|
let encoded_response =
|
||||||
or_next!("failed to serialize response", serde_json::to_string::<DaemonResponse>(&response));
|
or_next!("failed to serialize response", serde_json::to_string::<DaemonResponse>(&response));
|
||||||
|
|
|
||||||
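The fold-to-loop rewrite in `handle_show_or_flush_order` above is forced by the async conversion: `manager.handle_order(...)` now returns a future, and `.await` is not allowed inside the plain closure that `Iterator::fold` or `map` takes. A minimal self-contained illustration of the same constraint, with `async_len` as a stand-in for the async call:

async fn async_len(s: &str) -> usize {
    s.len()
}

async fn total(items: &[&str]) -> usize {
    // items.iter().fold(0, |acc, s| acc + async_len(s).await) // does not compile
    let mut acc = 0;
    for s in items {
        acc += async_len(s).await; // fine in an explicit loop
    }
    acc
}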
@@ -1,26 +1,21 @@
 use std::{
-    collections::{BTreeMap, BTreeSet, HashMap},
+    collections::{BTreeSet, HashMap},
     process::Stdio,
-    time::Duration,
 };
 
-use chrono::Local;
-use futures::{FutureExt, Stream as AsyncStream, StreamExt};
-use regex::RegexSet;
+use futures::{FutureExt, Stream as AsyncStream, StreamExt, future::join_all};
+use reaction_plugin::{StreamImpl, shutdown::ShutdownToken};
 use tokio::{
     io::{AsyncBufReadExt, BufReader},
     process::{Child, ChildStderr, ChildStdout, Command},
-    time::sleep,
 };
-use tracing::{error, info, warn};
+use tracing::{debug, error, info};
 
 use crate::{
-    concepts::{Filter, Stream},
-    daemon::filter::FilterManager,
+    concepts::{Filter, Stream, Time, now},
+    daemon::{filter::FilterManager, plugin::Plugins, utils::kill_child},
 };
 
-use super::shutdown::ShutdownToken;
-
 /// Converts bytes to line string, discarding invalid utf8 sequences and newlines at the end
 fn to_line(data: &[u8]) -> String {
     String::from_utf8_lossy(data)
@@ -28,7 +23,7 @@ fn to_line(data: &[u8]) -> String {
         .replace(std::char::REPLACEMENT_CHARACTER, "")
 }
 
-fn reader_to_stream(
+pub fn reader_to_stream(
     reader: impl tokio::io::AsyncRead + Unpin,
 ) -> impl AsyncStream<Item = Result<String, std::io::Error>> {
     let buf_reader = BufReader::new(reader);
@@ -49,38 +44,117 @@ fn reader_to_stream(
 }
 
 pub struct StreamManager {
-    compiled_regex_set: RegexSet,
     regex_index_to_filter_manager: Vec<FilterManager>,
     stream: &'static Stream,
+    stream_plugin: Option<StreamImpl>,
     shutdown: ShutdownToken,
 }
 
 impl StreamManager {
-    pub fn new(
+    pub async fn new(
         stream: &'static Stream,
         filter_managers: HashMap<&'static Filter, FilterManager>,
         shutdown: ShutdownToken,
-    ) -> Result<Self, regex::Error> {
-        let all_regexes: BTreeMap<_, _> = filter_managers
+        plugins: &mut Plugins,
+    ) -> Result<Self, String> {
+        let stream_plugin = if stream.is_plugin() {
+            Some(
+                plugins
+                    .get_stream_impl(stream.name.clone())
+                    .ok_or_else(|| {
+                        format!(
+                            "stream {} doesn't load a plugin. this is a bug!",
+                            stream.name
+                        )
+                    })?,
+            )
+        } else {
+            None
+        };
+
+        let regex_index_to_filter_manager = stream
+            .regex_index_to_filter_name
             .iter()
-            .flat_map(|(filter, filter_manager)| {
-                filter
-                    .regex
+            .map(|filter_name| {
+                filter_managers
                     .iter()
-                    .map(|regex| (regex, filter_manager.clone()))
+                    .find(|(filter, _)| filter_name == &filter.name)
+                    .unwrap()
+                    .1
+                    .clone()
             })
             .collect();
 
+        debug!("successfully initialized stream {}", stream.name);
+
         Ok(StreamManager {
-            compiled_regex_set: RegexSet::new(all_regexes.keys())?,
-            regex_index_to_filter_manager: all_regexes.into_values().collect(),
+            regex_index_to_filter_manager,
             stream,
+            stream_plugin,
            shutdown,
        })
    }
 
-    pub async fn start(self) {
+    pub fn is_standalone(&self) -> bool {
+        match &self.stream_plugin {
+            Some(plugin) => plugin.standalone,
+            None => true,
+        }
+    }
+
+    pub async fn start(mut self) {
+        // First start FilterManagers persisted actions
+        let now = now();
+        join_all(
+            self.regex_index_to_filter_manager
+                .iter()
+                .map(|filter_manager| filter_manager.start(now)),
+        )
+        .await;
+
+        // Then start stream
         info!("{}: start {:?}", self.stream.name, self.stream.cmd);
+
+        if self.stream_plugin.is_some() {
+            self.start_plugin().await
+        } else {
+            self.start_cmd().await
+        }
+    }
+
+    async fn start_plugin(&mut self) {
+        let mut plugin = self.stream_plugin.take().unwrap();
+
+        loop {
+            match plugin.stream.recv().await {
+                Ok(Some((line, time))) => {
+                    self.handle_line(line, time.into()).await;
+                }
+                Err(err) => {
+                    if err.is_final() {
+                        error!(
+                            "error reading from plugin stream {}: {}",
+                            self.stream.name, err
+                        );
+                        return;
+                    } else {
+                        error!(
+                            "temporary error reading from plugin stream {}: {}",
+                            self.stream.name, err
+                        );
+                    }
+                }
+                Ok(None) => {
+                    if !self.shutdown.is_shutdown() {
+                        error!("stream {} has exited", self.stream.name);
+                    }
+                    return;
+                }
+            }
+        }
+    }
+
+    async fn start_cmd(&self) {
         let mut child = match Command::new(&self.stream.cmd[0])
             .args(&self.stream.cmd[1..])
             .stdin(Stdio::null())
@@ -110,9 +184,6 @@ impl StreamManager {
     }
 
     async fn handle_child(&self, mut child: Child) {
-        const STREAM_PROCESS_GRACE_TIME_SEC: u64 = 15;
-        const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5;
-
         // wait either for the child process to exit on its own or for the shutdown signal
         futures::select! {
             _ = child.wait().fuse() => {
@@ -122,43 +193,7 @@ impl StreamManager {
             _ = self.shutdown.wait().fuse() => {}
         }
 
-        // first, try to ask nicely the child process to exit
-        if let Some(pid) = child.id() {
-            let pid = nix::unistd::Pid::from_raw(pid as i32);
-
-            // the most likely error is that the process does not exist anymore
-            // but we still need to reclaim it with Child::wait
-            let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM);
-
-            futures::select! {
-                _ = child.wait().fuse() => {
-                    return;
-                },
-                _ = sleep(Duration::from_secs(STREAM_PROCESS_GRACE_TIME_SEC)).fuse() => {},
-            }
-        } else {
-            warn!(
-                "could not get PID of child process for stream {}",
-                self.stream.name
-            );
-            // still try to use tokio API to kill and reclaim the child process
-        }
-
-        // if that fails, or we cannot get the underlying PID, terminate the process.
-        // NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a
-        // syscall to a resource no-longer available (a notorious example is a read on a disconnected
-        // NFS share)
-
-        // as before, the only expected error is that the child process already terminated
-        // but we still need to reclaim it if that's the case.
-        let _ = child.start_kill();
-
-        futures::select! {
-            _ = child.wait().fuse() => {}
-            _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => {
-                error!("child process of stream {} did not terminate", self.stream.name);
-            }
-        }
+        kill_child(child, format!("stream {}", self.stream.name), 15).await;
     }
 
     async fn handle_io(&self, child_stdout: ChildStdout, child_stderr: ChildStderr) {
@@ -171,10 +206,8 @@ impl StreamManager {
         loop {
             match lines.next().await {
                 Some(Ok(line)) => {
-                    let now = Local::now();
-                    for manager in self.matching_filters(&line) {
-                        manager.handle_line(&line, now);
-                    }
+                    let now = now();
+                    self.handle_line(line, now).await;
                 }
                 Some(Err(err)) => {
                     error!(
@@ -190,8 +223,14 @@ impl StreamManager {
         }
     }
 
+    async fn handle_line(&self, line: String, time: Time) {
+        for manager in self.matching_filters(&line) {
+            manager.handle_line(&line, time).await;
+        }
+    }
+
     fn matching_filters(&self, line: &str) -> BTreeSet<&FilterManager> {
-        let matches = self.compiled_regex_set.matches(line);
+        let matches = self.stream.compiled_regex_set.matches(line);
         matches
             .into_iter()
             .map(|match_| &self.regex_index_to_filter_manager[match_])
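The dispatch that `matching_filters` relies on (now built from `stream.regex_index_to_filter_name` and the `RegexSet` precompiled on the `Stream`) boils down to: `RegexSet::matches` yields the indices of every matching regex, and a parallel `Vec` maps each index to its handler. A self-contained sketch of that scheme, with plain strings standing in for `FilterManager`s:

use regex::RegexSet;

fn main() {
    let set = RegexSet::new(["^GET ", "^POST ", "error"]).unwrap();
    // Parallel to the set: handlers[i] belongs to the i-th regex.
    let handlers = ["log_get", "log_post", "alert"];

    for idx in set.matches("POST /login error") {
        // One line can hit several regexes, hence several handlers.
        println!("dispatching to {}", handlers[idx]);
    }
}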
src/daemon/utils.rs (new file, 51 lines)

use std::time::Duration;

use tokio::{process::Child, time::timeout};
use tracing::{error, warn};

pub async fn kill_child(mut child: Child, context: String, grace_time_sec: u64) {
    const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5;

    // first, try to ask nicely the child process to exit
    if let Some(pid) = child.id() {
        let pid = nix::unistd::Pid::from_raw(pid as i32);

        // the most likely error is that the process does not exist anymore
        // but we still need to reclaim it with Child::wait
        let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM);

        if let Ok(_) = timeout(Duration::from_secs(grace_time_sec), child.wait()).await {
            return;
        }
    } else {
        warn!("could not get PID of child process for {context}");
        // still try to use tokio API to kill and reclaim the child process
    }

    // if that fails, or we cannot get the underlying PID, terminate the process.
    // NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a
    // syscall to a resource no-longer available (a notorious example is a read on a disconnected
    // NFS share)

    // as before, the only expected error is that the child process already terminated
    // but we still need to reclaim it if that's the case.
    warn!("process for {context} didn't exit {grace_time_sec}s after SIGTERM, sending SIGKILL");
    let _ = child.start_kill();

    match timeout(
        Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC),
        child.wait(),
    )
    .await
    {
        Ok(_) => {}
        Err(_) => match child.id() {
            Some(id) => {
                error!("child process of {context} did not terminate. PID: {id}");
            }
            None => {
                error!("child process of {context} did not terminate");
            }
        },
    }
}
@@ -1,10 +1,4 @@
-#![warn(
-    clippy::panic,
-    clippy::todo,
-    clippy::unimplemented,
-    clippy::unwrap_used,
-    unsafe_code
-)]
+#![warn(clippy::panic, clippy::todo, clippy::unimplemented, unsafe_code)]
 #![allow(clippy::upper_case_acronyms, clippy::mutable_key_type)]
 // Allow unwrap in tests
 #![cfg_attr(test, allow(clippy::unwrap_used))]
@@ -15,4 +9,3 @@ pub mod concepts;
 pub mod daemon;
 pub mod protocol;
 pub mod tests;
-pub mod treedb;
src/main.rs (99 lines changed)

@@ -7,7 +7,6 @@ use reaction::{
     daemon::daemon,
     protocol::Order,
 };
-use tracing::{error, Level};
 
 #[tokio::main]
 async fn main() {
@@ -28,68 +27,64 @@ async fn main() {
 
     let cli = Cli::parse();
 
-    let (is_daemon, level) = if let SubCommand::Start { loglevel, .. } = cli.command {
-        (true, loglevel)
-    } else {
-        (false, Level::DEBUG)
-    };
-
-    if is_daemon {
-        // Set log level
+    if let SubCommand::Start {
+        loglevel,
+        config,
+        socket,
+    } = cli.command
+    {
         if let Err(err) = tracing_subscriber::fmt::fmt()
             .without_time()
             .with_target(false)
             .with_ansi(std::io::stdout().is_terminal())
-            .with_max_level(level)
+            .with_max_level(loglevel)
             // .with_max_level(Level::TRACE)
             .try_init()
         {
             eprintln!("ERROR could not initialize logging: {err}");
             exit(1);
         }
-    }
-
-    let result = match cli.command {
-        SubCommand::Start { config, socket, .. } => daemon(config, socket).await,
-        SubCommand::Show {
-            socket,
-            format,
-            limit,
-            patterns,
-        } => request(socket, format, limit, patterns, Order::Show).await,
-        SubCommand::Flush {
-            socket,
-            format,
-            limit,
-            patterns,
-        } => request(socket, format, limit, patterns, Order::Flush).await,
-        SubCommand::Trigger {
-            socket,
-            limit,
-            patterns,
-        } => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await,
-        SubCommand::TestRegex {
-            config,
-            regex,
-            line,
-        } => test_regex(config, regex, line),
-        SubCommand::TestConfig {
-            config,
-            format,
-            verbose,
-        } => test_config(config, format, verbose),
-    };
-    match result {
-        Ok(()) => {
-            exit(0);
-        }
-        Err(err) => {
-            if is_daemon {
-                error!("{err}");
-            } else {
-                eprintln!("ERROR {err}");
-            }
-            exit(1);
-        }
-    }
+        exit(daemon(config, socket).await);
+    } else {
+        let result = match cli.command {
+            SubCommand::Show {
+                socket,
+                format,
+                limit,
+                patterns,
+            } => request(socket, format, limit, patterns, Order::Show).await,
+            SubCommand::Flush {
+                socket,
+                format,
+                limit,
+                patterns,
+            } => request(socket, format, limit, patterns, Order::Flush).await,
+            SubCommand::Trigger {
+                socket,
+                limit,
+                patterns,
+            } => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await,
+            SubCommand::TestRegex {
+                config,
+                regex,
+                line,
+            } => test_regex(config, regex, line),
+            SubCommand::TestConfig {
+                config,
+                format,
+                verbose,
+            } => test_config(config, format, verbose),
+            // Can't be daemon
+            _ => Ok(()),
+        };
+        match result {
+            Ok(()) => {
+                exit(0);
+            }
+            Err(err) => {
+                eprintln!("ERROR {err}");
+                exit(1);
+            }
+        }
+    }
 }
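The dispatch above also changes the daemon's error convention: `daemon()` now returns a numeric exit code that `main` forwards to `exit()` directly, instead of a `Result` (the test changes further down assert `daemon_exit == 1`, and comments there tie exit code 1 to all streams having exited). A minimal sketch of that pattern, with `daemon_sketch` as a stand-in:

use std::process::exit;

async fn daemon_sketch() -> i32 {
    // ... run all streams; once the last one finishes:
    1 // 1 = quitting because all streams finished
}

#[tokio::main]
async fn main() {
    exit(daemon_sketch().await);
}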
src/tests.rs (12 lines changed)

@@ -9,7 +9,7 @@ use std::{
 
 use tempfile::TempDir;
 
-use crate::treedb::Database;
+use treedb::{Database, LoadedDB};
 
 pub struct Fixture {
     path: PathBuf,
@@ -65,7 +65,15 @@ pub struct TempDatabase {
 impl TempDatabase {
     pub async fn default() -> Self {
         let _tempdir = TempDir::new().unwrap();
-        let db = Database::from_dir(_tempdir.path()).await.unwrap();
+        let db = Database::from_dir(_tempdir.path(), None).await.unwrap();
+        TempDatabase { _tempdir, db }
+    }
+
+    pub async fn from_loaded_db(loaded_db: LoadedDB) -> Self {
+        let _tempdir = TempDir::new().unwrap();
+        let db = Database::from_dir(_tempdir.path(), Some(loaded_db))
+            .await
+            .unwrap();
         TempDatabase { _tempdir, db }
     }
 }
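A sketch of how the two constructors might be used in a test fixture; the shape of `LoadedDB` is not part of this hunk, so the pre-loaded variant is only indicated:

async fn fixtures_sketch() {
    // Fresh, empty database in a temporary directory:
    let fresh = TempDatabase::default().await;

    // Database seeded from previously persisted state (LoadedDB assumed):
    // let resumed = TempDatabase::from_loaded_db(loaded_db).await;
    let _ = fresh;
}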
@@ -1,30 +1,30 @@
-use predicates::prelude::*;
 use std::error::Error;
 
-use assert_cmd::Command;
+use assert_cmd::cargo::cargo_bin_cmd;
 
 #[test]
-#[ignore = "currently failing"] // FIXME
 fn load_conf_directory() -> Result<(), Box<dyn Error>> {
-    let mut cmd = Command::cargo_bin("reaction")?;
+    let mut cmd = cargo_bin_cmd!("reaction");
     cmd.args([
         "test-config",
         "--verbose",
         "--config",
         "./tests/test-conf/conf-00.d",
     ]);
-    cmd.assert().success().stdout(predicate::eq(
+    cmd.assert().success().stdout(
         r#"Loaded the configuration from the following files in the directory ./tests/test-conf/conf-00.d in this order:
 part.json
 part.jsonnet
 part.yaml
 part.yml
 
-concurrency: 16
+concurrency: 1
 state_directory: .
 patterns:
   mypat:
     regex: FLAG
+    ipv4mask: null
+    ipv6mask: null
 start:
 - - echo
   - start
@@ -40,6 +40,7 @@ streams:
   from_jsonnet:
     regex:
     - ^<mypat>
+    duplicate: extend
     actions:
       ban:
         cmd:
@@ -53,6 +54,7 @@ streams:
   from_yaml:
     regex:
     - ^'<mypat>'
+    duplicate: extend
     actions:
       print:
         cmd:
@@ -60,7 +62,7 @@ streams:
   - <mypat>
   after: 1s
 
-"#));
+"#);
     Ok(())
 }
@@ -68,7 +70,7 @@ streams:
 fn example_configs_are_equal() {
     let outputs = ["config/example.yml", "config/example.jsonnet"]
         .map(|config_path| {
-            let mut cmd = Command::cargo_bin("reaction").unwrap();
+            let mut cmd = cargo_bin_cmd!("reaction");
             cmd.args(["test-config", "--config", config_path]);
             cmd.assert().success().get_output().stdout.clone()
         })
@@ -1,6 +1,6 @@
 use std::{error::Error, path::Path, process::Stdio, thread::sleep, time::Duration};
 
-use assert_cmd::Command;
+use assert_cmd::cargo::cargo_bin_cmd;
 use assert_fs::prelude::*;
 use nix::sys::signal;
 use predicates::prelude::predicate;
@@ -14,7 +14,7 @@ fn actions_delayed_and_on_exit() -> Result<(), Box<dyn Error>> {
         .child("config.jsonnet")
         .write_file(Path::new("tests/test-conf/test-after.jsonnet"))?;
 
-    let mut cmd = Command::cargo_bin("reaction")?;
+    let mut cmd = cargo_bin_cmd!("reaction");
     cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
     cmd.current_dir(tmp_dir.path());
     cmd.timeout(Duration::from_secs(5));
@@ -46,7 +46,7 @@ fn kill_stream_on_exit() -> Result<(), Box<dyn Error>> {
         .child("config.jsonnet")
         .write_file(Path::new("tests/test-conf/test-shutdown.jsonnet"))?;
 
-    let cmd = Command::cargo_bin("reaction")?;
+    let cmd = cargo_bin_cmd!("reaction");
     let mut cmd = std::process::Command::new(cmd.get_program());
     cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
     cmd.current_dir(tmp_dir.path());
@@ -84,7 +84,7 @@ fn kill_stream_on_exit() -> Result<(), Box<dyn Error>> {
         let _ = signal::kill(pid, signal::SIGKILL);
         let _ = child.wait();
 
-        assert!(false, "Test timed out");
+        panic!("Test timed out");
     }
 }
@@ -103,7 +103,7 @@ fn non_utf8_is_stripped() -> Result<(), Box<dyn Error>> {
         .child("config.jsonnet")
         .write_file(Path::new("tests/test-conf/test-binary-input.jsonnet"))?;
 
-    let mut cmd = Command::cargo_bin("reaction")?;
+    let mut cmd = cargo_bin_cmd!("reaction");
     cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
     cmd.current_dir(tmp_dir.path());
     cmd.timeout(std::time::Duration::from_secs(1));
@@ -124,7 +124,7 @@ fn capture_streams_stderr() -> Result<(), Box<dyn Error>> {
         .child("config.jsonnet")
         .write_file(Path::new("tests/test-conf/test-stream-stderr.jsonnet"))?;
 
-    let mut cmd = Command::cargo_bin("reaction")?;
+    let mut cmd = cargo_bin_cmd!("reaction");
     cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
     cmd.current_dir(tmp_dir.path());
     cmd.timeout(std::time::Duration::from_secs(1));
@@ -149,7 +149,7 @@ fn manualy_trigger_filter() -> Result<(), Box<dyn Error>> {
         .write_file(Path::new("tests/test-conf/test-trigger.jsonnet"))?;
 
     // start daemon
-    let cmd = Command::cargo_bin("reaction")?;
+    let cmd = cargo_bin_cmd!("reaction");
     let program = cmd.get_program();
     let mut cmd = std::process::Command::new(program);
     cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
@@ -173,7 +173,7 @@ fn manualy_trigger_filter() -> Result<(), Box<dyn Error>> {
         if elapsed > Duration::from_secs(1) {
             let _ = daemon.kill();
             let _ = daemon.wait();
-            assert!(false, "Daemon did not create socket");
+            panic!("Daemon did not create socket");
         }
     }
@@ -181,7 +181,7 @@ fn manualy_trigger_filter() -> Result<(), Box<dyn Error>> {
     let socket_path = socket.path().to_str().unwrap();
 
     // trigger event manually
-    let mut cmd_trigger = Command::cargo_bin("reaction")?;
+    let mut cmd_trigger = cargo_bin_cmd!("reaction");
     cmd_trigger.current_dir(tmp_dir.path());
     cmd_trigger.args(["trigger", "--socket", socket_path, "s1.f1", "num=95"]);
     cmd_trigger.timeout(Duration::from_secs(1));
@@ -204,7 +204,7 @@ fn manualy_trigger_filter() -> Result<(), Box<dyn Error>> {
         if elapsed > Duration::from_secs(2) {
             let _ = daemon.kill();
             let _ = daemon.wait();
-            assert!(false, "Daemon did not exit");
+            panic!("Daemon did not exit");
         }
     }
@@ -221,7 +221,7 @@ fn filter_regex_match_eol() -> Result<(), Box<dyn Error>> {
         .child("config.jsonnet")
         .write_file(Path::new("tests/test-conf/test-eol-match.jsonnet"))?;
 
-    let mut cmd = Command::cargo_bin("reaction")?;
+    let mut cmd = cargo_bin_cmd!("reaction");
     cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
     cmd.current_dir(tmp_dir.path());
     cmd.timeout(std::time::Duration::from_secs(1));
@@ -41,6 +41,23 @@
       },
     },
   },
+  f2: {
+    regex: [
+      "^can't found <num>$",
+    ],
+    retry: 2,
+    retryperiod: '60s',
+    actions: {
+      damn: {
+        cmd: ['notify-send', 'you should not see that', 'ban <num>'],
+      },
+      undamn: {
+        cmd: ['notify-send', 'you should not see that', 'unban <num>'],
+        after: '3s',
+        onexit: true,
+      },
+    },
+  },
 },
 },
 },
tests/persistence.rs (new file, 51 lines)

use std::{error::Error, path::Path, time::Duration};

use assert_cmd::cargo::cargo_bin_cmd;
use assert_fs::prelude::*;
use predicates::prelude::predicate;

#[test]
fn resume_action() -> Result<(), Box<dyn Error>> {
    let tmp_dir = assert_fs::TempDir::new()?;

    tmp_dir
        .child("config.jsonnet")
        .write_file(Path::new("tests/test-conf/test-resume-action.jsonnet"))?;

    // first run
    let mut cmd = cargo_bin_cmd!("reaction");
    cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
    cmd.current_dir(tmp_dir.path());
    cmd.timeout(Duration::from_secs(5));
    // Expected exit 1: all stream exited
    cmd.assert().code(predicate::eq(1));

    // expect a single match from the stream command
    let expected = ["starting", "start4 10.1.0.1", "stopping"].join("\n") + "\n";
    tmp_dir.child("log").assert(&expected);

    // second run, expect to resume action
    let mut cmd = cargo_bin_cmd!("reaction");
    cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
    cmd.current_dir(tmp_dir.path());
    cmd.timeout(Duration::from_secs(5));
    // Expected exit 1: all stream exited
    cmd.assert().code(predicate::eq(1));

    let expected = [
        "starting",
        "start4 10.1.0.1", // from the stream command
        "stopping",
        "starting",
        "start4 10.1.0.1", // previous action loaded from db
        "stop4 10.1.0.1",  // previous action lapses
        "start4 10.1.0.1", // from the stream command
        "stopping",
    ]
    .join("\n")
        + "\n";

    tmp_dir.child("log").assert(&expected);

    Ok(())
}
tests/plugin_cluster.rs (new file, 127 lines)

use std::{fs::read_to_string, path::Path, thread, time::Duration};

use assert_cmd::{Command, cargo::cargo_bin_cmd};
use assert_fs::prelude::*;

const SECRET_KEY_A: &str = "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=";
const PUBLIC_KEY_A: &str = "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=";

const SECRET_KEY_B: &str = "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=";
const PUBLIC_KEY_B: &str = "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=";

// require UDP ports 9876-9879 to be free on 127.0.0.1

#[test]
#[ignore = "failing for now"]
fn plugin_cluster_same_startup() {
    // First build reaction-plugin-cluster
    Command::new("cargo")
        .args(["build", "-p", "reaction-plugin-cluster"])
        .unwrap();

    let config = read_to_string("tests/test-conf/test-cluster.jsonnet").unwrap();

    let config_a = config
        .replace("PUBLIC_KEY", PUBLIC_KEY_B)
        .replace("NODE", "A")
        .replace("1234", "9876")
        .replace("4321", "9877");
    let config_b = config
        .replace("PUBLIC_KEY", PUBLIC_KEY_A)
        .replace("NODE", "B")
        .replace("1234", "9877")
        .replace("4321", "9876");

    let output_a = vec![
        "B a0 1", "B a0 2", "B a0 3", "B a0 4", "B b0 1", "B b0 2", "B b0 3", "B b0 4", "",
    ];
    let output_b = vec![
        "A a0 1", "A a0 2", "A a0 3", "A a0 4", "A b0 1", "A b0 2", "A b0 3", "A b0 4", "",
    ];

    let a_handle = thread::spawn(|| launch_node(config_a, SECRET_KEY_A, output_a));
    let b_handle = thread::spawn(|| launch_node(config_b, SECRET_KEY_B, output_b));

    a_handle.join().unwrap();
    b_handle.join().unwrap();
}

#[test]
#[ignore = "failing for now"]
fn plugin_cluster_different_startup() {
    // First build reaction-plugin-cluster
    Command::new("cargo")
        .args(["build", "-p", "reaction-plugin-cluster"])
        .unwrap();

    let config = read_to_string("tests/test-conf/test-cluster.jsonnet").unwrap();

    let config_a = config
        .replace("PUBLIC_KEY", PUBLIC_KEY_B)
        .replace("NODE", "A")
        .replace("1234", "9878")
        .replace("4321", "9879");
    let config_b = config
        .replace("PUBLIC_KEY", PUBLIC_KEY_A)
        .replace("NODE", "B")
        .replace("1234", "9879")
        .replace("4321", "9878");

    let output_a = vec![
        "B a0 1", "B a0 2", "B a0 3", "B a0 4", "B b0 1", "B b0 2", "B b0 3", "B b0 4", "",
    ];
    let output_b = vec![
        "A a0 1", "A a0 2", "A a0 3", "A a0 4", "A b0 1", "A b0 2", "A b0 3", "A b0 4", "",
    ];

    let a_handle = thread::spawn(|| launch_node(config_a, SECRET_KEY_A, output_a));
    let b_handle = thread::spawn(|| {
        thread::sleep(Duration::from_secs(2));
        launch_node(config_b, SECRET_KEY_B, output_b);
    });

    // thread::sleep(Duration::from_secs(60));

    a_handle.join().unwrap();
    b_handle.join().unwrap();
}

fn launch_node(config: String, my_secret: &'static str, expected_output: Vec<&'static str>) {
    let tmp_dir = assert_fs::TempDir::new().unwrap();

    // Write node config
    tmp_dir.child("config.jsonnet").write_str(&config).unwrap();
    tmp_dir
        .child("plugin_data/cluster/secret_key_s1.txt")
        .write_str(my_secret)
        .unwrap();

    // Copy cluster plugin
    tmp_dir
        .child("./target/debug/reaction-plugin-cluster")
        .write_file(Path::new("./target/debug/reaction-plugin-cluster"))
        .unwrap();

    let output = cargo_bin_cmd!("reaction")
        .args([
            "start",
            "--socket",
            "./s",
            "--config",
            "./config.jsonnet",
            "-l",
            "DEBUG",
        ])
        .current_dir(tmp_dir.path())
        .timeout(Duration::from_secs(5))
        .output()
        .unwrap();

    println!(
        "command output:\n{}",
        String::from_utf8(output.stdout).unwrap()
    );

    // Expected output
    tmp_dir.child("log").assert(expected_output.join("\n"));
}
tests/plugin_virtual.rs (new file, 40 lines)

use std::{path::Path, time::Duration};

use assert_cmd::{Command, cargo::cargo_bin_cmd};
use assert_fs::prelude::*;
use predicates::prelude::predicate;

#[test]
fn plugin_virtual() {
    // First build reaction-plugin-virtual
    Command::new("cargo")
        .args(["build", "-p", "reaction-plugin-virtual"])
        .unwrap();

    let tmp_dir = assert_fs::TempDir::new().unwrap();
    tmp_dir
        .child("config.jsonnet")
        .write_file(Path::new("tests/test-conf/test-virtual.jsonnet"))
        .unwrap();

    // Copy virtual plugin
    tmp_dir
        .child("./target/debug/reaction-plugin-virtual")
        .write_file(Path::new("./target/debug/reaction-plugin-virtual"))
        .unwrap();

    cargo_bin_cmd!("reaction")
        .args(["start", "--socket", "./s", "--config", "./config.jsonnet"])
        .current_dir(tmp_dir.path())
        .timeout(Duration::from_secs(5))
        // Expected exit 1: all stream exited
        .assert()
        .code(predicate::eq(1));

    // Expected output
    let output = [
        "a0 1", "a0 2", "a0 3", "a0 4", "b0 1", "b0 2", "b0 3", "b0 4", "",
    ];
    tmp_dir.child("log").assert(output.join("\n"));
    tmp_dir.child("log").write_str("").unwrap();
}
@@ -129,8 +129,11 @@ async fn simple() {
 
     let (daemon_exit, flush1, flush2) = tokio::join!(handle, handle2, handle3);
     assert!(daemon_exit.is_ok());
+    assert!(daemon_exit.unwrap() == 1);
     assert!(flush1.is_ok());
+    assert!(flush1.unwrap().is_ok());
     assert!(flush2.is_ok());
+    assert!(flush2.unwrap().is_ok());
 
     assert_eq!(
         // 24 is encountered for the second time, then
@@ -153,18 +156,14 @@ async fn simple() {
 
     config_with_cmd(
         config_path,
-        "for i in 12 24 36 56 67; do echo here is $i; sleep 0.01; done",
+        "sleep 0.02; for i in 12 24 36 56 67; do echo here is $i; sleep 0.01; done",
     );
 
     file_with_contents(out_path, "");
     file_with_contents(oneshot_path, "");
 
     let daemon_exit = daemon(config_path.into(), socket_path.into()).await;
-    assert!(daemon_exit.is_err());
-    assert_eq!(
-        daemon_exit.unwrap_err().to_string(),
-        "quitting because all streams finished"
-    );
+    assert!(daemon_exit == 1);
 
     // 36 trigger from DB
     // 12 trigger from DB
@@ -1,7 +1,7 @@
 use std::{path::Path, time::Duration};
 
-use assert_cmd::Command;
-use assert_fs::{prelude::*, TempDir};
+use assert_cmd::cargo::cargo_bin_cmd;
+use assert_fs::{TempDir, prelude::*};
 use predicates::prelude::predicate;
 
 #[test]
@@ -23,7 +23,7 @@ fn start_stop() {
         "stop 2",
         "",
     ];
-    tmp_dir.child("log").assert(&output.join("\n"));
+    tmp_dir.child("log").assert(output.join("\n"));
     tmp_dir.child("log").write_str("").unwrap();
 
     println!(
@@ -80,8 +80,7 @@ fn run_reaction(tmp_dir: &TempDir) {
         .write_file(Path::new("tests/start_stop.jsonnet"))
         .unwrap();
 
-    Command::cargo_bin("reaction")
-        .unwrap()
+    cargo_bin_cmd!("reaction")
         .args(["start", "--socket", "./s", "--config", "./config.jsonnet"])
         .current_dir(tmp_dir.path())
         .timeout(Duration::from_secs(5))
@@ -1,3 +1,4 @@
+concurrency: 1
 start:
 - - echo
   - start
tests/test-conf/test-cluster.jsonnet (new file, 71 lines)

{
  patterns: {
    num: {
      regex: @"[0-9]+",
    },
    all: {
      regex: @".*",
    },
  },

  plugins: {
    cluster: {
      path: './target/debug/reaction-plugin-cluster',
      check_root: false,
      systemd_options: {
        DynamicUser: ['false'],
      },
    },
  },

  streams: {
    s0: {
      cmd: ['bash', '-c', 'sleep 1; for i in $(seq 4); do echo $i; sleep 0.1; done'],
      filters: {
        f0: {
          regex: ['^<num>$'],
          actions: {
            a0: {
              type: 'cluster_send',
              options: {
                send: 'NODE a0 <num>',
                to: 's1',
              },
            },
            b0: {
              type: 'cluster_send',
              options: {
                send: 'NODE b0 <num>',
                to: 's1',
              },
              after: '1s',
            },
          },
        },
      },
    },
    s1: {
      type: 'cluster',
      options: {
        listen_port: 1234,
        bind_ipv4: '127.0.0.1',
        bind_ipv6: null,
        message_timeout: '30s',
        nodes: [{
          public_key: 'PUBLIC_KEY',
          addresses: ['127.0.0.1:4321'],
        }],
      },
      filters: {
        f1: {
          regex: ['^<all>$'],
          actions: {
            a1: {
              cmd: ['sh', '-c', 'echo <all> >>./log'],
            },
          },
        },
      },
    },
  },
}
tests/test-conf/test-ipset.jsonnet (new file, 52 lines)

{
  patterns: {
    ip: {
      type: 'ip',
    },
    all: {
      regex: @".*",
    },
  },

  plugins: {
    ipset: {
      path: './target/debug/reaction-plugin-ipset',
      check_root: false,
      systemd_options: {
        CapabilityBoundingSet: ['~CAP_NET_ADMIN', '~CAP_PERFMON'],
      },
    },
  },

  streams: {
    s0: {
      cmd: ['bash', '-c', 'sleep 1; for i in $(seq 4); do echo 192.0.2.$i; echo 2001:db8:$i:a31b::$i; sleep 0.1; done; sleep 3'],
      filters: {
        f0: {
          regex: ['^<ip>$'],
          actions: {
            a0: {
              type: 'ipset',
              options: {
                set: 'reactiontest',
                // pattern: 'ip',
                // version: 'ip',
                // chains: ['INPUT', 'FORWARD'],
                // target: 'DROP',
                // action: 'add',
              },
            },
            a1: {
              after: '2s',
              type: 'ipset',
              options: {
                set: 'reactiontest',
                action: 'del',
              },
            },
          },
        },
      },
    },
  },
}
tests/test-conf/test-resume-action.jsonnet (new file, 46 lines)

{
  patterns: {
    ip: {
      type: 'ip',
      ipv6mask: 64,
    },
  },
  start: [
    ['sh', '-c', 'echo starting >>./log'],
  ],
  stop: [
    ['sh', '-c', 'echo stopping >>./log'],
  ],
  streams: {
    s1: {
      cmd: ['sh', '-c', 'sleep 2; echo T 10.1.0.1; sleep 0.2; echo T 10.1.0.1; sleep 0.8'],
      filters: {
        f1: {
          regex: ['T <ip>'],
          actions: {
            log_start4: {
              ipv4only: true,
              cmd: ['sh', '-c', 'echo start4 <ip> >>./log'],
            },
            log_start6: {
              ipv6only: true,
              cmd: ['sh', '-c', 'echo start6 <ip> >>./log'],
            },
            log_stop4: {
              ipv4only: true,
              onexit: false,
              after: '2s',
              cmd: ['sh', '-c', 'echo stop4 <ip> >> ./log'],
            },
            log_stop6: {
              ipv6only: true,
              onexit: false,
              after: '2s',
              cmd: ['sh', '-c', 'echo stop6 <ip> >> ./log'],
            },
          },
        },
      },
    },
  },
}
tests/test-conf/test-virtual.jsonnet (new file, 61 lines)

{
  patterns: {
    num: {
      regex: @"[0-9]+",
    },
    all: {
      regex: @".*",
    },
  },

  plugins: {
    virtual: {
      path: './target/debug/reaction-plugin-virtual',
      check_root: false,
      systemd_options: {
      },
    },
  },

  streams: {
    s0: {
      cmd: ['bash', '-c', 'for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2'],
      filters: {
        f0: {
          regex: ['^<num>$'],
          actions: {
            a0: {
              type: 'virtual',
              options: {
                send: 'a0 <num>',
                to: 's1',
              },
            },
            b0: {
              type: 'virtual',
              options: {
                send: 'b0 <num>',
                to: 's1',
              },
              after: '600ms',
            },
          },
        },
      },
    },
    s1: {
      type: 'virtual',
      options: {},
      filters: {
        f1: {
          regex: ['^<all>$'],
          actions: {
            a1: {
              cmd: ['sh', '-c', 'echo <all> >>./log'],
            },
          },
        },
      },
    },
  },
}