Compare commits

..

No commits in common. "main" and "v2.1.2" have entirely different histories.

111 changed files with 2983 additions and 17021 deletions

1
.envrc
View file

@ -1 +0,0 @@
use_nix

13
.gitignore vendored
View file

@ -1,15 +1,22 @@
/reaction
reaction.db
reaction.db.old
/ip46tables
/nft46
reaction*.db
reaction*.db.old
/data
/lmdb
reaction*.export.json
/reaction*.sock
/result
/wiki
/deb
*.deb
*.minisig
*.qcow2
debian-packaging/*
*.swp
export-go-db/export-go-db
import-rust-db/target
/target
/local
.ccls-cache
.direnv

15
.gitlab-ci.yml Normal file
View file

@ -0,0 +1,15 @@
---
image: golang:1.20-bookworm
stages:
- build
variables:
DEBIAN_FRONTEND: noninteractive
test_building:
stage: build
before_script:
- apt-get -qq -y update
- apt-get -qq -y install build-essential devscripts debhelper quilt wget
script:
- make reaction ip46tables nft46

View file

@ -6,7 +6,6 @@ Here is a high-level overview of the codebase.
## Build
- `bench/`: Configuration that spawns a very high load on reaction. Useful to test performance improvements and regressions.
- `build.rs`: permits to create shell completions and man pages on build.
- `Cargo.toml`, `Cargo.lock`: manifest and dependencies.
- `config/`: example / test configuration files. Look at its git history to discover more.
@ -16,7 +15,8 @@ Here is a high-level overview of the codebase.
## Main source code
- `tests/`: Integration tests. They test reaction runtime behavior, persistence, client-daemon communication, plugin integrations.
- `helpers_c/`: C helpers. I wish to have special IP support in reaction and get rid of them. See #79 and #116.
- `tests/`: Integration tests. For now they test basic reaction runtime behavior, persistence, and client-daemon communication.
- `src/`: The source code, here we go!
### Top-level files
@ -25,13 +25,18 @@ Here is a high-level overview of the codebase.
- `src/lib.rs`: Second main entrypoint
- `src/cli.rs`: Command-line arguments
- `src/tests.rs`: Test utilities
- `src/protocol.rs`: de/serialization and client/daemon protocol messages.
### `src/concepts/`
reaction really is about its configuration, which is at the center of the code.
There is one file for each of its concepts: configuration, streams, filters, actions, patterns, plugins.
There is one file for each of its concepts: configuration, streams, filters, actions, patterns.
### `src/protocol/`
Low-level serialization/deserialization and client-daemon protocol messages.
Shared by the client and daemon's socket. Also used by daemon's database.
### `src/client/`
@ -53,9 +58,9 @@ This code has async code, to handle input streams and communication with clients
- `mod.rs`: High-level logic
- `state.rs`: Inner state operations
- `socket.rs`: The socket task, responsible for communication with clients.
- `plugin.rs`: Plugin startup, configuration loading and cleanup.
- `shutdown.rs`: Logic for passing shutdown signal across all tasks
### `crates/treedb`
### `src/tree`
Persistence layer.
@ -63,19 +68,5 @@ This is a database highly adapted to reaction workload, making reaction faster t
(heed, sled and fjall crates have been tested).
Its design is explained in the comments of its files:
- `lib.rs`: main database code, with its two API structs: Tree and Database.
- `raw.rs`: low-level part, directly interacting with de/serialization and files.
- `time.rs`: time definitions shared with reaction.
- `helpers.rs`: utilities to ease db deserialization from disk.
### `plugins/reaction-plugin`
Shared plugin interface between reaction daemon and its plugins.
Also defines some shared logic between them:
- `shutdown.rs`: Logic for passing shutdown signal across all tasks
- `parse_duration.rs`: Duration parsing
### `plugins/reaction-plugin-*`
All core plugins.
- `mod.rs`: main database code, with its two API structs: Tree and Database.
- `raw.rs`: low-level part, directly interacting with de/serialization and files.

4025
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,7 +1,7 @@
[package]
name = "reaction"
version = "2.3.0"
edition = "2024"
version = "2.1.2"
edition = "2021"
authors = ["ppom <reaction@ppom.me>"]
license = "AGPL-3.0"
description = "Scan logs and take action"
@ -10,58 +10,40 @@ homepage = "https://reaction.ppom.me"
repository = "https://framagit.org/ppom/reaction"
keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"]
build = "build.rs"
default-run = "reaction"
[package.metadata.deb]
section = "net"
extended-description = """A daemon that scans program outputs for repeated patterns, and takes action.
A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors.
reaction aims at being a successor to fail2ban."""
maintainer-scripts = "packaging/"
systemd-units = { enable = false }
assets = [
# Executables
[ "target/release/reaction", "/usr/bin/reaction", "755" ],
[ "target/release/reaction-plugin-virtual", "/usr/bin/reaction-plugin-virtual", "755" ],
[ "target/release/ip46tables", "/usr/bin/ip46tables", "755" ],
[ "target/release/nft46", "/usr/bin/nft46", "755" ],
# Man pages
[ "target/release/reaction*.1", "/usr/share/man/man1/", "644" ],
# Shell completions
[ "target/release/reaction.bash", "/usr/share/bash-completion/completions/reaction", "644" ],
[ "target/release/reaction.fish", "/usr/share/fish/completions/", "644" ],
[ "target/release/_reaction", "/usr/share/zsh/vendor-completions/", "644" ],
# Slice
[ "packaging/system-reaction.slice", "/usr/lib/systemd/system/", "644" ],
]
[dependencies]
# Time types
chrono.workspace = true
# CLI parsing
chrono = { version = "0.4.38", features = ["std", "clock", "serde"] }
clap = { version = "4.5.4", features = ["derive"] }
# Unix interfaces
jrsonnet-evaluator = "0.4.2"
nix = { version = "0.29.0", features = ["signal"] }
num_cpus = "1.16.0"
# Regex matching
regex = "1.10.4"
# Configuration languages, ser/deserialisation
serde.workspace = true
serde_json.workspace = true
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.117"
serde_yaml = "0.9.34"
jrsonnet-evaluator = "0.4.2"
# Error macro
thiserror.workspace = true
# Async runtime & helpers
futures = { workspace = true }
tokio = { workspace = true, features = ["full", "tracing"] }
tokio-util = { workspace = true, features = ["codec"] }
# Async logging
tracing.workspace = true
thiserror = "1.0.63"
timer = "0.2.0"
futures = "0.3.30"
tokio = { version = "1.40.0", features = ["full", "tracing"] }
tokio-util = { version = "0.7.12", features = ["codec"] }
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
# Database
treedb.workspace = true
# Reaction plugin system
remoc.workspace = true
reaction-plugin.workspace = true
[build-dependencies]
clap = { version = "4.5.4", features = ["derive"] }
@ -72,34 +54,4 @@ tracing = "0.1.40"
[dev-dependencies]
rand = "0.8.5"
treedb.workspace = true
treedb.features = ["test"]
tempfile.workspace = true
assert_fs.workspace = true
assert_cmd = "2.0.17"
predicates = "3.1.3"
[workspace]
members = [
"crates/treedb",
"plugins/reaction-plugin",
"plugins/reaction-plugin-cluster",
"plugins/reaction-plugin-ipset",
"plugins/reaction-plugin-nftables",
"plugins/reaction-plugin-virtual"
]
[workspace.dependencies]
assert_fs = "1.1.3"
chrono = { version = "0.4.38", features = ["std", "clock", "serde"] }
futures = "0.3.30"
remoc = { version = "0.18.3" }
serde = { version = "1.0.203", features = ["derive"] }
serde_json = { version = "1.0.117", features = ["arbitrary_precision"] }
tempfile = "3.12.0"
thiserror = "1.0.63"
tokio = { version = "1.40.0" }
tokio-util = { version = "0.7.12" }
tracing = "0.1.40"
reaction-plugin = { path = "plugins/reaction-plugin" }
treedb = { path = "crates/treedb" }

View file

@ -1,11 +0,0 @@
# This Dockerfile permits to build reaction and its plugins
# Use debian old-stable, so that it runs on both old-stable and stable
FROM rust:bookworm
RUN apt update && apt install -y \
clang \
libipset-dev \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /reaction

View file

@ -14,6 +14,8 @@ reaction:
install: reaction
install -m755 target/release/reaction $(DESTDIR)$(BINDIR)
install -m755 target/release/ip46tables $(DESTDIR)$(BINDIR)
install -m755 target/release/nft46 $(DESTDIR)$(BINDIR)
install_systemd: install
install -m644 packaging/reaction.service $(SYSTEMDDIR)/system/reaction.service

View file

@ -4,7 +4,7 @@ A daemon that scans program outputs for repeated patterns, and takes action.
A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors.
🚧 This program hasn't received external security audit yet. However, it already works well on many servers 🚧
🚧 This program hasn't received external security audit. However, it already works well on my servers 🚧
## Rationale
@ -33,11 +33,11 @@ See https://blog.ppom.me/en-reaction-v2.
YAML and [JSONnet](https://jsonnet.org/) (more powerful) are supported.
both are extensions of JSON, so JSON is transitively supported.
- See [reaction.yml](./config/example.yml) or [reaction.jsonnet](./config/example.jsonnet) for a fully explained reference (ipv4 + ipv6)
- See [reaction.yml](./config/example.yml) or [reaction.jsonnet](./config/example.jsonnet) for a fully explained reference
- See the [wiki](https://reaction.ppom.me) for multiple examples, security recommendations and FAQ.
- See [server.jsonnet](https://reaction.ppom.me/configurations/ppom/server.jsonnet.html) for a real-world configuration
- See [reaction.service](./config/reaction.service) for a systemd service file
- This minimal example (ipv4 only) shows what's needed to prevent brute force attacks on an ssh server (please read at least the [Security](https://reaction.ppom.me/security.html) part of the wiki before starting 🆙):
- This minimal example shows what's needed to prevent brute force attacks on an ssh server (please read at least the [Security](https://reaction.ppom.me/security.html) part of the wiki before starting 🆙):
<details open>
@ -46,18 +46,21 @@ both are extensions of JSON, so JSON is transitively supported.
```yaml
patterns:
ip:
type: ipv4
regex: '(([0-9]{1,3}\.){3}[0-9]{1,3})|([0-9a-fA-F:]{2,90})'
ignore:
- '127.0.0.1'
- '::1'
start:
- [ 'iptables', '-w', '-N', 'reaction' ]
- [ 'iptables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'iptables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-N', 'reaction' ]
- [ 'ip46tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
stop:
- [ 'iptables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'iptables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'iptables', '-w', '-F', 'reaction' ]
- [ 'iptables', '-w', '-X', 'reaction' ]
- [ 'ip46tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-F', 'reaction' ]
- [ 'ip46tables', '-w', '-X', 'reaction' ]
streams:
ssh:
@ -67,15 +70,15 @@ streams:
regex:
- 'authentication failure;.*rhost=<ip>'
- 'Failed password for .* from <ip>'
- 'Invalid user .* from <ip>'
- 'banner exchange: Connection from <ip> port [0-9]*: invalid format'
- 'Invalid user .* from <ip>'
- 'banner exchange: Connection from <ip> port [0-9]*: invalid format'
retry: 3
retryperiod: '6h'
actions:
ban:
cmd: [ 'iptables', '-w', '-I', 'reaction', '1', '-s', '<ip>', '-j', 'DROP' ]
cmd: [ 'ip46tables', '-w', '-I', 'reaction', '1', '-s', '<ip>', '-j', 'DROP' ]
unban:
cmd: [ 'iptables', '-w', '-D', 'reaction', '1', '-s', '<ip>', '-j', 'DROP' ]
cmd: [ 'ip46tables', '-w', '-D', 'reaction', '1', '-s', '<ip>', '-j', 'DROP' ]
after: '48h'
```
@ -86,40 +89,41 @@ streams:
<summary><code>/etc/reaction.jsonnet</code></summary>
```jsonnet
local iptables(args) = [ 'ip46tables', '-w' ] + args;
local banFor(time) = {
ban: {
cmd: ['iptables', '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP'],
cmd: iptables(['-A', 'reaction', '-s', '<ip>', '-j', 'DROP']),
},
unban: {
cmd: ['iptables', '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP'],
after: time,
cmd: iptables(['-D', 'reaction', '-s', '<ip>', '-j', 'DROP']),
},
};
{
patterns: {
ip: {
type: 'ipv4',
regex: @'(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})',
},
},
start: [
['iptables', '-N', 'reaction'],
['iptables', '-I', 'INPUT', '-p', 'all', '-j', 'reaction'],
['iptables', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction'],
iptables([ '-N', 'reaction' ]),
iptables([ '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]),
iptables([ '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]),
],
stop: [
['iptables', '-D', 'INPUT', '-p', 'all', '-j', 'reaction'],
['iptables', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction'],
['iptables', '-F', 'reaction'],
['iptables', '-X', 'reaction'],
iptables([ '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]),
iptables([ '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]),
iptables([ '-F', 'reaction' ]),
iptables([ '-X', 'reaction' ]),
],
streams: {
ssh: {
cmd: ['journalctl', '-fu', 'sshd.service'],
cmd: [ 'journalctl', '-fu', 'sshd.service' ],
filters: {
failedlogin: {
regex: [
@'authentication failure;.*rhost=<ip>',
@'authentication failure;.*rhost=<ip>',
@'Failed password for .* from <ip>',
@'banner exchange: Connection from <ip> port [0-9]*: invalid format',
@'Invalid user .* from <ip>',
@ -136,9 +140,6 @@ local banFor(time) = {
</details>
> It is recommended to setup reaction with [`nftables`](https://reaction.ppom.me/actions/nftables.html)
> or [`ipset` + `iptables`](https://reaction.ppom.me/actions/ipset.html), which are much more performant
> solutions than `iptables` alone.
### Database
@ -155,10 +156,12 @@ If you don't know where to start reaction, `/var/lib/reaction` should be a sane
- `reaction test-config` shows loaded configuration
- `reaction help` for full usage.
### old binaries
### `ip46tables`
`ip46tables` and `nft46` binaries are no longer part of reaction. If you really need them, see
[the last commit that included them](https://framagit.org/ppom/reaction/-/tree/b7d997ca5e9a69c8572bb2ec9d27d0eb03b3cb9f/helpers_c).
`ip46tables` is a minimal C program present in its own subdirectory with only standard POSIX dependencies.
It permits to configure `iptables` and `ip6tables` at the same time.
It will execute `iptables` when detecting ipv4, `ip6tables` when detecting ipv6 and both if no ip address is present on the command line.
## Wiki
@ -239,7 +242,6 @@ French version: [#reaction-dev-fr:club1.fr](https://matrix.to/#/#reaction-dev-fr
You can ask for help in the issues or in this Matrix room: [#reaction-users-en:club1.fr](https://matrix.to/#/#reaction-users-en:club1.fr).
French version: [#reaction-users-fr:club1.fr](https://matrix.to/#/#reaction-users-fr:club1.fr).
You can alternatively send a mail: `reaction` on domain `ppom.me`.
## Funding

3
TODO
View file

@ -1,3 +0,0 @@
Test what happens when a Filter's pattern Set changes (I think it's shitty)
DB: add tests on stress testing (lines should always be in order)
conf: merge filters

View file

@ -14,7 +14,7 @@ then
fi
rm -f reaction.db
cargo build --release --bins
cargo build --release
sudo systemd-run --wait \
-p User="$(id -nu)" \
-p MemoryAccounting=yes \

View file

@ -1,86 +0,0 @@
---
# This configuration permits to test reaction's performance
# under a very high load
#
# It keeps regexes super simple, to avoid benchmarking the `regex` crate,
# and benchmark reaction's internals instead.
concurrency: 32
plugins:
- path: "/home/ppom/prg/reaction/target/release/reaction-plugin-virtual"
patterns:
num:
regex: '[0-9]{3}'
ip:
regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})'
ignore:
- 1.0.0.1
streams:
virtual:
type: virtual
filters:
find0:
regex:
- '^<num>$'
actions:
damn:
cmd: [ 'sleep', '0.0<num>' ]
undamn:
cmd: [ 'sleep', '0.0<num>' ]
after: 1m
onexit: false
tailDown1:
cmd: [ 'sh', '-c', 'sleep 2; seq 1001 | while read i; do echo found $i; done' ]
filters:
find1:
regex:
- '^found <num>'
retry: 9
retryperiod: 6m
actions:
virtual:
type: virtual
options:
send: '<num>'
to: virtual
tailDown2:
cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
filters:
find2:
regex:
- '^found <num>'
retry: 480
retryperiod: 6m
actions:
virtual:
type: virtual
options:
send: '<num>'
to: virtual
tailDown3:
cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
filters:
find3:
regex:
- '^found <num>'
retry: 480
retryperiod: 6m
actions:
virtual:
type: virtual
options:
send: '<num>'
to: virtual
find4:
regex:
- '^trouvé <num>'
retry: 480
retryperiod: 6m
actions:
virtual:
type: virtual
options:
send: '<num>'
to: virtual

View file

@ -1,74 +0,0 @@
---
# This configuration permits to test reaction's performance
# under a very high load
#
# It keeps regexes super simple, to avoid benchmarking the `regex` crate,
# and benchmark reaction's internals instead.
concurrency: 32
patterns:
num:
regex: '[0-9]{3}'
ip:
regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})'
ignore:
- 1.0.0.1
streams:
tailDown1:
cmd: [ 'sh', '-c', 'sleep 2; seq 1001 | while read i; do echo found $i; done' ]
filters:
find1:
regex:
- '^found <num>'
retry: 9
retryperiod: 6m
actions:
damn:
cmd: [ 'sleep', '0.0<num>' ]
undamn:
cmd: [ 'sleep', '0.0<num>' ]
after: 1m
onexit: false
tailDown2:
cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
filters:
find2:
regex:
- '^found <num>'
retry: 480
retryperiod: 6m
actions:
damn:
cmd: [ 'sleep', '0.0<num>' ]
undamn:
cmd: [ 'sleep', '0.0<num>' ]
after: 1m
onexit: false
tailDown3:
cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ]
filters:
find3:
regex:
- '^found <num>'
retry: 480
retryperiod: 6m
actions:
damn:
cmd: [ 'sleep', '0.0<num>' ]
undamn:
cmd: [ 'sleep', '0.0<num>' ]
after: 1m
onexit: false
find4:
regex:
- '^trouvé <num>'
retry: 480
retryperiod: 6m
actions:
damn:
cmd: [ 'sleep', '0.0<num>' ]
undamn:
cmd: [ 'sleep', '0.0<num>' ]
after: 1m
onexit: false

View file

@ -1,6 +1,8 @@
use std::{
env::var_os,
env::{var, var_os},
io::{self, ErrorKind},
path::Path,
process,
};
use clap_complete::shells;
@ -8,10 +10,54 @@ use clap_complete::shells;
// SubCommand defined here
include!("src/cli.rs");
fn cc() -> String {
// TARGET looks like aarch64-unknown-linux-musl
let cc = match var("TARGET") {
Ok(target) => {
// We're looking for an environment variable looking like
// CC_aarch64_unknown_linux_musl
let target = target.replace("-", "_");
var(format!("CC_{}", target.replace("-", "_"))).ok()
}
Err(_) => None,
};
match cc {
Some(cc) => Some(cc),
// Else we're looking for CC environment variable
None => var("CC").ok(),
}
// Else we use `cc`
.unwrap_or("cc".into())
}
fn compile_helper(cc: &str, name: &str, out_dir: &Path) -> io::Result<()> {
let mut args = vec![
format!("helpers_c/{name}.c"),
"-o".into(),
out_dir
.join(name)
.to_str()
.expect("could not join path")
.to_owned(),
];
// We can build static executables in cross environment
if cc.ends_with("-gcc") {
args.push("-static".into());
}
process::Command::new(cc).args(args).spawn()?;
Ok(())
}
fn main() -> io::Result<()> {
if var_os("PROFILE").ok_or(ErrorKind::NotFound)? == "release" {
let out_dir = PathBuf::from(var_os("OUT_DIR").ok_or(ErrorKind::NotFound)?).join("../../..");
// Compile C helpers
let cc = cc();
println!("CC is: {}", cc);
compile_helper(&cc, "ip46tables", &out_dir)?;
compile_helper(&cc, "nft46", &out_dir)?;
// Build CLI
let cli = clap::Command::new("reaction");
let cli = SubCommand::augment_subcommands(cli);
@ -34,6 +80,8 @@ See usage examples, service configurations and good practices on the wiki: https
println!("cargo::rerun-if-changed=build.rs");
println!("cargo::rerun-if-changed=src/cli.rs");
println!("cargo::rerun-if-changed=helpers_c/ip46tables.c");
println!("cargo::rerun-if-changed=helpers_c/nft46.c");
Ok(())
}

View file

@ -7,76 +7,35 @@
// strongly encouraged to take a look at the full documentation: https://reaction.ppom.me
// JSONnet functions
local ipBan(cmd) = [cmd, '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP'];
local ipUnban(cmd) = [cmd, '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP'];
local iptables(args) = ['ip46tables', '-w'] + args;
// ip46tables is a minimal C program (only POSIX dependencies) present in a
// subdirectory of this repo.
// it permits to handle both ipv4/iptables and ipv6/ip6tables commands
// See meaning and usage of this function around L180
// See meaning and usage of this function around L106
local banFor(time) = {
ban4: {
cmd: ipBan('iptables'),
ipv4only: true,
ban: {
cmd: iptables(['-A', 'reaction', '-s', '<ip>', '-j', 'DROP']),
},
ban6: {
cmd: ipBan('ip6tables'),
ipv6only: true,
},
unban4: {
cmd: ipUnban('iptables'),
unban: {
after: time,
ipv4only: true,
},
unban6: {
cmd: ipUnban('ip6tables'),
after: time,
ipv6only: true,
cmd: iptables(['-D', 'reaction', '-s', '<ip>', '-j', 'DROP']),
},
};
// See usage of this function around L90
// Generates a command for iptables and ip46tables
local ip46tables(arguments) = [
['iptables', '-w'] + arguments,
['ip6tables', '-w'] + arguments,
];
{
// patterns are substituted in regexes.
// when a filter performs an action, it replaces the found pattern
patterns: {
name: {
// reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax
// common patterns have a 'regex' field
regex: '[a-z]+',
// patterns can ignore specific strings
ignore: ['cecilia'],
// patterns can also be ignored based on regexes, it will try to match the whole string detected by the pattern
ignoreregex: [
// ignore names starting with 'jo'
'jo.*',
],
},
ip: {
// patterns can have a special 'ip' type that matches both ipv4 and ipv6
// or 'ipv4' or 'ipv6' to match only that ip version
type: 'ip',
// reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax
// jsonnet's @'string' is for verbatim strings
// simple version: regex: @'(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})',
regex: @'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))',
ignore: ['127.0.0.1', '::1'],
// they can also ignore whole CIDR ranges of ip
ignorecidr: ['10.0.0.0/8'],
// last but not least, patterns of type ip, ipv4, ipv6 can also group their matched ips by mask
// ipv4mask: 30
// this means that ipv6 matches will be converted to their network part.
ipv6mask: 64,
// for example, "2001:db8:85a3:9de5::8a2e:370:7334" will be converted to "2001:db8:85a3:9de5::/64".
// Patterns can be ignored based on regexes, it will try to match the whole string detected by the pattern
// ignoreregex: [@'10\.0\.[0-9]{1,3}\.[0-9]{1,3}'],
},
// ipv4: {
// type: 'ipv4',
// ignore: ...
// ipv4mask: ...
// },
},
// where the state (database) must be read
@ -90,23 +49,24 @@ local ip46tables(arguments) = [
concurrency: 0,
// Those commands will be executed in order at start, before everything else
start:
start: [
// Create an iptables chain for reaction
ip46tables(['-N', 'reaction']) +
iptables(['-N', 'reaction']),
// Insert this chain as the first item of the INPUT & FORWARD chains (for incoming connections)
ip46tables(['-I', 'INPUT', '-p', 'all', '-j', 'reaction']) +
ip46tables(['-I', 'FORWARD', '-p', 'all', '-j', 'reaction']),
iptables(['-I', 'INPUT', '-p', 'all', '-j', 'reaction']),
iptables(['-I', 'FORWARD', '-p', 'all', '-j', 'reaction']),
],
// Those commands will be executed in order at stop, after everything else
stop:
stop: [
// Remove the chain from the INPUT & FORWARD chains
ip46tables(['-D', 'INPUT', '-p', 'all', '-j', 'reaction']) +
ip46tables(['-D', 'FORWARD', '-p', 'all', '-j', 'reaction']) +
iptables(['-D', 'INPUT', '-p', 'all', '-j', 'reaction']),
iptables(['-D', 'FORWARD', '-p', 'all', '-j', 'reaction']),
// Empty the chain
ip46tables(['-F', 'reaction']) +
iptables(['-F', 'reaction']),
// Delete the chain
ip46tables(['-X', 'reaction']),
iptables(['-X', 'reaction']),
],
// streams are commands
// they are run and their output is captured
@ -118,7 +78,6 @@ local ip46tables(arguments) = [
// note that if the command is not in environment's `PATH`
// its full path must be given.
cmd: ['journalctl', '-n0', '-fu', 'sshd.service'],
// filters run actions when they match regexes on a stream
filters: {
// filters have a user-defined name
@ -133,7 +92,6 @@ local ip46tables(arguments) = [
@'Connection (reset|closed) by (authenticating|invalid) user .* <ip>',
@'banner exchange: Connection from <ip> port [0-9]*: invalid format',
],
// if retry and retryperiod are defined,
// the actions will only take place if a same pattern is
// found `retry` times in a `retryperiod` interval
@ -148,32 +106,14 @@ local ip46tables(arguments) = [
// - h / hour / hours
// - d / day / days
retryperiod: '6h',
// duplicate specify how to handle matches after an action has already been taken.
// 3 options are possible:
// - extend (default): update the pending actions' time, so they run later
// - ignore: don't do anything, ignore the match
// - rerun: run the actions again. so we may have the same pending actions multiple times.
// (this was the default before 2.2.0)
// duplicate: extend
// actions are run by the filter when regexes are matched
actions: {
// actions have a user-defined name
ban4: {
cmd: ['iptables', '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP'],
// this optional field permits to run an action only when a pattern of type ip contains an ipv4
ipv4only: true,
ban: {
cmd: iptables(['-A', 'reaction', '-s', '<ip>', '-j', 'DROP']),
},
ban6: {
cmd: ['ip6tables', '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP'],
// this optional field permits to run an action only when a pattern of type ip contains an ipv6
ipv6only: true,
},
unban4: {
cmd: ['iptables', '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP'],
unban: {
cmd: iptables(['-D', 'reaction', '-s', '<ip>', '-j', 'DROP']),
// if after is defined, the action will not take place immediately, but after a specified duration
// same format as retryperiod
after: '2 days',
@ -183,15 +123,7 @@ local ip46tables(arguments) = [
// (defaults to false)
// here it is not useful because we will flush and delete the chain containing the bans anyway
// (with the stop commands)
ipv4only: true,
},
unban6: {
cmd: ['ip6tables', '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP'],
after: '2 days',
ipv6only: true,
},
mail: {
cmd: ['sendmail', '...', '<ip>'],
// some commands, such as alerting commands, are "oneshot".

View file

@ -10,10 +10,8 @@
# using YAML anchors `&name` and pointers `*name`
# definitions are not read by reaction
definitions:
- &ip4tablesban [ 'iptables', '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP' ]
- &ip6tablesban [ 'ip6tables', '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP' ]
- &ip4tablesunban [ 'iptables', '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP' ]
- &ip6tablesunban [ 'ip6tables', '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP' ]
- &iptablesban [ 'ip46tables', '-w', '-A', 'reaction', '-s', '<ip>', '-j', 'DROP' ]
- &iptablesunban [ 'ip46tables', '-w', '-D', 'reaction', '-s', '<ip>', '-j', 'DROP' ]
# ip46tables is a minimal C program (only POSIX dependencies) present as a subdirectory.
# it permits to handle both ipv4/iptables and ipv6/ip6tables commands
@ -30,57 +28,29 @@ concurrency: 0
# patterns are substituted in regexes.
# when a filter performs an action, it replaces the found pattern
patterns:
name:
# reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax
# common patterns have a 'regex' field
regex: '[a-z]+'
# patterns can ignore specific strings
ignore:
- 'cecilia'
# patterns can also be ignored based on regexes, it will try to match the whole string detected by the pattern
ignoreregex:
# ignore names starting with 'jo'
- 'jo.*'
ip:
# patterns can have a special 'ip' type that matches both ipv4 and ipv6
# or 'ipv4' or 'ipv6' to match only that ip version
type: ip
# reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax
# simple version: regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})'
regex: '(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))'
ignore:
- 127.0.0.1
- ::1
# they can also ignore whole CIDR ranges of ip
ignorecidr:
- 10.0.0.0/8
# last but not least, patterns of type ip, ipv4, ipv6 can also group their matched ips by mask
# ipv4mask: 30
# this means that ipv6 matches will be converted to their network part.
ipv6mask: 64
# for example, "2001:db8:85a3:9de5::8a2e:370:7334" will be converted to "2001:db8:85a3:9de5::/64".
# ipv4:
# type: ipv4
# ignore: ...
# Patterns can be ignored based on regexes, it will try to match the whole string detected by the pattern
# ignoreregex:
# - '10\.0\.[0-9]{1,3}\.[0-9]{1,3}'
# Those commands will be executed in order at start, before everything else
start:
- [ 'iptables', '-w', '-N', 'reaction' ]
- [ 'ip6tables', '-w', '-N', 'reaction' ]
- [ 'iptables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'ip6tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'iptables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'ip6tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-N', 'reaction' ]
- [ 'ip46tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
# Those commands will be executed in order at stop, after everything else
stop:
- [ 'iptables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'ip6tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'iptables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'ip6tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'iptables', '-w', '-F', 'reaction' ]
- [ 'ip6tables', '-w', '-F', 'reaction' ]
- [ 'iptables', '-w', '-X', 'reaction' ]
- [ 'ip6tables', '-w', '-X', 'reaction' ]
- [ 'ip46tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]
- [ 'ip46tables', '-w', '-F', 'reaction' ]
- [ 'ip46tables', '-w', '-X', 'reaction' ]
# streams are commands
# they are run and their output is captured
@ -92,7 +62,6 @@ streams:
# note that if the command is not in environment's `PATH`
# its full path must be given.
cmd: [ 'journalctl', '-n0', '-fu', 'sshd.service' ]
# filters run actions when they match regexes on a stream
filters:
# filters have a user-defined name
@ -106,7 +75,6 @@ streams:
- 'Invalid user .* from <ip>'
- 'Connection (reset|closed) by (authenticating|invalid) user .* <ip>'
- 'banner exchange: Connection from <ip> port [0-9]*: invalid format'
# if retry and retryperiod are defined,
# the actions will only take place if a same pattern is
# found `retry` times in a `retryperiod` interval
@ -121,31 +89,14 @@ streams:
# - h / hour / hours
# - d / day / days
retryperiod: 6h
# duplicate specify how to handle matches after an action has already been taken.
# 3 options are possible:
# - extend (default): update the pending actions' time, so they run later
# - ignore: don't do anything, ignore the match
# - rerun: run the actions again. so we may have the same pending actions multiple times.
# (this was the default before 2.2.0)
# duplicate: extend
# actions are run by the filter when regexes are matched
actions:
# actions have a user-defined name
ban4:
ban:
# YAML substitutes *reference by the value anchored at &reference
cmd: *ip4tablesban
# this optional field permits to run an action only when a pattern of type ip contains an ipv4
ipv4only: true
ban6:
cmd: *ip6tablesban
# this optional field permits to run an action only when a pattern of type ip contains an ipv6
ipv6only: true
unban4:
cmd: *ip4tablesunban
cmd: *iptablesban
unban:
cmd: *iptablesunban
# if after is defined, the action will not take place immediately, but after a specified duration
# same format as retryperiod
after: '2 days'
@ -155,13 +106,6 @@ streams:
# (defaults to false)
# here it is not useful because we will flush and delete the chain containing the bans anyway
# (with the stop commands)
ipv4only: true
unban6:
cmd: *ip6tablesunban
after: '2 days'
ipv6only: true
mail:
cmd: ['sendmail', '...', '<ip>']
# some commands, such as alerting commands, are "oneshot".

View file

@ -7,7 +7,7 @@ Documentation=https://reaction.ppom.me
# See `man systemd.exec` and `man systemd.service` for most options below
[Service]
ExecStart=/usr/local/bin/reaction start -c /etc/reaction/
ExecStart=/usr/local/bin/reaction start -c /etc/reaction.jsonnet
# Ask systemd to create /var/lib/reaction (/var/lib/ is implicit)
StateDirectory=reaction
@ -15,8 +15,6 @@ StateDirectory=reaction
RuntimeDirectory=reaction
# Start reaction in its state directory
WorkingDirectory=/var/lib/reaction
# Let reaction kill its child processes first
KillMode=mixed
[Install]
WantedBy=multi-user.target

View file

@ -1,23 +0,0 @@
[package]
name = "treedb"
version = "1.0.0"
edition = "2024"
[features]
test = []
[dependencies]
chrono.workspace = true
futures.workspace = true
serde.workspace = true
serde_json.workspace = true
thiserror.workspace = true
tokio.workspace = true
tokio.features = ["rt-multi-thread", "macros", "io-util", "time", "fs", "tracing"]
tokio-util.workspace = true
tokio-util.features = ["rt"]
tracing.workspace = true
[dev-dependencies]
tempfile.workspace = true

View file

@ -1,117 +0,0 @@
use std::{
fmt,
ops::{Add, Deref, Sub},
time::{Duration, SystemTime, UNIX_EPOCH},
};
use serde::{Deserialize, Serialize};
/// [`std::time::Duration`] since [`std::time::UNIX_EPOCH`]
///
/// A timezone-independent (UTC) instant; safe to exchange between nodes
/// running in different local timezones.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Time(Duration);
impl Deref for Time {
    type Target = Duration;
    // Expose every `Duration` accessor (`as_secs`, `as_nanos`, …) on `Time`.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<Duration> for Time {
    // A duration since the UNIX epoch *is* a `Time`.
    fn from(value: Duration) -> Self {
        Time(value)
    }
}
impl Into<Duration> for Time {
fn into(self) -> Duration {
self.0
}
}
// Shifting an instant by a duration yields another instant.
impl Add<Duration> for Time {
    type Output = Time;
    fn add(self, rhs: Duration) -> Self::Output {
        Time(self.0 + rhs)
    }
}
impl Add<Time> for Time {
    type Output = Time;
    fn add(self, rhs: Time) -> Self::Output {
        Time(self.0 + rhs.0)
    }
}
// NOTE: like `Duration` subtraction, these panic when rhs > self
// (the result would be negative).
impl Sub<Duration> for Time {
    type Output = Time;
    fn sub(self, rhs: Duration) -> Self::Output {
        Time(self.0 - rhs)
    }
}
impl Sub<Time> for Time {
    type Output = Time;
    fn sub(self, rhs: Time) -> Self::Output {
        Time(self.0 - rhs.0)
    }
}
impl Serialize for Time {
    // Serialized as a decimal *string* of nanoseconds since the UNIX epoch
    // (presumably a string because the u128 count exceeds what many formats
    // accept as a number). Must stay in sync with `TimeVisitor::visit_str`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // `as_nanos` comes through the `Deref<Target = Duration>` impl
        self.as_nanos().to_string().serialize(serializer)
    }
}
/// Deserializes a [`Time`] from its string form: a decimal count of
/// nanoseconds since the UNIX epoch (the format produced by `Serialize`).
struct TimeVisitor;
impl<'de> serde::de::Visitor<'de> for TimeVisitor {
    type Value = Time;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "a string representing nanoseconds")
    }
    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        let parsed: Result<u128, _> = s.parse();
        match parsed {
            Ok(total) => {
                // Split the nanosecond count into whole seconds + remainder
                let secs = (total / 1_000_000_000) as u64;
                let nanos = (total % 1_000_000_000) as u32;
                Ok(Time(Duration::new(secs, nanos)))
            }
            Err(_) => Err(serde::de::Error::invalid_value(
                serde::de::Unexpected::Str(s),
                &self,
            )),
        }
    }
}
impl<'de> Deserialize<'de> for Time {
    // Counterpart of `Serialize`: expects the nanosecond count as a string.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_str(TimeVisitor)
    }
}
impl Time {
    /// Whole seconds plus a nanosecond remainder since the UNIX epoch.
    pub fn new(secs: u64, nanos: u32) -> Time {
        Time(Duration::new(secs, nanos))
    }
    // The `from_*` constructors below simply mirror the corresponding
    // `Duration::from_*` constructors (note: `from_hours`/`from_mins`
    // require a recent Rust toolchain).
    pub fn from_hours(hours: u64) -> Time {
        Time(Duration::from_hours(hours))
    }
    pub fn from_mins(mins: u64) -> Time {
        Time(Duration::from_mins(mins))
    }
    pub fn from_secs(secs: u64) -> Time {
        Time(Duration::from_secs(secs))
    }
    pub fn from_millis(millis: u64) -> Time {
        Time(Duration::from_millis(millis))
    }
    pub fn from_nanos(nanos: u64) -> Time {
        Time(Duration::from_nanos(nanos))
    }
}
/// Current wall-clock time as a [`Time`].
///
/// Panics if the system clock is set before the UNIX epoch, which
/// `SystemTime::duration_since` reports as an error.
pub fn now() -> Time {
    Time(SystemTime::now().duration_since(UNIX_EPOCH).unwrap())
}

12
helpers_c/README.md Normal file
View file

@ -0,0 +1,12 @@
# C helpers
These helpers make it possible to handle IPv4 & IPv6 at the same time, while waiting for [#79](https://framagit.org/ppom/reaction/-/issues/79) to be addressed.
Compilation:
```bash
# Produces nft46 binary
gcc -o nft46 nft46.c
# Produces ip46tables binary
gcc -o ip46tables ip46tables.c
```

91
helpers_c/ip46tables.c Normal file
View file

@ -0,0 +1,91 @@
#include<ctype.h>
#include<errno.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<unistd.h>
// If this program
// - receives an IPv4 address in its arguments:
//   → it executes iptables with the same arguments, in place.
//
// - receives an IPv6 address in its arguments:
//   → it executes ip6tables with the same arguments, in place.
//
// - doesn't receive an IPv4 or IPv6 address in its arguments:
//   → it executes both, with the same arguments, in place.
// Return 1 if tab looks like a dotted-quad IPv4 address, 0 otherwise.
// Loose check: at least 7 chars ("0.0.0.0"), digits everywhere,
// and dots only when surrounded by two digits.
int isIPv4(char *tab) {
    int len = strlen(tab);
    // Shortest possible address is 7 chars; must start and end with a digit
    if (len < 7 || !isdigit(tab[0]) || !isdigit(tab[len-1]))
        return 0;
    for (int i = 1; i < len - 1; i++) {
        if (isdigit(tab[i]))
            continue;
        // Anything that is not a digit must be a dot between two digits
        if (tab[i] != '.' || !isdigit(tab[i-1]) || !isdigit(tab[i+1]))
            return 0;
    }
    return 1;
}
// Return 1 if tab plausibly is an IPv6 address, 0 otherwise.
// Loose check: only hex digits, ':' and '.' (dots appear in IPv4-mapped
// addresses such as ::ffff:1.2.3.4), and at least one ':', which every
// IPv6 address contains. Without the colon requirement, hex-only words
// ("add") and plain numbers ("443", e.g. a port argument) were misdetected
// as IPv6, making guess_type run ip6tables only.
int isIPv6(char *tab) {
    int i, len, colons = 0;
    // Shortest accepted form is 3 chars, e.g. "::1"
    len = strlen(tab);
    if (len < 3) {
        return 0;
    }
    // Each char must be a hex digit, ':' or '.'
    for (i=0; i<len; i++) {
        if (tab[i] == ':') {
            colons++;
        } else if (!isdigit(tab[i]) && tab[i] != '.' && !(tab[i] >= 'a' && tab[i] <= 'f') && !(tab[i] >= 'A' && tab[i] <= 'F')) {
            return 0;
        }
    }
    return colons > 0;
}
// Scan an argv-style array: return 4 at the first IPv4-looking argument,
// 6 at the first IPv6-looking one, and 0 when no address is found.
int guess_type(int len, char *tab[]) {
    for (int i = 0; i < len; i++) {
        if (isIPv4(tab[i]))
            return 4;
        if (isIPv6(tab[i]))
            return 6;
    }
    return 0;
}
// Replace argv[0] with the program to run and exec it in place.
// execvp only returns on failure; exit non-zero in that case so callers
// (scripts, systemd) see the failure instead of a bogus success status.
void exec(char *str, char **argv) {
    argv[0] = str;
    execvp(str, argv);
    // returns only if fails
    printf("ip46tables: exec failed %d\n", errno);
    exit(1);
}
// Entry point: guess the address family from the arguments and delegate
// to iptables, ip6tables, or both when no address is present.
int main(int argc, char **argv) {
    if (argc < 2) {
        printf("ip46tables: At least one argument has to be given\n");
        exit(1);
    }
    int type;
    // 4, 6, or 0 when no argument looks like an IP address
    type = guess_type(argc, argv);
    if (type == 4) {
        exec("iptables", argv);
    } else if (type == 6) {
        exec("ip6tables", argv);
    } else {
        // No address found: apply the same rule to both stacks,
        // iptables in the parent and ip6tables in the forked child.
        pid_t pid = fork();
        if (pid == -1) {
            printf("ip46tables: fork failed\n");
            exit(1);
        } else if (pid) {
            exec("iptables", argv);
        } else {
            exec("ip6tables", argv);
        }
    }
}

97
helpers_c/nft46.c Normal file
View file

@ -0,0 +1,97 @@
#include<ctype.h>
#include<errno.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<unistd.h>
// nft46 'add element inet reaction ipvXbans { 1.2.3.4 }' → nft 'add element inet reaction ipv4bans { 1.2.3.4 }'
// nft46 'add element inet reaction ipvXbans { a:b::c:d }' → nft 'add element inet reaction ipv6bans { a:b::c:d }'
//
// the character X is replaced by 4 or 6 depending on the address family of the specified IP
//
// Limitations:
// - nft46 must receive exactly one argument
// - only one IP must be given per command
// - the IP must be between { braces }
// Return 1 if tab[0..len) looks like a dotted-quad IPv4 address, 0 otherwise.
// Loose check: at least 7 chars ("0.0.0.0"), digits everywhere,
// and dots only when surrounded by two digits. tab need not be
// NUL-terminated at len (the caller passes a slice of a larger string).
int isIPv4(char *tab, int len) {
    if (len < 7 || !isdigit(tab[0]) || !isdigit(tab[len-1]))
        return 0;
    for (int i = 1; i < len - 1; i++) {
        if (isdigit(tab[i]))
            continue;
        // Anything that is not a digit must be a dot between two digits
        if (tab[i] != '.' || !isdigit(tab[i-1]) || !isdigit(tab[i+1]))
            return 0;
    }
    return 1;
}
// Return 1 if tab[0..len) plausibly is an IPv6 address, 0 otherwise.
// Loose check: only hex digits, ':' and '.' (dots appear in IPv4-mapped
// addresses), and at least one ':', which every IPv6 address contains.
// The colon requirement prevents hex-only tokens ("add", "443", "deadbeef")
// from being misdetected as IPv6. tab need not be NUL-terminated at len.
int isIPv6(char *tab, int len) {
    int i, colons = 0;
    // Shortest accepted form is 3 chars, e.g. "::1"
    if (len < 3) {
        return 0;
    }
    // Each char must be a hex digit, ':' or '.'
    for (i=0; i<len; i++) {
        if (tab[i] == ':') {
            colons++;
        } else if (!isdigit(tab[i]) && tab[i] != '.' && !(tab[i] >= 'a' && tab[i] <= 'f') && !(tab[i] >= 'A' && tab[i] <= 'F')) {
            return 0;
        }
    }
    return colons > 0;
}
// Return the index of the first occurrence of c in tab at or after i.
// Aborts the whole program when c is absent before index len.
int findchar(char *tab, char c, int i, int len) {
    for (; i < len; i++) {
        if (tab[i] == c)
            return i;
    }
    printf("nft46: one %c must be present", c);
    exit(1);
}
// Replace the 'X' placeholder (as in "ipvXbans") by '4' or '6', depending on
// the address family of the IP found between the { braces } of the nft
// command in tab. Exits with an error when no 'X', '{', space, or
// recognizable IP is present. (Removed the unused startedIP variable and
// the dead -1 initializers, which only triggered compiler warnings.)
void adapt_args(char *tab) {
    int len = strlen(tab);
    int i = 0;
    // Index of the placeholder character to patch
    int X = i = findchar(tab, 'X', i, len);
    // Find the opening brace, then advance startIP past the single space
    // that follows it, so it points at the first char of the IP.
    int startIP = i = findchar(tab, '{', i, len);
    while (startIP + 1 <= (i = findchar(tab, ' ', i, len))) startIP = i + 1;
    i = startIP;
    // The IP ends right before the next space ("{ IP }")
    int endIP = findchar(tab, ' ', i, len) - 1;
    if (isIPv4(tab+startIP, endIP-startIP+1)) {
        tab[X] = '4';
        return;
    }
    if (isIPv6(tab+startIP, endIP-startIP+1)) {
        tab[X] = '6';
        return;
    }
    printf("nft46: no IP address found\n");
    exit(1);
}
// Replace argv[0] with the program to run and exec it in place.
// execvp only returns on failure; exit non-zero in that case so callers
// (scripts, systemd) see the failure instead of a bogus success status.
void exec(char *str, char **argv) {
    argv[0] = str;
    execvp(str, argv);
    // returns only if fails
    printf("nft46: exec failed %d\n", errno);
    exit(1);
}
// Entry point: patch the single nft command string in place (replacing the
// 'X' placeholder with '4' or '6') and exec nft with it.
int main(int argc, char **argv) {
    if (argc != 2) {
        printf("nft46: Exactly one argument must be given\n");
        exit(1);
    }
    // Rewrites argv[1] in place; exits on malformed input
    adapt_args(argv[1]);
    exec("nft", argv);
}

View file

@ -4,21 +4,17 @@ MANDIR = $(PREFIX)/share/man/man1
SYSTEMDDIR ?= /etc/systemd
install:
install -Dm755 reaction $(DESTDIR)$(BINDIR)
install -Dm755 reaction-plugin-virtual $(DESTDIR)$(BINDIR)
install -Dm755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR)
install -Dm644 reaction*.1 -t $(DESTDIR)$(MANDIR)/
install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction
install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish
install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction
install -Dm644 reaction.service $(SYSTEMDDIR)/system/reaction.service
install-ipset:
install -Dm755 reaction-plugin-ipset $(DESTDIR)$(BINDIR)
remove:
rm -f $(DESTDIR)$(BINDIR)/bin/reaction
rm -f $(DESTDIR)$(BINDIR)/bin/reaction-plugin-virtual
rm -f $(DESTDIR)$(BINDIR)/bin/reaction-plugin-ipset
rm -f $(DESTDIR)$(BINDIR)/bin/nft46
rm -f $(DESTDIR)$(BINDIR)/bin/ip46tables
rm -f $(DESTDIR)$(MANDIR)/reaction*.1
rm -f $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction
rm -f $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish

View file

@ -1,13 +1,13 @@
# vim: ft=systemd
[Unit]
Description=reaction daemon
Description=A daemon that scans program outputs for repeated patterns, and takes action.
Documentation=https://reaction.ppom.me
# Ensure reaction will insert its chain after docker has inserted theirs. Only useful when iptables & docker are used
# After=docker.service
# See `man systemd.exec` and `man systemd.service` for most options below
[Service]
ExecStart=/usr/bin/reaction start -c /etc/reaction/
ExecStart=/usr/bin/reaction start -c /etc/%i
# Ask systemd to create /var/lib/reaction (/var/lib/ is implicit)
StateDirectory=reaction
@ -15,10 +15,6 @@ StateDirectory=reaction
RuntimeDirectory=reaction
# Start reaction in its state directory
WorkingDirectory=/var/lib/reaction
# Let reaction kill its child processes first
KillMode=mixed
# Put reaction in its own slice so that plugins can be grouped within.
Slice=system-reaction.slice
[Install]
WantedBy=multi-user.target

View file

@ -1 +0,0 @@
[Slice]

View file

@ -1,23 +0,0 @@
[package]
name = "reaction-plugin-cluster"
version = "0.1.0"
edition = "2024"
[dependencies]
reaction-plugin.workspace = true
chrono.workspace = true
futures.workspace = true
remoc.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tokio.features = ["rt-multi-thread"]
treedb.workspace = true
data-encoding = "2.9.0"
iroh = { version = "0.95.1", default-features = false }
rand = "0.9.2"
[dev-dependencies]
assert_fs.workspace = true

View file

@ -1,165 +0,0 @@
use std::{
collections::BTreeMap,
net::{SocketAddrV4, SocketAddrV6},
sync::Arc,
time::Duration,
};
use futures::future::join_all;
use iroh::{
Endpoint,
endpoint::{ConnectOptions, TransportConfig},
};
use reaction_plugin::{Line, shutdown::ShutdownController};
use remoc::rch::mpsc as remocMpsc;
use tokio::sync::mpsc as tokioMpsc;
use treedb::{Database, time::Time};
use crate::{ActionInit, StreamInit, connection::ConnectionManager, endpoint::EndpointManager};
/// ALPN protocol identifier; peers must present the same one to connect.
pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()];
/// A log line together with its capture time (UTC, epoch-based).
pub type UtcLine = (Arc<String>, Time);
/// QUIC transport settings shared by listening and connecting sides:
/// connections are kept alive with 200 ms pings and dropped after 2 s idle.
pub fn transport_config() -> TransportConfig {
    // FIXME higher timeouts and keep alive
    let mut transport = TransportConfig::default();
    transport
        .max_idle_timeout(Some(Duration::from_millis(2000).try_into().unwrap()))
        .keep_alive_interval(Some(Duration::from_millis(200)));
    transport
}
/// Options for outgoing connections, applying [`transport_config`].
pub fn connect_config() -> ConnectOptions {
    ConnectOptions::new().with_transport_config(transport_config().into())
}
/// Create the iroh [`Endpoint`] for this cluster node: fixed secret key,
/// our ALPN only, no relay and no discovery (peers are statically
/// configured), bound to the configured IPv4/IPv6 addresses and port.
pub async fn bind(stream: &StreamInit) -> Result<Endpoint, String> {
    let mut builder = Endpoint::builder()
        .secret_key(stream.secret_key.clone())
        .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect())
        .relay_mode(iroh::RelayMode::Disabled)
        .clear_discovery()
        .transport_config(transport_config());
    // Bind addresses are optional and independent for each family
    if let Some(ip) = stream.bind_ipv4 {
        builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port));
    }
    if let Some(ip) = stream.bind_ipv6 {
        builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0));
    }
    builder.bind().await.map_err(|err| {
        format!(
            "Could not create socket address for cluster {}: {err}",
            stream.name
        )
    })
}
/// Wire up all the background tasks of one cluster stream:
/// - one task per action, fanning its matches out to every node,
/// - one [`ConnectionManager`] task per remote node (with a per-node
///   channel for outgoing messages),
/// - one [`EndpointManager`] accepting incoming connections.
pub async fn cluster_tasks(
    endpoint: Endpoint,
    mut stream: StreamInit,
    mut actions: Vec<ActionInit>,
    db: &mut Database,
    shutdown: ShutdownController,
) -> Result<(), String> {
    eprintln!("DEBUG cluster tasks starts running");
    // One bounded channel per remote node: actions write, connections read
    let (message_action2connection_txs, mut message_action2connection_rxs): (
        Vec<tokioMpsc::Sender<UtcLine>>,
        Vec<tokioMpsc::Receiver<UtcLine>>,
    ) = (0..stream.nodes.len())
        .map(|_| tokioMpsc::channel(1))
        .unzip();
    // Spawn action tasks
    while let Some(mut action) = actions.pop() {
        let message_action2connection_txs = message_action2connection_txs.clone();
        let own_cluster_tx = stream.tx.clone();
        tokio::spawn(async move {
            action
                .serve(message_action2connection_txs, own_cluster_tx)
                .await
        });
    }
    let endpoint = Arc::new(endpoint);
    let mut connection_endpoint2connection_txs = BTreeMap::new();
    // Spawn connection managers
    while let Some((pk, endpoint_addr)) = stream.nodes.pop_first() {
        let cluster_name = stream.name.clone();
        let endpoint = endpoint.clone();
        let message_action2connection_rx = message_action2connection_rxs.pop().unwrap();
        let stream_tx = stream.tx.clone();
        let shutdown = shutdown.clone();
        let (connection_manager, connection_endpoint2connection_tx) = ConnectionManager::new(
            cluster_name,
            endpoint_addr,
            endpoint,
            stream.message_timeout,
            message_action2connection_rx,
            stream_tx,
            db,
            shutdown,
        )
        .await?;
        tokio::spawn(async move { connection_manager.task().await });
        connection_endpoint2connection_txs.insert(pk, connection_endpoint2connection_tx);
    }
    // Spawn connection accepter
    EndpointManager::new(
        endpoint.clone(),
        stream.name.clone(),
        connection_endpoint2connection_txs,
        shutdown.clone(),
    );
    eprintln!("DEBUG cluster tasks finished running");
    Ok(())
}
impl ActionInit {
    // Receive messages from its reaction action and dispatch them to all connections and to the reaction stream
    // (the latter only when `self_` is set, i.e. the action also applies locally).
    async fn serve(
        &mut self,
        nodes_tx: Vec<tokioMpsc::Sender<UtcLine>>,
        own_stream_tx: remocMpsc::Sender<Line>,
    ) {
        // Loop ends when the action's channel is closed or errors
        while let Ok(Some(m)) = self.rx.recv().await {
            eprintln!("DEBUG action: received a message to send to connections");
            let line = self.send.line(m.match_);
            if self.self_
                && let Err(err) = own_stream_tx.send((line.clone(), m.time)).await
            {
                eprintln!("ERROR while queueing message to be sent to own cluster stream: {err}");
            }
            // Arc so the same line can be queued for every node without copies
            let line = (Arc::new(line), m.time.into());
            for result in join_all(nodes_tx.iter().map(|tx| tx.send(line.clone()))).await {
                if let Err(err) = result {
                    eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}");
                };
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use chrono::{DateTime, Local};
    // As long as nodes communicate with UTC datetimes, them having different local timezones is not an issue!
    // (The two serialized datetimes below denote the same instant, expressed
    // in two different UTC offsets.)
    #[test]
    fn different_local_tz_is_ok() {
        let dates: Vec<DateTime<Local>> = serde_json::from_str(
            "[\"2025-11-02T17:47:21.716229569+01:00\",\"2025-11-02T18:47:21.716229569+02:00\"]",
        )
        .unwrap();
        assert_eq!(dates[0].to_utc(), dates[1].to_utc());
    }
}

View file

@ -1,668 +0,0 @@
use std::{cmp::max, io::Error as IoError, sync::Arc, time::Duration};
use futures::FutureExt;
use iroh::{
Endpoint, EndpointAddr,
endpoint::{Connection, RecvStream, SendStream, VarInt},
};
use rand::random_range;
use reaction_plugin::{Line, shutdown::ShutdownController};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter},
sync::mpsc,
time::sleep,
};
use treedb::{
Database, Tree,
helpers::{to_string, to_time},
time::{Time, now},
};
use crate::{
cluster::{ALPN, UtcLine, connect_config},
key::Show,
};
/// Version exchanged during the handshake in [`open_channels`];
/// mismatching peers are rejected.
const PROTOCOL_VERSION: u32 = 1;
// QUIC close codes (code, reason) sent to the peer when we drop a connection.
const CLOSE_RECV: (u32, &[u8]) = (1, b"error receiving from your stream");
const CLOSE_CLOSED: (u32, &[u8]) = (2, b"you closed your stream");
const CLOSE_SEND: (u32, &[u8]) = (3, b"could not send a message to your channel so I quit");
/// `Ok(Some((line, time)))` = a full line; `Ok(None)` = no full line (see
/// `OwnConnection::recv_line`); `Err` = transport/decoding error.
type MaybeRemoteLine = Result<Option<(String, Time)>, IoError>;
/// Events multiplexed by [`ConnectionManager::task`]'s `select!` loop.
enum Event {
    /// A line produced by our local actions (`None`: channel closed)
    LocalMessageReceived(Option<UtcLine>),
    /// The outcome of reading a line from the remote peer
    RemoteMessageReceived(MaybeRemoteLine),
    /// A connection handed over by the EndpointManager (`None`: it quit)
    ConnectionReceived(Option<ConnOrConn>),
}
/// A handshaked connection to a peer, plus the partially-received state of
/// the next incoming line.
pub struct OwnConnection {
    connection: Connection,
    /// Id negotiated in `open_channels` (max of both sides' random ids)
    id: u64,
    line_tx: BufWriter<SendStream>,
    line_rx: BufReader<RecvStream>,
    // The four fields below buffer a partially-read message so that
    // `recv_line` stays cancel-safe: whatever was already read survives
    // until the next poll.
    next_time_secs: Option<u64>,
    next_time_nanos: Option<u32>,
    next_len: Option<usize>,
    next_line: Option<Vec<u8>>,
}
impl OwnConnection {
    /// Wrap a handshaked connection; no partial line is pending yet.
    fn new(
        connection: Connection,
        id: u64,
        line_tx: BufWriter<SendStream>,
        line_rx: BufReader<RecvStream>,
    ) -> Self {
        Self {
            connection,
            id,
            line_tx,
            line_rx,
            next_time_secs: None,
            next_time_nanos: None,
            next_len: None,
            next_line: None,
        }
    }
    /// Send a line to peer.
    ///
    /// Wire format: u64 epoch seconds, u32 nanoseconds, u32 byte length,
    /// then the UTF-8 line bytes; flushed as one unit.
    ///
    /// Time is a std::time::Duration since UNIX_EPOCH, which is defined as UTC
    /// So it's safe to use between nodes using different timezones
    async fn send_line(&mut self, line: &String, time: &Time) -> Result<(), std::io::Error> {
        self.line_tx.write_u64(time.as_secs()).await?;
        self.line_tx.write_u32(time.subsec_nanos()).await?;
        self.line_tx.write_u32(line.len() as u32).await?;
        self.line_tx.write_all(line.as_bytes()).await?;
        self.line_tx.flush().await?;
        Ok(())
    }
    /// Cancel-safe function that returns next line from peer
    /// Returns None if we don't have all data yet.
    ///
    /// Each already-read header field / body chunk is stashed in the
    /// `next_*` fields, so a cancellation between awaits loses nothing.
    async fn recv_line(&mut self) -> MaybeRemoteLine {
        if self.next_time_secs.is_none() {
            self.next_time_secs = Some(self.line_rx.read_u64().await?);
        }
        if self.next_time_nanos.is_none() {
            self.next_time_nanos = Some(self.line_rx.read_u32().await?);
        }
        if self.next_len.is_none() {
            self.next_len = Some(self.line_rx.read_u32().await? as usize);
        }
        // Ok we have next_len.is_some()
        let next_len = self.next_len.clone().unwrap();
        if self.next_line.is_none() {
            self.next_line = Some(Vec::with_capacity(next_len));
        }
        // Ok we have next_line.is_some()
        let next_line = self.next_line.as_mut().unwrap();
        let actual_len = next_line.len();
        // Resize to wanted length
        next_line.resize(next_len, 0);
        // Read bytes
        let bytes_read = self
            .line_rx
            .read(&mut next_line[actual_len..next_len])
            .await?;
        // Truncate possibly unread bytes
        next_line.truncate(actual_len + bytes_read);
        // Let's test if we read all bytes
        if next_line.len() == next_len {
            // Ok we have a full line
            self.next_len.take();
            let line = String::try_from(self.next_line.take().unwrap()).map_err(|err| {
                std::io::Error::new(std::io::ErrorKind::InvalidData, err.to_string())
            })?;
            let time = Time::new(
                self.next_time_secs.take().unwrap(),
                self.next_time_nanos.take().unwrap(),
            );
            Ok(Some((line, time)))
        } else {
            // Ok we don't have a full line, will be next time!
            // NOTE(review): `handle_remote_message` treats Ok(None) as "peer
            // closed its stream"; confirm a short read on a still-open stream
            // (fragmented body) cannot reach this branch and drop the
            // connection spuriously.
            Ok(None)
        }
    }
}
/// A connection as handed to the [`ConnectionManager`]: either a raw QUIC
/// [`Connection`] that still needs its [`open_channels`] handshake, or an
/// already-handshaked [`OwnConnection`].
pub enum ConnOrConn {
    Connection(Connection),
    OwnConnection(OwnConnection),
}
/// Handle a remote node.
/// Manage reception and sending of messages to this node.
/// Retry failed connections.
pub struct ConnectionManager {
    /// Cluster's name (for logging)
    cluster_name: String,
    /// The remote node we're communicating with (for logging)
    node_id: String,
    /// Address of the remote endpoint
    remote: EndpointAddr,
    /// Our shared iroh endpoint
    endpoint: Arc<Endpoint>,
    /// Cancel asking for a connection
    cancel_ask_connection: Option<mpsc::Sender<()>>,
    /// Create a delegated task to send ourselves a connection
    connection_tx: mpsc::Sender<ConnOrConn>,
    /// The EndpointManager or our delegated task sending us a connection (whether we asked for it or not)
    connection_rx: mpsc::Receiver<ConnOrConn>,
    /// Our own connection (when we have one)
    connection: Option<OwnConnection>,
    /// Last connection ID, used as a deterministic way to choose between conflicting connections
    last_connection_id: u64,
    /// Max duration before we drop pending messages to a node we can't connect to.
    message_timeout: Duration,
    /// Messages we receive from actions
    message_rx: mpsc::Receiver<UtcLine>,
    /// Our queue of messages to send (persisted in the database)
    message_queue: Tree<Time, Arc<String>>,
    /// Messages we send from remote nodes to our own stream
    own_cluster_tx: remoc::rch::mpsc::Sender<Line>,
    /// shutdown
    shutdown: ShutdownController,
}
impl ConnectionManager {
    /// Build a manager for one remote node; its outgoing message queue is
    /// persisted in a per-(local, remote) database tree so pending messages
    /// survive restarts. Also returns the sender the EndpointManager uses to
    /// hand us incoming connections.
    pub async fn new(
        cluster_name: String,
        remote: EndpointAddr,
        endpoint: Arc<Endpoint>,
        message_timeout: Duration,
        message_rx: mpsc::Receiver<UtcLine>,
        own_cluster_tx: remoc::rch::mpsc::Sender<Line>,
        db: &mut Database,
        shutdown: ShutdownController,
    ) -> Result<(Self, mpsc::Sender<ConnOrConn>), String> {
        let node_id = remote.id.show();
        let message_queue = db
            .open_tree(
                format!("message_queue_{}_{}", endpoint.id().show(), node_id),
                message_timeout,
                |(key, value)| Ok((to_time(&key)?, Arc::new(to_string(&value)?))),
            )
            .await?;
        let (connection_tx, connection_rx) = mpsc::channel(1);
        Ok((
            Self {
                cluster_name,
                node_id,
                remote,
                endpoint,
                connection: None,
                cancel_ask_connection: None,
                connection_tx: connection_tx.clone(),
                connection_rx,
                last_connection_id: 0,
                message_timeout,
                message_rx,
                message_queue,
                own_cluster_tx,
                shutdown,
            },
            connection_tx,
        ))
    }
    /// Main loop
    pub async fn task(mut self) {
        self.ask_connection();
        loop {
            let have_connection = self.connection.is_some();
            let maybe_conn_rx = self
                .connection
                .as_mut()
                .map(|conn| conn.recv_line().boxed())
                // This Future will never be polled because of the if in select!
                // It still needs to be present because the branch will be evaluated
                // so we can't unwrap
                .unwrap_or(false_recv().boxed());
            let event = tokio::select! {
                biased;
                // Quitting
                _ = self.shutdown.wait() => None,
                // Receive a connection from EndpointManager
                conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)),
                // Receive remote message when we have a connection
                msg = maybe_conn_rx, if have_connection => Some(Event::RemoteMessageReceived(msg)),
                // Receive a message from local Actions
                msg = self.message_rx.recv() => Some(Event::LocalMessageReceived(msg)),
            };
            match event {
                Some(event) => {
                    // After every event: try to flush queued messages, then
                    // discard the ones that waited longer than message_timeout.
                    self.handle_event(event).await;
                    self.send_queue_messages().await;
                    self.drop_timeout_messages().await;
                }
                None => break,
            }
        }
    }
    /// Dispatch one `select!` outcome to its dedicated handler.
    async fn handle_event(&mut self, event: Event) {
        match event {
            Event::ConnectionReceived(connection) => {
                self.handle_connection(connection).await;
            }
            Event::LocalMessageReceived(utc_line) => {
                self.handle_local_message(utc_line).await;
            }
            Event::RemoteMessageReceived(message) => {
                self.handle_remote_message(message).await;
            }
        }
    }
    /// Flush the persisted queue in key (time) order while we still have a
    /// working connection; a send failure closes the connection and stops,
    /// keeping the unsent message in the queue.
    async fn send_queue_messages(&mut self) {
        while let Some(connection) = &mut self.connection
            && let Some((time, line)) = self
                .message_queue
                .first_key_value()
                .map(|(k, v)| (k.clone(), v.clone()))
        {
            if let Err(err) = connection.send_line(&line, &time).await {
                eprintln!(
                    "INFO cluster {}: connection with node {} failed: {err}",
                    self.cluster_name, self.node_id,
                );
                self.close_connection(CLOSE_SEND).await;
            } else {
                // Only remove a message once it was actually written out
                self.message_queue.remove(&time).await;
                eprintln!(
                    "DEBUG cluster {}: node {}: sent a local message to remote: {}",
                    self.cluster_name, self.node_id, line
                );
            }
        }
    }
    /// Drop queued messages older than `message_timeout` (keys are the
    /// capture times, so the queue front is always the oldest entry).
    async fn drop_timeout_messages(&mut self) {
        let now = now();
        let mut count = 0;
        loop {
            // We have a next key and it reached timeout
            if let Some(next_key) = self.message_queue.first_key_value().map(|kv| kv.0.clone())
                && next_key + self.message_timeout < now
            {
                self.message_queue.remove(&next_key).await;
                count += 1;
            } else {
                break;
            }
        }
        if count > 0 {
            eprintln!(
                "DEBUG cluster {}: node {}: dropping {count} messages that reached timeout",
                self.cluster_name, self.node_id,
            )
        }
    }
    /// Install a connection handed to us by the EndpointManager or our
    /// connect task: handshake it when raw, then keep whichever of the old
    /// and new connections has the greater id (both sides deterministically
    /// pick the same winner). `None` means the EndpointManager is gone.
    async fn handle_connection(&mut self, connection: Option<ConnOrConn>) {
        match connection {
            None => {
                eprintln!(
                    "DEBUG cluster {}: ConnectionManager {}: quitting because EndpointManager has quit",
                    self.cluster_name, self.node_id,
                );
                self.quit();
            }
            Some(connection) => {
                // A connection arrived: stop any pending connect retry loop
                if let Some(cancel) = self.cancel_ask_connection.take() {
                    let _ = cancel.send(()).await;
                }
                let last_connection_id = self.last_connection_id;
                let mut insert_connection = |own_connection: OwnConnection| {
                    if self
                        .connection
                        .as_ref()
                        .is_none_or(|old_own| old_own.id < own_connection.id)
                    {
                        self.last_connection_id = own_connection.id;
                        self.connection = Some(own_connection);
                    } else {
                        eprintln!(
                            "WARN cluster {}: node {}: ignoring incoming connection, as we already have a valid connection with it and our connection id is greater",
                            self.cluster_name, self.node_id,
                        );
                    }
                };
                match connection {
                    ConnOrConn::Connection(connection) => {
                        match open_channels(
                            connection,
                            last_connection_id,
                            &self.cluster_name,
                            &self.node_id,
                        )
                        .await
                        {
                            Ok(own_connection) => insert_connection(own_connection),
                            Err(err) => {
                                eprintln!(
                                    "ERROR cluster {}: trying to initialize connection to node {}: {err}",
                                    self.cluster_name, self.node_id,
                                );
                                // Handshake failed and we have nothing: retry
                                if self.connection.is_none() {
                                    self.ask_connection();
                                }
                            }
                        }
                    }
                    ConnOrConn::OwnConnection(own_connection) => insert_connection(own_connection),
                }
            }
        }
    }
    /// Forward a line received from the peer to our own reaction stream;
    /// errors/EOF close the connection, an unusable local stream makes us quit.
    async fn handle_remote_message(&mut self, message: MaybeRemoteLine) {
        match message {
            Err(err) => {
                eprintln!(
                    "WARN cluster {}: node {}: connection {}: error receiving remote message: {err}",
                    self.cluster_name, self.node_id, self.last_connection_id
                );
                self.close_connection(CLOSE_RECV).await;
            }
            Ok(None) => {
                // NOTE(review): see `recv_line` — Ok(None) may also mean
                // "line not complete yet"; confirm it only occurs on EOF here.
                eprintln!(
                    "WARN cluster {}: node {} closed its stream",
                    self.cluster_name, self.node_id,
                );
                self.close_connection(CLOSE_CLOSED).await;
            }
            Ok(Some(line)) => {
                if let Err(err) = self
                    .own_cluster_tx
                    .send((line.0.clone(), line.1.into()))
                    .await
                {
                    eprintln!(
                        "ERROR cluster {}: could not send message to reaction stream: {err}",
                        self.cluster_name
                    );
                    eprintln!(
                        "INFO cluster {}: line that can't be sent: {}",
                        self.cluster_name, line.0
                    );
                    self.quit();
                } else {
                    eprintln!(
                        "DEBUG cluster {}: node {}: sent a remote message to local stream: {}",
                        self.cluster_name, self.node_id, line.0
                    );
                }
            }
        }
    }
    /// Send a line from our local actions to the peer; without a working
    /// connection the line is queued (persisted) for later. `None` means
    /// the actions channel is closed, so we quit.
    async fn handle_local_message(&mut self, message: Option<UtcLine>) {
        eprintln!(
            "DEBUG cluster {}: node {}: received a local message",
            self.cluster_name, self.node_id,
        );
        match message {
            None => {
                eprintln!(
                    "INFO cluster {}: no action remaining, quitting",
                    self.cluster_name
                );
                self.quit();
            }
            Some(message) => match &mut self.connection {
                Some(connection) => {
                    if let Err(err) = connection.send_line(&message.0, &message.1).await {
                        eprintln!(
                            "INFO cluster {}: connection with node {} failed: {err}",
                            self.cluster_name, self.node_id,
                        );
                        // Keep the message for the next connection
                        self.message_queue.insert(message.1, message.0).await;
                        self.close_connection(CLOSE_SEND).await;
                    } else {
                        eprintln!(
                            "DEBUG cluster {}: node {}: sent a local message to remote: {}",
                            self.cluster_name, self.node_id, message.0
                        );
                    }
                }
                None => {
                    eprintln!(
                        "DEBUG cluster {}: node {}: no connection, saving local message to send later: {}",
                        self.cluster_name, self.node_id, message.0
                    );
                    self.message_queue.insert(message.1, message.0).await;
                }
            },
        }
    }
    /// Close the current connection (if any) with the given QUIC close code
    /// and immediately start trying to establish a new one.
    async fn close_connection(&mut self, code: (u32, &[u8])) {
        if let Some(connection) = self.connection.take() {
            connection
                .connection
                .close(VarInt::from_u32(code.0), code.1);
        }
        self.ask_connection();
    }
    /// Spawn the background connect-retry task; the stored sender lets
    /// `handle_connection` cancel it when a connection arrives another way.
    fn ask_connection(&mut self) {
        let (tx, rx) = mpsc::channel(1);
        self.cancel_ask_connection = Some(tx);
        try_connect(
            self.cluster_name.clone(),
            self.remote.clone(),
            self.endpoint.clone(),
            self.last_connection_id,
            self.connection_tx.clone(),
            rx,
        );
    }
    /// Request a shutdown of the whole cluster stream.
    fn quit(&mut self) {
        self.shutdown.ask_shutdown();
    }
}
/// Open one outgoing uni stream and accept one incoming uni stream, then
/// exchange a (version, random id) handshake on them.
/// This way, there is no need to know if we created or accepted the connection.
async fn open_channels(
    connection: Connection,
    last_connexion_id: u64,
    cluster_name: &str,
    node_id: &str,
) -> Result<OwnConnection, IoError> {
    eprintln!(
        "DEBUG cluster {}: node {}: opening uni channel",
        cluster_name, node_id
    );
    let mut output = BufWriter::new(connection.open_uni().await?);
    // Random id strictly greater than the previous one, so a replacement
    // connection always wins the "greater id" comparison
    let our_id = random_range(last_connexion_id + 1..last_connexion_id + 1_000_000);
    eprintln!(
        "DEBUG cluster {}: node {}: sending handshake in uni channel",
        cluster_name, node_id
    );
    output.write_u32(PROTOCOL_VERSION).await?;
    output.write_u64(our_id).await?;
    output.flush().await?;
    eprintln!(
        "DEBUG cluster {}: node {}: accepting uni channel",
        cluster_name, node_id
    );
    let mut input = BufReader::new(connection.accept_uni().await?);
    eprintln!(
        "DEBUG cluster {}: node {}: reading handshake from uni channel",
        cluster_name, node_id
    );
    let their_version = input.read_u32().await?;
    if their_version != PROTOCOL_VERSION {
        return Err(IoError::new(
            std::io::ErrorKind::InvalidData,
            format!(
                "incompatible version: {their_version}. We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version."
            ),
        ));
    }
    let their_id = input.read_u64().await?;
    // FIXME Do we need to test this? If so, this function should return their_id even when error in order to retry better next time
    // if their_id < last_connexion_id
    //   ERROR
    // else
    // Both sides compute the same id: the max of the two random ids
    let chosen_id = max(our_id, their_id);
    eprintln!(
        "DEBUG cluster {}: node {}: version handshake complete: last id: {last_connexion_id}, our id: {our_id}, their id: {their_id}: chosen id: {chosen_id}",
        cluster_name, node_id
    );
    Ok(OwnConnection::new(connection, chosen_id, output, input))
}
/// Placeholder future used in the `select!` of [`ConnectionManager::task`]
/// when no connection exists; the `if have_connection` guard ensures it is
/// never actually polled.
async fn false_recv() -> MaybeRemoteLine {
    Ok(None)
}
/// First retry delay when (re)connecting to a node.
const START_TIMEOUT: Duration = Duration::from_millis(500);
/// Upper bound for the exponential backoff.
const MAX_TIMEOUT: Duration = Duration::from_hours(1);
/// Growth factor applied to the delay after each failed attempt.
const TIMEOUT_FACTOR: f64 = 1.5;
/// Add up to +20% of random jitter to `d`, so that reconnection attempts of
/// several nodes don't end up synchronized.
fn with_random(d: Duration) -> Duration {
    let max_delta = d.as_micros() as f32 * 0.2;
    // `rand::random_range` panics on an empty range: a sub-microsecond
    // duration gets no jitter instead of panicking.
    if max_delta <= 0.0 {
        return d;
    }
    d + Duration::from_micros(rand::random_range(0.0..max_delta) as u64)
}
/// Compute the next reconnect delay: [`START_TIMEOUT`] on the first try,
/// then the previous delay grown by [`TIMEOUT_FACTOR`] and capped at
/// [`MAX_TIMEOUT`]; jitter is added by [`with_random`].
fn next_delta(delta: Option<Duration>) -> Duration {
    let base = match delta {
        None => START_TIMEOUT,
        Some(previous) => Duration::from_millis(
            ((previous.as_millis() as f64) * TIMEOUT_FACTOR) as u64,
        )
        .min(MAX_TIMEOUT),
    };
    with_random(base)
}
#[cfg(test)]
#[test]
fn test_with_random() {
    // Jitter must only ever lengthen the duration, and by at most 20%
    // (the `d + 1` absorbs integer truncation of the microsecond cast).
    for d in [
        123, 1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890,
    ] {
        let rd = with_random(Duration::from_micros(d)).as_micros();
        assert!(rd as f32 >= d as f32, "{rd} < {d}");
        assert!(rd as f32 <= (d + 1) as f32 * 1.2, "{rd} > {d} * 1.2");
    }
}
/// Spawn a background task that repeatedly tries to open a connection to
/// `remote`, with exponential backoff (see [`next_delta`]).
///
/// The task stops when either:
/// - a connection is established and handed to the ConnectionManager via
///   `connection_tx`, or
/// - a message arrives on `order_stop` (e.g. the peer connected to us first), or
/// - the ConnectionManager's receiving end is gone.
fn try_connect(
    cluster_name: String,
    remote: EndpointAddr,
    endpoint: Arc<Endpoint>,
    last_connection_id: u64,
    connection_tx: mpsc::Sender<ConnOrConn>,
    mut order_stop: mpsc::Receiver<()>,
) {
    tokio::spawn(async move {
        let node_id = remote.id.show();
        // Until we have a connection or we're requested to stop
        let mut keep_trying = true;
        let mut delta = None;
        while keep_trying {
            delta = Some(next_delta(delta));
            // Backoff sleep, interruptible by a stop order.
            keep_trying = tokio::select! {
                _ = sleep(delta.unwrap_or_default()) => true,
                _ = order_stop.recv() => false,
            };
            if keep_trying {
                eprintln!("DEBUG cluster {cluster_name}: node {node_id}: trying to connect...");
                // The connect attempt itself is also interruptible by a stop order.
                let connect = tokio::select! {
                    conn = endpoint.connect_with_opts(remote.clone(), ALPN[0], connect_config()) => Some(conn),
                    _ = order_stop.recv() => None,
                };
                if let Some(connect) = connect {
                    let res = match connect {
                        Ok(connecting) => match connecting.await {
                            Ok(connection) => {
                                eprintln!(
                                    "DEBUG cluster {cluster_name}: node {node_id}: created connection"
                                );
                                match open_channels(
                                    connection,
                                    last_connection_id,
                                    &cluster_name,
                                    &node_id,
                                )
                                .await
                                {
                                    Ok(own_connection) => {
                                        if let Err(err) = connection_tx
                                            .send(ConnOrConn::OwnConnection(own_connection))
                                            .await
                                        {
                                            eprintln!(
                                                "DEBUG cluster {cluster_name}: node {node_id}: quitting because ConnectionManager has quit: {err}"
                                            );
                                        }
                                        // successfully opened connection
                                        keep_trying = false;
                                        Ok(())
                                    }
                                    Err(err) => Err(err.to_string()),
                                }
                            }
                            Err(err) => Err(err.to_string()),
                        },
                        Err(err) => Err(err.to_string()),
                    };
                    // Any failure in the attempt is logged and retried later.
                    if let Err(err) = res {
                        eprintln!(
                            "WARN cluster {cluster_name}: node {node_id}: while trying to connect: {err}"
                        );
                    }
                } else {
                    // received stop order
                    eprintln!(
                        "DEBUG cluster {cluster_name}: node {node_id}: stop to try connecting to node because we received a connection from it"
                    );
                    keep_trying = false;
                }
            }
        }
    });
}

View file

@ -1,128 +0,0 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use iroh::{Endpoint, PublicKey, endpoint::Incoming};
use reaction_plugin::shutdown::ShutdownController;
use tokio::sync::mpsc;
use crate::{connection::ConnOrConn, key::Show};
/// Whether the accept loop should stop (`Yes`) or keep accepting (`No`).
enum Break {
    Yes,
    No,
}
/// Accepts incoming connections on an [`iroh::Endpoint`] and routes each one
/// to the ConnectionManager matching the remote node's public key.
pub struct EndpointManager {
    /// The [`iroh::Endpoint`] to manage
    endpoint: Arc<Endpoint>,
    /// Cluster's name (for logging)
    cluster_name: String,
    /// Connection sender to the Connection Managers
    connections_tx: BTreeMap<PublicKey, mpsc::Sender<ConnOrConn>>,
    /// shutdown
    shutdown: ShutdownController,
}
impl EndpointManager {
    /// Build the manager and spawn its accept loop as a background task.
    pub fn new(
        endpoint: Arc<Endpoint>,
        cluster_name: String,
        connections_tx: BTreeMap<PublicKey, mpsc::Sender<ConnOrConn>>,
        shutdown: ShutdownController,
    ) {
        tokio::spawn(async move {
            Self {
                endpoint,
                cluster_name,
                connections_tx,
                shutdown,
            }
            .task()
            .await
        });
    }
    /// Accept incoming connections until shutdown is requested or the
    /// endpoint stops yielding connections, then close the endpoint.
    async fn task(&mut self) {
        loop {
            let incoming = tokio::select! {
                incoming = self.endpoint.accept() => incoming,
                _ = self.shutdown.wait() => None,
            };
            match incoming {
                Some(incoming) => {
                    if let Break::Yes = self.handle_incoming(incoming).await {
                        break;
                    }
                }
                // `accept()` returned None: the endpoint is closed (or we are
                // shutting down).
                None => break,
            }
        }
        self.endpoint.close().await
    }
    /// Await one incoming connection and dispatch it to the matching
    /// ConnectionManager, keyed by the remote's public key. Connections from
    /// unknown peers are refused.
    ///
    /// Returns [`Break::Yes`] when the manager should stop (the target
    /// ConnectionManager's receiver is gone).
    async fn handle_incoming(&mut self, incoming: Incoming) -> Break {
        eprintln!(
            "DEBUG cluster {}: EndpointManager: receiving connection",
            self.cluster_name,
        );
        // FIXME a malicious actor could maybe prevent a node from connecting to
        // its cluster by sending lots of invalid slow connection requests?
        // This function could be moved to a new 'oneshot' task instead
        let remote_address = incoming.remote_address();
        let remote_address_validated = incoming.remote_address_validated();
        let connection = match incoming.await {
            Ok(connection) => connection,
            Err(err) => {
                // Only log the address when the transport has validated it.
                if remote_address_validated {
                    eprintln!("INFO refused connection from {}: {err}", remote_address)
                } else {
                    eprintln!("INFO refused connection: {err}")
                }
                return Break::No;
            }
        };
        let remote_id = connection.remote_id();
        match self.connections_tx.get(&remote_id) {
            None => {
                eprintln!(
                    "WARN cluster {}: incoming connection from node '{}', ip: {} is not in our list, refusing incoming connection.",
                    self.cluster_name,
                    remote_id.show(),
                    remote_address
                );
                eprintln!(
                    "INFO cluster {}: {}, {}",
                    self.cluster_name,
                    "maybe it's not from our cluster,",
                    "maybe this node's configuration has not yet been updated to add this new node."
                );
                return Break::No;
            }
            Some(tx) => {
                // `.is_err()` replaces `if let Err(_) = …`
                // (clippy: redundant_pattern_matching).
                if tx.send(ConnOrConn::Connection(connection)).await.is_err() {
                    eprintln!(
                        "DEBUG cluster {}: EndpointManager: quitting because ConnectionManager has quit",
                        self.cluster_name,
                    );
                    self.shutdown.ask_shutdown();
                    return Break::Yes;
                }
                eprintln!(
                    "DEBUG cluster {}: EndpointManager: receiving connection from {}",
                    self.cluster_name,
                    remote_id.show(),
                );
            }
        }
        // TODO persist the incoming address, so that we don't forget this address
        Break::No
    }
}

View file

@ -1,188 +0,0 @@
use std::io;
use data_encoding::DecodeError;
use iroh::{PublicKey, SecretKey};
use tokio::{
fs::{self, File},
io::AsyncWriteExt,
};
/// Path of the secret-key file for `cluster_name`, under directory `dir`.
pub fn secret_key_path(dir: &str, cluster_name: &str) -> String {
    format!("{}/secret_key_{}.txt", dir, cluster_name)
}
/// Load the cluster's secret key from disk, generating and persisting a
/// fresh one when no key file exists yet.
pub async fn secret_key(dir: &str, cluster_name: &str) -> Result<SecretKey, String> {
    let path = secret_key_path(dir, cluster_name);
    match get_secret_key(&path).await? {
        // A key was already stored: reuse it.
        Some(existing) => Ok(existing),
        // First run for this cluster: generate and persist a new key.
        None => {
            let fresh = SecretKey::generate(&mut rand::rng());
            set_secret_key(&path, &fresh).await?;
            Ok(fresh)
        }
    }
}
/// Read and decode the secret key stored at `path`.
///
/// Returns `Ok(None)` when the file does not exist, and an error message for
/// any other I/O failure or an undecodable key.
async fn get_secret_key(path: &str) -> Result<Option<SecretKey>, String> {
    let contents = match fs::read_to_string(path).await {
        Ok(contents) => contents,
        // Missing file is not an error: the caller will generate a key.
        Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
        Err(err) => return Err(format!("can't read secret key file: {err}")),
    };
    let bytes = key_b64_to_bytes(&contents).map_err(|err| {
        format!(
            "invalid secret key read from file: {err}. Please remove the `{path}` file from plugin directory.",
        )
    })?;
    Ok(Some(SecretKey::from_bytes(&bytes)))
}
/// Persist `key` (base64url-encoded) at `path`, creating the file with
/// owner-only (0600) permissions.
async fn set_secret_key(path: &str, key: &SecretKey) -> Result<(), String> {
    let secret_key = key.show();
    File::options()
        .mode(0o600)
        .write(true)
        .create(true)
        // Truncate the file: `create(true)` alone would keep stale trailing
        // bytes if an existing file were longer than the new key.
        .truncate(true)
        .open(path)
        .await
        .map_err(|err| format!("can't open `{path}` in plugin directory: {err}"))?
        .write_all(secret_key.as_bytes())
        .await
        .map_err(|err| format!("can't write to `{path}` in plugin directory: {err}"))
}
/// Decode a base64url-encoded key into exactly 32 bytes.
///
/// # Errors
/// Returns a [`DecodeError`] when the input is not valid base64url, or a
/// `Length`-kind error when the decoded payload is not 32 bytes long.
pub fn key_b64_to_bytes(key: &str) -> Result<[u8; 32], DecodeError> {
    let vec = data_encoding::BASE64URL.decode(key.as_bytes())?;
    if vec.len() != 32 {
        return Err(DecodeError {
            position: vec.len(),
            kind: data_encoding::DecodeKind::Length,
        });
    }
    let mut bytes = [0u8; 32];
    // Single memcpy instead of the previous element-by-element index loop.
    bytes.copy_from_slice(&vec);
    Ok(bytes)
}
/// Encode a 32-byte key as base64url.
pub fn key_bytes_to_b64(key: &[u8; 32]) -> String {
    data_encoding::BASE64URL.encode(key)
}
/// Implemented by PublicKey & SecretKey to display keys as base64 instead of hexadecimal.
/// Similar to Display/ToString
pub trait Show {
    /// Render the key as a base64url string.
    fn show(&self) -> String;
}
// Public keys are encoded from their raw 32-byte representation.
impl Show for PublicKey {
    fn show(&self) -> String {
        key_bytes_to_b64(self.as_bytes())
    }
}
// Secret keys are encoded the same way, from `to_bytes()`.
impl Show for SecretKey {
    fn show(&self) -> String {
        key_bytes_to_b64(&self.to_bytes())
    }
}
#[cfg(test)]
mod tests {
    use assert_fs::{
        TempDir,
        prelude::{FileWriteStr, PathChild},
    };
    use iroh::{PublicKey, SecretKey};
    use tokio::fs::read_to_string;
    use crate::key::{
        get_secret_key, key_b64_to_bytes, key_bytes_to_b64, secret_key_path, set_secret_key,
    };
    /// base64url round-trips for known keypairs, plus secret -> public derivation.
    #[test]
    fn secret_key_encode_decode() {
        for (secret_key, public_key) in [
            (
                "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=",
                "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=",
            ),
            (
                "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=",
                "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=",
            ),
        ] {
            // decode -> encode must be the identity on both keys
            assert_eq!(
                secret_key,
                &key_bytes_to_b64(&key_b64_to_bytes(secret_key).unwrap())
            );
            assert_eq!(
                public_key,
                &key_bytes_to_b64(&key_b64_to_bytes(public_key).unwrap())
            );
            let secret_key_parsed = SecretKey::from_bytes(&key_b64_to_bytes(secret_key).unwrap());
            let public_key_parsed =
                PublicKey::from_bytes(&key_b64_to_bytes(public_key).unwrap()).unwrap();
            assert_eq!(secret_key_parsed.public(), public_key_parsed);
        }
    }
    /// Reading keys back from disk: existing files, missing files (-> None),
    /// and an unreadable path (-> Err).
    #[tokio::test]
    async fn secret_key_get() {
        let tmp_dir = TempDir::new().unwrap();
        let tmp_dir_str = tmp_dir.to_str().unwrap();
        for (secret_key, cluster_name) in [
            ("g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=", "my_cluster"),
            ("5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=", "name"),
        ] {
            tmp_dir
                .child(&format!("secret_key_{cluster_name}.txt"))
                .write_str(secret_key)
                .unwrap();
            let secret_key_parsed = SecretKey::from_bytes(&key_b64_to_bytes(secret_key).unwrap());
            let path = secret_key_path(tmp_dir_str, cluster_name);
            let secret_key_from_file = get_secret_key(&path).await.unwrap();
            assert_eq!(
                secret_key_parsed.to_bytes(),
                secret_key_from_file.unwrap().to_bytes()
            )
        }
        assert_eq!(
            Ok(None),
            get_secret_key(&format!("{tmp_dir_str}/non_existent"))
                .await
                // Can't compare secret keys so we map to bytes
                // even if we don't want one
                .map(|opt| opt.map(|pk| pk.to_bytes()))
        );
        // Will fail if we're root, but who runs this as root??
        assert!(
            get_secret_key(&format!("/root/non_existent"))
                .await
                .is_err()
        );
    }
    /// Writing a key must round-trip through the file byte-for-byte.
    #[tokio::test]
    async fn secret_key_set() {
        let tmp_dir = TempDir::new().unwrap();
        let tmp_dir_str = tmp_dir.to_str().unwrap();
        let path = format!("{tmp_dir_str}/secret");
        let key = SecretKey::generate(&mut rand::rng());
        assert!(set_secret_key(&path, &key).await.is_ok());
        let read_file = read_to_string(&path).await;
        assert!(read_file.is_ok());
        assert_eq!(read_file.unwrap(), key_bytes_to_b64(&key.to_bytes()));
    }
}

View file

@ -1,273 +0,0 @@
use std::{
collections::{BTreeMap, BTreeSet},
net::{Ipv4Addr, Ipv6Addr, SocketAddr},
path::PathBuf,
time::Duration,
};
use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr};
use reaction_plugin::{
ActionConfig, ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamConfig,
StreamImpl, line::PatternLine, main_loop, shutdown::ShutdownController, time::parse_duration,
};
use remoc::{rch::mpsc, rtc};
use serde::{Deserialize, Serialize};
use treedb::Database;
use crate::key::Show;
// Sub-modules of the cluster plugin.
mod cluster;
mod connection;
mod endpoint;
mod key;
#[cfg(test)]
mod tests;
/// Plugin entrypoint: hand a default [`Plugin`] to the reaction plugin main loop.
#[tokio::main]
async fn main() {
    let plugin = Plugin::default();
    main_loop(plugin).await;
}
/// Cluster plugin state.
#[derive(Default)]
struct Plugin {
    // Per-stream data gathered by `load_config` and consumed by `start`:
    // stream name -> (stream setup, actions sending to that stream).
    init: BTreeMap<String, (StreamInit, Vec<ActionInit>)>,
    // Shutdown controller shared by all cluster tasks.
    cluster_shutdown: ShutdownController,
}
/// Stream options as defined by the user
#[derive(Serialize, Deserialize)]
struct StreamOptions {
    /// The UDP port to open
    listen_port: u16,
    /// The IPv4 to bind to. Defaults to 0.0.0.0.
    /// Set to `null` to use IPv6 only.
    #[serde(default = "ipv4_unspecified")]
    bind_ipv4: Option<Ipv4Addr>,
    /// The IPv6 to bind to. Defaults to 0.0.0.0.
    /// Set to `null` to use IPv4 only.
    #[serde(default = "ipv6_unspecified")]
    bind_ipv6: Option<Ipv6Addr>,
    /// Other nodes which are part of the cluster.
    nodes: Vec<NodeOption>,
    /// Max duration before we drop pending messages to a node we can't connect to.
    /// Parsed with `parse_duration` (same syntax as other reaction durations).
    message_timeout: String,
}
/// serde default for `bind_ipv4`: bind on all IPv4 interfaces (0.0.0.0).
fn ipv4_unspecified() -> Option<Ipv4Addr> {
    // `From<T> for Option<T>` wraps the address in `Some`.
    Ipv4Addr::UNSPECIFIED.into()
}
/// serde default for `bind_ipv6`: bind on all IPv6 interfaces (::).
fn ipv6_unspecified() -> Option<Ipv6Addr> {
    Ipv6Addr::UNSPECIFIED.into()
}
/// A remote cluster node as configured by the user.
#[derive(Serialize, Deserialize)]
struct NodeOption {
    // base64url-encoded public key identifying the remote node
    public_key: String,
    // Known socket addresses of the node; may be empty.
    #[serde(default)]
    addresses: Vec<SocketAddr>,
}
/// Stream information before start
struct StreamInit {
    // Stream (cluster) name
    name: String,
    // UDP port to listen on
    listen_port: u16,
    // IPv4 bind address, `None` to disable IPv4
    bind_ipv4: Option<Ipv4Addr>,
    // IPv6 bind address, `None` to disable IPv6
    bind_ipv6: Option<Ipv6Addr>,
    // This node's identity for the cluster
    secret_key: SecretKey,
    // Max time to keep pending messages for an unreachable node
    message_timeout: Duration,
    // Remote nodes, keyed by public key
    nodes: BTreeMap<PublicKey, EndpointAddr>,
    // Sender for lines this stream emits to reaction
    tx: mpsc::Sender<Line>,
}
/// Action options as defined by the user.
#[derive(Serialize, Deserialize)]
struct ActionOptions {
    /// The line to send to the corresponding cluster, example: "ban \<ip\>"
    send: String,
    /// The name of the corresponding cluster, example: "my_cluster_stream"
    to: String,
    /// Whether the stream of this node also receives the line
    #[serde(default, rename = "self")]
    self_: bool,
}
/// Action information before start.
struct ActionInit {
    // Fully-qualified "<stream>.<filter>.<action>" name, used in error messages
    name: String,
    // The pattern line to render and send on execution
    send: PatternLine,
    // Whether this node's own stream also receives the line
    self_: bool,
    // Receiver of action executions coming from reaction
    rx: mpsc::Receiver<Exec>,
}
impl PluginInfo for Plugin {
    /// Advertise the stream and action types implemented by this plugin.
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::from(["cluster".into()]),
            actions: BTreeSet::from(["cluster_send".into()]),
        })
    }
    /// Validate the stream/action configurations and record them in
    /// `self.init`; the actual cluster tasks are spawned later in
    /// [`Self::start`].
    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        let mut ret_streams = Vec::with_capacity(streams.len());
        let mut ret_actions = Vec::with_capacity(actions.len());
        for StreamConfig {
            stream_name,
            stream_type,
            config,
        } in streams
        {
            if &stream_type != "cluster" {
                return Err("This plugin can't handle other stream types than cluster".into());
            }
            let options: StreamOptions = serde_json::from_value(config.into())
                .map_err(|err| format!("invalid options: {err}"))?;
            let mut nodes = BTreeMap::default();
            let message_timeout = parse_duration(&options.message_timeout)
                .map_err(|err| format!("invalid message_timeout: {err}"))?;
            // At least one IP family must stay enabled.
            if options.bind_ipv4.is_none() && options.bind_ipv6.is_none() {
                Err(
                    "At least one of bind_ipv4 and bind_ipv6 must be enabled. Unset at least one of them or set at least one of them to an IP.",
                )?;
            }
            if options.nodes.is_empty() {
                Err("At least one remote node has to be configured for a cluster")?;
            }
            for node in options.nodes.into_iter() {
                let bytes = key::key_b64_to_bytes(&node.public_key)
                    .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?;
                let public_key = PublicKey::from_bytes(&bytes)
                    .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?;
                nodes.insert(
                    public_key,
                    EndpointAddr {
                        id: public_key,
                        // Point-free form instead of `|addr| TransportAddr::Ip(addr)`.
                        addrs: node.addresses.into_iter().map(TransportAddr::Ip).collect(),
                    },
                );
            }
            // Load (or generate) this node's identity for the cluster.
            let secret_key = key::secret_key(".", &stream_name).await?;
            eprintln!(
                "INFO public key of this node for cluster {stream_name}: {}",
                secret_key.public().show()
            );
            let (tx, rx) = mpsc::channel(1);
            let stream = StreamInit {
                name: stream_name.clone(),
                listen_port: options.listen_port,
                bind_ipv4: options.bind_ipv4,
                bind_ipv6: options.bind_ipv6,
                secret_key,
                message_timeout,
                nodes,
                tx,
            };
            // `.is_some()` replaces `if let Some(_) = …`
            // (clippy: redundant_pattern_matching).
            if self.init.insert(stream_name, (stream, vec![])).is_some() {
                return Err("this virtual stream has already been initialized".into());
            }
            ret_streams.push(StreamImpl {
                stream: rx,
                standalone: true,
            })
        }
        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "cluster_send" {
                return Err(
                    "This plugin can't handle other action types than 'cluster_send'".into(),
                );
            }
            let options: ActionOptions = serde_json::from_value(config.into())
                .map_err(|err| format!("invalid options: {err}"))?;
            let (tx, rx) = mpsc::channel(1);
            let init_action = ActionInit {
                name: format!("{}.{}.{}", stream_name, filter_name, action_name),
                send: PatternLine::new(options.send, patterns),
                self_: options.self_,
                rx,
            };
            // Attach the action to the stream it sends to; unknown streams are an error.
            match self.init.get_mut(&options.to) {
                Some((_, actions)) => actions.push(init_action),
                None => {
                    return Err(format!(
                        "ERROR action '{}' sends 'to' unknown stream '{}'",
                        init_action.name, options.to
                    )
                    .into());
                }
            }
            ret_actions.push(ActionImpl { tx })
        }
        Ok((ret_streams, ret_actions))
    }
    /// Open the database and spawn the cluster tasks for every stream
    /// recorded by [`Self::load_config`].
    async fn start(&mut self) -> RemoteResult<()> {
        self.cluster_shutdown.delegate().handle_quit_signals()?;
        let mut db = {
            let path = PathBuf::from(".");
            let (cancellation_token, task_tracker_token) = self.cluster_shutdown.token().split();
            Database::open(&path, cancellation_token, task_tracker_token)
                .await
                .map_err(|err| format!("Can't open database: {err}"))?
        };
        while let Some((_, (stream, actions))) = self.init.pop_first() {
            let endpoint = cluster::bind(&stream).await?;
            cluster::cluster_tasks(
                endpoint,
                stream,
                actions,
                &mut db,
                self.cluster_shutdown.clone(),
            )
            .await?;
        }
        // Free containers
        self.init = Default::default();
        eprintln!("DEBUG started");
        Ok(())
    }
    /// Request shutdown of all cluster tasks and wait for them to finish.
    async fn close(self) -> RemoteResult<()> {
        self.cluster_shutdown.ask_shutdown();
        self.cluster_shutdown.wait_all_task_shutdown().await;
        Ok(())
    }
}

View file

@ -1,293 +0,0 @@
use std::env::set_current_dir;
use assert_fs::TempDir;
use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig};
use serde_json::json;
use crate::{Plugin, tests::insert_secret_key};
use super::{PUBLIC_KEY_A, TEST_MUTEX, stream_ok};
/// Stream-config validation: a wrong stream type is rejected outright, then a
/// table of JSON configs (bind combinations, bad timeout, missing fields) is
/// checked against expected ok/err outcomes.
#[tokio::test]
async fn conf_stream() {
    // Minimal node configuration
    let nodes = json!([{
        "public_key": PUBLIC_KEY_A,
    }]);
    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "clust".into(),
                    config: stream_ok().into(),
                }],
                vec![]
            )
            .await
            .is_err()
    );
    // Table of (config, expected-outcome predicate) pairs.
    for (json, is_ok) in [
        (
            json!({
                "listen_port": 2048,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            // The cast fixes the array's element type to a fn pointer.
            Result::is_ok as fn(&_) -> bool,
        ),
        (
            // invalid time
            json!({
                "listen_port": 2048,
                "nodes": nodes,
                "message_timeout": "30pv",
            }),
            Result::is_err,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv4": "0.0.0.0",
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv6": "::",
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv4": "0.0.0.0",
                "bind_ipv6": "::",
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv4": null,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            json!({
                "listen_port": 2048,
                "bind_ipv6": null,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_ok,
        ),
        (
            // No bind
            json!({
                "listen_port": 2048,
                "bind_ipv4": null,
                "bind_ipv6": null,
                "nodes": nodes,
                "message_timeout": "30m",
            }),
            Result::is_err,
        ),
        (json!({}), Result::is_err),
    ] {
        assert!(is_ok(
            &Plugin::default()
                .load_config(
                    vec![StreamConfig {
                        stream_name: "stream".into(),
                        stream_type: "cluster".into(),
                        config: json.into(),
                    }],
                    vec![]
                )
                .await
        ));
    }
}
/// Action-config validation: a wrong action type is rejected, then a table of
/// action JSON configs is checked against expected ok/err outcomes.
#[tokio::test]
async fn conf_action() {
    let patterns = vec!["p1".into(), "p2".into()];
    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "cluster".into(),
                    config: stream_ok().into(),
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "cluster_sen".into(),
                    config: json!({
                        "send": "<p1>",
                        "to": "stream",
                    })
                    .into(),
                    patterns: patterns.clone(),
                }]
            )
            .await
            .is_err()
    );
    // Table of (action config, expected ok?) pairs.
    for (json, is_ok) in [
        (
            json!({
                "send": "<p1>",
                "to": "stream",
            }),
            true,
        ),
        (
            json!({
                "send": "<p1>",
                "to": "stream",
                "self": true,
            }),
            true,
        ),
        (
            json!({
                "send": "<p1>",
                "to": "stream",
                "self": false,
            }),
            true,
        ),
        (
            // missing to
            json!({
                "send": "<p1>",
            }),
            false,
        ),
        (
            // missing send
            json!({
                "to": "stream",
            }),
            false,
        ),
        (
            // invalid self
            json!({
                "send": "<p1>",
                "to": "stream",
                "self": "true",
            }),
            false,
        ),
        (
            // missing conf
            json!({}),
            false,
        ),
    ] {
        let ret = Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "cluster".into(),
                    config: stream_ok().into(),
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "cluster_send".into(),
                    config: json.clone().into(),
                    patterns: patterns.clone(),
                }],
            )
            .await;
        assert!(
            ret.is_ok() == is_ok,
            "is_ok: {is_ok}, ret: {:?}, action conf: {json:?}",
            ret.map(|_| ())
        );
    }
}
/// The `to:` field of an action must reference an existing stream; zero
/// actions and one matching action are fine, an unknown stream name is not.
#[tokio::test]
async fn conf_send() {
    // This test writes a key file in the current directory: serialize.
    let _lock = TEST_MUTEX.lock();
    let dir = TempDir::new().unwrap();
    set_current_dir(&dir).unwrap();
    insert_secret_key().await;
    // No action is ok
    let res = Plugin::default()
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "cluster".into(),
                config: stream_ok().into(),
            }],
            vec![],
        )
        .await;
    assert!(res.is_ok(), "{:?}", res.map(|_| ()));
    // An action is ok
    let res = Plugin::default()
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "cluster".into(),
                config: stream_ok().into(),
            }],
            vec![ActionConfig {
                stream_name: "stream".into(),
                filter_name: "filter".into(),
                action_name: "action".into(),
                action_type: "cluster_send".into(),
                config: json!({ "send": "message", "to": "stream" }).into(),
                patterns: vec![],
            }],
        )
        .await;
    assert!(res.is_ok(), "{:?}", res.map(|_| ()));
    // Invalid to: option
    let res = Plugin::default()
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "cluster".into(),
                config: stream_ok().into(),
            }],
            vec![ActionConfig {
                stream_name: "stream".into(),
                filter_name: "filter".into(),
                action_name: "action".into(),
                action_type: "cluster_send".into(),
                config: json!({ "send": "message", "to": "stream1" }).into(),
                patterns: vec![],
            }],
        )
        .await;
    assert!(res.is_err(), "{:?}", res.map(|_| ()));
}

View file

@ -1,319 +0,0 @@
use std::{env::set_current_dir, time::Duration};
use assert_fs::TempDir;
use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig};
use serde_json::json;
use tokio::{fs, time::timeout};
use treedb::time::now;
use crate::{
Plugin,
key::secret_key_path,
tests::{PUBLIC_KEY_A, PUBLIC_KEY_B, SECRET_KEY_A, SECRET_KEY_B, TEST_MUTEX},
};
/// One pre-generated test identity: a base64url keypair plus a listen port.
#[derive(Clone)]
struct TestNode {
    public_key: &'static str,
    private_key: &'static str,
    port: u16,
}
/// Pool of 15 pre-generated test identities with distinct ports, so e2e tests
/// can build clusters of varying sizes without generating keys at runtime.
const POOL: [TestNode; 15] = [
    TestNode {
        public_key: PUBLIC_KEY_A,
        private_key: SECRET_KEY_A,
        port: 2055,
    },
    TestNode {
        public_key: PUBLIC_KEY_B,
        private_key: SECRET_KEY_B,
        port: 2056,
    },
    TestNode {
        public_key: "ZjEPlIdGikV_sPIAUzO3RFUidlERJUhJ9XwNAlieuvU=",
        private_key: "SCbd8Ids3Dg9MwzyMNV1KFcUtsyRbeCp7GDmu-xXBSs=",
        port: 2057,
    },
    TestNode {
        public_key: "2FUpABLl9I6bU9a2XtWKMLDzwHfrVcNEG6K8Ix6sxWQ=",
        private_key: "F0W8nIlVmuFVpelwYH4PDaBDM0COYOyXDmBEmnHyo5s=",
        port: 2058,
    },
    TestNode {
        public_key: "qR4JDI_yyPWUBrmBbQjqfFbGP14v9dEaQVPHPOjId1o=",
        private_key: "S5pxTafNXPd_9TMT4_ERuPXlZ882UmggAHrf8Yntfqg=",
        port: 2059,
    },
    TestNode {
        public_key: "NjkPBwDO4IEOBjkcxufYtVXspJNQZ0qF6GamRq2TOB4=",
        private_key: "zM_lXiFuwTkmPuuXqIghW_J0uwq0a53L_yhM57uy_R8=",
        port: 2060,
    },
    TestNode {
        public_key: "_mgTzrlE8b_zvka3LgfD5qH2h_d3S0hcDU1WzIL6C74=",
        private_key: "6Obq7fxOXK-u-P3QB5FJvNnwXdKwP1FsVJ0555o7DXs=",
        port: 2061,
    },
    TestNode {
        public_key: "FLKxCSSjjzxH0ZWTpQ8xXcSIRutXUhIDhZimjamxO2s=",
        private_key: "pBPcJ32bt4xGZIGZDLDtj0eedg7p5DENjAwA-wM-1vk=",
        port: 2062,
    },
    TestNode {
        public_key: "yYBWzhzXO4isdPW2SzI-Sv3mcy3dUl6Kl0oFN6YpuzE=",
        private_key: "nC8F6prLAY9-86EZlfXwpOjQeghlPKf3PtT-zXsJZsA=",
        port: 2063,
    },
    TestNode {
        public_key: "QLbNxlLEUt0tieD9BX9of663gCm9WjKeqch0BIFJ3CE=",
        private_key: "JL4bKNHJMaMX_ElnaDHc6Ql74HZbovcswNOrY6fN1sU=",
        port: 2064,
    },
    TestNode {
        public_key: "2cmAmcaEFW-9val6WMoHSfTW25IxiQHes7Jwy6NqLLc=",
        private_key: "TCvfDLHLQ5RxfAs7_2Th2u1XF48ygxTLAAsUzVPBn_o=",
        port: 2065,
    },
    TestNode {
        public_key: "PfKYILyGmu0C6GFUOLw4MSLxN6gtkj0XUdvQW50A2xA=",
        private_key: "LaQgDWsXpwSQlZZXd8UEllrgpeXw9biSye4zcjLclU0=",
        port: 2066,
    },
    TestNode {
        public_key: "OQMXwPl90gr-2y-f5qZIZuVG4WEae5cc8JOB39LTNYE=",
        private_key: "blcigXzk0oeQ8J1jwYFiYHJ-pMiUqbUM4SJBlxA0MiI=",
        port: 2067,
    },
    TestNode {
        public_key: "DHpkBgnQUfpC7s4-mTfpn1_PN4dzj7hCCMF6GwO3Bus=",
        private_key: "sw7-2gPOswznF2OJHJdbfyJxdjS-P5O0lie6SdOL_08=",
        port: 2068,
    },
    TestNode {
        public_key: "odjjaYd6lL1DG8N9AXHW9LGsrKIb5IlW0KZz-rgxfXA=",
        private_key: "6JU6YHRBM_rJkuQmMaGaio_PZiyzZlTIU0qE8AHPGSE=",
        port: 2069,
    },
];
/// Build a (stream, action) config pair for node `index` of `nodes`,
/// writing that node's secret-key file into the current directory so the
/// plugin reuses the fixed identity instead of generating one.
async fn stream_action(
    name: &str,
    index: usize,
    nodes: &[TestNode],
) -> (StreamConfig, ActionConfig) {
    let stream_name = format!("stream_{name}");
    let this_node = &nodes[index];
    // Every pool entry except ourselves becomes a remote node entry.
    let other_nodes: Vec<_> = nodes
        .iter()
        .filter(|node| node.public_key != this_node.public_key)
        .map(|node| {
            json!({
                "public_key": node.public_key,
                "addresses": [format!("[::1]:{}", node.port)]
            })
        })
        .collect();
    fs::write(secret_key_path(".", &stream_name), this_node.private_key)
        .await
        .unwrap();
    (
        StreamConfig {
            stream_name: stream_name.clone(),
            stream_type: "cluster".into(),
            config: json!({
                "message_timeout": "30s",
                "listen_port": this_node.port,
                "nodes": other_nodes,
            })
            .into(),
        },
        ActionConfig {
            stream_name: "stream".into(),
            filter_name: "filter".into(),
            action_name: "action".into(),
            action_type: "cluster_send".into(),
            config: json!({
                "send": format!("from {name}: <test>"),
                "to": stream_name,
            })
            .into(),
            patterns: vec!["test".into()],
        },
    )
}
/// Two nodes started at the same time must connect and exchange lines in
/// both directions: a line sent via node A's action must arrive on node B's
/// stream, and vice versa.
#[tokio::test]
async fn two_nodes_simultaneous_startup() {
    // NOTE(review): only the separate-plugin variant is currently exercised;
    // the single-plugin variant is commented out.
    for separate_plugin in [true /*, false */] {
        let _lock = TEST_MUTEX.lock();
        let dir = TempDir::new().unwrap();
        set_current_dir(&dir).unwrap();
        let ((mut stream_a, action_a), (mut stream_b, action_b)) = if separate_plugin {
            let mut plugin_a = Plugin::default();
            let (sa, aa) = stream_action("a", 0, &POOL[0..2]).await;
            let (mut streams_a, mut actions_a) =
                plugin_a.load_config(vec![sa], vec![aa]).await.unwrap();
            plugin_a.start().await.unwrap();
            let mut plugin_b = Plugin::default();
            let (sb, ab) = stream_action("b", 1, &POOL[0..2]).await;
            let (mut streams_b, mut actions_b) =
                plugin_b.load_config(vec![sb], vec![ab]).await.unwrap();
            plugin_b.start().await.unwrap();
            (
                (streams_a.remove(0), actions_a.remove(0)),
                (streams_b.remove(0), actions_b.remove(0)),
            )
        } else {
            let mut plugin = Plugin::default();
            let a = stream_action("a", 0, &POOL[0..2]).await;
            let b = stream_action("b", 1, &POOL[0..2]).await;
            let (mut streams, mut actions) = plugin
                .load_config(vec![a.0, b.0], vec![a.1, b.1])
                .await
                .unwrap();
            plugin.start().await.unwrap();
            (
                (streams.remove(0), actions.remove(0)),
                (streams.remove(1), actions.remove(1)),
            )
        };
        for m in ["test1", "test2", "test3"] {
            let time = now().into();
            // A line executed on one node must show up on the OTHER node's stream.
            for (stream, action, from) in [
                (&mut stream_b, &action_a, "a"),
                (&mut stream_a, &action_b, "b"),
            ] {
                assert!(
                    action
                        .tx
                        .send(Exec {
                            match_: vec![m.into()],
                            time,
                        })
                        .await
                        .is_ok(),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}"
                );
                let received = timeout(Duration::from_millis(5000), stream.stream.recv()).await;
                assert!(
                    received.is_ok(),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}, did timeout"
                );
                let received = received.unwrap();
                assert!(
                    received.is_ok(),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}, remoc receive error"
                );
                let received = received.unwrap();
                assert_eq!(
                    received,
                    Some((format!("from {from}: {m}"), time)),
                    "separate_plugin: {separate_plugin}, message: {m}, from: {from}"
                );
            }
        }
    }
}
/// For cluster sizes 3..=POOL.len(), start all nodes simultaneously and check
/// that every line executed on any node arrives on every OTHER node's stream.
#[tokio::test]
async fn n_nodes_simultaneous_startup() {
    let _lock = TEST_MUTEX.lock();
    // Ports can take some time to be really closed
    let mut port_delta = 0;
    for n in 3..=POOL.len() {
        println!("\nNODES: {n}\n");
        // Shift every port so consecutive iterations never reuse one.
        port_delta += n;
        // for n in 3..=3 {
        let dir = TempDir::new().unwrap();
        set_current_dir(&dir).unwrap();
        let mut plugins = Vec::with_capacity(n);
        let mut streams = Vec::with_capacity(n);
        let mut actions = Vec::with_capacity(n);
        for i in 0..n {
            let mut plugin = Plugin::default();
            let name = format!("n{i}");
            let (stream, action) = stream_action(
                &name,
                i,
                &POOL[0..n]
                    .iter()
                    .map(|node| node.clone())
                    .map(|node| TestNode {
                        port: node.port + port_delta as u16,
                        ..node
                    })
                    .collect::<Vec<_>>()
                    .as_slice(),
            )
            .await;
            let (mut stream, mut action) = plugin
                .load_config(vec![stream], vec![action])
                .await
                .unwrap();
            plugin.start().await.unwrap();
            plugins.push(plugin);
            streams.push(stream.pop().unwrap());
            actions.push((action.pop().unwrap(), name));
        }
        for m in ["test1", "test2", "test3", "test4", "test5"] {
            let time = now().into();
            for (i, (action, from)) in actions.iter().enumerate() {
                assert!(
                    action
                        .tx
                        .send(Exec {
                            match_: vec![m.into()],
                            time,
                        })
                        .await
                        .is_ok(),
                    "n nodes: {n}, n°action{i}, message: {m}, from: {from}"
                );
                // Every node but the sender must receive the rendered line.
                for (j, stream) in streams.iter_mut().enumerate().filter(|(j, _)| *j != i) {
                    let received = timeout(Duration::from_millis(5000), stream.stream.recv()).await;
                    assert!(
                        received.is_ok(),
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}, did timeout"
                    );
                    let received = received.unwrap();
                    assert!(
                        received.is_ok(),
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}, remoc receive error"
                    );
                    let received = received.unwrap();
                    assert_eq!(
                        received,
                        Some((format!("from {from}: {m}"), time)),
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}"
                    );
                    println!(
                        "n nodes: {n}, n°action: {i}, n°stream: {j}, message: {m}, from: {from}"
                    );
                }
            }
        }
        for plugin in plugins {
            plugin.close().await.unwrap();
        }
    }
}
// TODO test:
// with inexisting nodes
// different startup times
// stopping & restarting a node mid exchange

View file

@ -1,40 +0,0 @@
use std::sync::{LazyLock, Mutex};
use serde_json::json;
use tokio::fs::write;
mod conf;
mod e2e;
mod self_;
// Fixed base64url keypair A shared by the test modules.
const SECRET_KEY_A: &str = "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=";
const PUBLIC_KEY_A: &str = "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=";
// Fixed base64url keypair B shared by the test modules.
const SECRET_KEY_B: &str = "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=";
const PUBLIC_KEY_B: &str = "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=";
// Tests that spawn a database in current directory must be run one at a time
// (they call `set_current_dir`, which is process-global).
static TEST_MUTEX: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
/// Minimal valid stream configuration JSON, listening on `port`.
fn stream_ok_port(port: u16) -> serde_json::Value {
    json!({
        "listen_port": port,
        "nodes": [{
            "public_key": PUBLIC_KEY_A,
        }],
        "message_timeout": "30m",
    })
}
/// Minimal valid stream configuration JSON on the default test port (2048).
fn stream_ok() -> serde_json::Value {
    stream_ok_port(2048)
}
/// Write a fixed secret key for the stream named "stream" into the current
/// directory, so the plugin does not generate a fresh key during the test.
async fn insert_secret_key() {
    write(
        "./secret_key_stream.txt",
        b"pBPcJ32bt4xGZIGZDLDtj0eedg7p5DENjAwA-wM-1vk=",
    )
    .await
    .unwrap();
}

View file

@ -1,78 +0,0 @@
use std::{env::set_current_dir, time::Duration};
use assert_fs::TempDir;
use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig};
use serde_json::json;
use tokio::time::timeout;
use treedb::time::now;
use crate::{Plugin, tests::insert_secret_key};
use super::{TEST_MUTEX, stream_ok_port};
/// The `self` action option: when true, this node's own stream receives the
/// line it sends; when false, nothing must arrive locally.
#[tokio::test]
async fn run_with_self() {
    let _lock = TEST_MUTEX.lock();
    let dir = TempDir::new().unwrap();
    set_current_dir(&dir).unwrap();
    insert_secret_key().await;
    for self_ in [true, false] {
        let mut plugin = Plugin::default();
        let (mut streams, mut actions) = plugin
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "cluster".into(),
                    config: stream_ok_port(2052).into(),
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "cluster_send".into(),
                    config: json!({
                        "send": "message <test>",
                        "to": "stream",
                        "self": self_,
                    })
                    .into(),
                    patterns: vec!["test".into()],
                }],
            )
            .await
            .unwrap();
        let mut stream = streams.pop().unwrap();
        let action = actions.pop().unwrap();
        assert!(stream.standalone);
        assert!(plugin.start().await.is_ok());
        for m in ["test1", "test2", "test3", " a a a aa a a"] {
            let time = now().into();
            assert!(
                action
                    .tx
                    .send(Exec {
                        match_: vec![m.into()],
                        time,
                    })
                    .await
                    .is_ok()
            );
            if self_ {
                // The rendered line must loop back to our own stream.
                assert_eq!(
                    stream.stream.recv().await.unwrap().unwrap(),
                    (format!("message {m}"), time),
                );
            } else {
                // Don't receive anything
                assert!(
                    timeout(Duration::from_millis(100), stream.stream.recv())
                        .await
                        .is_err()
                );
            }
        }
    }
}

View file

@ -1,26 +0,0 @@
[package]
name = "reaction-plugin-ipset"
description = "ipset plugin for reaction"
version = "1.0.0"
edition = "2024"
authors = ["ppom <reaction@ppom.me>"]
license = "AGPL-3.0"
homepage = "https://reaction.ppom.me"
repository = "https://framagit.org/ppom/reaction"
keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"]
default-run = "reaction-plugin-ipset"
[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
remoc.workspace = true
reaction-plugin.path = "../reaction-plugin"
serde.workspace = true
serde_json.workspace = true
ipset = "0.9.0"
[package.metadata.deb]
section = "net"
assets = [
[ "target/release/reaction-plugin-ipset", "/usr/bin/reaction-plugin-ipset", "755" ],
]
depends = ["libipset-dev", "reaction"]

View file

@ -1,419 +0,0 @@
use std::{fmt::Debug, u32, usize};
use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration};
use remoc::rch::mpsc as remocMpsc;
use serde::{Deserialize, Serialize};
use crate::ipset::{CreateSet, IpSet, Order, SetChain, Version};
/// IP family selector for a set: v4-only, v6-only, or both (`ip`, the default).
#[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
pub enum IpVersion {
    // "ip" means both families (see the SetOptions::version docs below).
    #[default]
    #[serde(rename = "ip")]
    Ip,
    #[serde(rename = "ipv4")]
    Ipv4,
    #[serde(rename = "ipv6")]
    Ipv6,
}
/// Debug prints the same lowercase token the serde renames use
/// ("ip" / "ipv4" / "ipv6").
impl Debug for IpVersion {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            IpVersion::Ip => "ip",
            IpVersion::Ipv4 => "ipv4",
            IpVersion::Ipv6 => "ipv6",
        };
        f.write_str(label)
    }
}
/// Whether an action adds the matched IP to the set (the default) or
/// removes it (typically used by an `after` action to unban).
#[derive(Default, Serialize, Deserialize)]
pub enum AddDel {
    #[default]
    #[serde(alias = "add")]
    Add,
    #[serde(alias = "del")]
    Del,
}
/// User-facing action options
#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ActionOptions {
    /// The set that should be used by this action
    pub set: String,
    /// The pattern name of the IP.
    /// Defaults to "ip"
    #[serde(default = "serde_ip")]
    pub pattern: String,
    // resolved index of `pattern` in the filter's pattern list; computed by
    // `set_ip_index`, never read from the configuration
    #[serde(skip)]
    ip_index: usize,
    // Whether the action is to "add" or "del" the ip from the set
    #[serde(default)]
    action: AddDel,
    // options shared by every action targeting the same set (merged later)
    #[serde(flatten)]
    pub set_options: SetOptions,
}
/// Serde default for the `pattern` option: the conventional pattern name "ip".
fn serde_ip() -> String {
    String::from("ip")
}
impl ActionOptions {
    /// Resolve `self.pattern` to its index in the filter's pattern list and
    /// store it in `ip_index`, so `handle_exec` can pick the IP out of the
    /// match vector later. Returns `Err(())` when no pattern has that name.
    pub fn set_ip_index(&mut self, patterns: Vec<String>) -> Result<(), ()> {
        // `Iterator::position` replaces the former
        // `.enumerate().filter(..).next().ok_or(())?.0` chain
        // (clippy::filter_next) without changing behavior.
        self.ip_index = patterns
            .iter()
            .position(|name| name == &self.pattern)
            .ok_or(())?;
        Ok(())
    }
}
/// Merged set options
///
/// Several actions may reference the same set; each of these fields must be
/// absent or identical across them (enforced by `merge`).
#[derive(Default, Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
pub struct SetOptions {
    /// The IP type.
    /// Defaults to `ip`.
    /// If `ipv4`: creates an IPv4 set with this name
    /// If `ipv6`: creates an IPv6 set with this name
    /// If `ip`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6'
    /// *Merged set-wise*.
    #[serde(default)]
    version: Option<IpVersion>,
    /// Chains where the IP set should be inserted.
    /// Defaults to `["INPUT", "FORWARD"]`
    /// *Merged set-wise*.
    #[serde(default)]
    chains: Option<Vec<String>>,
    // Optional timeout, letting linux/netfilter handle set removal instead of reaction
    // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action
    // Same syntax as after and retryperiod in reaction.
    /// *Merged set-wise*.
    #[serde(skip_serializing_if = "Option::is_none")]
    timeout: Option<String>,
    // `timeout` parsed to whole seconds, cached by `merge`
    #[serde(skip)]
    timeout_u32: Option<u32>,
    // Target that iptables should use when the IP is encountered.
    // Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain
    /// *Merged set-wise*.
    #[serde(default)]
    target: Option<String>,
}
impl SetOptions {
    /// Merge `options` into `self`, failing on any conflicting value.
    ///
    /// Options for one set may be spread over several actions; every field
    /// must be absent, set once, or set to the same value everywhere. After
    /// merging, the textual `timeout` is validated and cached in `timeout_u32`.
    pub fn merge(&mut self, options: &SetOptions) -> Result<(), String> {
        // Merge a single optional field, rejecting two different values.
        fn inner_merge<T: Eq + Clone + std::fmt::Debug>(
            a: &mut Option<T>,
            b: &Option<T>,
            name: &str,
        ) -> Result<(), String> {
            if let Some(bb) = b {
                match a {
                    Some(aa) if aa != bb => {
                        return Err(format!(
                            "Conflicting options for {name}: `{aa:?}` and `{bb:?}`"
                        ));
                    }
                    Some(_) => {}
                    None => *a = Some(bb.clone()),
                }
            }
            Ok(())
        }
        inner_merge(&mut self.version, &options.version, "version")?;
        inner_merge(&mut self.timeout, &options.timeout, "timeout")?;
        inner_merge(&mut self.chains, &options.chains, "chains")?;
        inner_merge(&mut self.target, &options.target, "target")?;
        if let Some(timeout) = &self.timeout {
            let secs = parse_duration(timeout)
                .map_err(|err| format!("failed to parse timeout: {}", err))?
                .as_secs();
            // netfilter stores timeouts as u32 seconds
            let secs = u32::try_from(secs).map_err(|_| {
                format!(
                    "timeout is limited to {} seconds (approx {} days)",
                    u32::MAX,
                    49_000
                )
            })?;
            self.timeout_u32 = Some(secs);
        }
        Ok(())
    }
}
/// Fully-resolved runtime description of one ipset (defaults applied):
/// the per-family set names, the chains to hook, the optional kernel
/// timeout, and the iptables target to jump to.
pub struct Set {
    sets: SetNames,
    chains: Vec<String>,
    timeout: Option<u32>,
    target: String,
}
impl Set {
    /// Build a runtime `Set` from its name and merged options, applying the
    /// defaults (target `DROP`, chains `INPUT` + `FORWARD`).
    pub fn from(name: String, options: SetOptions) -> Self {
        Self {
            sets: SetNames::new(name, options.version),
            timeout: options.timeout_u32,
            target: options.target.unwrap_or("DROP".into()),
            chains: options
                .chains
                .unwrap_or(vec!["INPUT".into(), "FORWARD".into()]),
        }
    }

    /// Create the kernel set(s) and insert them into the configured chains.
    ///
    /// On failure, returns the step at which initialization stopped
    /// (0 = set creation, i + 1 = insertion into chain i) so `destroy`
    /// can roll back exactly the work already done.
    pub async fn init(&self, ipset: &mut IpSet) -> Result<(), (usize, String)> {
        for (set, version) in [
            (&self.sets.ipv4, Version::IPv4),
            (&self.sets.ipv6, Version::IPv6),
        ] {
            if let Some(set) = set {
                // create set
                ipset
                    .order(Order::CreateSet(CreateSet {
                        name: set.clone(),
                        version,
                        timeout: self.timeout,
                    }))
                    .await
                    .map_err(|err| (0, err.to_string()))?;
                // insert set in chains
                for (i, chain) in self.chains.iter().enumerate() {
                    ipset
                        .order(Order::InsertSet(SetChain {
                            set: set.clone(),
                            chain: chain.clone(),
                            target: self.target.clone(),
                        }))
                        .await
                        .map_err(|err| (i + 1, err.to_string()))?;
                }
            }
        }
        Ok(())
    }

    /// Remove the set(s) from their chains and destroy them.
    ///
    /// `until` mirrors the step index reported by `init`: only steps that
    /// succeeded are rolled back. `None` means a full teardown.
    pub async fn destroy(&self, ipset: &mut IpSet, until: Option<usize>) {
        for set in [&self.sets.ipv4, &self.sets.ipv6] {
            if let Some(set) = set {
                for chain in self
                    .chains
                    .iter()
                    // Step 0 is set creation and step i + 1 is chain i, so
                    // at most `until - 1` chains were inserted.
                    // `saturating_sub` fixes an integer underflow (panic in
                    // debug builds) when `until == Some(0)`, i.e. when set
                    // creation itself failed and nothing must be removed.
                    .take(
                        until
                            .map(|until| until.saturating_sub(1))
                            .unwrap_or(usize::MAX),
                    )
                {
                    let _ = ipset
                        .order(Order::RemoveSet(SetChain {
                            set: set.clone(),
                            chain: chain.clone(),
                            target: self.target.clone(),
                        }))
                        .await;
                }
                // at step 0 the set was never created, so only destroy it
                // when creation succeeded (until != 0) or on full teardown
                if until.is_none_or(|until| until != 0) {
                    let _ = ipset.order(Order::DestroySet(set.clone())).await;
                }
            }
        }
    }
}
/// Concrete kernel set name per address family; `None` means that family
/// is not managed (single-family configurations).
pub struct SetNames {
    pub ipv4: Option<String>,
    pub ipv6: Option<String>,
}
impl SetNames {
    /// Derive the concrete set name(s) from the configured version.
    ///
    /// `ipv4`/`ipv6` keep the name as-is for that single family; the dual
    /// `ip` default creates one set per family, suffixed `v4`/`v6`.
    pub fn new(name: String, version: Option<IpVersion>) -> Self {
        let (ipv4, ipv6) = match version {
            Some(IpVersion::Ipv4) => (Some(name), None),
            Some(IpVersion::Ipv6) => (None, Some(name)),
            None | Some(IpVersion::Ip) => {
                (Some(format!("{name}v4")), Some(format!("{name}v6")))
            }
        };
        Self { ipv4, ipv6 }
    }
}
/// A running action: consumes `Exec` events from the daemon and turns them
/// into add/del orders on the shared ipset worker.
pub struct Action {
    ipset: IpSet,
    rx: remocMpsc::Receiver<Exec>,
    shutdown: ShutdownToken,
    sets: SetNames,
    // index of pattern ip in match vec
    ip_index: usize,
    action: AddDel,
}
impl Action {
    /// Wire up an action from its validated options and its exec channel.
    pub fn new(
        ipset: IpSet,
        shutdown: ShutdownToken,
        rx: remocMpsc::Receiver<Exec>,
        options: ActionOptions,
    ) -> Result<Self, String> {
        Ok(Action {
            ipset,
            rx,
            shutdown,
            sets: SetNames::new(options.set, options.set_options.version),
            ip_index: options.ip_index,
            action: options.action,
        })
    }

    /// Event loop: process incoming execs until the channel closes, an
    /// error occurs, or shutdown is requested.
    pub async fn serve(mut self) {
        loop {
            let event = tokio::select! {
                exec = self.rx.recv() => Some(exec),
                _ = self.shutdown.wait() => None,
            };
            match event {
                // shutdown asked
                None => break,
                // channel closed
                Some(Ok(None)) => break,
                // error from channel
                Some(Err(err)) => {
                    eprintln!("ERROR {err}");
                    break;
                }
                // ok
                Some(Ok(Some(exec))) => {
                    if let Err(err) = self.handle_exec(exec).await {
                        eprintln!("ERROR {err}");
                        break;
                    }
                }
            }
        }
        // eprintln!("DEBUG Asking for shutdown");
        // self.shutdown.ask_shutdown();
    }

    /// Extract the IP from the match vector, pick the right family set,
    /// and send the configured add/del order to the ipset thread.
    async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> {
        // safeguard against Vec::remove's panic
        if exec.match_.len() <= self.ip_index {
            return Err(format!(
                "match received from reaction is smaller than expected. looking for index {} but size is {}. this is a bug!",
                self.ip_index,
                exec.match_.len()
            ));
        }
        let ip = exec.match_.remove(self.ip_index);
        // select set
        let set = match (&self.sets.ipv4, &self.sets.ipv6) {
            // constant message: `.to_string()` replaces the pointless
            // `format!` with no interpolation (clippy::useless_format)
            (None, None) => {
                return Err("action is neither IPv4 nor IPv6, this is a bug!".to_string());
            }
            (None, Some(set)) => set,
            (Some(set), None) => set,
            // IPv6 addresses always contain ':', IPv4 never does
            (Some(set4), Some(set6)) => {
                if ip.contains(':') {
                    set6
                } else {
                    set4
                }
            }
        };
        // add/remove ip to set
        self.ipset
            .order(match self.action {
                AddDel::Add => Order::Add(set.clone(), ip),
                AddDel::Del => Order::Del(set.clone(), ip),
            })
            .await?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use crate::action::{IpVersion, SetOptions};
    // `SetOptions::merge` must accept identical or absent values in any
    // direction and reject any pair of differing `Some` values.
    #[tokio::test]
    async fn set_options_merge() {
        // fully empty options
        let s1 = SetOptions {
            version: None,
            chains: None,
            timeout: None,
            timeout_u32: None,
            target: None,
        };
        // fully populated options (timeout_u32 matches "3h" once parsed)
        let s2 = SetOptions {
            version: Some(IpVersion::Ipv4),
            chains: Some(vec!["INPUT".into()]),
            timeout: Some("3h".into()),
            timeout_u32: Some(3 * 3600),
            target: Some("DROP".into()),
        };
        assert_ne!(s1, s2);
        assert_eq!(s1, SetOptions::default());
        {
            // s2 can be merged in s1
            let mut s1 = s1.clone();
            assert!(s1.merge(&s2).is_ok());
            assert_eq!(s1, s2);
        }
        {
            // s1 can be merged in s2
            let mut s2 = s2.clone();
            assert!(s2.merge(&s1).is_ok());
        }
        {
            // s1 can be merged in itself
            let mut s3 = s1.clone();
            assert!(s3.merge(&s1).is_ok());
            assert_eq!(s1, s3);
        }
        {
            // s2 can be merged in itself
            let mut s3 = s2.clone();
            assert!(s3.merge(&s2).is_ok());
            assert_eq!(s2, s3);
        }
        // each s3 below sets exactly one field to a value conflicting with s2
        for s3 in [
            SetOptions {
                version: Some(IpVersion::Ipv6),
                ..Default::default()
            },
            SetOptions {
                chains: Some(vec!["damn".into()]),
                ..Default::default()
            },
            SetOptions {
                timeout: Some("30min".into()),
                ..Default::default()
            },
            SetOptions {
                target: Some("log-refuse".into()),
                ..Default::default()
            },
        ] {
            // none with some is ok
            assert!(s3.clone().merge(&s1).is_ok(), "s3: {s3:?}");
            assert!(s1.clone().merge(&s3).is_ok(), "s3: {s3:?}");
            // different some is ko
            assert!(s3.clone().merge(&s2).is_err(), "s3: {s3:?}");
            assert!(s2.clone().merge(&s3).is_err(), "s3: {s3:?}");
        }
    }
}

View file

@ -1,248 +0,0 @@
use std::{collections::BTreeMap, fmt::Display, net::Ipv4Addr, process::Command, thread};
use ipset::{
Session,
types::{HashNet, NetDataType, Parse},
};
use tokio::sync::{mpsc, oneshot};
/// Address family of a single kernel set, used for logging and to choose
/// between iptables and ip6tables.
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub enum Version {
    IPv4,
    IPv6,
}

impl Display for Version {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // log-friendly family label
        let label = match self {
            Version::IPv4 => "IPv4",
            Version::IPv6 => "IPv6",
        };
        write!(f, "{label}")
    }
}
/// Parameters for creating one kernel set (name, family, optional
/// per-entry timeout in seconds handled by netfilter).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct CreateSet {
    pub name: String,
    pub version: Version,
    pub timeout: Option<u32>,
}
/// A (set, chain, target) triple: which iptables chain a set-match rule
/// lives in and which target the rule jumps to.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct SetChain {
    pub set: String,
    pub chain: String,
    pub target: String,
}
/// One command for the blocking ipset worker thread.
/// `Add`/`Del` carry (set name, IP string).
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
pub enum Order {
    CreateSet(CreateSet),
    DestroySet(String),
    InsertSet(SetChain),
    RemoveSet(SetChain),
    Add(String, String),
    Del(String, String),
}
/// Cheap cloneable handle to the dedicated blocking ipset worker thread.
#[derive(Clone)]
pub struct IpSet {
    // each message bundles the order with a oneshot for its result
    tx: mpsc::Sender<OrderType>,
}

impl Default for IpSet {
    // Spawn the worker on its own OS thread: libipset sessions are
    // synchronous, so they must live off the async runtime.
    fn default() -> Self {
        let (tx, rx) = mpsc::channel(1);
        thread::spawn(move || IPsetManager::default().serve(rx));
        Self { tx }
    }
}
impl IpSet {
    /// Send one order to the worker thread and wait for its reply.
    ///
    /// Returns `IpSetError::Thread` when the worker is gone or drops the
    /// reply channel, and `IpSetError::IpSet` when the order itself failed
    /// (details were already logged by the worker).
    pub async fn order(&mut self, order: Order) -> Result<(), IpSetError> {
        let (tx, rx) = oneshot::channel();
        self.tx
            .send((order, tx))
            .await
            .map_err(|err| IpSetError::Thread(format!("ipset thread has quit: {err}")))?;
        rx.await
            .map_err(|err| IpSetError::Thread(format!("ipset thread didn't respond: {err}")))?
            .map_err(IpSetError::IpSet)
    }
}
/// Error returned by `IpSet::order`: either the worker thread is
/// unreachable (with details) or the order itself failed (already logged).
pub enum IpSetError {
    Thread(String),
    IpSet(()),
}

impl Display for IpSetError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            IpSetError::Thread(err) => f.write_str(err),
            IpSetError::IpSet(()) => f.write_str("ipset error"),
        }
    }
}

impl From<IpSetError> for String {
    /// Owned-string conversion, used by `?` in functions returning
    /// `Result<_, String>`; matches the `Display` output.
    fn from(value: IpSetError) -> Self {
        if let IpSetError::Thread(err) = value {
            err
        } else {
            "ipset error".to_string()
        }
    }
}
pub type OrderType = (Order, oneshot::Sender<Result<(), ()>>);
/// A live libipset session together with the family it was created for.
struct Set {
    session: Session<HashNet>,
    version: Version,
}
/// State of the blocking worker thread: one libipset session per set name.
#[derive(Default)]
struct IPsetManager {
    // IPset sessions
    sessions: BTreeMap<String, Set>,
}
impl IPsetManager {
    /// Blocking order loop, run on a dedicated OS thread: libipset is
    /// synchronous, so orders arrive over an mpsc channel and each result
    /// goes back on the oneshot bundled with it.
    fn serve(mut self, mut rx: mpsc::Receiver<OrderType>) {
        // the former `loop { match … { None => break, … } }` was a
        // hand-rolled `while let`
        while let Some((order, response)) = rx.blocking_recv() {
            let result = self.handle_order(order);
            // the requester may be gone; a dead oneshot is not our problem
            let _ = response.send(result);
        }
    }

    /// Dispatch one order. Error details are logged here; callers only
    /// receive an opaque `Err(())`.
    fn handle_order(&mut self, order: Order) -> Result<(), ()> {
        match order {
            Order::CreateSet(CreateSet {
                name,
                version,
                timeout,
            }) => {
                eprintln!("INFO creating {version} set {name}");
                let mut session: Session<HashNet> = Session::new(name.clone());
                session
                    .create(|builder| {
                        let builder = if let Some(timeout) = timeout {
                            builder.with_timeout(timeout)?
                        } else {
                            builder
                        };
                        builder.with_ipv6(version == Version::IPv6)?.build()
                    })
                    .map_err(|err| eprintln!("ERROR Could not create set {name}: {err}"))?;
                self.sessions.insert(name, Set { session, version });
            }
            Order::DestroySet(set) => {
                // unknown sets are ignored: nothing to destroy
                if let Some(mut session) = self.sessions.remove(&set) {
                    eprintln!("INFO destroying {} set {set}", session.version);
                    session
                        .session
                        .destroy()
                        .map_err(|err| eprintln!("ERROR Could not destroy set {set}: {err}"))?;
                }
            }
            Order::InsertSet(options) => self.insert_remove_set(options, true)?,
            Order::RemoveSet(options) => self.insert_remove_set(options, false)?,
            Order::Add(set, ip) => self.insert_remove_ip(set, ip, true)?,
            Order::Del(set, ip) => self.insert_remove_ip(set, ip, false)?,
        };
        Ok(())
    }

    /// Log-and-squash wrapper around `_insert_remove_ip`.
    fn insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), ()> {
        self._insert_remove_ip(set, ip, insert)
            .map_err(|err| eprintln!("ERROR {err}"))
    }

    /// Add (`insert == true`) or delete an IP in an existing session.
    fn _insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), String> {
        // ok_or_else: don't build the error string on the happy path
        let session = self.sessions.get_mut(&set).ok_or_else(|| {
            format!("No set handled by this plugin with this name: {set}. This likely is a bug.")
        })?;
        let mut net_data = NetDataType::new(Ipv4Addr::LOCALHOST, 0);
        net_data
            .parse(&ip)
            .map_err(|err| format!("`{ip}` is not recognized as an IP: {err}"))?;
        // pick the verb/preposition so a failed *delete* no longer reports
        // "Could not add" (both paths previously shared the "add" message)
        let (verb, prep) = if insert { ("add", "to") } else { ("del", "from") };
        if insert {
            session.session.add(net_data, &[])
        } else {
            session.session.del(net_data)
        }
        .map_err(|err| format!("Could not {verb} `{ip}` {prep} set {set}: {err}"))?;
        Ok(())
    }

    /// Log-and-squash wrapper around `_insert_remove_set`.
    fn insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), ()> {
        self._insert_remove_set(options, insert)
            .map_err(|err| eprintln!("ERROR {err}"))
    }

    /// Insert or remove the iptables/ip6tables rule that matches this set,
    /// by spawning the relevant command (`-I` to insert, `-D` to delete) —
    /// the ipset library only manages sets, not rules.
    fn _insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), String> {
        let SetChain { set, chain, target } = options;
        let version = self
            .sessions
            .get(&set)
            .ok_or_else(|| format!("No set managed by this plugin with this name: {set}"))?
            .version;
        let (verb, verbing, from) = if insert {
            ("insert", "inserting", "in")
        } else {
            ("remove", "removing", "from")
        };
        eprintln!("INFO {verbing} {version} set {set} {from} chain {chain}");
        let command = match version {
            Version::IPv4 => "iptables",
            Version::IPv6 => "ip6tables",
        };
        // -w: wait for the xtables lock instead of failing on contention
        let mut child = Command::new(command)
            .args([
                "-w",
                if insert { "-I" } else { "-D" },
                &chain,
                "-m",
                "set",
                "--match-set",
                &set,
                "src",
                "-j",
                &target,
            ])
            .spawn()
            .map_err(|err| format!("Could not {verb} ipset {set} {from} chain {chain}: Could not execute {command}: {err}"))?;
        let exit = child
            .wait()
            .map_err(|err| format!("Could not {verb} ipset {set} {from} chain {chain}: {err}"))?;
        if exit.success() {
            Ok(())
        } else {
            Err(format!(
                "Could not {verb} ipset: exit code {}",
                exit.code()
                    .map(|c| c.to_string())
                    .unwrap_or_else(|| "<unknown>".to_string())
            ))
        }
    }
}

View file

@ -1,159 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use reaction_plugin::{
ActionConfig, ActionImpl, Hello, Manifest, PluginInfo, RemoteError, RemoteResult, StreamConfig,
StreamImpl,
shutdown::{ShutdownController, ShutdownToken},
};
use remoc::rtc;
use crate::{
action::{Action, ActionOptions, Set, SetOptions},
ipset::IpSet,
};
#[cfg(test)]
mod tests;
mod action;
mod ipset;
#[tokio::main]
async fn main() {
    // hand a fresh plugin straight to the shared plugin main loop
    reaction_plugin::main_loop(Plugin::default()).await;
}
/// Plugin state: the shared ipset worker handle, the sets/actions built by
/// `load_config` and consumed by `start`, and the shutdown controller
/// handed to every spawned task.
#[derive(Default)]
struct Plugin {
    ipset: IpSet,
    sets: Vec<Set>,
    actions: Vec<Action>,
    shutdown: ShutdownController,
}
impl PluginInfo for Plugin {
    /// Advertise capabilities: no stream types, one action type ("ipset").
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::default(),
            actions: BTreeSet::from(["ipset".into()]),
        })
    }

    /// Validate the configuration and build (but not yet run) all actions,
    /// merging the per-action set options into one `SetOptions` per set.
    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        if !streams.is_empty() {
            return Err("This plugin can't handle any stream type".into());
        }
        let mut ret_actions = Vec::with_capacity(actions.len());
        let mut set_options: BTreeMap<String, SetOptions> = BTreeMap::new();
        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "ipset" {
                return Err("This plugin can't handle other action types than ipset".into());
            }
            let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| {
                format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}")
            })?;
            // resolve the IP pattern name to its index in the match vector
            options.set_ip_index(patterns).map_err(|_|
                format!(
                    "No pattern with name {} in filter {stream_name}.{filter_name}. Try setting the option `pattern` to your pattern name of type 'ip'",
                    &options.pattern
                )
            )?;
            // Merge option; fails when two actions disagree about one set
            set_options
                .entry(options.set.clone())
                .or_default()
                .merge(&options.set_options)
                .map_err(|err| format!("ipset {}: {err}", options.set))?;
            let (tx, rx) = remoc::rch::mpsc::channel(1);
            self.actions.push(Action::new(
                self.ipset.clone(),
                self.shutdown.token(),
                rx,
                options,
            )?);
            ret_actions.push(ActionImpl { tx });
        }
        // Init all sets
        while let Some((name, options)) = set_options.pop_first() {
            self.sets.push(Set::from(name, options));
        }
        Ok((vec![], ret_actions))
    }

    /// Create every set in the kernel, then spawn the action tasks.
    /// On failure, roll back exactly the sets/chains already initialized.
    async fn start(&mut self) -> RemoteResult<()> {
        self.shutdown.delegate().handle_quit_signals()?;
        let mut first_error = None;
        for (i, set) in self.sets.iter().enumerate() {
            // remember where initialization stopped so the rollback below
            // only undoes work that actually succeeded
            if let Err((failed_step, err)) = set.init(&mut self.ipset).await {
                first_error = Some((i, failed_step, RemoteError::Plugin(err)));
                break;
            }
        }
        // Destroy initialized sets if error
        if let Some((last_set, failed_step, err)) = first_error {
            eprintln!("DEBUG last_set: {last_set} failed_step: {failed_step} err: {err}");
            for (curr_set, set) in self.sets.iter().enumerate().take(last_set + 1) {
                // the failing set is only rolled back up to its failed step
                let until = if last_set == curr_set {
                    Some(failed_step)
                } else {
                    None
                };
                let _ = set.destroy(&mut self.ipset, until).await;
            }
            return Err(err);
        }
        // Launch a task that will destroy the sets on shutdown
        tokio::spawn(destroy_sets_at_shutdown(
            self.ipset.clone(),
            std::mem::take(&mut self.sets),
            self.shutdown.token(),
        ));
        // Launch all actions. The pop loop drains `self.actions`, so the
        // former `self.actions = Default::default()` reassignment after it
        // was a redundant no-op and has been removed.
        while let Some(action) = self.actions.pop() {
            tokio::spawn(async move { action.serve().await });
        }
        Ok(())
    }

    /// Ask every task to stop and wait until they have all shut down.
    async fn close(self) -> RemoteResult<()> {
        self.shutdown.ask_shutdown();
        self.shutdown.wait_all_task_shutdown().await;
        Ok(())
    }
}
/// Background task: once shutdown is signalled, tear down every kernel set
/// (a full teardown: remove from all chains, then destroy).
async fn destroy_sets_at_shutdown(mut ipset: IpSet, sets: Vec<Set>, shutdown: ShutdownToken) {
    shutdown.wait().await;
    for set in sets {
        set.destroy(&mut ipset, None).await;
    }
}

View file

@ -1,253 +0,0 @@
use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig, Value};
use serde_json::json;
use crate::Plugin;
// The plugin advertises no stream types: any stream config must be
// rejected, while a fully empty config loads fine.
#[tokio::test]
async fn conf_stream() {
    // No stream is supported by ipset
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "ipset".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_err()
    );
    // Nothing is ok
    assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok());
}
// Table-driven validation of a single action's options: each case is
// (expected ok?, JSON config, available pattern names).
#[tokio::test]
async fn conf_action_standalone() {
    let p = vec!["name".into(), "ip".into(), "ip2".into()];
    let p_noip = vec!["name".into(), "ip2".into()];
    for (is_ok, conf, patterns) in [
        // minimal set
        (true, json!({ "set": "test" }), &p),
        // missing set key
        (false, json!({}), &p),
        (false, json!({ "version": "ipv4" }), &p),
        // unknown key
        (false, json!({ "set": "test", "unknown": "yes" }), &p),
        (false, json!({ "set": "test", "ip_index": 1 }), &p),
        (false, json!({ "set": "test", "timeout_u32": 1 }), &p),
        // pattern //
        (true, json!({ "set": "test" }), &p),
        (true, json!({ "set": "test", "pattern": "ip" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p_noip),
        // unknown pattern "ip"
        (false, json!({ "set": "test" }), &p_noip),
        (false, json!({ "set": "test", "pattern": "ip" }), &p_noip),
        // unknown pattern
        (false, json!({ "set": "test", "pattern": "unknown" }), &p),
        (false, json!({ "set": "test", "pattern": "uwu" }), &p_noip),
        // bad type
        (false, json!({ "set": "test", "pattern": 0 }), &p_noip),
        (false, json!({ "set": "test", "pattern": true }), &p_noip),
        // action //
        (true, json!({ "set": "test", "action": "add" }), &p),
        (true, json!({ "set": "test", "action": "del" }), &p),
        // unknown action
        (false, json!({ "set": "test", "action": "create" }), &p),
        (false, json!({ "set": "test", "action": "insert" }), &p),
        (false, json!({ "set": "test", "action": "delete" }), &p),
        (false, json!({ "set": "test", "action": "destroy" }), &p),
        // bad type
        (false, json!({ "set": "test", "action": true }), &p),
        (false, json!({ "set": "test", "action": 1 }), &p),
        // ip version //
        // ok
        (true, json!({ "set": "test", "version": "ipv4" }), &p),
        (true, json!({ "set": "test", "version": "ipv6" }), &p),
        (true, json!({ "set": "test", "version": "ip" }), &p),
        // unknown version
        (false, json!({ "set": "test", "version": 4 }), &p),
        (false, json!({ "set": "test", "version": 6 }), &p),
        (false, json!({ "set": "test", "version": 46 }), &p),
        (false, json!({ "set": "test", "version": "5" }), &p),
        (false, json!({ "set": "test", "version": "ipv5" }), &p),
        (false, json!({ "set": "test", "version": "4" }), &p),
        (false, json!({ "set": "test", "version": "6" }), &p),
        (false, json!({ "set": "test", "version": "46" }), &p),
        // bad type
        (false, json!({ "set": "test", "version": true }), &p),
        // chains //
        // everything is fine really
        (true, json!({ "set": "test", "chains": [] }), &p),
        (true, json!({ "set": "test", "chains": ["INPUT"] }), &p),
        (true, json!({ "set": "test", "chains": ["FORWARD"] }), &p),
        (
            true,
            json!({ "set": "test", "chains": ["custom_chain"] }),
            &p,
        ),
        (
            true,
            json!({ "set": "test", "chains": ["INPUT", "FORWARD"] }),
            &p,
        ),
        (
            true,
            json!({
                "set": "test",
                "chains": ["INPUT", "FORWARD", "my_iptables_chain"]
            }),
            &p,
        ),
        // timeout //
        (true, json!({ "set": "test", "timeout": "1m" }), &p),
        (true, json!({ "set": "test", "timeout": "3 days" }), &p),
        // bad
        (false, json!({ "set": "test", "timeout": "3 dayz"}), &p),
        (false, json!({ "set": "test", "timeout": 12 }), &p),
        // target //
        // anything is fine too
        (true, json!({ "set": "test", "target": "DROP" }), &p),
        (true, json!({ "set": "test", "target": "ACCEPT" }), &p),
        (true, json!({ "set": "test", "target": "RETURN" }), &p),
        (true, json!({ "set": "test", "target": "custom_chain" }), &p),
        // bad
        (false, json!({ "set": "test", "target": 11 }), &p),
        (false, json!({ "set": "test", "target": ["DROP"] }), &p),
    ] {
        let res = Plugin::default()
            .load_config(
                vec![],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "ipset".into(),
                    config: conf.clone().into(),
                    patterns: patterns.clone(),
                }],
            )
            .await;
        assert!(
            res.is_ok() == is_ok,
            "conf: {:?}, must be ok: {is_ok}, result: {:?}",
            conf,
            // empty Result::Ok because ActionImpl is not Debug
            res.map(|_| ())
        );
    }
}
// TODO
// Several actions may target the same set as long as their set-wise
// options agree; a conflicting value must make load_config fail.
#[tokio::test]
async fn conf_action_merge() {
    let mut plugin = Plugin::default();
    let set1 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action1".into(),
        action_type: "ipset".into(),
        config: json!({
            "set": "test",
            "target": "DROP",
            "chains": ["INPUT"],
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };
    let set2 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "ipset".into(),
        config: json!({
            "set": "test",
            "target": "DROP",
            "version": "ip",
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };
    let set3 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "ipset".into(),
        config: json!({
            "set": "test",
            "action": "del",
        })
        .into(),
        patterns: vec!["ip".into()],
    };
    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1.clone(),
                // Same set, adding options, no conflict
                set2.clone(),
                // Same set, no new options, no conflict
                set3.clone(),
                // Unrelated set, so no conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "ipset".into(),
                    config: json!({
                        "set": "test2",
                        "target": "target1",
                        "version": "ipv6",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;
    assert!(res.is_ok(), "res: {:?}", res.map(|_| ()));
    // Another set with conflict is not ok
    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1,
                // Same set, adding options, no conflict
                set2,
                // Same set, no new options, no conflict
                set3,
                // Another set with conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "ipset".into(),
                    config: json!({
                        "set": "test",
                        "target": "target2",
                        "action": "del",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;
    assert!(res.is_err(), "res: {:?}", res.map(|_| ()));
}

View file

@ -1,13 +0,0 @@
# reaction-plugin-nftables: out-of-process plugin that maintains nftables
# sets on behalf of the reaction daemon (early-stage counterpart to the
# ipset plugin).
[package]
name = "reaction-plugin-nftables"
version = "0.1.0"
edition = "2024"

[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
remoc.workspace = true
reaction-plugin.path = "../reaction-plugin"
serde.workspace = true
serde_json.workspace = true
# JSON-schema bindings for nftables, driven through libnftables
nftables = { version = "0.6.3", features = ["tokio"] }
libnftables1-sys = { version = "0.1.1" }

View file

@ -1,493 +0,0 @@
use std::{
borrow::Cow,
collections::HashSet,
fmt::{Debug, Display},
u32,
};
use nftables::{
batch::Batch,
expr::Expression,
schema::{Element, NfListObject, Rule, SetFlag, SetType, SetTypeValue},
stmt::Statement,
types::{NfFamily, NfHook},
};
use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration};
use remoc::rch::mpsc as remocMpsc;
use serde::{Deserialize, Serialize};
use crate::{helpers::Version, nft::NftClient};
/// Address family selector for a set, as written in the user configuration.
/// `Ip` (the default) means "both": one v4-suffixed and one v6-suffixed set.
#[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)]
pub enum IpVersion {
    #[default]
    #[serde(rename = "ip")]
    Ip,
    #[serde(rename = "ipv4")]
    Ipv4,
    #[serde(rename = "ipv6")]
    Ipv6,
}
impl Debug for IpVersion {
    /// Render the variant exactly as the user would spell it in the
    /// configuration (the serde rename values), for error messages.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            IpVersion::Ip => "ip",
            IpVersion::Ipv4 => "ipv4",
            IpVersion::Ipv6 => "ipv6",
        };
        f.write_str(name)
    }
}
/// Whether an action adds the matched IP to the set (the default) or
/// deletes it (typically used by an `after` action to unban).
#[derive(Default, Debug, Serialize, Deserialize)]
pub enum AddDel {
    #[default]
    #[serde(alias = "add")]
    Add,
    #[serde(alias = "delete")]
    Delete,
}
/// User-facing action options
#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ActionOptions {
    /// The set that should be used by this action
    pub set: String,
    /// The pattern name of the IP.
    /// Defaults to "ip"
    #[serde(default = "serde_ip")]
    pub pattern: String,
    // resolved index of `pattern` in the filter's pattern list; computed by
    // `set_ip_index`, never read from the configuration
    #[serde(skip)]
    ip_index: usize,
    // Whether the action is to "add" or "del" the ip from the set
    #[serde(default)]
    action: AddDel,
    // options shared by every action targeting the same set (merged later)
    #[serde(flatten)]
    pub set_options: SetOptions,
}
/// Serde default for the `pattern` option: the conventional pattern name "ip".
fn serde_ip() -> String {
    String::from("ip")
}
impl ActionOptions {
    /// Resolve `self.pattern` to its index in the filter's pattern list and
    /// store it in `ip_index`, so `handle_exec` can pick the IP out of the
    /// match vector later. Returns `Err(())` when no pattern has that name.
    pub fn set_ip_index(&mut self, patterns: Vec<String>) -> Result<(), ()> {
        // `Iterator::position` replaces the former
        // `.enumerate().filter(..).next().ok_or(())?.0` chain
        // (clippy::filter_next) without changing behavior.
        self.ip_index = patterns
            .iter()
            .position(|name| name == &self.pattern)
            .ok_or(())?;
        Ok(())
    }
}
/// Merged set options
///
/// Several actions may reference the same set; each of these fields must be
/// absent or identical across them (enforced by `merge`).
#[derive(Default, Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
pub struct SetOptions {
    /// The IP type.
    /// Defaults to `ip`.
    /// If `ipv4`: creates an IPv4 set with this name
    /// If `ipv6`: creates an IPv6 set with this name
    /// If `ip`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6'
    /// *Merged set-wise*.
    #[serde(default)]
    version: Option<IpVersion>,
    /// Hook chains where the set-match rule should be inserted.
    /// Defaults to `["input", "forward"]`
    /// *Merged set-wise*.
    #[serde(default)]
    hooks: Option<Vec<RHook>>,
    // Optional timeout, letting linux/netfilter handle set removal instead of reaction
    // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action
    // Same syntax as after and retryperiod in reaction.
    /// *Merged set-wise*.
    #[serde(skip_serializing_if = "Option::is_none")]
    timeout: Option<String>,
    // `timeout` parsed to whole seconds, cached by `merge`
    #[serde(skip)]
    timeout_u32: Option<u32>,
    // Verdict statement used when the IP is encountered.
    // Defaults to drop, but can also be accept, continue or return.
    /// *Merged set-wise*.
    #[serde(default)]
    target: Option<RStatement>,
}
impl SetOptions {
    /// Merge `options` into `self`, failing on any conflicting value.
    ///
    /// Options for one set may be spread over several actions; every field
    /// must be absent, set once, or set to the same value everywhere. After
    /// merging, the textual `timeout` is validated and cached in `timeout_u32`.
    pub fn merge(&mut self, options: &SetOptions) -> Result<(), String> {
        // merge two Option<T> and fail if there is conflict
        fn inner_merge<T: Eq + Clone + std::fmt::Debug>(
            a: &mut Option<T>,
            b: &Option<T>,
            name: &str,
        ) -> Result<(), String> {
            match (&a, &b) {
                (Some(aa), Some(bb)) => {
                    if aa != bb {
                        return Err(format!(
                            "Conflicting options for {name}: `{aa:?}` and `{bb:?}`"
                        ));
                    }
                }
                (None, Some(_)) => {
                    *a = b.clone();
                }
                _ => (),
            };
            Ok(())
        }
        inner_merge(&mut self.version, &options.version, "version")?;
        inner_merge(&mut self.timeout, &options.timeout, "timeout")?;
        // this option is called `hooks` in the configuration; reporting the
        // conflict as "chains" (copied from the ipset plugin) was misleading
        inner_merge(&mut self.hooks, &options.hooks, "hooks")?;
        inner_merge(&mut self.target, &options.target, "target")?;
        if let Some(timeout) = &self.timeout {
            let duration = parse_duration(timeout)
                .map_err(|err| format!("failed to parse timeout: {}", err))?
                .as_secs();
            // netfilter stores timeouts as u32 seconds
            if duration > u32::MAX as u64 {
                return Err(format!(
                    "timeout is limited to {} seconds (approx {} days)",
                    u32::MAX,
                    49_000
                ));
            }
            self.timeout_u32 = Some(duration as u32);
        }
        Ok(())
    }
}
/// Netfilter hook points a set-match rule can be attached to, in packet
/// traversal order.
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RHook {
    Ingress,
    Prerouting,
    Forward,
    Input,
    Output,
    Postrouting,
    Egress,
}
impl RHook {
    /// Lowercase hook name, matching both the serde spelling and the chain
    /// names used in the generated nftables rules.
    pub fn as_str(&self) -> &'static str {
        match self {
            RHook::Ingress => "ingress",
            RHook::Prerouting => "prerouting",
            RHook::Forward => "forward",
            RHook::Input => "input",
            RHook::Output => "output",
            RHook::Postrouting => "postrouting",
            RHook::Egress => "egress",
        }
    }
}
impl Display for RHook {
    /// Delegates to `as_str`, so lowercase hook names reach the rules.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
/// One-to-one mapping onto the `nftables` crate's hook enum.
impl From<&RHook> for NfHook {
    fn from(value: &RHook) -> Self {
        match value {
            RHook::Ingress => Self::Ingress,
            RHook::Prerouting => Self::Prerouting,
            RHook::Forward => Self::Forward,
            RHook::Input => Self::Input,
            RHook::Output => Self::Output,
            RHook::Postrouting => Self::Postrouting,
            RHook::Egress => Self::Egress,
        }
    }
}
/// Verdict applied by the generated rule (the configuration's `target`).
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RStatement {
    Accept,
    Drop,
    Continue,
    Return,
}
/// Fully-resolved runtime description of one nftables set (defaults
/// applied): per-family set names, hook chains, optional element timeout,
/// and the verdict to apply.
pub struct Set {
    pub sets: SetNames,
    pub hooks: Vec<RHook>,
    pub timeout: Option<u32>,
    pub target: RStatement,
}
impl Set {
    /// Build a runtime `Set` from its name and merged options, applying the
    /// defaults (drop verdict, input + forward hooks).
    pub fn from(name: String, options: SetOptions) -> Self {
        Self {
            sets: SetNames::new(name, options.version),
            timeout: options.timeout_u32,
            target: options.target.unwrap_or(RStatement::Drop),
            hooks: options.hooks.unwrap_or(vec![RHook::Input, RHook::Forward]),
        }
    }

    /// Queue the nftables objects for this set into `batch`: one named set
    /// per address family, plus one rule per configured hook chain in the
    /// plugin's "reaction" inet table.
    pub fn init<'a>(&self, batch: &mut Batch<'a>) -> Result<(), String> {
        for (set, version) in [
            (&self.sets.ipv4, Version::IPv4),
            (&self.sets.ipv6, Version::IPv6),
        ] {
            if let Some(set) = set {
                let family = NfFamily::INet;
                let table = Cow::from("reaction");
                // create set
                batch.add(NfListObject::<'a>::Set(Box::new(nftables::schema::Set::<
                    'a,
                > {
                    family,
                    table: table.to_owned(),
                    name: Cow::Owned(set.to_owned()),
                    // TODO Try a set which is both ipv4 and ipv6?
                    set_type: SetTypeValue::Single(match version {
                        Version::IPv4 => SetType::Ipv4Addr,
                        Version::IPv6 => SetType::Ipv6Addr,
                    }),
                    flags: Some({
                        let mut flags = HashSet::from([SetFlag::Interval]);
                        if self.timeout.is_some() {
                            flags.insert(SetFlag::Timeout);
                        }
                        flags
                    }),
                    // Option<u32> is Copy, so the former `.clone()` was
                    // redundant (clippy::clone_on_copy)
                    timeout: self.timeout,
                    ..Default::default()
                })));
                // insert set in chains
                // NOTE(review): the rule below carries only the verdict —
                // there is no match-on-set expression, so as written it
                // would apply `target` to all traffic on the hook chain.
                // Confirm against the nftables JSON schema / intended design.
                let expr = vec![match self.target {
                    RStatement::Accept => Statement::Accept(None),
                    RStatement::Drop => Statement::Drop(None),
                    RStatement::Continue => Statement::Continue(None),
                    RStatement::Return => Statement::Return(None),
                }];
                for hook in &self.hooks {
                    batch.add(NfListObject::Rule(Rule {
                        family,
                        table: table.to_owned(),
                        chain: Cow::from(hook.to_string()),
                        expr: Cow::Owned(expr.clone()),
                        ..Default::default()
                    }));
                }
            }
        }
        Ok(())
    }
}
/// Concrete kernel set name per address family; `None` means that family
/// is not managed (single-family configurations).
pub struct SetNames {
    pub ipv4: Option<String>,
    pub ipv6: Option<String>,
}
impl SetNames {
    /// Derive the concrete set name(s) from the configured version.
    ///
    /// `ipv4`/`ipv6` keep the name as-is for that single family; the dual
    /// `ip` default creates one set per family, suffixed `v4`/`v6`.
    pub fn new(name: String, version: Option<IpVersion>) -> Self {
        let (ipv4, ipv6) = match version {
            Some(IpVersion::Ipv4) => (Some(name), None),
            Some(IpVersion::Ipv6) => (None, Some(name)),
            None | Some(IpVersion::Ip) => {
                (Some(format!("{name}v4")), Some(format!("{name}v6")))
            }
        };
        Self { ipv4, ipv6 }
    }
}
/// A running action: consumes `Exec` events from the daemon and turns them
/// into element add/delete batches sent to the nft client.
pub struct Action {
    nft: NftClient,
    rx: remocMpsc::Receiver<Exec>,
    shutdown: ShutdownToken,
    sets: SetNames,
    // index of pattern ip in match vec
    ip_index: usize,
    action: AddDel,
}
impl Action {
    /// Wire up an action from its validated options and its exec channel.
    pub fn new(
        nft: NftClient,
        shutdown: ShutdownToken,
        rx: remocMpsc::Receiver<Exec>,
        options: ActionOptions,
    ) -> Result<Self, String> {
        Ok(Action {
            nft,
            rx,
            shutdown,
            sets: SetNames::new(options.set, options.set_options.version),
            ip_index: options.ip_index,
            action: options.action,
        })
    }

    /// Event loop: process incoming execs until the channel closes, an
    /// error occurs, or shutdown is requested.
    pub async fn serve(mut self) {
        loop {
            let event = tokio::select! {
                exec = self.rx.recv() => Some(exec),
                _ = self.shutdown.wait() => None,
            };
            match event {
                // shutdown asked
                None => break,
                // channel closed
                Some(Ok(None)) => break,
                // error from channel
                Some(Err(err)) => {
                    eprintln!("ERROR {err}");
                    break;
                }
                // ok
                Some(Ok(Some(exec))) => {
                    if let Err(err) = self.handle_exec(exec).await {
                        eprintln!("ERROR {err}");
                        break;
                    }
                }
            }
        }
        // eprintln!("DEBUG Asking for shutdown");
        // self.shutdown.ask_shutdown();
    }

    /// Extract the IP from the match vector, pick the right family set,
    /// and send a one-element add/delete batch to nftables.
    async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> {
        // safeguard against Vec::remove's panic
        if exec.match_.len() <= self.ip_index {
            return Err(format!(
                "match received from reaction is smaller than expected. looking for index {} but size is {}. this is a bug!",
                self.ip_index,
                exec.match_.len()
            ));
        }
        let ip = exec.match_.remove(self.ip_index);
        // select set
        let set = match (&self.sets.ipv4, &self.sets.ipv6) {
            // constant message: `.to_string()` replaces the pointless
            // `format!` with no interpolation (clippy::useless_format)
            (None, None) => {
                return Err("action is neither IPv4 nor IPv6, this is a bug!".to_string());
            }
            (None, Some(set)) => set,
            (Some(set), None) => set,
            // IPv6 addresses always contain ':', IPv4 never does
            (Some(set4), Some(set6)) => {
                if ip.contains(':') {
                    set6
                } else {
                    set4
                }
            }
        };
        // add/remove ip to set
        let element = NfListObject::Element(Element {
            family: NfFamily::INet,
            table: Cow::from("reaction"),
            name: Cow::from(set),
            elem: Cow::from(vec![Expression::String(Cow::from(ip.clone()))]),
        });
        let mut batch = Batch::new();
        match self.action {
            AddDel::Add => batch.add(element),
            AddDel::Delete => batch.delete(element),
        };
        match self.nft.send(batch).await {
            Ok(ok) => {
                eprintln!("DEBUG action ok {:?} {ip}: {ok}", self.action);
                Ok(())
            }
            Err(err) => Err(format!("action ko {:?} {ip}: {err}", self.action)),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::action::{IpVersion, RHook, RStatement, SetOptions};

    // `SetOptions::merge` combines compatible option sets and rejects
    // conflicting values. Nothing is awaited in this test, so a plain
    // `#[test]` is enough — no need to spin up a tokio runtime.
    #[test]
    fn set_options_merge() {
        let s1 = SetOptions {
            version: None,
            hooks: None,
            timeout: None,
            timeout_u32: None,
            target: None,
        };
        let s2 = SetOptions {
            version: Some(IpVersion::Ipv4),
            hooks: Some(vec![RHook::Input]),
            timeout: Some("3h".into()),
            timeout_u32: Some(3 * 3600),
            target: Some(RStatement::Drop),
        };
        assert_ne!(s1, s2);
        assert_eq!(s1, SetOptions::default());
        {
            // s2 can be merged in s1
            let mut s1 = s1.clone();
            assert!(s1.merge(&s2).is_ok());
            assert_eq!(s1, s2);
        }
        {
            // s1 can be merged in s2
            let mut s2 = s2.clone();
            assert!(s2.merge(&s1).is_ok());
        }
        {
            // s1 can be merged in itself
            let mut s3 = s1.clone();
            assert!(s3.merge(&s1).is_ok());
            assert_eq!(s1, s3);
        }
        {
            // s2 can be merged in itself
            let mut s3 = s2.clone();
            assert!(s3.merge(&s2).is_ok());
            assert_eq!(s2, s3);
        }
        for s3 in [
            SetOptions {
                version: Some(IpVersion::Ipv6),
                ..Default::default()
            },
            SetOptions {
                hooks: Some(vec![RHook::Output]),
                ..Default::default()
            },
            SetOptions {
                timeout: Some("30min".into()),
                ..Default::default()
            },
            SetOptions {
                target: Some(RStatement::Continue),
                ..Default::default()
            },
        ] {
            // none with some is ok
            assert!(s3.clone().merge(&s1).is_ok(), "s3: {s3:?}");
            assert!(s1.clone().merge(&s3).is_ok(), "s3: {s3:?}");
            // different some is ko
            assert!(s3.clone().merge(&s2).is_err(), "s3: {s3:?}");
            assert!(s2.clone().merge(&s3).is_err(), "s3: {s3:?}");
        }
    }
}

View file

@ -1,15 +0,0 @@
use std::fmt::Display;
/// IP protocol version of an address or a set.
#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub enum Version {
    IPv4,
    IPv6,
}

impl Display for Version {
    /// Render the canonical "IPv4"/"IPv6" label.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::IPv4 => "IPv4",
            Self::IPv6 => "IPv6",
        };
        f.write_str(label)
    }
}

View file

@ -1,176 +0,0 @@
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet},
};
use nftables::{
batch::Batch,
schema::{Chain, NfListObject, Table},
types::{NfChainType, NfFamily},
};
use reaction_plugin::{
ActionConfig, ActionImpl, Hello, Manifest, PluginInfo, RemoteResult, StreamConfig, StreamImpl,
shutdown::ShutdownController,
};
use remoc::rtc;
use crate::{
action::{Action, ActionOptions, Set, SetOptions},
nft::NftClient,
};
#[cfg(test)]
mod tests;
mod action;
pub mod helpers;
mod nft;
/// Entrypoint: hand a fresh plugin instance to reaction's plugin main loop.
#[tokio::main]
async fn main() {
    reaction_plugin::main_loop(Plugin::default()).await;
}
/// The nftables plugin: owns the libnftables client thread, the sets to
/// create at startup, the configured actions, and the shutdown controller.
#[derive(Default)]
struct Plugin {
    // Client to the dedicated libnftables worker thread
    nft: NftClient,
    // Sets to create on start, one per configured set name (options merged)
    sets: Vec<Set>,
    // Actions built during load_config, spawned during start
    actions: Vec<Action>,
    shutdown: ShutdownController,
}
impl PluginInfo for Plugin {
async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
Ok(Manifest {
hello: Hello::new(),
streams: BTreeSet::default(),
actions: BTreeSet::from(["nftables".into()]),
})
}
async fn load_config(
&mut self,
streams: Vec<StreamConfig>,
actions: Vec<ActionConfig>,
) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
if !streams.is_empty() {
return Err("This plugin can't handle any stream type".into());
}
let mut ret_actions = Vec::with_capacity(actions.len());
let mut set_options: BTreeMap<String, SetOptions> = BTreeMap::new();
for ActionConfig {
stream_name,
filter_name,
action_name,
action_type,
config,
patterns,
} in actions
{
if &action_type != "nftables" {
return Err("This plugin can't handle other action types than nftables".into());
}
let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| {
format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}")
})?;
options.set_ip_index(patterns).map_err(|_|
format!(
"No pattern with name {} in filter {stream_name}.{filter_name}. Try setting the option `pattern` to your pattern name of type 'ip'",
&options.pattern
)
)?;
// Merge option
set_options
.entry(options.set.clone())
.or_default()
.merge(&options.set_options)
.map_err(|err| format!("set {}: {err}", options.set))?;
let (tx, rx) = remoc::rch::mpsc::channel(1);
self.actions.push(Action::new(
self.nft.clone(),
self.shutdown.token(),
rx,
options,
)?);
ret_actions.push(ActionImpl { tx });
}
// Init all sets
while let Some((name, options)) = set_options.pop_first() {
self.sets.push(Set::from(name, options));
}
Ok((vec![], ret_actions))
}
async fn start(&mut self) -> RemoteResult<()> {
self.shutdown.delegate().handle_quit_signals()?;
let mut batch = Batch::new();
batch.add(reaction_table());
// Create a chain for each registered netfilter hook
for hook in self
.sets
.iter()
.flat_map(|set| &set.hooks)
.collect::<BTreeSet<_>>()
{
batch.add(NfListObject::Chain(Chain {
family: NfFamily::INet,
table: Cow::Borrowed("reaction"),
name: Cow::from(hook.as_str()),
_type: Some(NfChainType::Filter),
hook: Some(hook.into()),
prio: Some(0),
..Default::default()
}));
}
for set in &self.sets {
set.init(&mut batch)?;
}
// TODO apply batch
self.nft.send(batch).await?;
// Launch a task that will destroy the table on shutdown
{
let token = self.shutdown.token();
tokio::spawn(async move {
token.wait().await;
Batch::new().delete(reaction_table());
});
}
// Launch all actions
while let Some(action) = self.actions.pop() {
tokio::spawn(async move { action.serve().await });
}
self.actions = Default::default();
Ok(())
}
async fn close(self) -> RemoteResult<()> {
self.shutdown.ask_shutdown();
self.shutdown.wait_all_task_shutdown().await;
Ok(())
}
}
/// The nftables table object owned by this plugin; used both for creation
/// at startup and deletion at shutdown.
fn reaction_table() -> NfListObject<'static> {
    let table = Table {
        family: NfFamily::INet,
        name: Cow::Borrowed("reaction"),
        handle: None,
    };
    NfListObject::Table(table)
}

View file

@ -1,81 +0,0 @@
use std::{
ffi::{CStr, CString},
thread,
};
use libnftables1_sys::Nftables;
use nftables::batch::Batch;
use tokio::sync::{mpsc, oneshot};
/// A client with a dedicated server thread to libnftables.
/// Calling [`Default::default()`] spawns a new server thread.
/// Cloning just creates a new client to the same server thread.
///
/// The thread owns the libnftables context; clients talk to it over an mpsc
/// channel, so all nftables commands are serialized.
#[derive(Clone)]
pub struct NftClient {
    // Command channel to the worker thread; the thread exits once every
    // clone of this sender has been dropped.
    tx: mpsc::Sender<NftCommand>,
}
impl Default for NftClient {
fn default() -> Self {
let (tx, mut rx) = mpsc::channel(10);
thread::spawn(move || {
let mut conn = Nftables::new();
while let Some(NftCommand { json, ret }) = rx.blocking_recv() {
let (rc, output, error) = conn.run_cmd(json.as_ptr());
let res = match rc {
0 => to_rust_string(output)
.ok_or_else(|| "unknown ok (rc = 0 but no output buffer)".into()),
_ => to_rust_string(error)
.map(|err| format!("error (rc = {rc}: {err})"))
.ok_or_else(|| format!("unknown error (rc = {rc} but no error buffer)")),
};
let _ = ret.send(res);
}
});
NftClient { tx }
}
}
impl NftClient {
    /// Send a batch to nftables.
    ///
    /// Serializes the batch to JSON, forwards it to the worker thread, and
    /// waits for the result. `Ok` carries nftables' output, `Err` a message.
    pub async fn send(&self, batch: Batch<'_>) -> Result<String, String> {
        // convert JSON to CString
        let mut json = serde_json::to_vec(&batch.to_nftables())
            .map_err(|err| format!("couldn't build json to send to nftables: {err}"))?;
        // byte literal instead of `'\0' as u8` (clippy::char_lit_as_u8)
        json.push(b'\0');
        let json = CString::from_vec_with_nul(json)
            .map_err(|err| format!("invalid json with null char: {err}"))?;
        // Send command
        let (tx, rx) = oneshot::channel();
        let command = NftCommand { json, ret: tx };
        self.tx
            .send(command)
            .await
            .map_err(|err| format!("nftables thread has quit, can't send command: {err}"))?;
        // Wait for result; the outer `?` unwraps the oneshot error, leaving the
        // worker's own Result as the return value.
        rx.await
            .map_err(|_| "nftables thread has quit, no response for command".to_string())?
    }
}
/// One unit of work for the worker thread: the NUL-terminated JSON command to
/// run and a oneshot channel on which to deliver the result.
struct NftCommand {
    json: CString,
    ret: oneshot::Sender<Result<String, String>>,
}
/// Copy a C string pointer returned by libnftables into an owned Rust String.
///
/// Returns `None` on a null pointer; invalid UTF-8 is replaced lossily.
fn to_rust_string(c_ptr: *const i8) -> Option<String> {
    if c_ptr.is_null() {
        return None;
    }
    // SAFETY: the pointer is non-null and libnftables hands us a
    // NUL-terminated buffer valid for the duration of this call.
    let c_str = unsafe { CStr::from_ptr(c_ptr) };
    Some(c_str.to_string_lossy().into_owned())
}

View file

@ -1,247 +0,0 @@
use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig, Value};
use serde_json::json;
use crate::Plugin;
// The nftables plugin defines no stream types: any stream config must be
// rejected, while a fully empty config loads fine.
#[tokio::test]
async fn conf_stream() {
    // No stream is supported by nftables
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "nftables".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_err()
    );
    // Empty config is ok
    assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok());
}
// Table-driven validation of a single action's options: each case is
// (expected validity, JSON config, available filter pattern names).
#[tokio::test]
async fn conf_action_standalone() {
    // `p` contains a pattern named "ip" (the default pattern); `p_noip` doesn't
    let p = vec!["name".into(), "ip".into(), "ip2".into()];
    let p_noip = vec!["name".into(), "ip2".into()];
    for (is_ok, conf, patterns) in [
        // minimal set
        (true, json!({ "set": "test" }), &p),
        // missing set key
        (false, json!({}), &p),
        (false, json!({ "version": "ipv4" }), &p),
        // unknown key
        (false, json!({ "set": "test", "unknown": "yes" }), &p),
        (false, json!({ "set": "test", "ip_index": 1 }), &p),
        (false, json!({ "set": "test", "timeout_u32": 1 }), &p),
        // pattern //
        (true, json!({ "set": "test" }), &p),
        (true, json!({ "set": "test", "pattern": "ip" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p),
        (true, json!({ "set": "test", "pattern": "ip2" }), &p_noip),
        // unknown pattern "ip"
        (false, json!({ "set": "test" }), &p_noip),
        (false, json!({ "set": "test", "pattern": "ip" }), &p_noip),
        // unknown pattern
        (false, json!({ "set": "test", "pattern": "unknown" }), &p),
        (false, json!({ "set": "test", "pattern": "uwu" }), &p_noip),
        // bad type
        (false, json!({ "set": "test", "pattern": 0 }), &p_noip),
        (false, json!({ "set": "test", "pattern": true }), &p_noip),
        // action //
        (true, json!({ "set": "test", "action": "add" }), &p),
        (true, json!({ "set": "test", "action": "delete" }), &p),
        // unknown action
        (false, json!({ "set": "test", "action": "create" }), &p),
        (false, json!({ "set": "test", "action": "insert" }), &p),
        (false, json!({ "set": "test", "action": "del" }), &p),
        (false, json!({ "set": "test", "action": "destroy" }), &p),
        // bad type
        (false, json!({ "set": "test", "action": true }), &p),
        (false, json!({ "set": "test", "action": 1 }), &p),
        // ip version //
        // ok
        (true, json!({ "set": "test", "version": "ipv4" }), &p),
        (true, json!({ "set": "test", "version": "ipv6" }), &p),
        (true, json!({ "set": "test", "version": "ip" }), &p),
        // unknown version
        (false, json!({ "set": "test", "version": 4 }), &p),
        (false, json!({ "set": "test", "version": 6 }), &p),
        (false, json!({ "set": "test", "version": 46 }), &p),
        (false, json!({ "set": "test", "version": "5" }), &p),
        (false, json!({ "set": "test", "version": "ipv5" }), &p),
        (false, json!({ "set": "test", "version": "4" }), &p),
        (false, json!({ "set": "test", "version": "6" }), &p),
        (false, json!({ "set": "test", "version": "46" }), &p),
        // bad type
        (false, json!({ "set": "test", "version": true }), &p),
        // hooks //
        // everything is fine really
        (true, json!({ "set": "test", "hooks": [] }), &p),
        (
            true,
            json!({ "set": "test", "hooks": ["input", "forward", "ingress", "prerouting", "output", "postrouting", "egress"] }),
            &p,
        ),
        // hooks are lowercase only
        (false, json!({ "set": "test", "hooks": ["INPUT"] }), &p),
        (false, json!({ "set": "test", "hooks": ["FORWARD"] }), &p),
        (
            false,
            json!({ "set": "test", "hooks": ["unknown_hook"] }),
            &p,
        ),
        // timeout //
        (true, json!({ "set": "test", "timeout": "1m" }), &p),
        (true, json!({ "set": "test", "timeout": "3 days" }), &p),
        // bad
        (false, json!({ "set": "test", "timeout": "3 dayz"}), &p),
        (false, json!({ "set": "test", "timeout": 12 }), &p),
        // target //
        // anything is fine too
        (true, json!({ "set": "test", "target": "drop" }), &p),
        (true, json!({ "set": "test", "target": "accept" }), &p),
        (true, json!({ "set": "test", "target": "return" }), &p),
        (true, json!({ "set": "test", "target": "continue" }), &p),
        // bad
        (false, json!({ "set": "test", "target": "custom" }), &p),
        (false, json!({ "set": "test", "target": "DROP" }), &p),
        (false, json!({ "set": "test", "target": 11 }), &p),
        (false, json!({ "set": "test", "target": ["DROP"] }), &p),
    ] {
        let res = Plugin::default()
            .load_config(
                vec![],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "nftables".into(),
                    config: conf.clone().into(),
                    patterns: patterns.clone(),
                }],
            )
            .await;
        assert!(
            res.is_ok() == is_ok,
            "conf: {:?}, must be ok: {is_ok}, result: {:?}",
            conf,
            // empty Result::Ok because ActionImpl is not Debug
            res.map(|_| ())
        );
    }
}
// TODO
// Options for the same set coming from several actions must merge cleanly;
// a conflicting option value for an already-configured set must fail.
#[tokio::test]
async fn conf_action_merge() {
    let mut plugin = Plugin::default();
    // Declares set "test" with target/hooks/action options
    let set1 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action1".into(),
        action_type: "nftables".into(),
        config: json!({
            "set": "test",
            "target": "drop",
            "hooks": ["input"],
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };
    // Same set and target, adds a compatible "version" option
    let set2 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "nftables".into(),
        config: json!({
            "set": "test",
            "target": "drop",
            "version": "ip",
            "action": "add",
        })
        .into(),
        patterns: vec!["ip".into()],
    };
    // Same set, no set-level options at all
    let set3 = ActionConfig {
        stream_name: "stream".into(),
        filter_name: "filter".into(),
        action_name: "action2".into(),
        action_type: "nftables".into(),
        config: json!({
            "set": "test",
            "action": "delete",
        })
        .into(),
        patterns: vec!["ip".into()],
    };
    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1.clone(),
                // Same set, adding options, no conflict
                set2.clone(),
                // Same set, no new options, no conflict
                set3.clone(),
                // Unrelated set, so no conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "nftables".into(),
                    config: json!({
                        "set": "test2",
                        "target": "return",
                        "version": "ipv6",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;
    assert!(res.is_ok(), "res: {:?}", res.map(|_| ()));
    // Another set with conflict is not ok
    let res = plugin
        .load_config(
            vec![],
            vec![
                // First set
                set1,
                // Same set, adding options, no conflict
                set2,
                // Same set, no new options, no conflict
                set3,
                // Another set with conflict
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action3".into(),
                    action_type: "nftables".into(),
                    config: json!({
                        "set": "test",
                        "target": "target2",
                        "action": "del",
                    })
                    .into(),
                    patterns: vec!["ip".into()],
                },
            ],
        )
        .await;
    assert!(res.is_err(), "res: {:?}", res.map(|_| ()));
}

View file

@ -1,11 +0,0 @@
[package]
name = "reaction-plugin-virtual"
version = "1.0.0"
edition = "2024"
[dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
remoc.workspace = true
reaction-plugin.path = "../reaction-plugin"
serde.workspace = true
serde_json.workspace = true

View file

@ -1,179 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use reaction_plugin::{
ActionConfig, ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamConfig,
StreamImpl, Value, line::PatternLine,
};
use remoc::{rch::mpsc, rtc};
use serde::{Deserialize, Serialize};
#[cfg(test)]
mod tests;
/// Entrypoint: hand a fresh plugin instance to reaction's plugin main loop.
#[tokio::main]
async fn main() {
    reaction_plugin::main_loop(Plugin::default()).await;
}
/// The virtual plugin: stateless, all runtime state lives in the
/// stream/action tasks created during `load_config`.
#[derive(Default)]
struct Plugin {}
impl PluginInfo for Plugin {
    /// Static manifest: one stream type and one action type, both "virtual".
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError> {
        Ok(Manifest {
            hello: Hello::new(),
            streams: BTreeSet::from(["virtual".into()]),
            actions: BTreeSet::from(["virtual".into()]),
        })
    }

    /// Build virtual streams first, then wire each virtual action to the
    /// stream designated by its `to` option. Errors if a type is not
    /// "virtual" or if a stream name is defined twice.
    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)> {
        let mut ret_streams = Vec::with_capacity(streams.len());
        let mut ret_actions = Vec::with_capacity(actions.len());
        // Stream senders indexed by name, for actions to look up
        let mut local_streams = BTreeMap::new();
        for StreamConfig {
            stream_name,
            stream_type,
            config,
        } in streams
        {
            if stream_type != "virtual" {
                return Err("This plugin can't handle other stream types than virtual".into());
            }
            let (virtual_stream, receiver) = VirtualStream::new(config)?;
            // `.is_some()` instead of `if let Some(_)` (clippy::redundant_pattern_matching)
            if local_streams.insert(stream_name, virtual_stream).is_some() {
                return Err("this virtual stream has already been initialized".into());
            }
            ret_streams.push(StreamImpl {
                stream: receiver,
                standalone: false,
            });
        }
        for ActionConfig {
            stream_name,
            filter_name,
            action_name,
            action_type,
            config,
            patterns,
        } in actions
        {
            if &action_type != "virtual" {
                return Err("This plugin can't handle other action types than virtual".into());
            }
            let (mut virtual_action, tx) = VirtualAction::new(
                stream_name,
                filter_name,
                action_name,
                config,
                patterns,
                &local_streams,
            )?;
            tokio::spawn(async move { virtual_action.serve().await });
            ret_actions.push(ActionImpl { tx });
        }
        Ok((ret_streams, ret_actions))
    }

    /// Nothing to start: action tasks are already spawned in `load_config`.
    async fn start(&mut self) -> RemoteResult<()> {
        Ok(())
    }

    /// Nothing to clean up on close.
    async fn close(self) -> RemoteResult<()> {
        Ok(())
    }
}
/// Sender half of a virtual stream; cloned into every action targeting it.
#[derive(Clone)]
struct VirtualStream {
    tx: mpsc::Sender<Line>,
}
impl VirtualStream {
    /// Build a stream and the receiving half handed back to reaction.
    ///
    /// Virtual streams accept no options: config must be null or `{}`.
    fn new(config: Value) -> Result<(Self, mpsc::Receiver<Line>), String> {
        // `'static` is implied on const items (clippy::redundant_static_lifetimes)
        const CONFIG_ERROR: &str = "streams of type virtual take no options";
        match config {
            Value::Null => (),
            Value::Object(map) => {
                // `!is_empty()` instead of `len() != 0` (clippy::len_zero)
                if !map.is_empty() {
                    return Err(CONFIG_ERROR.into());
                }
            }
            _ => return Err(CONFIG_ERROR.into()),
        }
        let (tx, rx) = mpsc::channel(1);
        Ok((Self { tx }, rx))
    }
}
/// User-facing options for an action of type "virtual".
#[derive(Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
struct ActionOptions {
    /// The line to send to the corresponding virtual stream, example: "ban \<ip\>"
    send: String,
    /// The name of the corresponding virtual stream, example: "my_stream"
    to: String,
}
/// A running virtual action: renders the configured line from each match and
/// forwards it to the target virtual stream.
struct VirtualAction {
    // Execs coming from reaction
    rx: mpsc::Receiver<Exec>,
    // Line template with pattern placeholders resolved against match indices
    send: PatternLine,
    // Sender of the target stream
    to: VirtualStream,
}
impl VirtualAction {
    /// Validate options and wire the action to its target stream.
    ///
    /// Errors if the options don't deserialize or if `to` names no known stream.
    fn new(
        stream_name: String,
        filter_name: String,
        action_name: String,
        config: Value,
        patterns: Vec<String>,
        streams: &BTreeMap<String, VirtualStream>,
    ) -> Result<(Self, mpsc::Sender<Exec>), String> {
        let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| {
            format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}")
        })?;
        let send = PatternLine::new(options.send, patterns);
        let stream = streams.get(&options.to).ok_or_else(|| {
            // The message previously said `send "…"` while printing the `to` option
            format!(
                "action {}.{}.{}: to \"{}\" matches no stream name",
                stream_name, filter_name, action_name, options.to
            )
        })?;
        let (tx, rx) = mpsc::channel(1);
        Ok((
            Self {
                rx,
                // field shorthand instead of `send: send`
                send,
                to: stream.clone(),
            },
            tx,
        ))
    }

    /// Forward rendered lines to the target stream until either channel closes.
    async fn serve(&mut self) {
        while let Ok(Some(exec)) = self.rx.recv().await {
            let line = self.send.line(exec.match_);
            // Was an `unwrap()`: if the stream side is gone, log and stop this
            // action instead of panicking the whole plugin process.
            if self.to.tx.send((line, exec.time)).await.is_err() {
                eprintln!("ERROR virtual action: target stream closed, stopping");
                break;
            }
        }
    }
}

View file

@ -1,322 +0,0 @@
use std::time::{SystemTime, UNIX_EPOCH};
use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig, Value};
use serde_json::json;
use crate::Plugin;
// Stream config validation: wrong type is rejected; null or empty-object
// options are accepted; any non-empty option content is rejected.
#[tokio::test]
async fn conf_stream() {
    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtu".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_err()
    );
    // Null config is ok
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: Value::Null
                }],
                vec![]
            )
            .await
            .is_ok()
    );
    // Empty object config is ok too
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: json!({}).into(),
                }],
                vec![]
            )
            .await
            .is_ok()
    );
    // Invalid conf: must be empty
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: json!({"key": "value" }).into(),
                }],
                vec![]
            )
            .await
            .is_err()
    );
}
// Action config validation: wrong type, missing "send"/"to" keys, and
// unknown extra keys must all be rejected; a complete config is accepted.
#[tokio::test]
async fn conf_action() {
    let streams = vec![StreamConfig {
        stream_name: "stream".into(),
        stream_type: "virtual".into(),
        config: Value::Null,
    }];
    let valid_conf = json!({ "send": "message", "to": "stream" });
    let missing_send_conf = json!({ "to": "stream" });
    let missing_to_conf = json!({ "send": "stream" });
    let extra_attr_conf = json!({ "send": "message", "send2": "message", "to": "stream" });
    let patterns = Vec::default();
    // Invalid type
    assert!(
        Plugin::default()
            .load_config(
                streams.clone(),
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtu".into(),
                    config: Value::Null,
                    patterns: patterns.clone(),
                }]
            )
            .await
            .is_err()
    );
    // Complete, valid config
    assert!(
        Plugin::default()
            .load_config(
                streams.clone(),
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: valid_conf.into(),
                    patterns: patterns.clone()
                }]
            )
            .await
            .is_ok()
    );
    // Each malformed config variant must fail deserialization
    for conf in [missing_send_conf, missing_to_conf, extra_attr_conf] {
        assert!(
            Plugin::default()
                .load_config(
                    streams.clone(),
                    vec![ActionConfig {
                        stream_name: "stream".into(),
                        filter_name: "filter".into(),
                        action_name: "action".into(),
                        action_type: "virtual".into(),
                        config: conf.clone().into(),
                        patterns: patterns.clone()
                    }]
                )
                .await
                .is_err(),
            "conf: {:?}",
            conf
        );
    }
}
// The "to" option must reference an existing virtual stream name.
#[tokio::test]
async fn conf_send() {
    // Valid to: option
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: Value::Null,
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "message", "to": "stream" }).into(),
                    patterns: vec![],
                }]
            )
            .await
            .is_ok(),
    );
    // Invalid to: option
    assert!(
        Plugin::default()
            .load_config(
                vec![StreamConfig {
                    stream_name: "stream".into(),
                    stream_type: "virtual".into(),
                    config: Value::Null,
                }],
                vec![ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "message", "to": "stream1" }).into(),
                    patterns: vec![],
                }]
            )
            .await
            .is_err(),
    );
}
// Let's allow empty streams for now.
// I guess it can be useful to have manual only actions.
//
// #[tokio::test]
// async fn conf_empty_stream() {
// assert!(
// Plugin::default()
// .load_config(
// vec![StreamConfig {
// stream_name: "stream".into(),
// stream_type: "virtual".into(),
// config: Value::Null,
// }],
// vec![],
// )
// .await
// .is_err(),
// );
// }
// End to end: an exec sent to the action comes out of the stream as the
// rendered template line, with the original timestamp preserved.
#[tokio::test]
async fn run_simple() {
    let mut plugin = Plugin::default();
    let (mut streams, mut actions) = plugin
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "virtual".into(),
                config: Value::Null,
            }],
            vec![ActionConfig {
                stream_name: "stream".into(),
                filter_name: "filter".into(),
                action_name: "action".into(),
                action_type: "virtual".into(),
                config: json!({ "send": "message <test>", "to": "stream" }).into(),
                patterns: vec!["test".into()],
            }],
        )
        .await
        .unwrap();
    let mut stream = streams.pop().unwrap();
    let action = actions.pop().unwrap();
    // Virtual streams are driven by actions, not standalone
    assert!(!stream.standalone);
    for m in ["test1", "test2", "test3", " a a a aa a a"] {
        let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        assert!(
            action
                .tx
                .send(Exec {
                    match_: vec![m.into()],
                    time,
                })
                .await
                .is_ok()
        );
        // The <test> placeholder is substituted with the match value
        assert_eq!(
            stream.stream.recv().await.unwrap().unwrap(),
            (format!("message {m}"), time),
        );
    }
}
// Two actions targeting the same stream: each renders its own template
// (picking different pattern indices) into the shared stream.
#[tokio::test]
async fn run_two_actions() {
    let mut plugin = Plugin::default();
    let (mut streams, mut actions) = plugin
        .load_config(
            vec![StreamConfig {
                stream_name: "stream".into(),
                stream_type: "virtual".into(),
                config: Value::Null,
            }],
            vec![
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "send <a>", "to": "stream" }).into(),
                    patterns: vec!["a".into(), "b".into()],
                },
                ActionConfig {
                    stream_name: "stream".into(),
                    filter_name: "filter".into(),
                    action_name: "action".into(),
                    action_type: "virtual".into(),
                    config: json!({ "send": "<b> send", "to": "stream" }).into(),
                    patterns: vec!["a".into(), "b".into()],
                },
            ],
        )
        .await
        .unwrap();
    let mut stream = streams.pop().unwrap();
    assert!(!stream.standalone);
    // Actions come back in config order; popped in reverse
    let action2 = actions.pop().unwrap();
    let action1 = actions.pop().unwrap();
    let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    assert!(
        action1
            .tx
            .send(Exec {
                match_: vec!["aa".into(), "bb".into()],
                time,
            })
            .await
            .is_ok(),
    );
    // action1 uses pattern <a> (first match value)
    assert_eq!(
        stream.stream.recv().await.unwrap().unwrap(),
        ("send aa".into(), time),
    );
    assert!(
        action2
            .tx
            .send(Exec {
                match_: vec!["aa".into(), "bb".into()],
                time,
            })
            .await
            .is_ok(),
    );
    // action2 uses pattern <b> (second match value)
    assert_eq!(
        stream.stream.recv().await.unwrap().unwrap(),
        ("bb send".into(), time),
    );
}

View file

@ -1,20 +0,0 @@
[package]
name = "reaction-plugin"
version = "1.0.0"
edition = "2024"
authors = ["ppom <reaction@ppom.me>"]
license = "AGPL-3.0"
homepage = "https://reaction.ppom.me"
repository = "https://framagit.org/ppom/reaction"
keywords = ["security", "sysadmin", "logs", "monitoring", "plugin"]
categories = ["security"]
description = "Plugin interface for reaction, a daemon that scans logs and takes action (alternative to fail2ban)"
[dependencies]
remoc.workspace = true
serde.workspace = true
serde_json.workspace = true
tokio.workspace = true
tokio.features = ["io-std", "signal"]
tokio-util.workspace = true
tokio-util.features = ["rt"]

View file

@ -1,599 +0,0 @@
//! This crate defines the API between reaction's core and plugins.
//!
//! Plugins must be written in Rust, for now.
//!
//! This documentation assumes the reader has some knowledge of Rust.
//! However, if you find that something is unclear, don't hesitate to
//! [ask for help](https://framagit.org/ppom/reaction/#help), even if you're new to Rust.
//!
//! To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides
//! the entrypoint for a plugin.
//! It lets a plugin define `0` to `n` custom stream and action types.
//!
//! ## Note on reaction-plugin API stability
//!
//! This is the v1 of reaction's plugin interface.
//! It's quite efficient and complete, but it has the big drawback of being Rust-only and [`tokio`]-only.
//!
//! In the future, I'd like to define a language-agnostic interface, which will be a major breaking change in the API.
//! However, I'll try my best to reduce the necessary code changes for plugins that use this v1.
//!
//! ## Naming & calling conventions
//!
//! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`.
//! It will be invoked with one positional argument "serve".
//! ```bash
//! reaction-plugin-$NAME serve
//! ```
//! This can be useful if you want to provide CLI functionality to your users,
//! so you can distinguish between a human user and reaction.
//!
//! ### State directory
//!
//! It will be executed in its own directory, in which it should have write access.
//! The directory is `$reaction_state_directory/plugin_data/$NAME`.
//! reaction's [state_directory](https://reaction.ppom.me/reference.html#state_directory)
//! defaults to its working directory, which is `/var/lib/reaction` in most setups.
//!
//! So your plugin directory should most often be `/var/lib/reaction/plugin_data/$NAME`,
//! but the plugin shouldn't expect that and use the current working directory instead.
//!
//! ## Communication
//!
//! Communication between the plugin and reaction is based on [`remoc`], which makes it possible to multiplex channels and remote objects/functions/trait
//! calls over a single transport channel.
//! The read and write channels are stdin and stdout, so you shouldn't use them for anything else.
//!
//! [`remoc`] builds upon [`tokio`], so you'll need to use tokio too.
//!
//! ### Errors
//!
//! Errors during:
//! - config loading in [`PluginInfo::load_config`]
//! - startup in [`PluginInfo::start`]
//!
//! should be returned to reaction by the function's return value, permitting reaction to abort startup.
//!
//! During normal runtime, after the plugin has loaded its config and started, and before reaction is quitting, there is no *rusty* way to send errors to reaction.
//! Then errors can be printed to stderr.
//! They'll be captured line by line and re-printed by reaction, with the plugin name prepended.
//!
//! A line can start with `DEBUG `, `INFO `, `WARN `, `ERROR `.
//! If it starts with none of the above, the line is assumed to be an error.
//!
//! Example:
//! Those lines:
//! ```log
//! WARN This is an official warning from the plugin
//! Freeeee errrooooorrr
//! ```
//! Will become:
//! ```log
//! WARN plugin test: This is an official warning from the plugin
//! ERROR plugin test: Freeeee errrooooorrr
//! ```
//!
//! Plugins should not exit when there is an error: reaction quits only when told to do so,
//! or if all its streams exit, and won't retry starting a failing plugin or stream.
//! Please only exit if you're in a 100% failing state.
//! It's considered better to continue operating in a degraded state than exiting.
//!
//! ## Getting started
//!
//! If you don't have Rust already installed, follow their [*Getting Started* documentation](https://rust-lang.org/learn/get-started/)
//! to get rust build tools and learn about editor support.
//!
//! Then create a new repository with cargo:
//!
//! ```bash
//! cargo new reaction-plugin-$NAME
//! cd reaction-plugin-$NAME
//! ```
//!
//! Add required dependencies:
//!
//! ```bash
//! cargo add reaction-plugin tokio
//! ```
//!
//! Replace `src/main.rs` with those contents:
//!
//! ```ignore
//! use reaction_plugin::PluginInfo;
//!
//! #[tokio::main]
//! async fn main() {
//! let plugin = MyPlugin::default();
//! reaction_plugin::main_loop(plugin).await;
//! }
//!
//! #[derive(Default)]
//! struct MyPlugin {}
//!
//! impl PluginInfo for MyPlugin {
//! // ...
//! }
//! ```
//!
//! Your IDE should now propose to implement missing members of the [`PluginInfo`] trait.
//! Your journey starts!
//!
//! ## Examples
//!
//! Core plugins can be found here: <https://framagit.org/ppom/reaction/-/tree/main/plugins>.
//!
//! - The "virtual" plugin is the simplest and can serve as a good complete example that links custom stream types and custom action types.
//! - The "ipset" plugin is a good example of an action-only plugin.
use std::{
collections::{BTreeMap, BTreeSet},
env::args,
error::Error,
fmt::Display,
process::exit,
time::Duration,
};
use remoc::{
Connect, rch,
rtc::{self, Server},
};
use serde::{Deserialize, Serialize};
use serde_json::{Number, Value as JValue};
use tokio::io::{stdin, stdout};
pub mod line;
pub mod shutdown;
pub mod time;
/// The only trait that **must** be implemented by a plugin.
/// It provides lists of stream, filter and action types implemented by a dynamic plugin.
#[rtc::remote]
pub trait PluginInfo {
    /// Return the manifest of the plugin.
    /// This should not be dynamic, and return always the same manifest.
    ///
    /// Example implementation:
    /// ```ignore
    /// Ok(Manifest {
    ///     hello: Hello::new(),
    ///     streams: BTreeSet::from(["mystreamtype".into()]),
    ///     actions: BTreeSet::from(["myactiontype".into()]),
    /// })
    /// ```
    /// (The fence is `ignore`d: the snippet is a bare expression without the
    /// imports needed to compile as a doctest, like the crate-level example.)
    ///
    /// First function called.
    async fn manifest(&mut self) -> Result<Manifest, rtc::CallError>;
    /// Load all plugin stream and action configurations.
    /// Must error if config is invalid.
    ///
    /// The plugin should not start running mutable commands here:
    /// It should be ok to quit without cleanup for now.
    ///
    /// Each [`StreamConfig`] from the `streams` arg should result in a corresponding [`StreamImpl`] returned, in the same order.
    /// Each [`ActionConfig`] from the `actions` arg should result in a corresponding [`ActionImpl`] returned, in the same order.
    ///
    /// Function called after [`PluginInfo::manifest`].
    async fn load_config(
        &mut self,
        streams: Vec<StreamConfig>,
        actions: Vec<ActionConfig>,
    ) -> RemoteResult<(Vec<StreamImpl>, Vec<ActionImpl>)>;
    /// Notify the plugin that setup is finished, permitting a last occasion to report an error that'll make reaction exit.
    /// All initialization (opening remote connections, starting streams, etc) should happen here.
    ///
    /// Function called after [`PluginInfo::load_config`].
    async fn start(&mut self) -> RemoteResult<()>;
    /// Notify the plugin that reaction is quitting and that the plugin should quit too.
    /// A few seconds later, the plugin will receive SIGTERM.
    /// A few seconds later, the plugin will receive SIGKILL.
    ///
    /// Function called after [`PluginInfo::start`], when reaction is quitting.
    async fn close(mut self) -> RemoteResult<()>;
}
/// The config for one Stream of a type advertised by this plugin.
///
/// For example this user config:
/// ```jsonnet
/// {
///   streams: {
///     mystream: {
///       type: "mystreamtype",
///       options: {
///         key: "value",
///         num: 3,
///       },
///       // filters: ...
///     },
///   },
/// }
/// ```
///
/// would result in the following `StreamConfig`:
///
/// ```ignore
/// StreamConfig {
///     stream_name: "mystream",
///     stream_type: "mystreamtype",
///     config: Value::Object(BTreeMap::from([
///         ("key", Value::String("value")),
///         ("num", Value::Integer(3)),
///     ])),
/// }
/// ```
///
/// Don't hesitate to take advantage of [`serde_json::from_value`], to deserialize the [`Value`] into a Rust struct:
///
/// ```ignore
/// #[derive(Deserialize)]
/// struct MyStreamOptions {
///     key: String,
///     num: i64,
/// }
///
/// fn validate_config(stream_config: Value) -> Result<MyStreamOptions, serde_json::Error> {
///     serde_json::from_value(stream_config.into())
/// }
/// ```
#[derive(Serialize, Deserialize, Clone)]
pub struct StreamConfig {
    /// User-chosen name of the stream (the key under `streams` in the config).
    pub stream_name: String,
    /// The `type` declared by the user; one of the types advertised in [`Manifest::streams`].
    pub stream_type: String,
    /// The raw `options` object from the user configuration.
    pub config: Value,
}
/// The config for one Action of a type advertised by this plugin.
///
/// For example this user config:
/// ```jsonnet
/// {
///   streams: {
///     mystream: {
///       // ...
///       filters: {
///         myfilter: {
///           // ...
///           actions: {
///             myaction: {
///               type: "myactiontype",
///               options: {
///                 boolean: true,
///                 array: ["item"],
///               },
///             },
///           },
///         },
///       },
///     },
///   },
/// }
/// ```
///
/// would result in the following `ActionConfig`:
///
/// ```ignore
/// ActionConfig {
///     action_name: "myaction",
///     action_type: "myactiontype",
///     config: Value::Object(BTreeMap::from([
///         ("boolean", Value::Bool(true)),
///         ("array", Value::Array([Value::String("item")])),
///     ])),
/// }
/// ```
///
/// Don't hesitate to take advantage of [`serde_json::from_value`], to deserialize the [`Value`] into a Rust struct:
///
/// ```ignore
/// #[derive(Deserialize)]
/// struct MyActionOptions {
///     boolean: bool,
///     array: Vec<String>,
/// }
///
/// fn validate_config(action_config: Value) -> Result<MyActionOptions, serde_json::Error> {
///     serde_json::from_value(action_config.into())
/// }
/// ```
#[derive(Serialize, Deserialize, Clone)]
pub struct ActionConfig {
    /// Name of the enclosing stream (key under `streams`).
    pub stream_name: String,
    /// Name of the enclosing filter (key under `filters`).
    pub filter_name: String,
    /// User-chosen name of the action (key under `actions`).
    pub action_name: String,
    /// The `type` declared by the user; one of the types advertised in [`Manifest::actions`].
    pub action_type: String,
    /// The raw `options` object from the user configuration.
    pub config: Value,
    /// Names of the patterns of the filter this action belongs to
    /// (useful with [`crate::line::PatternLine`]).
    pub patterns: Vec<String>,
}
/// Mandatory announcement of a plugin's protocol version, stream and action types.
#[derive(Serialize, Deserialize)]
pub struct Manifest {
    /// Protocol version.
    /// Just use the [`Hello::new`] constructor that uses this crate's current version.
    pub hello: Hello,
    /// Stream types that should be made available to reaction users
    ///
    /// ```jsonnet
    /// {
    ///   streams: {
    ///     my_stream: {
    ///       type: "..."
    ///       # ↑ all those exposed types
    ///     }
    ///   }
    /// }
    /// ```
    pub streams: BTreeSet<String>,
    /// Action types that should be made available to reaction users
    ///
    /// ```jsonnet
    /// {
    ///   streams: {
    ///     mystream: {
    ///       filters: {
    ///         myfilter: {
    ///           actions: {
    ///             myaction: {
    ///               type: "myactiontype",
    ///               # ↑ all those exposed types
    ///             },
    ///           },
    ///         },
    ///       },
    ///     },
    ///   },
    /// }
    /// ```
    pub actions: BTreeSet<String>,
}
/// Version of the plugin protocol, used to check compatibility between
/// reaction core and a plugin (see [`Hello::is_compatible`]).
#[derive(Default, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Hello {
    /// Major version of the protocol
    /// Increment means breaking change
    pub version_major: u32,
    /// Minor version of the protocol
    /// Increment means reaction core can handle older version plugins
    pub version_minor: u32,
}
impl Hello {
    /// Constructor that fills a [`Hello`] struct with [`crate`]'s version.
    /// You should use this in your plugin [`Manifest`].
    pub fn new() -> Hello {
        Hello {
            version_major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(),
            version_minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(),
        }
    }
    /// Used by the reaction daemon. Permits to check compatibility between two versions.
    /// Major versions must be the same between the daemon and plugin.
    /// Minor version of the daemon must be greater than or equal minor version of the plugin.
    pub fn is_compatible(server: &Hello, plugin: &Hello) -> std::result::Result<(), String> {
        let compatible = server.version_major == plugin.version_major
            && server.version_minor >= plugin.version_minor;
        if compatible {
            return Ok(());
        }
        // Incompatible: a lexicographic (major, minor) comparison tells us
        // which side lags behind the other.
        let plugin_is_newer = (plugin.version_major, plugin.version_minor)
            > (server.version_major, server.version_minor);
        if plugin_is_newer {
            Err("consider upgrading reaction".into())
        } else {
            Err("consider upgrading the plugin".into())
        }
    }
}
/// A clone of [`serde_json::Value`].
/// Implements From & Into [`serde_json::Value`].
///
/// Unlike [`serde_json::Value`], numbers are split into the
/// [`Value::Integer`] and [`Value::Float`] variants.
#[derive(Serialize, Deserialize, Clone)]
pub enum Value {
    Null,
    Bool(bool),
    /// Numbers representable as `i64`.
    Integer(i64),
    /// Numbers not representable as `i64`.
    Float(f64),
    String(String),
    Array(Vec<Value>),
    Object(BTreeMap<String, Value>),
}
impl From<JValue> for Value {
    /// Lossless except for numbers: a number becomes [`Value::Integer`]
    /// when it fits in `i64`, otherwise [`Value::Float`]; a number fitting
    /// neither representation degrades to [`Value::Null`].
    fn from(value: serde_json::Value) -> Self {
        match value {
            JValue::Null => Value::Null,
            JValue::Bool(b) => Value::Bool(b),
            JValue::Number(number) => {
                if let Some(number) = number.as_i64() {
                    Value::Integer(number)
                } else if let Some(number) = number.as_f64() {
                    Value::Float(number)
                } else {
                    Value::Null
                }
            }
            // `s` is already a `String`: no conversion needed.
            JValue::String(s) => Value::String(s),
            JValue::Array(v) => Value::Array(v.into_iter().map(|e| e.into()).collect()),
            JValue::Object(m) => Value::Object(m.into_iter().map(|(k, v)| (k, v.into())).collect()),
        }
    }
}
// Implementing `From` (instead of a manual `Into`) gives us `Into<JValue>
// for Value` for free via the standard blanket impl, so existing `.into()`
// call sites keep working.
impl From<Value> for JValue {
    fn from(value: Value) -> JValue {
        match value {
            Value::Null => JValue::Null,
            Value::Bool(v) => JValue::Bool(v),
            Value::Integer(v) => JValue::Number(v.into()),
            // `Number::from_f64` returns `None` for NaN and infinities, which
            // JSON cannot represent; degrade those to `Null` instead of
            // panicking (mirrors the lossy mapping of `From<JValue>`).
            Value::Float(v) => Number::from_f64(v).map_or(JValue::Null, JValue::Number),
            Value::String(v) => JValue::String(v),
            Value::Array(v) => JValue::Array(v.into_iter().map(|e| e.into()).collect()),
            Value::Object(m) => JValue::Object(m.into_iter().map(|(k, v)| (k, v.into())).collect()),
        }
    }
}
/// Represents a Stream handled by a plugin on reaction core's side.
///
/// During [`PluginInfo::load_config`], the plugin should create a [`remoc::rch::mpsc::channel`] of [`Line`].
/// It will keep the sending side for itself and put the receiving side in a [`StreamImpl`].
///
/// The plugin should start sending [`Line`]s in the channel only after [`PluginInfo::start`] has been called by reaction core.
#[derive(Debug, Serialize, Deserialize)]
pub struct StreamImpl {
    /// Receiving half of the [`Line`] channel created by the plugin.
    pub stream: rch::mpsc::Receiver<Line>,
    /// Whether this stream works standalone, or if it needs other streams or actions to be fed.
    /// Defaults to true.
    /// When `false`, reaction will exit if it's the last one standing.
    #[serde(default = "_true")]
    pub standalone: bool,
}
/// Helper for serde's `default = "_true"`: makes [`StreamImpl::standalone`] default to `true`.
fn _true() -> bool {
    true
}
/// Messages passed from the [`StreamImpl`] of a plugin to reaction core
pub type Line = (String, Duration);
// // Filters
// // For now, plugins can't handle custom filter implementations.
// #[derive(Serialize, Deserialize)]
// pub struct FilterImpl {
// pub stream: rch::lr::Sender<Exec>,
// }
// #[derive(Serialize, Deserialize)]
// pub struct Match {
// pub match_: String,
// pub result: rch::oneshot::Sender<bool>,
// }
/// Represents an Action handled by a plugin on reaction core's side.
///
/// During [`PluginInfo::load_config`], the plugin should create a [`remoc::rch::mpsc::channel`] of [`Exec`].
/// It will keep the receiving side for itself and put the sending side in an [`ActionImpl`].
///
/// The plugin will start receiving [`Exec`]s in the channel from reaction only after [`PluginInfo::start`] has been called by reaction core.
#[derive(Clone, Serialize, Deserialize)]
pub struct ActionImpl {
    /// Sending half of the [`Exec`] channel; kept by reaction core to deliver triggers.
    pub tx: rch::mpsc::Sender<Exec>,
}
/// A [trigger](https://reaction.ppom.me/reference.html#trigger) of the Action, sent by reaction core to the plugin.
///
/// The plugin should perform the configured action for each received [`Exec`].
///
/// Any error during its execution should be logged to stderr, see [`crate#Errors`] for error handling recommendations.
#[derive(Serialize, Deserialize)]
pub struct Exec {
    /// Values captured for the filter's patterns, in pattern order
    /// (see [`crate::line::PatternLine`]).
    pub match_: Vec<String>,
    /// Time associated with the triggering match.
    /// NOTE(review): presumably the time of the match event — confirm against reaction core.
    pub time: Duration,
}
/// The main loop for a plugin.
///
/// Bootstraps the communication with reaction core on the process' stdin and stdout,
/// then holds the connection and maintains the plugin in a server state.
///
/// Your main function should only create a struct that implements [`PluginInfo`]
/// and then call [`main_loop`]:
/// ```ignore
/// #[tokio::main]
/// async fn main() {
///     let plugin = MyPlugin::default();
///     reaction_plugin::main_loop(plugin).await;
/// }
/// ```
pub async fn main_loop<T: PluginInfo + Send + Sync + 'static>(plugin_info: T) {
    // First check that we're called by reaction
    let mut args = args();
    // skip 0th argument
    let _skip = args.next();
    if args.next().is_none_or(|arg| arg != "serve") {
        // Called by hand (no "serve" argument): explain and bail out.
        eprintln!("This plugin is not meant to be called as-is.");
        eprintln!(
            "reaction daemon starts plugins itself and communicates with them on stdin, stdout and stderr."
        );
        eprintln!("See the doc on plugin configuration: https://reaction.ppom.me/plugins/");
        exit(1);
    } else {
        // Establish the remoc connection over stdio; reaction core is on the
        // other end. We only ever send one thing: the PluginInfo client handle.
        let (conn, mut tx, _rx): (
            _,
            remoc::rch::base::Sender<PluginInfoClient>,
            remoc::rch::base::Receiver<()>,
        ) = Connect::io(remoc::Cfg::default(), stdin(), stdout())
            .await
            .unwrap();
        let (server, client) = PluginInfoServer::new(plugin_info, 1);
        // Run the handle send, the RPC server and the connection driver
        // concurrently until the link goes down.
        let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), conn);
        // Report each failure with a distinct exit code (the last one wins).
        let mut exit_code = 0;
        if let Err(err) = res1 {
            eprintln!("ERROR could not send plugin info to reaction: {err}");
            exit_code = 1;
        }
        if let Err(err) = res2 {
            eprintln!("ERROR could not launch plugin service for reaction: {err}");
            exit_code = 2;
        }
        if let Err(err) = res3 {
            eprintln!("ERROR connection error with reaction: {err}");
            exit_code = 3;
        }
        exit(exit_code);
    }
}
// Errors
/// Result alias used by the [`PluginInfo`] lifecycle methods.
pub type RemoteResult<T> = Result<T, RemoteError>;
/// reaction-plugin's Error type.
#[derive(Debug, Serialize, Deserialize)]
pub enum RemoteError {
    /// A connection error that originates from [`remoc`], the crate used for communication on the plugin's `stdin`/`stdout`.
    ///
    /// You should not instantiate this type of error yourself.
    Remoc(rtc::CallError),
    /// A free String for application-specific errors.
    ///
    /// You should only instantiate this type of error yourself, for any error that you encounter at startup and shutdown.
    ///
    /// Otherwise, any error during the plugin's runtime should be logged to stderr, see [`crate#Errors`] for error handling recommendations.
    Plugin(String),
}
impl Display for RemoteError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RemoteError::Remoc(call_error) => write!(f, "communication error: {call_error}"),
RemoteError::Plugin(err) => write!(f, "{err}"),
}
}
}
impl Error for RemoteError {}
// Convenience conversions so plugin code can build errors with `?` and `.into()`.
impl From<String> for RemoteError {
    fn from(value: String) -> Self {
        Self::Plugin(value)
    }
}
impl From<&str> for RemoteError {
    fn from(value: &str) -> Self {
        Self::Plugin(value.into())
    }
}
impl From<rtc::CallError> for RemoteError {
    fn from(value: rtc::CallError) -> Self {
        Self::Remoc(value)
    }
}

View file

@ -1,237 +0,0 @@
//! Helper module that permits to use templated lines (ie. `bad password for <ip>`), like in Stream's and Action's `cmd`.
//!
//! Corresponding reaction core settings:
//! - [Stream's `cmd`](https://reaction.ppom.me/reference.html#cmd)
//! - [Action's `cmd`](https://reaction.ppom.me/reference.html#cmd-1)
//!
/// One segment of a compiled template: either a literal chunk of text
/// or the index of a pattern whose matched value gets substituted in.
#[derive(Debug, PartialEq, Eq)]
enum SendItem {
    /// Index into the filter's pattern list / the match vector.
    Index(usize),
    /// Literal text copied verbatim into the output.
    Str(String),
}
impl SendItem {
    /// Lower bound (in bytes) of what this item contributes to a rendered
    /// line: the literal's length, or 0 for a substitution slot.
    fn min_size(&self) -> usize {
        if let Self::Str(s) = self {
            s.len()
        } else {
            0
        }
    }
}
/// Helper struct that permits to transform a template line with patterns into an instantiated line from a match.
///
/// Useful when you permit the user to reconstruct lines from an action, like in reaction's native actions and in the virtual plugin:
/// ```yaml
/// actions:
///   native:
///     cmd: ["iptables", "...", "<ip>"]
///
///   virtual:
///     type: virtual
///     options:
///       send: "<ip>: bad password on user <user>"
///       to: "my_virtual_stream"
/// ```
///
/// Usage example:
/// ```
/// # use reaction_plugin::line::PatternLine;
/// #
/// let template = "<ip>: bad password on user <user>".to_string();
/// let patterns = vec!["ip".to_string(), "user".to_string()];
/// let pattern_line = PatternLine::new(template, patterns);
///
/// assert_eq!(
///     pattern_line.line(vec!["1.2.3.4".to_string(), "root".to_string()]),
///     "1.2.3.4: bad password on user root".to_string(),
/// );
/// ```
///
/// You can find full examples in those plugins:
/// `reaction-plugin-virtual`,
/// `reaction-plugin-cluster`.
///
#[derive(Debug)]
pub struct PatternLine {
    /// Compiled template: literal chunks interleaved with pattern indices.
    line: Vec<SendItem>,
    /// Sum of the literal chunks' lengths, used to pre-size the rendered line.
    min_size: usize,
}
impl PatternLine {
    /// Construct [`PatternLine`] from a template line and the list of patterns of the underlying [Filter](https://reaction.ppom.me/reference.html#filter).
    ///
    /// This list of patterns comes from [`super::ActionConfig`].
    pub fn new(template: String, patterns: Vec<String>) -> Self {
        // Start from a single literal item, then repeatedly split literals
        // on each `<pattern>` placeholder.
        let line = Self::_from(patterns, Vec::from([SendItem::Str(template)]));
        Self {
            min_size: line.iter().map(SendItem::min_size).sum(),
            line,
        }
    }
    // Recursive compilation step: pops one pattern per call and splits every
    // literal segment on its first occurrence of `<pattern>`.
    //
    // NOTE(review): only the *first* occurrence of a placeholder in a given
    // segment is replaced; a template repeating the same `<pattern>` keeps
    // later occurrences as literal text — confirm this is intended.
    fn _from(mut patterns: Vec<String>, acc: Vec<SendItem>) -> Vec<SendItem> {
        match patterns.pop() {
            None => acc,
            Some(pattern) => {
                let enclosed_pattern = format!("<{pattern}>");
                let acc = acc
                    .into_iter()
                    .flat_map(|item| match &item {
                        // Already-substituted slots are left untouched.
                        SendItem::Index(_) => vec![item],
                        SendItem::Str(str) => match str.find(&enclosed_pattern) {
                            Some(i) => {
                                // After the pop, `patterns.len()` is exactly
                                // the index of `pattern` in the original vec.
                                let pattern_index = patterns.len();
                                let mut ret = vec![];
                                let (left, mid) = str.split_at(i);
                                if !left.is_empty() {
                                    ret.push(SendItem::Str(left.into()))
                                }
                                ret.push(SendItem::Index(pattern_index));
                                if mid.len() > enclosed_pattern.len() {
                                    let (_, right) = mid.split_at(enclosed_pattern.len());
                                    ret.push(SendItem::Str(right.into()))
                                }
                                ret
                            }
                            None => vec![item],
                        },
                    })
                    .collect();
                Self::_from(patterns, acc)
            }
        }
    }
    /// Instantiate the template with the values of one match.
    /// `match_` is indexed by pattern position; out-of-range indices are
    /// silently skipped (nothing is appended for them).
    pub fn line(&self, match_: Vec<String>) -> String {
        let mut res = String::with_capacity(self.min_size);
        for item in &self.line {
            match item {
                SendItem::Index(i) => {
                    if let Some(element) = match_.get(*i) {
                        res.push_str(element);
                    }
                }
                SendItem::Str(str) => res.push_str(str),
            }
        }
        res
    }
}
#[cfg(test)]
mod tests {
    use crate::line::{PatternLine, SendItem};
    // A template without any placeholder must pass through untouched.
    #[test]
    fn line_0_pattern() {
        let msg = "my message".to_string();
        let line = PatternLine::new(msg.clone(), vec![]);
        assert_eq!(line.line, vec![SendItem::Str(msg.clone())]);
        assert_eq!(line.min_size, msg.len());
        assert_eq!(line.line(vec![]), msg.clone());
    }
    // One placeholder, at the start, middle and end of the template.
    // The other match values must never leak into the output, even when
    // they look like placeholders themselves.
    #[test]
    fn line_1_pattern() {
        let patterns = vec![
            "ignored".into(),
            "oh".into(),
            "ignored".into(),
            "my".into(),
            "test".into(),
        ];
        let matches = vec!["yay", "oh", "my", "test", "<oh>", "<my>", "<test>"];
        // (template, index of the used pattern, expected compiled form,
        //  (match value, expected rendered line) pairs)
        let tests = [
            (
                "<oh> my test",
                1,
                vec![SendItem::Index(1), SendItem::Str(" my test".into())],
                vec![
                    ("yay", "yay my test"),
                    ("oh", "oh my test"),
                    ("my", "my my test"),
                    ("test", "test my test"),
                    ("<oh>", "<oh> my test"),
                    ("<my>", "<my> my test"),
                    ("<test>", "<test> my test"),
                ],
            ),
            (
                "oh <my> test",
                3,
                vec![
                    SendItem::Str("oh ".into()),
                    SendItem::Index(3),
                    SendItem::Str(" test".into()),
                ],
                vec![
                    ("yay", "oh yay test"),
                    ("oh", "oh oh test"),
                    ("my", "oh my test"),
                    ("test", "oh test test"),
                    ("<oh>", "oh <oh> test"),
                    ("<my>", "oh <my> test"),
                    ("<test>", "oh <test> test"),
                ],
            ),
            (
                "oh my <test>",
                4,
                vec![SendItem::Str("oh my ".into()), SendItem::Index(4)],
                vec![
                    ("yay", "oh my yay"),
                    ("oh", "oh my oh"),
                    ("my", "oh my my"),
                    ("test", "oh my test"),
                    ("<oh>", "oh my <oh>"),
                    ("<my>", "oh my <my>"),
                    ("<test>", "oh my <test>"),
                ],
            ),
        ];
        for (msg, index, expected_pl, lines) in tests {
            let pattern_line = PatternLine::new(msg.to_string(), patterns.clone());
            assert_eq!(pattern_line.line, expected_pl);
            for (match_element, line) in lines {
                for match_default in &matches {
                    // Fill every slot with a decoy value, then place the
                    // value under test at the pattern's index.
                    let mut match_ = vec![
                        match_default.to_string(),
                        match_default.to_string(),
                        match_default.to_string(),
                        match_default.to_string(),
                        match_default.to_string(),
                    ];
                    match_[index] = match_element.to_string();
                    assert_eq!(
                        pattern_line.line(match_.clone()),
                        line,
                        "match: {match_:?}, pattern_line: {pattern_line:?}"
                    );
                }
            }
        }
    }
    // Two placeholders: each slot is substituted independently.
    #[test]
    fn line_2_pattern() {
        let pattern_line = PatternLine::new("<a> ; <b>".into(), vec!["a".into(), "b".into()]);
        let matches = ["a", "b", "ab", "<a>", "<b>"];
        for a in &matches {
            for b in &matches {
                assert_eq!(
                    pattern_line.line(vec![a.to_string(), b.to_string()]),
                    format!("{a} ; {b}"),
                );
            }
        }
    }
}

View file

@ -1,162 +0,0 @@
//! Helper module that provides structures to ease the quitting process when having multiple tokio tasks.
//!
//! It defines a [`ShutdownController`], that permits to keep track of ongoing tasks, ask them to shutdown and wait for all of them to quit.
//!
//! You can have it as an attribute of your plugin struct.
//! ```ignore
//! struct MyPlugin {
//! shutdown: ShutdownController
//! }
//! ```
//!
//! You can then give a [`ShutdownToken`] to other tasks when creating them:
//!
//! ```ignore
//! impl PluginInfo for MyPlugin {
//! async fn start(&mut self) -> RemoteResult<()> {
//! let token = self.shutdown.token();
//!
//! tokio::spawn(async move {
//! token.wait().await;
//! eprintln!("DEBUG shutdown asked to quit, now quitting")
//! })
//! }
//! }
//! ```
//!
//! On closing, calling [`ShutdownController::ask_shutdown`] will inform all tasks waiting on [`ShutdownToken::wait`] that it's time to leave.
//! Then we can wait for [`ShutdownController::wait_all_task_shutdown`] to complete.
//!
//! ```ignore
//! impl PluginInfo for MyPlugin {
//! async fn close(self) -> RemoteResult<()> {
//! self.shutdown.ask_shutdown();
//! self.shutdown.wait_all_task_shutdown().await;
//! Ok(())
//! }
//! }
//! ```
//!
//! [`ShutdownDelegate::handle_quit_signals`] permits to handle SIGHUP, SIGINT and SIGTERM by gracefully shutting down tasks.
use tokio::signal::unix::{SignalKind, signal};
use tokio_util::{
sync::{CancellationToken, WaitForCancellationFuture},
task::task_tracker::{TaskTracker, TaskTrackerToken},
};
/// Permits to keep track of ongoing tasks, ask them to shutdown and wait for all of them to quit.
/// Thin wrapper around [`tokio_util::sync::CancellationToken`] and [`tokio_util::task::task_tracker::TaskTracker`].
#[derive(Default, Clone)]
pub struct ShutdownController {
    /// Cancelled once to broadcast the shutdown request to every token holder.
    shutdown_notifyer: CancellationToken,
    /// Tracks live tasks so [`ShutdownController::wait_all_task_shutdown`] can await them all.
    task_tracker: TaskTracker,
}
impl ShutdownController {
    /// Creates a controller tracking no tasks yet.
    pub fn new() -> Self {
        Self::default()
    }
    /// Ask for all tasks to quit
    pub fn ask_shutdown(&self) {
        // Cancelling wakes every `ShutdownToken::wait`; closing the tracker
        // allows `wait_all_task_shutdown` to complete once tasks are done.
        self.shutdown_notifyer.cancel();
        self.task_tracker.close();
    }
    /// Wait for all tasks to quit.
    /// This task may return even without having called [`ShutdownController::ask_shutdown`]
    /// first, if all tasks quit by themselves.
    pub async fn wait_all_task_shutdown(self) {
        // The tracker must be closed before `wait` can resolve.
        self.task_tracker.close();
        self.task_tracker.wait().await;
    }
    /// Returns a new shutdown token, to be held by a task.
    pub fn token(&self) -> ShutdownToken {
        ShutdownToken::new(self.shutdown_notifyer.clone(), self.task_tracker.token())
    }
    /// Returns a [`ShutdownDelegate`], which is able to ask for shutdown,
    /// without counting as a task that needs to be awaited.
    pub fn delegate(&self) -> ShutdownDelegate {
        ShutdownDelegate(self.shutdown_notifyer.clone())
    }
    /// Returns a future that will resolve only when a shutdown request happened.
    pub fn wait(&self) -> WaitForCancellationFuture<'_> {
        self.shutdown_notifyer.cancelled()
    }
}
/// Permits to ask for shutdown, without counting as a task that needs to be awaited.
pub struct ShutdownDelegate(CancellationToken);
impl ShutdownDelegate {
    /// Ask for all tasks to quit
    pub fn ask_shutdown(&self) {
        self.0.cancel();
    }
    /// Ensure [`Self::ask_shutdown`] is called whenever we receive SIGHUP,
    /// SIGTERM or SIGINT. Spawns a task that consumes self.
    pub fn handle_quit_signals(self) -> Result<(), String> {
        // Register all three signal listeners up-front, so registration
        // failures are reported synchronously to the caller.
        let register =
            |kind| signal(kind).map_err(|err| format!("could not register signal: {err}"));
        let mut sighup = register(SignalKind::hangup())?;
        let mut sigint = register(SignalKind::interrupt())?;
        let mut sigterm = register(SignalKind::terminate())?;
        tokio::spawn(async move {
            // Whichever signal fires first triggers the shutdown.
            let received = tokio::select! {
                _ = sighup.recv() => "SIGHUP",
                _ = sigint.recv() => "SIGINT",
                _ = sigterm.recv() => "SIGTERM",
            };
            eprintln!("received {received}, closing...");
            self.ask_shutdown();
        });
        Ok(())
    }
}
/// Created by a [`ShutdownController`].
/// Serves two purposes:
///
/// - Wait for a shutdown request to happen with [`Self::wait`]
/// - Keep track of the current task. While this token is held,
///   [`ShutdownController::wait_all_task_shutdown`] will block.
#[derive(Clone)]
pub struct ShutdownToken {
    /// Shared cancellation flag; cancelled when shutdown is requested.
    shutdown_notifyer: CancellationToken,
    /// Never read: held only so the controller's task tracker counts this
    /// task as running for as long as the token is alive.
    _task_tracker_token: TaskTrackerToken,
}
impl ShutdownToken {
    // Private: tokens are only handed out by [`ShutdownController::token`].
    fn new(shutdown_notifyer: CancellationToken, _task_tracker_token: TaskTrackerToken) -> Self {
        Self {
            shutdown_notifyer,
            _task_tracker_token,
        }
    }
    /// Returns underlying [`CancellationToken`] and [`TaskTrackerToken`], consuming self.
    pub fn split(self) -> (CancellationToken, TaskTrackerToken) {
        (self.shutdown_notifyer, self._task_tracker_token)
    }
    /// Returns a future that will resolve only when a shutdown request happened.
    pub fn wait(&self) -> WaitForCancellationFuture<'_> {
        self.shutdown_notifyer.cancelled()
    }
    /// Returns true if the shutdown request happened
    pub fn is_shutdown(&self) -> bool {
        self.shutdown_notifyer.is_cancelled()
    }
    /// Ask for all tasks to quit
    pub fn ask_shutdown(&self) {
        self.shutdown_notifyer.cancel();
    }
}

View file

@ -1,11 +1,11 @@
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign docker cargo-deb
#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign cargo-cross rustup cargo-deb
import argparse
import http.client
import json
import os
import shutil
import subprocess
import shutil
import sys
import tempfile
@ -56,14 +56,14 @@ def main():
print("exiting.")
sys.exit(1)
# Minisign password
cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True)
minisign_password = cmd.stdout
if args.publish:
# Git push
run_command(["git", "push", "--tags"])
# Minisign password
cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True)
minisign_password = cmd.stdout
# Create directory
run_command(
[
@ -86,11 +86,8 @@ def main():
pass
architectures = {
"x86_64-unknown-linux-gnu": "amd64",
# I would like to build for those targets instead:
# "x86_64-unknown-linux-musl": "amd64",
# "aarch64-unknown-linux-musl": "arm64",
# "arm-unknown-linux-gnueabihf": "armhf",
"x86_64-unknown-linux-musl": "amd64",
"aarch64-unknown-linux-musl": "arm64",
}
all_files = []
@ -102,8 +99,9 @@ def main():
You'll need to install minisign to check the authenticity of the package.
After installing reaction, create your configuration file(s) in JSON, YAML or JSONnet in the
`/etc/reaction/` directory.
After installing reaction, create your configuration file at
`/etc/reaction.json`, `/etc/reaction.jsonnet` or `/etc/reaction.yml`.
You can also provide a directory containing multiple configuration files in the previous formats.
See <https://reaction.ppom.me> for documentation.
Reload systemd:
@ -113,63 +111,40 @@ $ sudo systemctl daemon-reload
Then enable and start reaction with this command
```bash
# write first your configuration file(s) in /etc/reaction/
$ sudo systemctl enable --now reaction.service
# replace `reaction.jsonnet` with the name of your configuration file in /etc/
$ sudo systemctl enable --now reaction@reaction.jsonnet.service
```
""".strip(),
]
for architecture_rs, architecture_pretty in architectures.items():
for (architecture_rs, architecture_pretty) in architectures.items():
# Cargo clean
# run_command(["cargo", "clean"])
run_command(["cargo", "clean"])
# Build docker image
run_command(["docker", "pull", "rust:bookworm"])
run_command(["docker", "build", "-t", "rust:reaction", "."])
binaries = [
# Binaries
"reaction",
"reaction-plugin-virtual",
"reaction-plugin-ipset",
]
# Build
# Install toolchain
run_command(
[
"docker",
"run",
"--rm",
"-u", str(os.getuid()),
"-v", ".:/reaction",
"rust:reaction",
"sh", "-c",
" && ".join([
f"cargo build --release --target {architecture_rs} --package {binary}"
for binary in binaries
])
"rustup",
"toolchain",
"install",
f"stable-{architecture_rs}",
"--force-non-host", # I know, I know!
"--profile",
"minimal",
]
)
# Build
run_command(["cross", "build", "--release", "--target", architecture_rs])
# Build .deb
debs = [
"reaction",
"reaction-plugin-ipset",
]
for deb in debs:
cmd = run_command(
[
"cargo-deb",
"--target", architecture_rs,
"--package", deb,
"--no-build",
"--no-strip"
]
)
cmd = run_command(
["cargo-deb", f"--target={architecture_rs}", "--no-build", "--no-strip"]
)
deb_dir = os.path.join("./target", architecture_rs, "debian")
deb_names = [f for f in os.listdir(deb_dir) if f.endswith(".deb")]
deb_paths = [os.path.join(deb_dir, deb_name) for deb_name in deb_names]
deb_name = [f for f in os.listdir(deb_dir) if f.endswith(".deb")][0]
deb_path = os.path.join(deb_dir, deb_name)
# Archive
files_path = os.path.join("./target", architecture_rs, "release")
@ -183,7 +158,11 @@ $ sudo systemctl enable --now reaction.service
except FileExistsError:
pass
files = binaries + [
files = [
# Binaries
"reaction",
"nft46",
"ip46tables",
# Shell completion
"reaction.bash",
"reaction.fish",
@ -209,17 +188,16 @@ $ sudo systemctl enable --now reaction.service
os.chdir(root_dir)
# Sign
run_command(
["minisign", "-Sm", tar_path] + deb_paths,
text=True,
input=minisign_password,
)
deb_sig_paths = [f"{deb_path}.minisig" for deb_path in deb_paths]
deb_sig_names = [f"{deb_name}.minisig" for deb_name in deb_names]
tar_sig = f"{tar_path}.minisig"
if args.publish:
# Sign
run_command(
["minisign", "-Sm", deb_path, tar_path],
text=True,
input=minisign_password,
)
deb_sig = f"{deb_path}.minisig"
tar_sig = f"{tar_path}.minisig"
# Push
run_command(
[
@ -227,25 +205,18 @@ $ sudo systemctl enable --now reaction.service
"-az", # "-e", "ssh -J pica01",
tar_path,
tar_sig,
]
+ deb_paths
+ deb_sig_paths
+ [
deb_path,
deb_sig,
f"akesi:/var/www/static/reaction/releases/{tag}/",
]
)
else:
# Copy
run_command(["cp", tar_path, tar_sig] + deb_paths + deb_sig_paths + [local_dir])
all_files.extend([tar_path, tar_sig])
all_files.extend(deb_paths)
all_files.extend(deb_sig_paths)
all_files.extend([tar_path, tar_sig, deb_path, deb_sig])
# Instructions
# Instructions
instructions.append(
f"""
instructions.append(
f"""
## Tar installation ({architecture_pretty} linux)
```bash
@ -253,42 +224,30 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{tar_name} \\
-O https://static.ppom.me/reaction/releases/{tag}/{tar_name}.minisig \\
&& minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {tar_name} \\
&& rm {tar_name}.minisig \\
&& tar xvf {tar_name} \\
&& cd {pkg_name} \\
&& cd {tar_name} \\
&& sudo make install
```
""".strip()
)
If you want to install the ipset plugin as well:
```bash
sudo apt install -y libipset-dev && sudo make install-ipset
```
""".strip()
)
instructions.append(
f"""
instructions.append(
f"""
## Debian installation ({architecture_pretty} linux)
```bash
curl \\
{"\n".join([
f" -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\"
for deb_name in deb_names + deb_sig_names
])}
{"\n".join([
f" && minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {deb_name} \\"
for deb_name in deb_names
])}
&& rm {" ".join(deb_sig_names)} \\
&& sudo apt install {" ".join([f"./{deb_name}" for deb_name in deb_names])}
curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\
-O https://static.ppom.me/reaction/releases/{tag}/{deb_name}.minisig \\
&& minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {deb_name} \\
&& rm {deb_name}.minisig \\
&& sudo apt install ./{deb_name}
```
*You can also use [this third-party package repository](https://packages.azlux.fr).*
""".strip()
)
""".strip()
)
else:
# Copy
run_command(["cp", tar_path, deb_path, local_dir])
if not args.publish:
print("\n\n".join(instructions))
return
# Release

View file

@ -1,14 +0,0 @@
# This shell.nix for NixOS users is only needed when building reaction-plugin-ipset
with import <nixpkgs> {};
pkgs.mkShell {
  name = "libipset";
  # Native libraries/tools the ipset plugin build needs.
  buildInputs = [
    ipset
    nftables
    clang
  ];
  src = null;
  # Point build tooling at libclang (clang prints its own libclang path).
  shellHook = ''
    export LIBCLANG_PATH="$(clang -print-file-name=libclang.so)"
  '';
}

View file

@ -19,7 +19,7 @@ pub fn test_regex(
// Code close to Filter::setup()
let mut used_patterns: BTreeSet<Arc<Pattern>> = BTreeSet::new();
for pattern in config.patterns.values() {
for pattern in config.patterns().values() {
if let Some(index) = regex.find(pattern.name_with_braces()) {
// we already `find` it, so we must be able to `rfind` it
#[allow(clippy::unwrap_used)]
@ -43,7 +43,7 @@ pub fn test_regex(
let mut result = Vec::new();
if !used_patterns.is_empty() {
for pattern in used_patterns.iter() {
if let Some(match_) = matches.name(&pattern.name) {
if let Some(match_) = matches.name(pattern.name()) {
result.push(match_.as_str().to_string());
if pattern.is_ignore(match_.as_str()) {
ignored = true;

View file

@ -1,52 +1,41 @@
use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc, time::Duration};
use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc};
use chrono::TimeDelta;
use reaction_plugin::{ActionConfig, time::parse_duration};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::process::Command;
use super::{Match, Pattern, PatternType};
use super::parse_duration::*;
use super::{Match, Pattern};
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Action {
#[serde(default)]
pub cmd: Vec<String>,
cmd: Vec<String>,
// TODO one shot time deserialization
#[serde(skip_serializing_if = "Option::is_none")]
pub after: Option<String>,
after: Option<String>,
#[serde(skip)]
pub after_duration: Option<Duration>,
after_duration: Option<TimeDelta>,
#[serde(
rename = "onexit",
default = "set_false",
skip_serializing_if = "is_false"
)]
pub on_exit: bool,
on_exit: bool,
#[serde(default = "set_false", skip_serializing_if = "is_false")]
pub oneshot: bool,
#[serde(default = "set_false", skip_serializing_if = "is_false")]
pub ipv4only: bool,
#[serde(default = "set_false", skip_serializing_if = "is_false")]
pub ipv6only: bool,
oneshot: bool,
#[serde(skip)]
pub patterns: Arc<BTreeSet<Arc<Pattern>>>,
patterns: Arc<BTreeSet<Arc<Pattern>>>,
#[serde(skip)]
pub name: String,
name: String,
#[serde(skip)]
pub filter_name: String,
filter_name: String,
#[serde(skip)]
pub stream_name: String,
// Plugin-specific
#[serde(default, rename = "type", skip_serializing_if = "Option::is_none")]
pub action_type: Option<String>,
#[serde(default, skip_serializing_if = "Value::is_null")]
pub options: Value,
stream_name: String,
}
fn set_false() -> bool {
@ -58,10 +47,20 @@ fn is_false(b: &bool) -> bool {
}
impl Action {
pub fn is_plugin(&self) -> bool {
self.action_type
.as_ref()
.is_some_and(|action_type| action_type != "cmd")
pub fn name(&self) -> &str {
&self.name
}
pub fn after_duration(&self) -> Option<TimeDelta> {
self.after_duration
}
pub fn on_exit(&self) -> bool {
self.on_exit
}
pub fn oneshot(&self) -> bool {
self.oneshot
}
pub fn setup(
@ -94,18 +93,11 @@ impl Action {
return Err("character '.' is not allowed in filter name".into());
}
if !self.is_plugin() {
if self.cmd.is_empty() {
return Err("cmd is empty".into());
}
if self.cmd[0].is_empty() {
return Err("cmd's first item is empty".into());
}
if !self.options.is_null() {
return Err("can't define options without a plugin type".into());
}
} else if !self.cmd.is_empty() {
return Err("can't define a cmd and a plugin type".into());
if self.cmd.is_empty() {
return Err("cmd is empty".into());
}
if self.cmd[0].is_empty() {
return Err("cmd's first item is empty".into());
}
if let Some(after) = &self.after {
@ -118,22 +110,6 @@ impl Action {
return Err("cannot have `onexit: true`, without an `after` directive".into());
}
if self.ipv4only && self.ipv6only {
return Err("cannot have `ipv4only: true` and `ipv6only: true` in one action".into());
}
if self
.patterns
.iter()
.all(|pattern| pattern.pattern_type() != PatternType::Ip)
{
if self.ipv4only {
return Err("it makes no sense to have an action with `ipv4only: true` when no pattern of type ip is defined on the filter".into());
}
if self.ipv6only {
return Err("it makes no sense to have an action with `ipv6only: true` when no pattern of type ip is defined on the filter".into());
}
}
Ok(())
}
@ -157,24 +133,6 @@ impl Action {
cmd.args(&computed_command[1..]);
cmd
}
pub fn to_action_config(&self) -> Result<ActionConfig, String> {
Ok(ActionConfig {
stream_name: self.stream_name.clone(),
filter_name: self.filter_name.clone(),
action_name: self.name.clone(),
action_type: self
.action_type
.clone()
.ok_or_else(|| format!("action {} doesn't load a plugin. this is a bug!", self))?,
config: self.options.clone().into(),
patterns: self
.patterns
.iter()
.map(|pattern| pattern.name.clone())
.collect(),
})
}
}
impl PartialEq for Action {
@ -209,7 +167,6 @@ impl Display for Action {
#[cfg(test)]
impl Action {
/// Test-only constructor designed to be easy to call
#[allow(clippy::too_many_arguments)]
pub fn new(
cmd: Vec<&str>,
after: Option<&str>,
@ -218,14 +175,11 @@ impl Action {
filter_name: &str,
name: &str,
config_patterns: &super::Patterns,
ip_only: u8,
) -> Self {
let mut action = Self {
cmd: cmd.into_iter().map(|s| s.into()).collect(),
after: after.map(|s| s.into()),
on_exit,
ipv4only: ip_only == 4,
ipv6only: ip_only == 6,
..Default::default()
};
action
@ -249,19 +203,30 @@ pub mod tests {
use super::*;
pub fn ok_action() -> Action {
fn default_action() -> Action {
Action {
cmd: vec!["command".into()],
..Default::default()
cmd: Vec::new(),
name: "".into(),
filter_name: "".into(),
stream_name: "".into(),
after: None,
after_duration: None,
on_exit: false,
oneshot: false,
patterns: Arc::new(BTreeSet::default()),
}
}
pub fn ok_action() -> Action {
let mut action = default_action();
action.cmd = vec!["command".into()];
action
}
pub fn ok_action_with_after(d: String, name: &str) -> Action {
let mut action = Action {
cmd: vec!["command".into()],
after: Some(d),
..Default::default()
};
let mut action = default_action();
action.cmd = vec!["command".into()];
action.after = Some(d);
action
.setup("", "", name, Arc::new(BTreeSet::default()))
.unwrap();
@ -275,16 +240,16 @@ pub mod tests {
let patterns = Arc::new(BTreeSet::default());
// No command
action = Action::default();
action = default_action();
assert!(action.setup(&name, &name, &name, patterns.clone()).is_err());
// No command
action = Action::default();
action = default_action();
action.cmd = vec!["".into()];
assert!(action.setup(&name, &name, &name, patterns.clone()).is_err());
// No command
action = Action::default();
action = default_action();
action.cmd = vec!["".into(), "arg1".into()];
assert!(action.setup(&name, &name, &name, patterns.clone()).is_err());

View file

@ -1,5 +1,5 @@
use std::{
collections::{BTreeMap, btree_map::Entry},
collections::{btree_map::Entry, BTreeMap},
fs::File,
io,
path::Path,
@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize};
use thiserror::Error;
use tracing::{debug, error, info, warn};
use super::{Pattern, Plugin, Stream, merge_attrs};
use super::{Filter, Pattern, Stream};
pub type Patterns = BTreeMap<String, Arc<Pattern>>;
@ -20,23 +20,20 @@ pub type Patterns = BTreeMap<String, Arc<Pattern>>;
#[serde(deny_unknown_fields)]
pub struct Config {
#[serde(default = "num_cpus::get")]
pub concurrency: usize,
concurrency: usize,
#[serde(default = "dot", skip_serializing_if = "String::is_empty")]
pub state_directory: String,
#[serde(default, skip_serializing_if = "BTreeMap::is_empty")]
pub plugins: BTreeMap<String, Plugin>,
state_directory: String,
#[serde(default)]
pub patterns: Patterns,
patterns: Patterns,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub start: Vec<Vec<String>>,
start: Vec<Vec<String>>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub stop: Vec<Vec<String>>,
stop: Vec<Vec<String>>,
#[serde(default)]
pub streams: BTreeMap<String, Stream>,
streams: BTreeMap<String, Stream>,
// This field only serve the purpose of having a top-level place for saving YAML variables
#[serde(default, skip_serializing, rename = "definitions")]
@ -48,31 +45,43 @@ fn dot() -> String {
}
impl Config {
fn merge(&mut self, mut other: Config) -> Result<(), String> {
for (key, plugin) in other.plugins.into_iter() {
match self.plugins.entry(key) {
Entry::Vacant(e) => {
e.insert(plugin);
}
Entry::Occupied(e) => {
return Err(format!(
"plugin {} is already defined. plugin definitions can't be spread accross multiple files.",
e.key()
));
}
}
}
pub fn streams(&self) -> &BTreeMap<String, Stream> {
&self.streams
}
pub fn patterns(&self) -> &Patterns {
&self.patterns
}
pub fn concurrency(&self) -> usize {
self.concurrency
}
pub fn state_directory(&self) -> &str {
&self.state_directory
}
pub fn filters(&self) -> Vec<&Filter> {
self.streams
.values()
.flat_map(|stream| stream.filters().values())
.collect()
}
pub fn get_filter(&self, name: &(String, String)) -> Option<&Filter> {
self.streams
.get(&name.0)
.and_then(|stream| stream.get_filter(&name.1))
}
fn merge(&mut self, mut other: Config) -> Result<(), String> {
for (key, pattern) in other.patterns.into_iter() {
match self.patterns.entry(key) {
Entry::Vacant(e) => {
e.insert(pattern);
}
Entry::Occupied(e) => {
return Err(format!(
"pattern {} is already defined. pattern definitions can't be spread accross multiple files.",
e.key()
));
return Err(format!("pattern {} is already defined. pattern definitions can't be spread accross multiple files.", e.key()));
}
}
}
@ -93,19 +102,25 @@ impl Config {
self.start.append(&mut other.start);
self.stop.append(&mut other.stop);
self.state_directory = merge_attrs(
self.state_directory.clone(),
other.state_directory,
".".into(),
"state_directory",
)?;
if !(self.state_directory == dot()
|| other.state_directory == dot()
|| self.state_directory == other.state_directory)
{
return Err("state_directory have conflicting definitions".into());
}
if self.state_directory == dot() {
self.state_directory = other.state_directory;
}
self.concurrency = merge_attrs(
self.concurrency,
other.concurrency,
num_cpus::get(),
"concurrency",
)?;
if !(self.concurrency == num_cpus::get()
|| other.concurrency == num_cpus::get()
|| self.concurrency == other.concurrency)
{
return Err("concurrency have conflicting definitions".into());
}
if self.concurrency == num_cpus::get() {
self.concurrency = other.concurrency;
}
Ok(())
}
@ -118,10 +133,6 @@ impl Config {
// Nullify this useless field
self._definitions = serde_json::Value::Null;
for (key, value) in &mut self.plugins {
value.setup(key)?;
}
if self.patterns.is_empty() {
return Err("no patterns configured".into());
}
@ -345,7 +356,7 @@ enum ConfigError {
mod jsonnet {
use std::path::Path;
use jrsonnet_evaluator::{EvaluationState, FileImportResolver, error::LocError};
use jrsonnet_evaluator::{error::LocError, EvaluationState, FileImportResolver};
use super::ConfigError;
@ -370,7 +381,6 @@ mod jsonnet {
}
fn run_commands(commands: &Vec<Vec<String>>, moment: &str) -> bool {
debug!("Running {moment} commands...");
let mut ok = true;
for command in commands {
info!("{} command: run {:?}\n", moment, command);
@ -634,7 +644,7 @@ mod tests {
assert!(cfg_org.streams.contains_key("echo"));
assert_eq!(cfg_org.streams.len(), 1);
let filters = &cfg_org.streams.get("echo").unwrap().filters;
let filters = cfg_org.streams.get("echo").unwrap().filters();
assert!(filters.contains_key("f1"));
assert!(filters.contains_key("f2"));
assert_eq!(filters.len(), 2);
@ -694,8 +704,8 @@ mod tests {
assert!(cfg_org.streams.contains_key("echo"));
assert_eq!(cfg_org.streams.len(), 1);
let stream = cfg_org.streams.get("echo").unwrap();
assert_eq!(stream.cmd.len(), 1);
assert_eq!(stream.filters.len(), 1);
assert_eq!(stream.cmd().len(), 1);
assert_eq!(stream.filters().len(), 1);
}
#[test]

View file

@ -4,25 +4,15 @@ use std::{
fmt::Display,
hash::Hash,
sync::Arc,
time::Duration,
};
use reaction_plugin::time::parse_duration;
use chrono::TimeDelta;
use regex::Regex;
use serde::{Deserialize, Serialize};
use tracing::info;
use super::{Action, Match, Pattern, PatternType, Patterns};
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub enum Duplicate {
#[default]
#[serde(rename = "extend")]
Extend,
#[serde(rename = "ignore")]
Ignore,
#[serde(rename = "rerun")]
Rerun,
}
use super::parse_duration;
use super::{Action, Match, Pattern, Patterns};
// Only names are serialized
// Only computed fields are not deserialized
@ -30,39 +20,29 @@ pub enum Duplicate {
#[serde(deny_unknown_fields)]
pub struct Filter {
#[serde(skip)]
pub longuest_action_duration: Duration,
#[serde(skip)]
pub has_ip: bool,
longuest_action_duration: TimeDelta,
pub regex: Vec<String>,
regex: Vec<String>,
#[serde(skip)]
pub compiled_regex: Vec<Regex>,
compiled_regex: Vec<Regex>,
// We want patterns to be ordered
// This is necessary when using matches which contain multiple patterns
#[serde(skip)]
pub patterns: Arc<BTreeSet<Arc<Pattern>>>,
patterns: Arc<BTreeSet<Arc<Pattern>>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub retry: Option<u32>,
retry: Option<u32>,
#[serde(rename = "retryperiod", skip_serializing_if = "Option::is_none")]
pub retry_period: Option<String>,
retry_period: Option<String>,
#[serde(skip)]
pub retry_duration: Option<Duration>,
retry_duration: Option<TimeDelta>,
#[serde(default)]
pub duplicate: Duplicate,
pub actions: BTreeMap<String, Action>,
actions: BTreeMap<String, Action>,
#[serde(skip)]
pub name: String,
name: String,
#[serde(skip)]
pub stream_name: String,
// // Plugin-specific
// #[serde(default, rename = "type")]
// pub filter_type: Option<String>,
// #[serde(default = "null_value")]
// pub options: Value,
stream_name: String,
}
impl Filter {
@ -89,6 +69,38 @@ impl Filter {
}
}
pub fn name(&self) -> &str {
&self.name
}
pub fn stream_name(&self) -> &str {
&self.stream_name
}
pub fn retry(&self) -> Option<u32> {
self.retry
}
pub fn retry_duration(&self) -> Option<TimeDelta> {
self.retry_duration
}
pub fn longuest_action_duration(&self) -> TimeDelta {
self.longuest_action_duration
}
pub fn regex(&self) -> &Vec<String> {
&self.regex
}
pub fn actions(&self) -> &BTreeMap<String, Action> {
&self.actions
}
pub fn patterns(&self) -> &BTreeSet<Arc<Pattern>> {
&self.patterns
}
pub fn setup(
&mut self,
stream_name: &str,
@ -126,7 +138,7 @@ impl Filter {
if let Some(retry_period) = &self.retry_period {
self.retry_duration = Some(
parse_duration(retry_period)
.map_err(|err| format!("failed to parse retry period: {}", err))?,
.map_err(|err| format!("failed to parse retry time: {}", err))?,
);
self.retry_period = None;
}
@ -160,9 +172,9 @@ impl Filter {
}
} else if !first && new_patterns.contains(pattern) {
return Err(format!(
"pattern {} is present in the first regex but is not present in a following regex. all regexes should contain the same set of regexes",
&pattern.name_with_braces()
));
"pattern {} is present in the first regex but is not present in a following regex. all regexes should contain the same set of regexes",
&pattern.name_with_braces()
));
}
regex_buf = regex_buf.replacen(pattern.name_with_braces(), &pattern.regex, 1);
}
@ -181,18 +193,12 @@ impl Filter {
for (key, action) in &mut self.actions {
action.setup(stream_name, name, key, self.patterns.clone())?;
}
self.has_ip = self
.actions
.values()
.any(|action| action.ipv4only || action.ipv6only);
self.longuest_action_duration =
self.actions
.values()
.fold(Duration::from_secs(0), |acc, v| {
v.after_duration
.map_or(acc, |v| if v > acc { v } else { acc })
});
self.actions.values().fold(TimeDelta::seconds(0), |acc, v| {
v.after_duration()
.map_or(acc, |v| if v > acc { v } else { acc })
});
Ok(())
}
@ -205,19 +211,19 @@ impl Filter {
for pattern in self.patterns.as_ref() {
// if the pattern is in an optional part of the regex,
// there may be no captured group for it.
if let Some(match_) = matches.name(&pattern.name)
&& !pattern.is_ignore(match_.as_str())
{
let mut match_ = match_.as_str().to_string();
pattern.normalize(&mut match_);
result.push(match_);
if let Some(match_) = matches.name(pattern.name()) {
if !pattern.is_ignore(match_.as_str()) {
result.push(match_.as_str().to_string());
}
}
}
if result.len() == self.patterns.len() {
info!("{}: match {:?}", self, result);
return Some(result);
}
} else {
return Some(vec![]);
info!("{}: match []", self);
return Some(vec![".".to_string()]);
}
}
}
@ -228,103 +234,64 @@ impl Filter {
/// Then returns a corresponding [`Match`].
pub fn get_match_from_patterns(
&self,
mut patterns: BTreeMap<Arc<Pattern>, String>,
patterns: BTreeMap<Arc<Pattern>, String>,
) -> Result<Match, String> {
// Check pattern length
if patterns.len() != self.patterns.len() {
if patterns.len() != self.patterns().len() {
return Err(format!(
"{} patterns specified, while the {}.{} filter has {} pattern: ({})",
patterns.len(),
self.stream_name,
self.name,
self.patterns.len(),
self.patterns
self.stream_name(),
self.name(),
self.patterns().len(),
self.patterns()
.iter()
.map(|pattern| pattern.name.clone())
.map(|pattern| pattern.name().clone())
.reduce(|acc, pattern| acc + ", " + &pattern)
.unwrap_or("".into()),
));
}
for (pattern, match_) in &mut patterns {
for (pattern, match_) in &patterns {
if self.patterns.get(pattern).is_none() {
return Err(format!(
"pattern {} is not present in the filter {}.{}",
pattern.name, self.stream_name, self.name
pattern.name(),
self.stream_name,
self.name
));
}
if !pattern.is_match(match_) {
return Err(format!(
"'{}' doesn't match pattern {}",
match_, pattern.name,
match_,
pattern.name(),
));
}
if pattern.is_ignore(match_) {
return Err(format!(
"'{}' is explicitly ignored by pattern {}",
match_, pattern.name,
match_,
pattern.name(),
));
}
pattern.normalize(match_);
}
for pattern in self.patterns.iter() {
if !patterns.contains_key(pattern) {
return Err(format!(
"pattern {} is missing, because it's in the filter {}.{}",
pattern.name, self.stream_name, self.name
pattern.name(),
self.stream_name,
self.name
));
}
}
Ok(patterns.into_values().collect())
}
/// Filters [`Filter`]'s [`Action`]s according to its [`Pattern`]s [`PatternType`]
/// and those of the given [`Match`]
pub fn filtered_actions_from_match(&self, m: &Match) -> Vec<&Action> {
let ip_type = if self.has_ip {
self.patterns
.iter()
.zip(m)
.find(|(p, _)| p.pattern_type() == PatternType::Ip)
.map(|(_, m)| -> _ {
// Using this dumb heuristic is ok,
// because we know we have a valid IP address.
if m.contains(':') {
PatternType::Ipv6
} else if m.contains('.') {
PatternType::Ipv4
} else {
// This else should not happen, but better falling back on something than
// panicking, right? Maybe we should add a warning there?
PatternType::Regex
}
})
.unwrap_or(PatternType::Regex)
} else {
PatternType::Regex
};
let mut actions: Vec<_> = self
.actions
.values()
// If specific ip version, check it
.filter(move |action| !action.ipv4only || ip_type == PatternType::Ipv4)
.filter(move |action| !action.ipv6only || ip_type == PatternType::Ipv6)
.collect();
// Sort by after
actions.sort_by(|a, b| {
a.after_duration
.unwrap_or_default()
.cmp(&b.after_duration.unwrap_or_default())
});
actions
}
}
impl Display for Filter {
@ -360,7 +327,6 @@ impl Hash for Filter {
}
#[cfg(test)]
#[allow(clippy::too_many_arguments)]
impl Filter {
/// Test-only constructor designed to be easy to call
pub fn new(
@ -370,15 +336,13 @@ impl Filter {
retry_period: Option<&str>,
stream_name: &str,
name: &str,
duplicate: Duplicate,
config_patterns: &Patterns,
) -> Self {
let mut filter = Self {
actions: actions.into_iter().map(|a| (a.name.clone(), a)).collect(),
actions: actions.into_iter().map(|a| (a.name().into(), a)).collect(),
regex: regex.into_iter().map(|s| s.into()).collect(),
retry,
retry_period: retry_period.map(|s| s.into()),
duplicate,
..Default::default()
};
filter.setup(stream_name, name, config_patterns).unwrap();
@ -392,7 +356,6 @@ impl Filter {
retry_period: Option<&str>,
stream_name: &str,
name: &str,
duplicate: Duplicate,
config_patterns: &Patterns,
) -> &'static Self {
Box::leak(Box::new(Self::new(
@ -402,7 +365,6 @@ impl Filter {
retry_period,
stream_name,
name,
duplicate,
config_patterns,
)))
}
@ -411,7 +373,6 @@ impl Filter {
#[cfg(test)]
pub mod tests {
use crate::concepts::action::tests::{ok_action, ok_action_with_after};
use crate::concepts::pattern::PatternIp;
use crate::concepts::pattern::tests::{
boubou_pattern_with_ignore, default_pattern, number_pattern, ok_pattern_with_ignore,
};
@ -482,14 +443,14 @@ pub mod tests {
let name = "name".to_string();
let empty_patterns = Patterns::new();
let minute_str = "1m".to_string();
let minute = Duration::from_secs(60);
let two_minutes = Duration::from_secs(60 * 2);
let minute = TimeDelta::seconds(60);
let two_minutes = TimeDelta::seconds(60 * 2);
let two_minutes_str = "2m".to_string();
// duration 0
filter = ok_filter();
filter.setup(&name, &name, &empty_patterns).unwrap();
assert_eq!(filter.longuest_action_duration, Duration::default());
assert_eq!(filter.longuest_action_duration, TimeDelta::default());
let minute_action = ok_action_with_after(minute_str.clone(), &minute_str);
@ -546,7 +507,6 @@ pub mod tests {
.unwrap()
.to_string()
);
assert_eq!(&filter.regex[0].to_string(), "insert (?P<name>[abc]) here$");
assert_eq!(filter.patterns.len(), 1);
let stored_pattern = filter.patterns.first().unwrap();
assert_eq!(stored_pattern.regex, pattern.regex);
@ -572,10 +532,6 @@ pub mod tests {
.unwrap()
.to_string()
);
assert_eq!(
&filter.compiled_regex[0].to_string(),
"insert (?P<name>[abc]) here and (?P<boubou>(?:bou){1,3}) there"
);
assert_eq!(filter.patterns.len(), 2);
let stored_pattern = filter.patterns.first().unwrap();
assert_eq!(stored_pattern.regex, boubou.regex);
@ -594,20 +550,12 @@ pub mod tests {
.unwrap()
.to_string()
);
assert_eq!(
&filter.compiled_regex[0].to_string(),
"insert (?P<name>[abc]) here"
);
assert_eq!(
filter.compiled_regex[1].to_string(),
Regex::new("also add (?P<name>[abc]) there")
.unwrap()
.to_string()
);
assert_eq!(
&filter.compiled_regex[1].to_string(),
"also add (?P<name>[abc]) there"
);
assert_eq!(filter.patterns.len(), 1);
let stored_pattern = filter.patterns.first().unwrap();
assert_eq!(stored_pattern.regex, pattern.regex);
@ -634,10 +582,6 @@ pub mod tests {
.unwrap()
.to_string()
);
assert_eq!(
&filter.compiled_regex[1].to_string(),
"also add (?P<boubou>(?:bou){1,3}) here and (?P<name>[abc]) there"
);
assert_eq!(filter.patterns.len(), 2);
let stored_pattern = filter.patterns.first().unwrap();
assert_eq!(stored_pattern.regex, boubou.regex);
@ -710,32 +654,24 @@ pub mod tests {
Ok(vec!("b".into()))
);
// Doesn't match
assert!(
filter
.get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())]))
.is_err());
// Ignored match
assert!(
filter
.get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())]))
.is_err());
// Bad pattern
assert!(
filter
.get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())]))
.is_err());
// Bad number of patterns
assert!(
filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(boubou.clone(), "bou".into()),
]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(boubou.clone(), "bou".into()),
]))
.is_err());
// Bad number of patterns
assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err());
@ -763,42 +699,34 @@ pub mod tests {
Ok(vec!("bou".into(), "b".into()))
);
// Doesn't match
assert!(
filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "abc".into()),
(boubou.clone(), "bou".into()),
]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "abc".into()),
(boubou.clone(), "bou".into()),
]))
.is_err());
// Ignored match
assert!(
filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(boubou.clone(), "boubou".into()),
]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(boubou.clone(), "boubou".into()),
]))
.is_err());
// Bad pattern
assert!(
filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(number_pattern.clone(), "1".into()),
]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(number_pattern.clone(), "1".into()),
]))
.is_err());
// Bad number of patterns
assert!(
filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(boubou.clone(), "bou".into()),
(number_pattern.clone(), "1".into()),
]))
.is_err()
);
assert!(filter
.get_match_from_patterns(BTreeMap::from([
(pattern.clone(), "b".into()),
(boubou.clone(), "bou".into()),
(number_pattern.clone(), "1".into()),
]))
.is_err());
// Bad number of patterns
assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err());
@ -839,155 +767,4 @@ pub mod tests {
assert_eq!(filter.get_match("insert b here and boubou there"), None);
assert_eq!(filter.get_match("also add boubou here and b there"), None);
}
#[test]
fn get_match_from_patterns() {
// TODO
}
#[test]
fn filtered_actions_from_match_one_regex_pattern() {
let az_patterns = Pattern::new_map("az", "[a-z]+").unwrap();
let action = Action::new(
vec!["zblorg <az>"],
None,
false,
"test",
"test",
"a1",
&az_patterns,
0,
);
let filter = Filter::new(
vec![action.clone()],
vec![""],
None,
None,
"test",
"test",
Duplicate::default(),
&az_patterns,
);
assert_eq!(
vec![&action],
filter.filtered_actions_from_match(&vec!["zboum".into()])
);
}
#[test]
fn filtered_actions_from_match_two_regex_patterns() {
let patterns = BTreeMap::from([
(
"az".to_string(),
Arc::new(Pattern::new("az", "[a-z]+").unwrap()),
),
(
"num".to_string(),
Arc::new(Pattern::new("num", "[0-9]{1,3}").unwrap()),
),
]);
let action1 = Action::new(
vec!["zblorg <az> <num>"],
None,
false,
"test",
"test",
"a1",
&patterns,
0,
);
let action2 = Action::new(
vec!["zbleurg <num> <az>"],
None,
false,
"test",
"test",
"a2",
&patterns,
0,
);
let filter = Filter::new(
vec![action1.clone(), action2.clone()],
vec![""],
None,
None,
"test",
"test",
Duplicate::default(),
&patterns,
);
assert_eq!(
vec![&action1, &action2],
filter.filtered_actions_from_match(&vec!["zboum".into()])
);
}
#[test]
fn filtered_actions_from_match_one_regex_one_ip() {
let patterns = BTreeMap::from([
(
"az".to_string(),
Arc::new(Pattern::new("az", "[a-z]+").unwrap()),
),
("ip".to_string(), {
let mut pattern = Pattern {
ip: PatternIp {
pattern_type: PatternType::Ip,
..Default::default()
},
..Default::default()
};
pattern.setup("ip").unwrap();
Arc::new(pattern)
}),
]);
let action4 = Action::new(
vec!["zblorg4 <az> <ip>"],
None,
false,
"test",
"test",
"action4",
&patterns,
4,
);
let action6 = Action::new(
vec!["zblorg6 <az> <ip>"],
None,
false,
"test",
"test",
"action6",
&patterns,
6,
);
let action = Action::new(
vec!["zblorg <az> <ip>"],
None,
false,
"test",
"test",
"action",
&patterns,
0,
);
let filter = Filter::new(
vec![action4.clone(), action6.clone(), action.clone()],
vec!["<az>: <ip>"],
None,
None,
"test",
"test",
Duplicate::default(),
&patterns,
);
assert_eq!(
filter.filtered_actions_from_match(&vec!["zboum".into(), "1.2.3.4".into()]),
vec![&action, &action4],
);
assert_eq!(
filter.filtered_actions_from_match(&vec!["zboum".into(), "ab4:35f::1".into()]),
vec![&action, &action6],
);
}
}

View file

@ -1,22 +1,21 @@
mod action;
mod config;
mod filter;
mod parse_duration;
mod pattern;
mod plugin;
mod stream;
use std::fmt::Debug;
use serde::{Deserialize, Serialize};
pub use action::Action;
pub use config::{Config, Patterns};
pub use filter::{Duplicate, Filter};
pub use pattern::{Pattern, PatternType};
pub use plugin::Plugin;
pub use filter::Filter;
use parse_duration::parse_duration;
pub use pattern::Pattern;
use serde::{Deserialize, Serialize};
pub use stream::Stream;
pub use treedb::time::{Time, now};
use chrono::{DateTime, Local};
pub type Time = DateTime<Local>;
pub type Match = Vec<String>;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
@ -25,66 +24,5 @@ pub struct MatchTime {
pub t: Time,
}
fn merge_attrs<A: Default + Debug + PartialEq + Eq + Clone>(
this: A,
other: A,
default: A,
name: &str,
) -> Result<A, String> {
if !(this == default || other == default || this == other) {
return Err(format!(
"'{name}' has conflicting definitions: '{this:?}', '{other:?}'"
));
}
if this == default {
return Ok(other);
}
Ok(this)
}
#[cfg(test)]
pub use filter::tests as filter_tests;
#[cfg(test)]
mod tests {
use crate::concepts::merge_attrs;
#[test]
fn test_merge_attrs() {
assert_eq!(merge_attrs(None::<String>, None, None, "t"), Ok(None));
assert_eq!(
merge_attrs(Some("coucou"), None, None, "t"),
Ok(Some("coucou"))
);
assert_eq!(
merge_attrs(None, Some("coucou"), None, "t"),
Ok(Some("coucou"))
);
assert_eq!(
merge_attrs(Some("coucou"), Some("coucou"), None, "t"),
Ok(Some("coucou"))
);
assert_eq!(
merge_attrs(Some("coucou"), Some("hello"), None, "t"),
Err("'t' has conflicting definitions: 'Some(\"coucou\")', 'Some(\"hello\")'".into())
);
assert_eq!(merge_attrs("", "", "", "t"), Ok(""));
assert_eq!(merge_attrs("coucou", "", "", "t"), Ok("coucou"));
assert_eq!(merge_attrs("", "coucou", "", "t"), Ok("coucou"));
assert_eq!(merge_attrs("coucou", "coucou", "", "t"), Ok("coucou"));
assert_eq!(
merge_attrs("coucou", "hello", "", "t"),
Err("'t' has conflicting definitions: '\"coucou\"', '\"hello\"'".into())
);
assert_eq!(merge_attrs(0, 0, 0, "t"), Ok(0));
assert_eq!(merge_attrs(5, 0, 0, "t"), Ok(5));
assert_eq!(merge_attrs(0, 5, 0, "t"), Ok(5));
assert_eq!(merge_attrs(5, 5, 0, "t"), Ok(5));
assert_eq!(
merge_attrs(5, 6, 0, "t"),
Err("'t' has conflicting definitions: '5', '6'".into())
);
}
}

View file

@ -1,13 +1,7 @@
//! This module provides [`parse_duration`], which parses duration in reaction's format (ie. `6h`, `3 days`)
//!
//! Like in those reaction core settings:
//! - [Filters' `retryperiod`](https://reaction.ppom.me/reference.html#retryperiod)
//! - [Actions' `after`](https://reaction.ppom.me/reference.html#after).
use std::time::Duration;
use chrono::TimeDelta;
/// Parses the &str argument as a Duration
/// Returns Ok(Duration) if successful, or Err(String).
/// Returns Ok(TimeDelta) if successful, or Err(String).
///
/// Format is defined as follows: `<integer> <unit>`
/// - whitespace between the integer and unit is optional
@ -18,7 +12,7 @@ use std::time::Duration;
/// - `m` / `min` / `mins` / `minute` / `minutes`
/// - `h` / `hour` / `hours`
/// - `d` / `day` / `days`
pub fn parse_duration(d: &str) -> Result<Duration, String> {
pub fn parse_duration(d: &str) -> Result<TimeDelta, String> {
let d_trimmed = d.trim();
let chars = d_trimmed.as_bytes();
let mut value = 0;
@ -30,14 +24,14 @@ pub fn parse_duration(d: &str) -> Result<Duration, String> {
if i == 0 {
return Err(format!("duration '{}' doesn't start with digits", d));
}
let ok_as = |func: fn(u64) -> Duration| -> Result<_, String> { Ok(func(value as u64)) };
let ok_as = |func: fn(i64) -> TimeDelta| -> Result<_, String> { Ok(func(value as i64)) };
match d_trimmed[i..].trim() {
"ms" | "millis" | "millisecond" | "milliseconds" => ok_as(Duration::from_millis),
"s" | "sec" | "secs" | "second" | "seconds" => ok_as(Duration::from_secs),
"m" | "min" | "mins" | "minute" | "minutes" => ok_as(Duration::from_mins),
"h" | "hour" | "hours" => ok_as(Duration::from_hours),
"d" | "day" | "days" => ok_as(|d: u64| Duration::from_hours(d * 24)),
"ms" | "millis" | "millisecond" | "milliseconds" => ok_as(TimeDelta::milliseconds),
"s" | "sec" | "secs" | "second" | "seconds" => ok_as(TimeDelta::seconds),
"m" | "min" | "mins" | "minute" | "minutes" => ok_as(TimeDelta::minutes),
"h" | "hour" | "hours" => ok_as(TimeDelta::hours),
"d" | "day" | "days" => ok_as(TimeDelta::days),
unit => Err(format!(
"unit {} not recognised. must be one of s/sec/seconds, m/min/minutes, h/hours, d/days",
unit
@ -48,6 +42,8 @@ pub fn parse_duration(d: &str) -> Result<Duration, String> {
#[cfg(test)]
mod tests {
use chrono::TimeDelta;
use super::*;
#[test]
@ -57,13 +53,13 @@ mod tests {
#[test]
fn parse_duration_test() {
assert_eq!(parse_duration("1s"), Ok(Duration::from_secs(1)));
assert_eq!(parse_duration("12s"), Ok(Duration::from_secs(12)));
assert_eq!(parse_duration(" 12 secs "), Ok(Duration::from_secs(12)));
assert_eq!(parse_duration("2m"), Ok(Duration::from_mins(2)));
assert_eq!(parse_duration("6 hours"), Ok(Duration::from_hours(6)));
assert_eq!(parse_duration("1d"), Ok(Duration::from_hours(1 * 24)));
assert_eq!(parse_duration("365d"), Ok(Duration::from_hours(365 * 24)));
assert_eq!(parse_duration("1s"), Ok(TimeDelta::seconds(1)));
assert_eq!(parse_duration("12s"), Ok(TimeDelta::seconds(12)));
assert_eq!(parse_duration(" 12 secs "), Ok(TimeDelta::seconds(12)));
assert_eq!(parse_duration("2m"), Ok(TimeDelta::minutes(2)));
assert_eq!(parse_duration("6 hours"), Ok(TimeDelta::hours(6)));
assert_eq!(parse_duration("1d"), Ok(TimeDelta::days(1)));
assert_eq!(parse_duration("365d"), Ok(TimeDelta::days(365)));
assert!(parse_duration("d 3").is_err());
assert!(parse_duration("d3").is_err());

View file

@ -3,32 +3,24 @@ use std::cmp::Ordering;
use regex::{Regex, RegexSet};
use serde::{Deserialize, Serialize};
mod ip;
pub use ip::{PatternIp, PatternType};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(test, derive(Default))]
#[serde(deny_unknown_fields)]
pub struct Pattern {
#[serde(default)]
pub regex: String,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub ignore: Vec<String>,
ignore: Vec<String>,
#[serde(default, rename = "ignoreregex", skip_serializing_if = "Vec::is_empty")]
pub ignore_regex: Vec<String>,
ignore_regex: Vec<String>,
#[serde(skip)]
pub compiled_ignore_regex: RegexSet,
#[serde(flatten)]
pub ip: PatternIp,
compiled_ignore_regex: RegexSet,
#[serde(skip)]
pub name: String,
name: String,
#[serde(skip)]
pub name_with_braces: String,
name_with_braces: String,
}
impl Pattern {
@ -40,14 +32,13 @@ impl Pattern {
}
}
pub fn name(&self) -> &String {
&self.name
}
pub fn name_with_braces(&self) -> &String {
&self.name_with_braces
}
pub fn pattern_type(&self) -> PatternType {
self.ip.pattern_type
}
pub fn setup(&mut self, name: &str) -> Result<(), String> {
self._setup(name)
.map_err(|msg| format!("pattern {}: {}", name, msg))
@ -63,13 +54,6 @@ impl Pattern {
return Err("character '.' is not allowed in pattern name".into());
}
if let Some(regex) = self.ip.setup()? {
if !self.regex.is_empty() {
return Err("patterns of type ip, ipv4, ipv6 have a built-in regex defined. you should not define it yourself".into());
}
self.regex = regex;
}
if self.regex.is_empty() {
return Err("regex is empty".into());
}
@ -105,7 +89,7 @@ impl Pattern {
Ok(())
}
/// Returns the pattern's regex compiled standalone, enclosed in ^ and $
/// Returns the pattern's regex compiled standalone.
/// It's not kept as a field of the [`Pattern`] struct
/// because it's only used during setup and for the `trigger` manual command.
///
@ -115,15 +99,6 @@ impl Pattern {
Regex::new(&format!("^{}$", self.regex)).map_err(|err| err.to_string())
}
/// Normalize the pattern.
/// This should happen after checking on ignores.
/// No-op when the pattern is not an IP.
/// Otherwise BitAnd the IP with its configured mask,
/// and add the /<mask>
pub fn normalize(&self, match_: &mut String) {
self.ip.normalize(match_)
}
/// Whether the provided string is a match for this pattern or not.
///
/// Doesn't take into account ignore and ignore_regex:
@ -144,7 +119,6 @@ impl Pattern {
pub fn is_ignore(&self, match_: &str) -> bool {
self.ignore.iter().any(|ignore| ignore == match_)
|| self.compiled_ignore_regex.is_match(match_)
|| self.ip.is_ignore(match_)
}
}
@ -181,7 +155,7 @@ impl Pattern {
}
/// Test-only constructor designed to be easy to call.
/// Constructs a full [`super::Patterns`] collection with one given pattern
/// Constructs a full super::Paterns collection with one given pattern
pub fn new_map(name: &str, regex: &str) -> Result<super::Patterns, String> {
Ok(std::iter::once((name.into(), Self::new(name, regex)?.into())).collect())
}
@ -198,7 +172,6 @@ pub mod tests {
ignore: Vec::new(),
ignore_regex: Vec::new(),
compiled_ignore_regex: RegexSet::default(),
ip: PatternIp::default(),
name: "".into(),
name_with_braces: "".into(),
}
@ -303,26 +276,6 @@ pub mod tests {
assert!(pattern.setup("name").is_err());
}
#[test]
fn setup_yml() {
let mut pattern: Pattern = serde_yaml::from_str("{}").unwrap();
assert!(pattern.setup("name").is_err());
let mut pattern: Pattern = serde_yaml::from_str(r#"regex: "[abc]""#).unwrap();
assert!(pattern.setup("name").is_ok());
let mut pattern: Pattern = serde_yaml::from_str(r#"type: ip"#).unwrap();
assert!(pattern.setup("name").is_ok());
let mut pattern: Pattern = serde_yaml::from_str(r#"type: ipv4"#).unwrap();
assert!(pattern.setup("name").is_ok());
let mut pattern: Pattern = serde_yaml::from_str(r#"type: ipv6"#).unwrap();
assert!(pattern.setup("name").is_ok());
assert!(serde_yaml::from_str::<Pattern>(r#"type: zblorg"#).is_err());
}
#[test]
fn is_ignore() {
let mut pattern;

View file

@ -1,239 +0,0 @@
use std::{
fmt::Display,
net::{IpAddr, Ipv4Addr, Ipv6Addr},
str::FromStr,
};
use super::*;
/// Stores an IP network address and its associated bitmask.
/// Invariant: the stored address is always the *network* address,
/// i.e. host bits are zeroed by the mask at construction time
/// (see `from_ip_and_mask`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Cidr {
    /// (network address, bitmask), e.g. (192.168.1.0, 255.255.255.0) for /24.
    IPv4((Ipv4Addr, Ipv4Addr)),
    /// (network address, bitmask).
    IPv6((Ipv6Addr, Ipv6Addr)),
}
impl FromStr for Cidr {
    type Err = String;

    /// Parses an `IP/MASK` string into a normalized [`Cidr`].
    ///
    /// The address part is normalized first (IPv6-mapped IPv4 becomes
    /// plain IPv4), then host bits are zeroed according to the mask.
    fn from_str(cidr: &str) -> Result<Self, Self::Err> {
        // Split on the first '/': left is the address, right is the prefix length.
        let Some((ip, mask)) = cidr.split_once('/') else {
            return Err(format!(
                "malformed IP/MASK. '{cidr}' doesn't contain any '/'"
            ));
        };
        let ip = normalize(ip).map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?;
        let mask_count: u8 = mask
            .parse()
            .map_err(|err| format!("malformed mask '{mask}' in '{cidr}': {err}"))?;
        // Any mask size is accepted for now, as useless as /0 or /1 may seem;
        // a warning for masks covering huge parts of the Internet could be
        // reintroduced later.
        Self::from_ip_and_mask(ip, mask_count)
    }
}
impl Display for Cidr {
    /// Formats the CIDR in canonical `network/prefix` form,
    /// e.g. `192.168.1.0/24`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let network = self.network();
        let prefix = self.mask();
        write!(f, "{}/{}", network, prefix)
    }
}
impl Cidr {
    /// Builds a [`Cidr`] from an already-normalized IP and a prefix length,
    /// zeroing the host bits so the stored address is the network address.
    fn from_ip_and_mask(ip: IpAddr, mask_count: u8) -> Result<Self, String> {
        match ip {
            IpAddr::V4(mut ipv4_addr) => {
                // Create bitmask
                let mask = mask_to_ipv4(mask_count)?;
                // Normalize IP from mask
                ipv4_addr &= mask;
                Ok(Cidr::IPv4((ipv4_addr, mask)))
            }
            IpAddr::V6(mut ipv6_addr) => {
                let mask = mask_to_ipv6(mask_count)?;
                // Normalize IP from mask
                ipv6_addr &= mask;
                Ok(Cidr::IPv6((ipv6_addr, mask)))
            }
        }
    }
    /// Whether an IP is included in this IP CIDR.
    /// If IP is not the same version as CIDR, returns always false.
    pub fn includes(&self, ip: &IpAddr) -> bool {
        // Normalize first so an IPv6-mapped IPv4 compares against IPv4 CIDRs.
        let ip = normalize_ip(*ip);
        match self {
            Cidr::IPv4((network_ipv4, mask)) => match ip {
                IpAddr::V6(_) => false,
                IpAddr::V4(ipv4_addr) => *network_ipv4 == ipv4_addr & mask,
            },
            Cidr::IPv6((network_ipv6, mask)) => match ip {
                IpAddr::V4(_) => false,
                IpAddr::V6(ipv6_addr) => *network_ipv6 == ipv6_addr & mask,
            },
        }
    }
    /// The network address (host bits already zeroed by construction).
    fn network(&self) -> IpAddr {
        match self {
            Cidr::IPv4((network, _)) => IpAddr::from(*network),
            Cidr::IPv6((network, _)) => IpAddr::from(*network),
        }
    }
    /// The prefix length, recovered by counting the set bits of the stored
    /// bitmask. Uses the intrinsic `count_ones()` instead of the previous
    /// manual 128-iteration shift-and-test loop (same result, O(1)).
    fn mask(&self) -> u8 {
        let ones = match self {
            Cidr::IPv4((_, mask)) => mask.to_bits().count_ones(),
            Cidr::IPv6((_, mask)) => mask.to_bits().count_ones(),
        };
        // A bitmask has at most 128 set bits, so this cast is lossless.
        ones as u8
    }
}
#[cfg(test)]
mod cidr_tests {
    use std::{
        net::{IpAddr, Ipv4Addr, Ipv6Addr},
        str::FromStr,
    };
    use super::Cidr;
    /// IPv4 parsing: the stored address must be the network address
    /// (host bits zeroed by the mask).
    #[test]
    fn cidrv4_from_str() {
        assert_eq!(
            Ok(Cidr::IPv4((Ipv4Addr::new(192, 168, 1, 4), u32::MAX.into()))),
            Cidr::from_str("192.168.1.4/32")
        );
        // Test IP normalization from mask
        assert_eq!(
            Ok(Cidr::IPv4((
                Ipv4Addr::new(192, 168, 1, 0),
                Ipv4Addr::new(255, 255, 255, 0),
            ))),
            Cidr::from_str("192.168.1.4/24")
        );
        // One more passing case for good measure
        assert_eq!(
            Ok(Cidr::IPv4((
                Ipv4Addr::new(1, 1, 0, 0),
                Ipv4Addr::new(255, 255, 0, 0),
            ))),
            Cidr::from_str("1.1.248.25/16")
        );
        // Errors
        assert!(Cidr::from_str("256.1.1.1/8").is_err());
        // /0 and /1 are currently accepted (see FromStr), so these stay disabled:
        // assert!(Cidr::from_str("1.1.1.1/0").is_err());
        // assert!(Cidr::from_str("1.1.1.1/1").is_err());
        // assert!(Cidr::from_str("1.1.1.1.1").is_err());
        assert!(Cidr::from_str("1.1.1.1/16/16").is_err());
    }
    /// IPv6 parsing, including compressed (`::`) and mixed-case forms.
    #[test]
    fn cidrv6_from_str() {
        assert_eq!(
            Ok(Cidr::IPv6((
                Ipv6Addr::new(0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68),
                u128::MAX.into()
            ))),
            Cidr::from_str("fe80::df68:2ee:e4f9:e68/128")
        );
        // Test IP normalization from mask
        assert_eq!(
            Ok(Cidr::IPv6((
                Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9de5, 0, 0, 0, 0),
                Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0),
            ))),
            Cidr::from_str("2001:db8:85a3:9de5::8a2e:370:7334/64")
        );
        // One more passing case for good measure: /56 cuts inside a block
        assert_eq!(
            Ok(Cidr::IPv6((
                Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9d00, 0, 0, 0, 0),
                Ipv6Addr::new(
                    u16::MAX,
                    u16::MAX,
                    u16::MAX,
                    u16::MAX - u8::MAX as u16,
                    0,
                    0,
                    0,
                    0
                ),
            ))),
            Cidr::from_str("2001:db8:85a3:9d00::8a2e:370:7334/56")
        );
        assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/56").is_ok());
        assert!(Cidr::from_str("2001:DB8:85A3:0:0:8A2E:370:7334/56").is_ok());
        // Errors
        assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:g334/56").is_err());
        // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/0").is_err());
        // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/1").is_err());
        assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334:11/56").is_err());
        assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/11/56").is_err());
    }
    /// Membership: IPs inside/outside the range, plus version mismatch.
    #[test]
    fn cidrv4_includes() {
        let cidr = Cidr::from_str("192.168.1.0/24").unwrap();
        assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0))));
        assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))));
        assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 234))));
        assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1))));
        // An IPv6 address is never inside an IPv4 CIDR
        assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new(
            0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68
        ),)));
    }
    #[test]
    fn cidrv6_includes() {
        let cidr = Cidr::from_str("2001:db8:85a3:9d00:0:8a2e:370:7334/56").unwrap();
        assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new(
            0x2001, 0x0db8, 0x85a3, 0x9d00, 0, 0, 0, 0
        ))));
        assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new(
            0x2001, 0x0db8, 0x85a3, 0x9da4, 0x34fc, 0x0d8b, 0xffff, 0x1111
        ))));
        assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new(
            0x2001, 0x0db8, 0x85a3, 0xad00, 0, 0, 0, 1
        ))));
        // An IPv4 address is never inside an IPv6 CIDR
        assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0))));
    }
    /// Display always emits the canonical network/prefix form,
    /// whatever host bits the input contained.
    #[test]
    fn cidr_display() {
        let cidrs = [
            ("192.168.1.4/32", "192.168.1.4/32"),
            ("192.168.1.4/24", "192.168.1.0/24"),
            ("1.1.248.25/16", "1.1.0.0/16"),
            ("fe80::df68:2ee:e4f9:e68/128", "fe80::df68:2ee:e4f9:e68/128"),
            (
                "2001:db8:85a3:9de5::8a2e:370:7334/64",
                "2001:db8:85a3:9de5::/64",
            ),
            (
                "2001:db8:85a3:9d00::8a2e:370:7334/56",
                "2001:db8:85a3:9d00::/56",
            ),
        ];
        for (from, to) in cidrs {
            assert_eq!(Cidr::from_str(from).unwrap().to_string(), to);
        }
    }
}

View file

@ -1,730 +0,0 @@
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
str::FromStr,
};
use serde::{Deserialize, Serialize};
use tracing::warn;
use cidr::Cidr;
use utils::*;
mod cidr;
mod utils;
/// The kind of match a pattern extracts.
///
/// `Regex` (the default) uses a user-provided regular expression;
/// the `Ip*` kinds use a built-in regex (see [`PatternType::regex`])
/// and enable IP-specific features such as masks and CIDR ignores.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub enum PatternType {
    /// User-supplied regex (default).
    #[default]
    #[serde(rename = "regex")]
    Regex,
    /// Matches both IPv4 and IPv6 addresses.
    #[serde(rename = "ip")]
    Ip,
    /// Matches IPv4 addresses only.
    #[serde(rename = "ipv4")]
    Ipv4,
    /// Matches IPv6 addresses only.
    #[serde(rename = "ipv6")]
    Ipv6,
}
impl PatternType {
    /// Whether this is the default type ([`PatternType::Regex`]).
    /// Used by serde to skip serializing the default value.
    pub fn is_default(&self) -> bool {
        *self == PatternType::default()
    }
    /// Returns the built-in regex for the IP pattern types,
    /// or `None` for [`PatternType::Regex`].
    ///
    /// The alternation order below is load-bearing: regex alternation is
    /// leftmost-preferring, so longer/greedier forms must come first.
    pub fn regex(&self) -> Option<String> {
        // Those orders of preference are very important for <ip>
        // patterns that have greedy catch-all regexes before or after them,
        // for example: "Failed password .*<ip>.*"
        let num4 = [
            // One IPv4 decimal octet (0-255), no leading zeros.
            // Order is important, first is preferred.
            // first 25x
            "(?:25[0-5]",
            // then 2xx
            "2[0-4][0-9]",
            // then 1xx
            "1[0-9][0-9]",
            // then 0xx
            "[1-9][0-9]",
            // then 0x
            "[0-9])",
        ]
        .join("|");
        // One IPv6 hextet: 1 to 4 hex digits.
        let numsix = "[0-9a-fA-F]{1,4}";
        let ipv4 = format!(r#"{num4}(?:\.{num4}){{3}}"#);
        #[allow(clippy::useless_format)]
        let ipv6 = [
            // We're unrolling all possibilities, longer IPv6 first,
            // to make it super-greedy,
            // more than an eventual .* before or after <ip> ,
            // that would "eat" its first or last blocks.
            // Order is important, first is preferred.
            // We put IPv4-suffixed regexes first
            format!(r#"::(?:ffff(?::0{{1,4}})?:)?{ipv4}"#),
            format!(r#"(?:{numsix}:){{1,4}}:{ipv4}"#),
            // Then link-local addresses with interface name
            format!(r#"fe80:(?::[0-9a-fA-F]{{0,4}}){{0,4}}%[0-9a-zA-Z]+"#),
            // Full IPv6
            format!("(?:{numsix}:){{7}}{numsix}"),
            // 1 block cut
            format!("(?:{numsix}:){{7}}:"),
            format!("(?:{numsix}:){{6}}:{numsix}"),
            format!("(?:{numsix}:){{5}}(?::{numsix}){{2}}"),
            format!("(?:{numsix}:){{4}}(?::{numsix}){{3}}"),
            format!("(?:{numsix}:){{3}}(?::{numsix}){{4}}"),
            format!("(?:{numsix}:){{2}}(?::{numsix}){{5}}"),
            format!("{numsix}:(?:(?::{numsix}){{6}})"),
            format!(":(?:(?::{numsix}){{7}})"),
            // 2 blocks cut
            format!("(?:{numsix}:){{6}}:"),
            format!("(?:{numsix}:){{5}}:{numsix}"),
            format!("(?:{numsix}:){{4}}(?::{numsix}){{2}}"),
            format!("(?:{numsix}:){{3}}(?::{numsix}){{3}}"),
            format!("(?:{numsix}:){{2}}(?::{numsix}){{4}}"),
            format!("{numsix}:(?:(?::{numsix}){{5}})"),
            format!(":(?:(?::{numsix}){{6}})"),
            // 3 blocks cut
            format!("(?:{numsix}:){{5}}:"),
            format!("(?:{numsix}:){{4}}:{numsix}"),
            format!("(?:{numsix}:){{3}}(?::{numsix}){{2}}"),
            format!("(?:{numsix}:){{2}}(?::{numsix}){{3}}"),
            format!("{numsix}:(?:(?::{numsix}){{4}})"),
            format!(":(?:(?::{numsix}){{5}})"),
            // 4 blocks cut
            format!("(?:{numsix}:){{4}}:"),
            format!("(?:{numsix}:){{3}}:{numsix}"),
            format!("(?:{numsix}:){{2}}(?::{numsix}){{2}}"),
            format!("{numsix}:(?:(?::{numsix}){{3}})"),
            format!(":(?:(?::{numsix}){{4}})"),
            // 5 blocks cut
            format!("(?:{numsix}:){{3}}:"),
            format!("(?:{numsix}:){{2}}:{numsix}"),
            format!("{numsix}:(?:(?::{numsix}){{2}})"),
            format!(":(?:(?::{numsix}){{3}})"),
            // 6 blocks cut
            format!("(?:{numsix}:){{2}}:"),
            format!("{numsix}::{numsix}"),
            format!(":(?:(?::{numsix}){{2}})"),
            // 7 blocks cut
            format!("{numsix}::"),
            format!("::{numsix}"),
            // special cuts
            // 8 blocks cut
            format!("::"),
        ]
        .join("|");
        match self {
            PatternType::Ipv4 => Some(ipv4),
            PatternType::Ipv6 => Some(ipv6),
            // For the combined type, IPv4 is tried before IPv6.
            PatternType::Ip => Some(format!("{ipv4}|{ipv6}")),
            PatternType::Regex => None,
        }
    }
}
/// IP-specific configuration of a pattern: the matcher kind,
/// optional normalization masks, and CIDR ranges to ignore.
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)]
pub struct PatternIp {
    /// Which matcher this pattern uses (regex, ip, ipv4, ipv6).
    #[serde(
        default,
        rename = "type",
        skip_serializing_if = "PatternType::is_default"
    )]
    pub pattern_type: PatternType,
    /// Prefix length applied to IPv4 matches during normalization.
    #[serde(default, rename = "ipv4mask")]
    pub ipv4_mask: Option<u8>,
    /// Prefix length applied to IPv6 matches during normalization.
    #[serde(default, rename = "ipv6mask")]
    pub ipv6_mask: Option<u8>,
    // Bitmask precomputed from `ipv4_mask` by `setup()`.
    #[serde(skip)]
    pub ipv4_bitmask: Option<Ipv4Addr>,
    // Bitmask precomputed from `ipv6_mask` by `setup()`.
    #[serde(skip)]
    pub ipv6_bitmask: Option<Ipv6Addr>,
    /// CIDR ranges whose matches are ignored, as written in the config.
    /// Emptied by `setup()` once parsed into `ignore_cidr_normalized`.
    #[serde(default, rename = "ignorecidr", skip_serializing_if = "Vec::is_empty")]
    pub ignore_cidr: Vec<String>,
    // Parsed, normalized form of `ignore_cidr`, filled by `setup()`.
    #[serde(skip)]
    pub ignore_cidr_normalized: Vec<Cidr>,
}
impl PatternIp {
    /// Read accessor for the configured [`PatternType`].
    pub fn pattern_type(&self) -> PatternType {
        self.pattern_type
    }
    /// Setup the IP-specific part of a Pattern.
    /// Returns an optional regex string if of type IP, else None
    /// Returns an error if one of:
    /// - the type is not IP but there is IP-specific config
    /// - the type is IP/IPv4/IPv6 and there is invalid IP-specific config
    /// - the type is IPv4 and there is IPv6-specific config
    /// - the type is IPv6 and there is IPv4-specific config
    ///
    /// Side effects: fills `ipv4_bitmask`/`ipv6_bitmask` and
    /// `ignore_cidr_normalized`, then clears the raw `ignore_cidr` list.
    pub fn setup(&mut self) -> Result<Option<String>, String> {
        match self.pattern_type {
            PatternType::Regex => {
                // A plain regex pattern must not carry any IP-only option.
                if self.ipv4_mask.is_some() {
                    return Err("ipv4mask is only allowed for patterns of `type: 'ip'`".into());
                }
                if self.ipv6_mask.is_some() {
                    return Err("ipv6mask is only allowed for patterns of `type: 'ip'`".into());
                }
                if !self.ignore_cidr.is_empty() {
                    return Err("ignorecidr is only allowed for patterns of `type: 'ip'`".into());
                }
            }
            PatternType::Ip | PatternType::Ipv4 | PatternType::Ipv6 => {
                // Precompute the bitmasks once so normalize() stays cheap.
                if let Some(mask) = self.ipv4_mask {
                    self.ipv4_bitmask = Some(mask_to_ipv4(mask)?);
                }
                if let Some(mask) = self.ipv6_mask {
                    self.ipv6_bitmask = Some(mask_to_ipv6(mask)?);
                }
                for cidr in &self.ignore_cidr {
                    let cidr_normalized = Cidr::from_str(cidr)?;
                    let cidr_normalized_string = cidr_normalized.to_string();
                    // Nudge users towards writing the canonical network form.
                    if &cidr_normalized_string != cidr {
                        warn!(
                            "CIDR {cidr} should be rewritten in its normalized form: {cidr_normalized_string}"
                        );
                    }
                    self.ignore_cidr_normalized.push(cidr_normalized);
                }
                // The raw strings are no longer needed once parsed.
                self.ignore_cidr = Vec::default();
                // Version-specific consistency checks (after parsing, so the
                // normalized CIDRs can be inspected).
                match self.pattern_type {
                    PatternType::Regex => (),
                    PatternType::Ip => (),
                    PatternType::Ipv4 => {
                        if self.ipv6_mask.is_some() {
                            return Err("An IPv4-only pattern can't have an ipv6mask".into());
                        }
                        for cidr in &self.ignore_cidr_normalized {
                            if let Cidr::IPv6(_) = cidr {
                                return Err(format!(
                                    "An IPv4-only pattern can't have an IPv6 ({}) as an ignore",
                                    cidr
                                ));
                            }
                        }
                    }
                    PatternType::Ipv6 => {
                        if self.ipv4_mask.is_some() {
                            return Err("An IPv6-only pattern can't have an ipv4mask".into());
                        }
                        for cidr in &self.ignore_cidr_normalized {
                            if let Cidr::IPv4(_) = cidr {
                                return Err(format!(
                                    "An IPv6-only pattern can't have an IPv4 ({}) as an ignore",
                                    cidr
                                ));
                            }
                        }
                    }
                }
            }
        }
        Ok(self.pattern_type.regex())
    }
    /// Whether the IP match is included in one of [`Self::ignore_cidr`]
    pub fn is_ignore(&self, match_: &str) -> bool {
        // A match that isn't a parseable IP can never be covered by a CIDR.
        let match_ip = match IpAddr::from_str(match_) {
            Ok(ip) => ip,
            Err(_) => return false,
        };
        self.ignore_cidr_normalized
            .iter()
            .any(|cidr| cidr.includes(&match_ip))
    }
    /// Normalize the pattern.
    /// This should happen after checking on ignores.
    /// No-op when the pattern is not an IP.
    /// Otherwise BitAnd the IP with its configured mask,
    /// and add the /<mask>
    pub fn normalize(&self, match_: &mut String) {
        // First decide whether the match is an IP of a version this
        // pattern accepts; anything else leaves `match_` untouched.
        let ip = match self.pattern_type {
            PatternType::Regex => None,
            // Attempt to normalize only if type is IP*
            _ => normalize(match_)
                .ok()
                .and_then(|ip| match self.pattern_type {
                    PatternType::Ip => Some(ip),
                    PatternType::Ipv4 => match ip {
                        IpAddr::V4(_) => Some(ip),
                        _ => None,
                    },
                    PatternType::Ipv6 => match ip {
                        IpAddr::V6(_) => Some(ip),
                        _ => None,
                    },
                    _ => None,
                }),
        };
        if let Some(ip) = ip {
            *match_ = match ip {
                IpAddr::V4(addr) => match self.ipv4_bitmask {
                    Some(bitmask) => {
                        // Mask out host bits and append the prefix length.
                        format!("{}/{}", addr & bitmask, self.ipv4_mask.unwrap_or(32))
                    }
                    // No mask configured: just the normalized address.
                    None => addr.to_string(),
                },
                IpAddr::V6(addr) => match self.ipv6_bitmask {
                    Some(bitmask) => {
                        format!("{}/{}", addr & bitmask, self.ipv6_mask.unwrap_or(128))
                    }
                    None => addr.to_string(),
                },
            };
        }
    }
}
#[cfg(test)]
mod patternip_tests {
use std::net::{Ipv4Addr, Ipv6Addr};
use tokio::{fs::read_to_string, task::JoinSet};
use crate::{
concepts::{Action, Duplicate, Filter, Pattern, now},
daemon::{React, tests::TestBed},
};
use super::{Cidr, PatternIp, PatternType};
#[test]
fn test_setup_type_regex() {
let mut regex_struct = PatternIp {
pattern_type: PatternType::Regex,
..Default::default()
};
let copy = regex_struct.clone();
// All default patterns is ok for regex type
assert!(regex_struct.setup().is_ok());
// Setup changes nothing
assert_eq!(regex_struct, copy);
// Any non-default field is err
let mut regex_struct = PatternIp {
pattern_type: PatternType::Regex,
ipv4_mask: Some(24),
..Default::default()
};
assert!(regex_struct.setup().is_err());
let mut regex_struct = PatternIp {
pattern_type: PatternType::Regex,
ipv6_mask: Some(64),
..Default::default()
};
assert!(regex_struct.setup().is_err());
let mut regex_struct = PatternIp {
pattern_type: PatternType::Regex,
ignore_cidr: vec!["192.168.1/24".into()],
..Default::default()
};
assert!(regex_struct.setup().is_err());
}
#[test]
fn test_setup_type_ip() {
for pattern_type in [PatternType::Ip, PatternType::Ipv4, PatternType::Ipv6] {
let mut ip_struct = PatternIp {
pattern_type,
..Default::default()
};
assert!(ip_struct.setup().is_ok());
let mut ip_struct = PatternIp {
pattern_type,
ipv4_mask: Some(24),
..Default::default()
};
match pattern_type {
PatternType::Ipv6 => assert!(ip_struct.setup().is_err()),
_ => {
assert!(ip_struct.setup().is_ok());
assert_eq!(
ip_struct.ipv4_bitmask,
Some(Ipv4Addr::new(255, 255, 255, 0))
);
}
}
let mut ip_struct = PatternIp {
pattern_type,
ipv6_mask: Some(64),
..Default::default()
};
match pattern_type {
PatternType::Ipv4 => assert!(ip_struct.setup().is_err()),
_ => {
assert!(ip_struct.setup().is_ok());
assert_eq!(
ip_struct.ipv6_bitmask,
Some(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0))
);
}
}
let mut ip_struct = PatternIp {
pattern_type,
ignore_cidr: vec!["192.168.1.0/24".into()],
..Default::default()
};
match pattern_type {
PatternType::Ipv6 => assert!(ip_struct.setup().is_err()),
_ => {
assert!(ip_struct.setup().is_ok());
assert_eq!(
ip_struct.ignore_cidr_normalized,
vec![Cidr::IPv4((
Ipv4Addr::new(192, 168, 1, 0),
Ipv4Addr::new(255, 255, 255, 0)
))]
);
}
}
let mut ip_struct = PatternIp {
pattern_type,
ignore_cidr: vec!["::ffff:192.168.1.0/24".into()],
..Default::default()
};
match pattern_type {
PatternType::Ipv6 => assert!(ip_struct.setup().is_err()),
_ => {
assert!(ip_struct.setup().is_ok());
assert_eq!(
ip_struct.ignore_cidr_normalized,
vec![Cidr::IPv4((
Ipv4Addr::new(192, 168, 1, 0),
Ipv4Addr::new(255, 255, 255, 0)
))]
);
}
}
let mut ip_struct = PatternIp {
pattern_type,
ignore_cidr: vec!["2001:db8:85a3:9de5::8a2e:370:7334/64".into()],
..Default::default()
};
match pattern_type {
PatternType::Ipv4 => assert!(ip_struct.setup().is_err()),
_ => {
assert!(ip_struct.setup().is_ok());
assert_eq!(
ip_struct.ignore_cidr_normalized,
vec![Cidr::IPv6((
Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9de5, 0, 0, 0, 0),
Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0),
))]
);
}
}
}
}
#[test]
fn test_is_ignore() {
let mut ip_struct = PatternIp {
pattern_type: PatternType::Ip,
ignore_cidr: vec!["10.0.0.0/8".into(), "2001:db8:85a3:9de5::/64".into()],
..Default::default()
};
ip_struct.setup().unwrap();
assert!(!ip_struct.is_ignore("prout"));
assert!(!ip_struct.is_ignore("1.1.1.1"));
assert!(!ip_struct.is_ignore("11.1.1.1"));
assert!(!ip_struct.is_ignore("2001:db8:85a3:9de6::1"));
assert!(ip_struct.is_ignore("10.1.1.1"));
assert!(ip_struct.is_ignore("2001:db8:85a3:9de5::1"));
}
#[test]
fn test_normalize() {
let ipv4_32 = "1.1.1.1";
let ipv4_32_norm = "1.1.1.1";
let ipv4_24 = "1.1.1.0";
let ipv4_24_norm = "1.1.1.0";
let ipv4_24_mask = "1.1.1.0/24";
let ipv6_128 = "2001:db8:85a3:9de5:0:0:01:02";
let ipv6_128_norm = "2001:db8:85a3:9de5::1:2";
let ipv6_64 = "2001:db8:85a3:9de5:0:0:0:0";
let ipv6_64_norm = "2001:db8:85a3:9de5::";
let ipv6_64_mask = "2001:db8:85a3:9de5::/64";
for (ipv4_mask, ipv6_mask) in [(Some(24), None), (None, Some(64)), (Some(24), Some(64))] {
let mut ip_struct = PatternIp {
pattern_type: PatternType::Ip,
ipv4_mask,
ipv6_mask,
..Default::default()
};
ip_struct.setup().unwrap();
let mut ipv4_32_modified = ipv4_32.to_string();
let mut ipv4_24_modified = ipv4_24.to_string();
let mut ipv6_128_modified = ipv6_128.to_string();
let mut ipv6_64_modified = ipv6_64.to_string();
ip_struct.normalize(&mut ipv4_32_modified);
ip_struct.normalize(&mut ipv4_24_modified);
ip_struct.normalize(&mut ipv6_128_modified);
ip_struct.normalize(&mut ipv6_64_modified);
match ipv4_mask {
Some(_) => {
// modified with mask
assert_eq!(
ipv4_32_modified, ipv4_24_mask,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
assert_eq!(
ipv4_24_modified, ipv4_24_mask,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
}
None => {
// only normaized
assert_eq!(
ipv4_32_modified, ipv4_32_norm,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
assert_eq!(
ipv4_24_modified, ipv4_24_norm,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
}
}
match ipv6_mask {
Some(_) => {
// modified with mask
assert_eq!(
ipv6_128_modified, ipv6_64_mask,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
assert_eq!(
ipv6_64_modified, ipv6_64_mask,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
}
None => {
// only normalized
assert_eq!(
ipv6_128_modified, ipv6_128_norm,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
assert_eq!(
ipv6_64_modified, ipv6_64_norm,
"ipv4mask: {:?}, ipv6mask: {:?}",
ipv4_mask, ipv6_mask
);
}
}
}
}
pub const VALID_IPV4: [&str; 8] = [
"252.4.92.250",
"212.4.92.210",
"112.4.92.110",
"83.4.92.35",
"83.4.92.0",
"3.254.92.4",
"1.2.3.4",
"255.255.255.255",
];
pub const VALID_IPV6: [&str; 42] = [
// all accepted characters
"0123:4567:89:ab:cdef:AB:CD:EF",
// ipv6-mapped ipv4
"::ffff:1.2.3.4",
"ffff::1.2.3.4",
// 8 blocks
"1111:2:3:4:5:6:7:8888",
// 7 blocks
"::2:3:4:5:6:7:8888",
"1111::3:4:5:6:7:8888",
"1111:2::4:5:6:7:8888",
"1111:2:3::5:6:7:8888",
"1111:2:3:4::6:7:8888",
"1111:2:3:4:5::7:8888",
"1111:2:3:4:5:6::8888",
"1111:2:3:4:5:6:7::",
// 6 blocks
"::3:4:5:6:7:8888",
"1111::4:5:6:7:8888",
"1111:2::5:6:7:8888",
"1111:2:3::6:7:8888",
"1111:2:3:4::7:8888",
"1111:2:3:4:5::8888",
"1111:2:3:4:5:6::",
// 5 blocks
"::4:5:6:7:8888",
"1111::5:6:7:8888",
"1111:2::6:7:8888",
"1111:2:3::7:8888",
"1111:2:3:4::8888",
"1111:2:3:4:5::",
// 4 blocks
"::5:6:7:8888",
"1111::6:7:8888",
"1111:2::7:8888",
"1111:2:3::8888",
"1111:2:3:4::",
// 3 blocks
"::6:7:8888",
"1111::7:8888",
"1111:2::8888",
"1111:2:3::",
// 2 blocks
"::7:8888",
"1111::8888",
"1111:2::",
// 1 block
"::8",
"::8888",
"1::",
"1111::",
// 0 block
"::",
];
#[test]
fn test_ip_regexes() {
for pattern_type in [PatternType::Ip, PatternType::Ipv4, PatternType::Ipv6] {
let mut pattern = Pattern {
ip: PatternIp {
pattern_type,
..Default::default()
},
..Default::default()
};
assert!(pattern.setup("zblorg").is_ok());
let regex = pattern.compiled().unwrap();
let accepts_ipv4 = pattern_type == PatternType::Ip || pattern_type == PatternType::Ipv4;
let accepts_ipv6 = pattern_type == PatternType::Ip || pattern_type == PatternType::Ipv6;
macro_rules! assert2 {
($a:expr) => {
assert!($a, "PatternType: {pattern_type:?}");
};
}
for ip in VALID_IPV4 {
assert2!(accepts_ipv4 == regex.is_match(ip));
}
assert2!(!regex.is_match(".1.2.3.4"));
assert2!(!regex.is_match(" 1.2.3.4"));
assert2!(!regex.is_match("1.2.3.4 "));
assert2!(!regex.is_match("1.2. 3.4"));
assert2!(!regex.is_match("257.2.3.4"));
assert2!(!regex.is_match("074.2.3.4"));
assert2!(!regex.is_match("1.2.3.4.5"));
assert2!(!regex.is_match("1.2..4"));
assert2!(!regex.is_match("1.2..3.4"));
for ip in VALID_IPV6 {
assert2!(accepts_ipv6 == regex.is_match(ip));
}
assert2!(!regex.is_match("1:"));
assert2!(!regex.is_match("1:::"));
assert2!(!regex.is_match("1:::2"));
assert2!(!regex.is_match("1:2:3:4:5:6:7:8:9"));
assert2!(!regex.is_match("1:23456:3:4:5:6:7:8"));
assert2!(!regex.is_match("1:2:3:4:5:6:7:8:"));
}
}
#[tokio::test(flavor = "multi_thread")]
async fn ip_pattern_matches() {
let mut join_set = JoinSet::new();
for ip in VALID_IPV4.iter().chain(&VALID_IPV6) {
for line in [
format!("borned {ip} test"),
//
format!("right-unborned {ip} text"),
format!("right-unborned {ip}text"),
format!("right-unborned {ip}:"),
//
format!("left-unborned text {ip}"),
format!("left-unborned text{ip}"),
format!("left-unborned :{ip}"),
//
format!("full-unborned text {ip} text"),
format!("full-unborned text{ip} text"),
format!("full-unborned text {ip}text"),
format!("full-unborned text{ip}text"),
format!("full-unborned :{ip}:"),
format!("full-unborned : {ip}:"),
] {
join_set.spawn(tokio::spawn(async move {
let bed = TestBed::default();
let filter = Filter::new_static(
vec![Action::new(
vec!["sh", "-c", &format!("echo <ip> >> {}", &bed.out_file)],
None,
false,
"test",
"test",
"a1",
&bed.ip_patterns,
0,
)],
vec![
"^borned <ip> test",
"^right-unborned <ip>.*",
"^left-unborned .*<ip>",
"^full-unborned .*<ip>.*",
],
None,
None,
"test",
"test",
Duplicate::Ignore,
&bed.ip_patterns,
);
let bed = bed.part2(filter, now(), None).await;
assert_eq!(
bed.manager.handle_line(&line, now()).await,
React::Trigger,
"line: {line}"
);
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
assert_eq!(
&read_to_string(&bed.out_file).await.unwrap().trim_end(),
ip,
"line: {line}"
);
}));
}
}
join_set.join_all().await;
}
}

View file

@ -1,122 +0,0 @@
use std::{
net::{AddrParseError, IpAddr, Ipv4Addr, Ipv6Addr},
str::FromStr,
};
/// Normalize a string as an IP address.
/// IPv6-mapped IPv4 addresses are casted to IPv4.
pub fn normalize(ip: &str) -> Result<IpAddr, AddrParseError> {
IpAddr::from_str(ip).map(normalize_ip)
}
/// Normalizes an [`IpAddr`]: an IPv6-mapped IPv4 address
/// (`::ffff:a.b.c.d`) is converted to its IPv4 form;
/// any other address is returned unchanged.
pub fn normalize_ip(ip: IpAddr) -> IpAddr {
    if let IpAddr::V6(ipv6) = ip {
        if let Some(ipv4) = ipv6.to_ipv4_mapped() {
            return IpAddr::V4(ipv4);
        }
    }
    ip
}
/// Builds the [`Ipv4Addr`] bitmask corresponding to a prefix length,
/// e.g. 24 -> 255.255.255.0.
///
/// # Errors
/// Fails when `mask_count` exceeds 32.
pub fn mask_to_ipv4(mask_count: u8) -> Result<Ipv4Addr, String> {
    if mask_count > 32 {
        return Err(format!(
            "an IPv4 mask must be 32 max. {mask_count} is too big."
        ));
    }
    // A shift by 32 would overflow, so /0 is handled separately.
    let bits = if mask_count == 0 {
        0u32
    } else {
        u32::MAX << (32 - mask_count)
    };
    Ok(Ipv4Addr::from_bits(bits))
}
/// Builds the [`Ipv6Addr`] bitmask corresponding to a prefix length,
/// e.g. 64 -> ffff:ffff:ffff:ffff::.
/// (Doc and error message used to say "IPv4" — copy-paste slip, fixed.)
///
/// # Errors
/// Fails when `mask_count` exceeds 128.
pub fn mask_to_ipv6(mask_count: u8) -> Result<Ipv6Addr, String> {
    if mask_count > 128 {
        Err(format!(
            "an IPv6 mask must be 128 max. {mask_count} is too big."
        ))
    } else {
        // A shift by 128 would overflow, so /0 is handled separately.
        let mask = match mask_count {
            0 => 0u128,
            n => u128::MAX << (128 - n),
        };
        Ok(Ipv6Addr::from_bits(mask))
    }
}
#[cfg(test)]
mod utils_tests {
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
    use super::{mask_to_ipv4, mask_to_ipv6, normalize};
    /// Parsing + normalization: IPv6-mapped IPv4 collapses to IPv4.
    #[test]
    fn test_normalize_ip() {
        assert_eq!(
            normalize("83.44.23.14"),
            Ok(IpAddr::V4(Ipv4Addr::new(83, 44, 23, 14)))
        );
        assert_eq!(
            normalize("2001:db8:85a3::8a2e:370:7334"),
            Ok(IpAddr::V6(Ipv6Addr::new(
                0x2001, 0xdb8, 0x85a3, 0x0, 0x0, 0x8a2e, 0x370, 0x7334
            )))
        );
        // IPv6-mapped IPv4 addresses are returned as plain IPv4
        assert_eq!(
            normalize("::ffff:192.168.1.34"),
            Ok(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 34)))
        );
        assert_eq!(
            normalize("::ffff:1.2.3.4"),
            Ok(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)))
        );
        // octal numbers are forbidden
        assert!(normalize("083.44.23.14").is_err());
    }
    /// Prefix-length -> IPv4 bitmask, including out-of-range rejection.
    #[test]
    fn test_mask_to_ipv4() {
        assert!(mask_to_ipv4(33).is_err());
        assert!(mask_to_ipv4(100).is_err());
        assert_eq!(mask_to_ipv4(16), Ok(Ipv4Addr::new(255, 255, 0, 0)));
        assert_eq!(mask_to_ipv4(24), Ok(Ipv4Addr::new(255, 255, 255, 0)));
        assert_eq!(mask_to_ipv4(25), Ok(Ipv4Addr::new(255, 255, 255, 128)));
        assert_eq!(mask_to_ipv4(26), Ok(Ipv4Addr::new(255, 255, 255, 192)));
        assert_eq!(mask_to_ipv4(32), Ok(Ipv4Addr::new(255, 255, 255, 255)));
    }
    /// Prefix-length -> IPv6 bitmask, including out-of-range rejection.
    #[test]
    fn test_mask_to_ipv6() {
        assert!(mask_to_ipv6(129).is_err());
        assert!(mask_to_ipv6(254).is_err());
        assert_eq!(
            mask_to_ipv6(56),
            Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xff00, 0, 0, 0, 0))
        );
        assert_eq!(
            mask_to_ipv6(64),
            Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0))
        );
        assert_eq!(
            mask_to_ipv6(112),
            Ok(Ipv6Addr::new(
                0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0
            ))
        );
        assert_eq!(
            mask_to_ipv6(128),
            Ok(Ipv6Addr::new(
                0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff
            ))
        );
    }
}

View file

@ -1,218 +0,0 @@
use std::{collections::BTreeMap, io::Error, path, process::Stdio};
#[cfg(target_os = "macos")]
use std::os::darwin::fs::MetadataExt;
#[cfg(target_os = "freebsd")]
use std::os::freebsd::fs::MetadataExt;
#[cfg(target_os = "illumos")]
use std::os::illumos::fs::MetadataExt;
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
#[cfg(target_os = "netbsd")]
use std::os::netbsd::fs::MetadataExt;
#[cfg(target_os = "openbsd")]
use std::os::openbsd::fs::MetadataExt;
#[cfg(target_os = "solaris")]
use std::os::solaris::fs::MetadataExt;
use serde::{Deserialize, Serialize};
use tokio::{
fs,
process::{Child, Command},
};
use tracing::{debug, warn};
// TODO commented options block execution of program,
// while developping in my home directory.
// Some options may still be useful in production environments.
/// Hardening options passed to `run0` by default when launching a plugin.
/// User-supplied `systemd_options` override these entries
/// (see `Plugin::systemd_setup`).
fn systemd_default_options(working_directory: &str) -> BTreeMap<String, Vec<String>> {
    BTreeMap::from(
        [
            // reaction slice (does nothing if nonexistent)
            ("Slice", vec!["reaction.slice"]),
            // Started in its own directory
            ("WorkingDirectory", vec![working_directory]),
            // No file access except own directory
            ("ReadWritePaths", vec![working_directory]),
            ("ReadOnlyPaths", vec!["/"]),
            ("InaccessiblePaths", vec!["/boot", "/etc"]),
            // Protect special filesystems
            ("PrivateDevices", vec!["true"]),
            ("PrivateMounts", vec!["true"]),
            ("PrivateTmp", vec!["true"]),
            // ("PrivateUsers", vec!["true"]),
            ("ProcSubset", vec!["pid"]),
            ("ProtectClock", vec!["true"]),
            ("ProtectControlGroups", vec!["true"]),
            // ProtectHome would break development runs started from a home
            // directory, so it is only enabled in release builds.
            #[cfg(not(debug_assertions))]
            ("ProtectHome", vec!["true"]),
            ("ProtectHostname", vec!["true"]),
            ("ProtectKernelLogs", vec!["true"]),
            ("ProtectKernelModules", vec!["true"]),
            ("ProtectKernelTunables", vec!["true"]),
            ("ProtectProc", vec!["invisible"]),
            ("ProtectSystem", vec!["strict"]),
            // Various Protections
            ("LockPersonality", vec!["true"]),
            ("NoNewPrivileges", vec!["true"]),
            ("AmbientCapabilities", vec![""]),
            ("CapabilityBoundingSet", vec![""]),
            // Isolate File
            ("RemoveIPC", vec!["true"]),
            ("RestrictNamespaces", vec!["true"]),
            ("RestrictSUIDSGID", vec!["true"]),
            ("SystemCallArchitectures", vec!["native"]),
            (
                "SystemCallFilter",
                vec!["@system-service", "~@privileged", "~@resources", "~@setuid"],
            ),
            // User
            // FIXME Setting another user doesn't work, because of stdio pipe permission errors
            // ("DynamicUser", vec!["true"]),
            // ("User", vec!["reaction-plugin-test"]),
            // Too restrictive
            // ("NoExecPaths", vec!["/"]),
            // ("RestrictAddressFamilies", vec![""]),
        ]
        .map(|(k, v)| (k.into(), v.into_iter().map(|v| v.into()).collect())),
    )
}
/// Configuration of an external plugin executable launched by reaction.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(test, derive(Default))]
#[serde(deny_unknown_fields)]
pub struct Plugin {
    /// Plugin name, filled from the config key by `Plugin::setup`.
    #[serde(skip)]
    pub name: String,
    /// Path to the plugin executable; must be absolute (relative paths are
    /// only resolved in debug builds, see `Plugin::setup`).
    pub path: String,
    /// Check that plugin file owner is root
    #[serde(default = "_true")]
    pub check_root: bool,
    /// Enable systemd containerization
    #[serde(default = "_true")]
    pub systemd: bool,
    /// Options for `run0`
    #[serde(default)]
    pub systemd_options: BTreeMap<String, Vec<String>>,
}
/// Serde helper: default value for boolean fields that default to `true`.
fn _true() -> bool {
    true
}
// NOTE
// `run0` can be used for security customisation.
// with the --pipe option, raw stdio fd are transmitted to the underlying command, so there is no overhead.
impl Plugin {
/// Validates the plugin configuration and stores its `name`.
///
/// Paths must be absolute. In debug builds only, a relative path is first
/// resolved against the current working directory (development
/// convenience), which must happen *before* the absolute-path check.
pub fn setup(&mut self, name: &str) -> Result<(), String> {
    self.name = name.to_string();
    if self.path.is_empty() {
        return Err("can't specify empty plugin path".into());
    }
    // Only when testing, make relative paths absolute
    #[cfg(debug_assertions)]
    if !self.path.starts_with("/") {
        self.path = format!(
            "{}/{}",
            std::env::current_dir()
                .map_err(|err| format!("error on working directory: {err}"))?
                .to_string_lossy(),
            self.path
        );
    }
    // Disallow relative paths
    if !self.path.starts_with("/") {
        return Err(format!("plugin paths must be absolute: {}", self.path));
    }
    Ok(())
}
/// Override default options with user-defined options, when defined.
pub fn systemd_setup(&self, working_directory: &str) -> BTreeMap<String, Vec<String>> {
let mut new_options = systemd_default_options(working_directory);
for (option, value) in self.systemd_options.iter() {
new_options.insert(option.clone(), value.clone());
}
new_options
}
pub async fn launch(&self, state_directory: &str) -> Result<Child, std::io::Error> {
// owner check
if self.check_root {
let path = self.path.clone();
let stat = fs::metadata(path).await?;
if stat.st_uid() != 0 {
return Err(Error::other("plugin file is not owned by root"));
}
}
let self_uid = if self.systemd {
Some(
// Well well we want to check if we're root
#[allow(unsafe_code)]
unsafe {
nix::libc::geteuid()
},
)
} else {
None
};
// Create plugin working directory (also state directory)
let plugin_working_directory = format!("{state_directory}/plugin_data/{}", self.name);
fs::create_dir_all(&plugin_working_directory).await?;
let mut command = if self_uid.is_some_and(|self_uid| self_uid == 0) {
let mut command = Command::new("run0");
// --pipe gives direct, non-emulated stdio access, for better performance.
command.arg("--pipe");
// run the command inside the same slice as reaction
command.arg("--slice-inherit");
// Make path absolute for systemd
let full_workdir = path::absolute(&plugin_working_directory)?;
let full_workdir = full_workdir.to_str().ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidFilename,
format!(
"Could not absolutize plugin working directory {plugin_working_directory}"
),
)
})?;
let merged_systemd_options = self.systemd_setup(full_workdir);
// run0 options
for (option, values) in merged_systemd_options.iter() {
for value in values.iter() {
command.arg("--property").arg(format!("{option}={value}"));
}
}
command.arg(&self.path);
command
} else {
if self.systemd {
warn!("Disabling systemd because reaction does not run as root");
}
let mut command = Command::new(&self.path);
command.current_dir(plugin_working_directory);
command
};
command.arg("serve");
debug!(
"plugin {}: running command: {:?}",
self.name,
command.as_std()
);
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.env("RUST_BACKTRACE", "1")
.spawn()
}
}

View file

@ -1,64 +1,57 @@
use std::{cmp::Ordering, collections::BTreeMap, hash::Hash};
use reaction_plugin::StreamConfig;
use regex::RegexSet;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use super::{Filter, Patterns, merge_attrs};
use super::{Filter, Patterns};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(test, derive(Default))]
#[serde(deny_unknown_fields)]
pub struct Stream {
#[serde(default)]
pub cmd: Vec<String>,
cmd: Vec<String>,
#[serde(default)]
pub filters: BTreeMap<String, Filter>,
filters: BTreeMap<String, Filter>,
#[serde(skip)]
pub name: String,
#[serde(skip)]
pub compiled_regex_set: RegexSet,
#[serde(skip)]
pub regex_index_to_filter_name: Vec<String>,
// Plugin-specific
#[serde(default, rename = "type", skip_serializing_if = "Option::is_none")]
pub stream_type: Option<String>,
#[serde(default, skip_serializing_if = "Value::is_null")]
pub options: Value,
name: String,
}
impl Stream {
pub fn filters(&self) -> &BTreeMap<String, Filter> {
&self.filters
}
pub fn get_filter(&self, filter_name: &str) -> Option<&Filter> {
self.filters.get(filter_name)
}
pub fn name(&self) -> &str {
&self.name
}
pub fn cmd(&self) -> &Vec<String> {
&self.cmd
}
pub fn merge(&mut self, other: Stream) -> Result<(), String> {
self.cmd = merge_attrs(self.cmd.clone(), other.cmd, Vec::default(), "cmd")?;
self.stream_type = merge_attrs(self.stream_type.clone(), other.stream_type, None, "type")?;
if !(self.cmd.is_empty() || other.cmd.is_empty() || self.cmd == other.cmd) {
return Err("cmd has conflicting definitions".into());
}
if self.cmd.is_empty() {
self.cmd = other.cmd;
}
for (key, filter) in other.filters.into_iter() {
if self.filters.insert(key.clone(), filter).is_some() {
return Err(format!(
"filter {} is already defined. filter definitions can't be spread accross multiple files.",
key
));
return Err(format!("filter {} is already defined. filter definitions can't be spread accross multiple files.", key));
}
}
Ok(())
}
pub fn is_plugin(&self) -> bool {
self.stream_type
.as_ref()
.is_some_and(|stream_type| stream_type != "cmd")
}
pub fn setup(&mut self, name: &str, patterns: &Patterns) -> Result<(), String> {
self._setup(name, patterns)
.map_err(|msg| format!("stream {}: {}", name, msg))
@ -74,18 +67,11 @@ impl Stream {
return Err("character '.' is not allowed in stream name".into());
}
if !self.is_plugin() {
if self.cmd.is_empty() {
return Err("cmd is empty".into());
}
if self.cmd[0].is_empty() {
return Err("cmd's first item is empty".into());
}
if !self.options.is_null() {
return Err("can't define options without a plugin type".into());
}
} else if !self.cmd.is_empty() {
return Err("can't define cmd and a plugin type".into());
if self.cmd.is_empty() {
return Err("cmd is empty".into());
}
if self.cmd[0].is_empty() {
return Err("cmd's first item is empty".into());
}
if self.filters.is_empty() {
@ -96,33 +82,8 @@ impl Stream {
filter.setup(name, key, patterns)?;
}
let all_regexes: BTreeMap<_, _> = self
.filters
.values()
.flat_map(|filter| {
filter
.regex
.iter()
.map(|regex| (regex, filter.name.clone()))
})
.collect();
self.compiled_regex_set = RegexSet::new(all_regexes.keys())
.map_err(|err| format!("too much regexes on the filters of this stream: {err}"))?;
self.regex_index_to_filter_name = all_regexes.into_values().collect();
Ok(())
}
pub fn to_stream_config(&self) -> Result<StreamConfig, String> {
Ok(StreamConfig {
stream_name: self.name.clone(),
stream_type: self.stream_type.clone().ok_or_else(|| {
format!("stream {} doesn't load a plugin. this is a bug!", self.name)
})?,
config: self.options.clone().into(),
})
}
}
impl PartialEq for Stream {
@ -153,14 +114,21 @@ mod tests {
use super::*;
use crate::concepts::filter::tests::ok_filter;
fn ok_stream() -> Stream {
fn default_stream() -> Stream {
Stream {
cmd: vec!["command".into()],
filters: BTreeMap::from([("name".into(), ok_filter())]),
..Default::default()
cmd: Vec::new(),
name: "".into(),
filters: BTreeMap::new(),
}
}
fn ok_stream() -> Stream {
let mut stream = default_stream();
stream.cmd = vec!["command".into()];
stream.filters.insert("name".into(), ok_filter());
stream
}
#[test]
fn test() {
let mut stream;

View file

@ -1,25 +1,28 @@
#[cfg(test)]
pub mod tests;
mod tests;
mod state;
use std::{collections::BTreeMap, process::Stdio, sync::Arc};
use std::{
collections::BTreeMap,
process::Stdio,
sync::{Arc, Mutex, MutexGuard},
};
use chrono::TimeZone;
use reaction_plugin::{ActionImpl, shutdown::ShutdownToken};
use regex::Regex;
use tokio::sync::{Mutex, MutexGuard, Semaphore};
use tokio::sync::Semaphore;
use tracing::{error, info};
use crate::{
concepts::{Action, Duplicate, Filter, Match, Pattern, Time},
daemon::plugin::Plugins,
concepts::{Action, Filter, Match, Pattern, Time},
protocol::{Order, PatternStatus},
treedb::Database,
};
use treedb::Database;
use state::State;
use super::shutdown::ShutdownToken;
/// Responsible for handling all runtime logic dedicated to a [`Filter`].
/// Notably handles incoming lines from [`super::stream::stream_manager`]
/// and orders from the [`super::socket::socket_manager`]
@ -31,8 +34,6 @@ pub struct FilterManager {
exec_limit: Option<Arc<Semaphore>>,
/// Permits to run pending actions on shutdown
shutdown: ShutdownToken,
/// Action Plugins
action_plugins: BTreeMap<&'static String, ActionImpl>,
/// Inner state.
/// Protected by a [`Mutex`], permitting FilterManager to be cloned
/// and concurrently owned by its stream manager, the socket manager,
@ -40,50 +41,41 @@ pub struct FilterManager {
state: Arc<Mutex<State>>,
}
/// The react to a line handling.
#[derive(Debug, PartialEq, Eq)]
pub enum React {
/// This line doesn't match
NoMatch,
/// This line matches, but no execution is triggered
Match,
/// This line matches, and an execution is triggered
Trigger,
Exec,
}
#[allow(clippy::unwrap_used)]
impl FilterManager {
pub async fn new(
pub fn new(
filter: &'static Filter,
exec_limit: Option<Arc<Semaphore>>,
shutdown: ShutdownToken,
db: &mut Database,
plugins: &mut Plugins,
now: Time,
) -> Result<Self, String> {
let mut action_plugins = BTreeMap::default();
for (action_name, action) in filter.actions.iter().filter(|action| action.1.is_plugin()) {
action_plugins.insert(
action_name,
plugins.get_action_impl(action.to_string()).ok_or_else(|| {
format!("action {action} doesn't load a plugin. this is a bug!")
})?,
);
}
let this = Self {
filter,
exec_limit,
shutdown,
action_plugins,
state: Arc::new(Mutex::new(State::new(filter, db, now).await?)),
state: Arc::new(Mutex::new(State::new(
filter,
!filter.longuest_action_duration().is_zero(),
db,
now,
)?)),
};
this.clear_past_triggers_and_schedule_future_actions(now);
Ok(this)
}
pub async fn handle_line(&self, line: &str, now: Time) -> React {
pub fn handle_line(&self, line: &str, now: Time) -> React {
if let Some(match_) = self.filter.get_match(line) {
if self.handle_match(match_, now).await {
React::Trigger
if self.handle_match(match_, now) {
React::Exec
} else {
React::Match
}
@ -92,48 +84,30 @@ impl FilterManager {
}
}
async fn handle_match(&self, m: Match, now: Time) -> bool {
fn handle_match(&self, m: Match, now: Time) -> bool {
#[allow(clippy::unwrap_used)] // propagating panics is ok
let mut state = self.state.lock().await;
state.clear_past_matches(now).await;
let mut state = self.state.lock().unwrap();
state.clear_past_matches(now);
// if Duplicate::Ignore and already triggered, skip
if state.triggers.contains_key(&m) && Duplicate::Ignore == self.filter.duplicate {
return false;
}
info!("{}: match {:?}", self.filter, &m);
let trigger = match self.filter.retry {
let exec = match self.filter.retry() {
None => true,
Some(retry) => {
state.add_match(m.clone(), now).await;
state.add_match(m.clone(), now);
// Number of stored times for this match >= configured retry for this filter
state.get_times(&m).await >= retry as usize
state.get_times(&m) >= retry as usize
}
};
if trigger {
state.remove_match(&m).await;
let actions_left = if Duplicate::Extend == self.filter.duplicate {
// Get number of actions left from last trigger
state
.remove_trigger(&m)
.await
// Only one entry in the map because Duplicate::Extend
.and_then(|map| map.first_key_value().map(|(_, n)| *n))
} else {
None
};
state.add_trigger(m.clone(), now, actions_left).await;
self.schedule_exec(m, now, now, &mut state, false, actions_left)
.await;
if exec {
state.remove_match(&m);
state.add_trigger(m.clone(), now);
self.schedule_exec(m, now, now, &mut state, false);
}
trigger
exec
}
pub async fn handle_trigger(
pub fn handle_trigger(
&self,
patterns: BTreeMap<Arc<Pattern>, String>,
now: Time,
@ -141,16 +115,15 @@ impl FilterManager {
let match_ = self.filter.get_match_from_patterns(patterns)?;
#[allow(clippy::unwrap_used)] // propagating panics is ok
let mut state = self.state.lock().await;
state.remove_match(&match_).await;
state.add_trigger(match_.clone(), now, None).await;
self.schedule_exec(match_, now, now, &mut state, false, None)
.await;
let mut state = self.state.lock().unwrap();
state.remove_match(&match_);
state.add_trigger(match_.clone(), now);
self.schedule_exec(match_, now, now, &mut state, false);
Ok(())
}
pub async fn handle_order(
pub fn handle_order(
&self,
patterns: &BTreeMap<Arc<Pattern>, Regex>,
order: Order,
@ -159,7 +132,7 @@ impl FilterManager {
let is_match = |match_: &Match| {
match_
.iter()
.zip(self.filter.patterns.as_ref())
.zip(self.filter.patterns())
.filter_map(|(a_match, pattern)| {
patterns.get(pattern.as_ref()).map(|regex| (a_match, regex))
})
@ -167,7 +140,7 @@ impl FilterManager {
};
#[allow(clippy::unwrap_used)] // propagating panics is ok
let mut state = self.state.lock().await;
let mut state = self.state.lock().unwrap();
let mut cs: BTreeMap<_, _> = {
let cloned_matches = state
@ -179,74 +152,62 @@ impl FilterManager {
.cloned()
.collect::<Vec<_>>();
let mut cs = BTreeMap::new();
for match_ in cloned_matches {
// mutable State required here
if let Order::Flush = order {
state.remove_match(&match_).await;
}
let matches = state
.matches
.get(&match_)
.map(|times| times.len())
.unwrap_or(0);
cs.insert(
match_,
PatternStatus {
matches,
..Default::default()
},
);
}
cs
cloned_matches
.into_iter()
.map(|match_| {
// mutable State required here
if let Order::Flush = order {
state.remove_match(&match_);
}
let matches = state
.matches
.get(&match_)
.map(|times| times.len())
.unwrap_or(0);
(
match_,
PatternStatus {
matches,
..Default::default()
},
)
})
.collect()
};
let cloned_triggers = state
.triggers
.keys()
// match filtering
.filter(|match_| is_match(match_))
.filter(|match_| is_match(&match_.m))
// clone necessary to drop all references to State
.cloned()
.collect::<Vec<_>>();
for m in cloned_triggers.into_iter() {
let map = state.triggers.get(&m).unwrap().clone();
for mt in cloned_triggers.into_iter() {
// mutable State required here
// Remove the match from the triggers
if let Order::Flush = order {
state.remove_trigger(&m).await;
// delete specific (Match, Time) tuple
state.remove_trigger(&mt.m, &mt.t);
}
for (t, remaining) in map {
if remaining > 0 {
let pattern_status = cs.entry(m.clone()).or_default();
let m = mt.m.clone();
let pattern_status = cs.entry(m).or_default();
for action in self.filter.filtered_actions_from_match(&m) {
let action_time = t + action.after_duration.unwrap_or_default();
if action_time > now {
// Pretty print time
let time = chrono::Local
.timestamp_opt(
action_time.as_secs() as i64,
action_time.subsec_nanos(),
)
.unwrap()
.to_rfc3339()
.chars()
.take(19)
.collect();
// Insert action
pattern_status
.actions
.entry(action.name.clone())
.or_default()
.push(time);
for action in self.filter.actions().values() {
let action_time = mt.t + action.after_duration().unwrap_or_default();
if action_time > now {
// Insert action
pattern_status
.actions
.entry(action.name().into())
.or_default()
.push(action_time.to_rfc3339().chars().take(19).collect());
// Execute the action early
if let Order::Flush = order {
self.exec_now(action, m.clone(), t);
}
}
// Execute the action early
if let Order::Flush = order {
exec_now(&self.exec_limit, action, mt.m.clone());
}
}
}
@ -258,55 +219,48 @@ impl FilterManager {
/// Schedule execution for a given Match.
/// We check first if the trigger is still here
/// because pending actions can be flushed.
async fn schedule_exec(
fn schedule_exec(
&self,
m: Match,
t: Time,
now: Time,
state: &mut MutexGuard<'_, State>,
state: &mut MutexGuard<State>,
startup: bool,
actions_left: Option<u64>,
) {
let actions = self
for action in self
.filter
.filtered_actions_from_match(&m)
.into_iter()
.actions()
.values()
// On startup, skip oneshot actions
.filter(|action| !startup || !action.oneshot)
// skip any actions
.skip(match actions_left {
Some(actions_left) => {
self.filter.filtered_actions_from_match(&m).len() - actions_left as usize
}
None => 0,
});
// Scheduling each action
for action in actions {
let exec_time = t + action.after_duration.unwrap_or_default();
.filter(|action| !startup || !action.oneshot())
{
let exec_time = t + action.after_duration().unwrap_or_default();
let m = m.clone();
if exec_time <= now {
if state.decrement_trigger(&m, t, false).await {
self.exec_now(action, m, t);
if state.decrement_trigger(&m, t) {
exec_now(&self.exec_limit, action, m);
}
} else {
let this = self.clone();
let action_impl = self.action_plugins.get(&action.name).cloned();
tokio::spawn(async move {
let dur = exec_time - now;
let dur = (exec_time - now)
.to_std()
// Could cause an error if t + after < now
// In this case, 0 is fine
.unwrap_or_default();
// Wait either for end of sleep
// or reaction exiting
let exiting = tokio::select! {
_ = tokio::time::sleep(dur.into()) => false,
_ = tokio::time::sleep(dur) => false,
_ = this.shutdown.wait() => true,
};
// Exec action if triggered hasn't been already flushed
if !exiting || action.on_exit {
if !exiting || action.on_exit() {
#[allow(clippy::unwrap_used)] // propagating panics is ok
let mut state = this.state.lock().await;
if state.decrement_trigger(&m, t, exiting).await {
exec_now(&this.exec_limit, this.shutdown, action, action_impl, m, t);
let mut state = this.state.lock().unwrap();
if state.decrement_trigger(&m, t) {
exec_now(&this.exec_limit, action, m);
}
}
});
@ -314,127 +268,60 @@ impl FilterManager {
}
}
/// Clear past triggers and schedule future actions
pub async fn start(&self, now: Time) {
let longuest_action_duration = self.filter.longuest_action_duration;
fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) {
let longuest_action_duration = self.filter.longuest_action_duration();
let number_of_actions = self
.filter
.actions
.actions()
.values()
// On startup, skip oneshot actions
.filter(|action| !action.oneshot)
.count() as u64;
.filter(|action| !action.oneshot())
.count();
#[allow(clippy::unwrap_used)] // propagating panics is ok
let mut state = self.state.lock().await;
let mut state = self.state.lock().unwrap();
let cloned_triggers = state
.triggers
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.map(|(k, v)| (k.clone(), *v))
.collect::<Vec<_>>();
for (m, map) in cloned_triggers.into_iter() {
let map: BTreeMap<_, _> = map
.into_iter()
// Keep only up-to-date triggers
.filter(|(t, remaining)| *remaining > 0 && *t + longuest_action_duration > now)
// Reset action count
.map(|(t, _)| (t, number_of_actions))
.collect();
if map.is_empty() {
state.triggers.remove(&m).await;
for (mt, remaining) in cloned_triggers.into_iter() {
if remaining > 0 && mt.t + longuest_action_duration > now {
// Insert back the upcoming times
state.triggers.insert(mt.clone(), number_of_actions as u64);
// Schedule the upcoming times
self.schedule_exec(mt.m, mt.t, now, &mut state, true);
} else {
// Filter duplicates
// unwrap is fine because map is not empty (see if)
let map = match self.filter.duplicate {
// Keep only last item
Duplicate::Extend => BTreeMap::from([map.into_iter().next_back().unwrap()]),
// Keep only first item
Duplicate::Ignore => BTreeMap::from([map.into_iter().next().unwrap()]),
// No filtering
Duplicate::Rerun => map,
};
state.triggers.insert(m.clone(), map.clone()).await;
for (t, _) in map {
// Schedule the upcoming times
self.schedule_exec(m.clone(), t, now, &mut state, true, None)
.await;
}
state.triggers.remove(&mt);
}
}
}
fn exec_now(&self, action: &'static Action, m: Match, t: Time) {
let action_impl = self.action_plugins.get(&action.name).cloned();
exec_now(
&self.exec_limit,
self.shutdown.clone(),
action,
action_impl,
m,
t,
)
}
}
fn exec_now(
exec_limit: &Option<Arc<Semaphore>>,
shutdown: ShutdownToken,
action: &'static Action,
action_impl: Option<ActionImpl>,
m: Match,
t: Time,
) {
fn exec_now(exec_limit: &Option<Arc<Semaphore>>, action: &'static Action, m: Match) {
let exec_limit = exec_limit.clone();
tokio::spawn(async move {
// Move ShutdownToken in task
let _shutdown = shutdown;
// Wait for semaphore's permission, if it is Some
let _permit = match exec_limit {
#[allow(clippy::unwrap_used)] // We know the semaphore is not closed
Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()),
None => None,
};
match action_impl {
Some(action_impl) => {
info!(
"{action}: run {} {:?}",
action.action_type.clone().unwrap_or_default(),
&m,
);
// Construct command
let mut command = action.exec(&m);
// Sending action
if let Err(err) = action_impl
.tx
.send(reaction_plugin::Exec {
match_: m,
time: t.into(),
})
.await
{
error!("{action}: communication with plugin failed: {err}");
return;
}
}
None => {
// Wait for semaphore's permission, if it is Some
let _permit = match exec_limit {
#[allow(clippy::unwrap_used)] // We know the semaphore is not closed
Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()),
None => None,
};
// Construct command
let mut command = action.exec(&m);
info!("{action}: run [{:?}]", command.as_std());
if let Err(err) = command
.stdin(Stdio::null())
.stderr(Stdio::null())
.stdout(Stdio::piped())
.status()
.await
{
error!("{action}: run [{:?}], code {err}", command.as_std());
}
}
info!("{}: run [{:?}]", &action, command.as_std());
if let Err(err) = command
.stdin(Stdio::null())
.stderr(Stdio::null())
.stdout(Stdio::piped())
.status()
.await
{
error!("{}: run [{:?}], code {}", &action, command.as_std(), err);
}
});
}

View file

@ -1,23 +1,23 @@
use std::collections::{BTreeMap, BTreeSet};
use serde_json::Value;
use treedb::{Database, Tree, helpers::*};
use crate::concepts::{Filter, Match, MatchTime, Time};
use crate::{
concepts::{Filter, Match, MatchTime, Time},
treedb::{
helpers::{to_match, to_matchtime, to_time, to_u64},
Database, Tree,
},
};
pub fn filter_ordered_times_db_name(filter: &Filter) -> String {
format!(
"filter_ordered_times_{}.{}",
filter.stream_name, filter.name
filter.stream_name(),
filter.name()
)
}
pub fn filter_triggers_old_db_name(filter: &Filter) -> String {
format!("filter_triggers_{}.{}", filter.stream_name, filter.name)
}
pub fn filter_triggers_db_name(filter: &Filter) -> String {
format!("filter_triggers2_{}.{}", filter.stream_name, filter.name)
format!("filter_triggers_{}.{}", filter.stream_name(), filter.name())
}
/// Internal state of a [`FilterManager`].
@ -34,161 +34,85 @@ pub struct State {
/// Alternative view of the current Matches for O(1) cleaning of old Matches
/// without added async Tasks to remove them
/// Persisted
///
/// I'm pretty confident that Time will always be unique, because it has enough precision.
/// See this code that gives different times, even in a minimal loop:
/// ```rust
/// use reaction::concepts::now;
///
/// let mut res = vec![];
/// for _ in 0..10 {
/// let now = now();
/// res.push(format!("Now: {}", now.as_nanos()));
/// }
/// for s in res {
/// println!("{s}");
/// }
/// ```
pub ordered_times: Tree<Time, Match>,
/// Saves all the current Triggers for this Filter
/// Persisted
pub triggers: Tree<Match, BTreeMap<Time, u64>>,
pub triggers: Tree<MatchTime, u64>,
}
impl State {
pub async fn new(
pub fn new(
filter: &'static Filter,
has_after: bool,
db: &mut Database,
now: Time,
) -> Result<Self, String> {
let ordered_times = db
.open_tree(
filter_ordered_times_db_name(filter),
filter.retry_duration.unwrap_or_default(),
|(key, value)| Ok((to_time(&key)?, to_match(&value)?)),
)
.await?;
let mut triggers = db
.open_tree(
filter_triggers_db_name(filter),
filter.longuest_action_duration,
|(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)),
)
.await?;
if triggers.is_empty() {
let old_triggers = db
.open_tree(
filter_triggers_old_db_name(filter),
filter.longuest_action_duration,
|(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)),
)
.await?;
for (mt, n) in old_triggers.iter() {
triggers
.fetch_update(mt.m.clone(), |map| {
Some(match map {
None => [(mt.t, *n)].into(),
Some(mut map) => {
map.insert(mt.t, *n);
map
}
})
})
.await;
}
}
let mut this = Self {
filter,
has_after: !filter.longuest_action_duration.is_zero(),
has_after,
matches: BTreeMap::new(),
ordered_times,
triggers,
ordered_times: db.open_tree(
filter_ordered_times_db_name(filter),
filter.retry_duration().unwrap_or_default(),
|(key, value)| Ok((to_time(&key)?, to_match(&value)?)),
)?,
triggers: db.open_tree(
filter_triggers_db_name(filter),
filter.longuest_action_duration(),
|(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)),
)?,
};
this.clear_past_matches(now).await;
this.load_matches_from_ordered_times().await;
this.clear_past_matches(now);
this.load_matches_from_ordered_times();
Ok(this)
}
pub async fn add_match(&mut self, m: Match, t: Time) {
pub fn add_match(&mut self, m: Match, t: Time) {
let set = self.matches.entry(m.clone()).or_default();
set.insert(t);
self.ordered_times.insert(t, m).await;
self.ordered_times.insert(t, m);
}
pub async fn add_trigger(&mut self, m: Match, t: Time, action_count: Option<u64>) {
pub fn add_trigger(&mut self, m: Match, t: Time) {
// We record triggered filters only when there is an action with an `after` directive
if self.has_after {
// Add the (Match, Time) to the triggers map
let n = action_count
.unwrap_or_else(|| self.filter.filtered_actions_from_match(&m).len() as u64);
self.triggers
.fetch_update(m, |map| {
Some(match map {
None => [(t, n)].into(),
Some(mut value) => {
value.insert(t, n);
value
}
})
})
.await;
.insert(MatchTime { m, t }, self.filter.actions().len() as u64);
}
}
// Completely remove a Match from the matches
pub async fn remove_match(&mut self, m: &Match) {
pub fn remove_match(&mut self, m: &Match) {
if let Some(set) = self.matches.get(m) {
for t in set {
self.ordered_times.remove(t).await;
self.ordered_times.remove(t);
}
self.matches.remove(m);
}
}
/// Completely remove a Match from the triggers
pub async fn remove_trigger(&mut self, m: &Match) -> Option<BTreeMap<Time, u64>> {
self.triggers.remove(m).await
pub fn remove_trigger(&mut self, m: &Match, t: &Time) {
self.triggers.remove(&MatchTime {
m: m.clone(),
t: *t,
});
}
/// Returns whether we should still execute an action for this (Match, Time) trigger
pub async fn decrement_trigger(&mut self, m: &Match, t: Time, exiting: bool) -> bool {
pub fn decrement_trigger(&mut self, m: &Match, t: Time) -> bool {
// We record triggered filters only when there is an action with an `after` directive
if self.has_after {
let mut exec_needed = false;
let mt = MatchTime { m: m.clone(), t };
let count = self
.triggers
.get(&mt.m)
.and_then(|map| map.get(&mt.t))
.cloned();
let count = self.triggers.get(&mt);
if let Some(count) = count {
exec_needed = true;
if count <= 1 {
if !exiting {
self.triggers
.fetch_update(mt.m, |map| {
map.and_then(|mut map| {
map.remove(&mt.t);
if map.is_empty() { None } else { Some(map) }
})
})
.await;
}
// else don't do anything
// Because that will remove the entry in the DB, and make
// it forget this trigger.
// Maybe we should have 2 maps for triggers:
// - The current for action counting, not persisted
// - Another like ordered_times, Tree<Time, Match>, persisted
if *count <= 1 {
self.triggers.remove(&mt);
} else {
self.triggers
.fetch_update(mt.m, |map| {
map.map(|mut map| {
map.insert(mt.t, count - 1);
map
})
})
.await;
self.triggers.insert(mt, count - 1);
}
}
exec_needed
@ -197,8 +121,8 @@ impl State {
}
}
pub async fn clear_past_matches(&mut self, now: Time) {
let retry_duration = self.filter.retry_duration.unwrap_or_default();
pub fn clear_past_matches(&mut self, now: Time) {
let retry_duration = self.filter.retry_duration().unwrap_or_default();
while self
.ordered_times
.first_key_value()
@ -210,7 +134,7 @@ impl State {
let (t, m) = self.ordered_times.first_key_value().unwrap();
(*t, m.clone())
};
self.ordered_times.remove(&t).await;
self.ordered_times.remove(&t);
if let Some(set) = self.matches.get(&m) {
let mut set = set.clone();
set.remove(&t);
@ -223,433 +147,17 @@ impl State {
}
}
pub async fn get_times(&self, m: &Match) -> usize {
pub fn get_times(&self, m: &Match) -> usize {
match self.matches.get(m) {
Some(vec) => vec.len(),
None => 0,
}
}
async fn load_matches_from_ordered_times(&mut self) {
fn load_matches_from_ordered_times(&mut self) {
for (t, m) in self.ordered_times.iter() {
let set = self.matches.entry(m.clone()).or_default();
set.insert(*t);
}
}
}
/// Tries to convert a [`Value`] into a [`MatchTime`]
pub fn to_matchtime(val: &Value) -> Result<MatchTime, String> {
let map = val.as_object().ok_or("not an object")?;
Ok(MatchTime {
m: to_match(map.get("m").ok_or("no m in object")?)?,
t: to_time(map.get("t").ok_or("no t in object")?)?,
})
}
#[cfg(test)]
mod tests {
use std::collections::{BTreeMap, HashMap};
use serde_json::{Map, Value, json};
use crate::{
concepts::{
Action, Duplicate, Filter, MatchTime, Pattern, Time, filter_tests::ok_filter, now,
},
tests::TempDatabase,
};
use super::{State, to_matchtime};
// Tests `new`, `clear_past_matches` and `load_matches_from_ordered_times`
#[tokio::test]
async fn state_new() {
let patterns = Pattern::new_map("az", "[a-z]+").unwrap();
let filter = Filter::new_static(
vec![
Action::new(vec!["true"], None, false, "s1", "f1", "a1", &patterns, 0),
Action::new(
vec!["true"],
Some("3s"),
false,
"s1",
"f1",
"a2",
&patterns,
0,
),
],
vec!["test <az>"],
Some(3),
Some("2s"),
"s1",
"f1",
Duplicate::default(),
&patterns,
);
let now = Time::from_secs(1234567);
// DateTime::parse_from_rfc3339("2025-07-10T12:35:00.000+00:00")
// .unwrap()
// .with_timezone(&Local);
let now_plus_1m = now + Time::from_mins(1);
let now_plus_1m01 = now_plus_1m + Time::from_secs(1);
let now_less_1m = now - Time::from_mins(1);
let now_less_1s = now - Time::from_secs(1);
let now_less_4s = now - Time::from_secs(4);
let now_less_5s = now - Time::from_secs(5);
let triggers = [
// format v1
(
"filter_triggers_s1.f1".into(),
HashMap::from([
// Will stay
(
json!({
"t": now_plus_1m,
"m": ["one"],
}),
json!(1),
),
(
json!({
"t": now_less_1s,
"m": ["one"],
}),
json!(1),
),
// Will not get cleaned because it's FilterManager's task
(
json!({
"t": now_less_5s,
"m": ["one"],
}),
json!(1),
),
]),
),
// format v2 (since v2.2.0)
(
"filter_triggers2_s1.f1".into(),
HashMap::from([(
json!(["one"]),
json!({
// Will stay
now_plus_1m.as_nanos().to_string(): 1,
now_less_1s.as_nanos().to_string(): 1,
// Will not get cleaned because it's FilterManager's task
now_less_5s.as_nanos().to_string(): 1,
}),
)]),
),
];
for trigger_db in triggers {
let mut db = TempDatabase::from_loaded_db(HashMap::from([
(
"filter_ordered_times_s1.f1".into(),
HashMap::from([
// Will stay
(now_plus_1m.as_nanos().to_string().into(), ["one"].into()),
(now_plus_1m01.as_nanos().to_string().into(), ["one"].into()),
(now_less_1s.as_nanos().to_string().into(), ["two"].into()), // stays because retry: 2s
// Will get cleaned
(now_less_4s.as_nanos().to_string().into(), ["two"].into()),
(now_less_5s.as_nanos().to_string().into(), ["three"].into()),
(now_less_1m.as_nanos().to_string().into(), ["two"].into()),
]),
),
trigger_db,
]))
.await;
let state = State::new(filter, &mut db, now).await.unwrap();
assert_eq!(
state.ordered_times.tree(),
&BTreeMap::from([
(now_less_1s, vec!["two".into()]),
(now_plus_1m, vec!["one".into()]),
(now_plus_1m01, vec!["one".into()]),
])
);
assert_eq!(
state.matches,
BTreeMap::from([
(vec!["one".into()], [now_plus_1m, now_plus_1m01].into()),
(vec!["two".into()], [now_less_1s].into()),
])
);
assert_eq!(
state.triggers.tree(),
&BTreeMap::from([(
vec!["one".into()],
BTreeMap::from([
(now_less_5s, 1u64),
(now_less_1s, 1u64),
(now_plus_1m, 1u64),
]),
)])
);
}
}
#[tokio::test]
async fn state_match_add_remove() {
let filter = Box::leak(Box::new(ok_filter()));
let one = vec!["one".into()];
let now = Time::from_secs(1234567);
let now_less_1s = now - Time::from_secs(1);
let now_less_4s = now - Time::from_secs(4);
let mut db = TempDatabase::default().await;
let mut state = State::new(filter, &mut db, now).await.unwrap();
assert!(state.ordered_times.tree().is_empty());
assert!(state.matches.is_empty());
// Add non-previously added match
state.add_match(one.clone(), now_less_1s).await;
assert_eq!(
state.ordered_times.tree(),
&BTreeMap::from([(now_less_1s, one.clone()),])
);
assert_eq!(
state.matches,
BTreeMap::from([(one.clone(), [now_less_1s].into())])
);
// Add previously added match
state.add_match(one.clone(), now_less_4s).await;
assert_eq!(
state.ordered_times.tree(),
&BTreeMap::from([(now_less_1s, one.clone()), (now_less_4s, one.clone())])
);
assert_eq!(
state.matches,
BTreeMap::from([(one.clone(), [now_less_1s, now_less_4s].into())])
);
// Remove added match
state.remove_match(&one).await;
assert!(state.ordered_times.tree().is_empty());
assert!(state.matches.is_empty());
}
#[tokio::test]
async fn state_trigger_no_after_add_remove_decrement() {
    // With `ok_filter()` (presumably no `after`-delayed actions — confirm in
    // the helper), add_trigger has nothing to persist: the trigger map stays
    // empty and the first decrement immediately reports completion (true).
    let filter = Box::leak(Box::new(ok_filter()));
    let one = vec!["one".into()];
    let now = now();
    let mut db = TempDatabase::default().await;
    let mut state = State::new(filter, &mut db, now).await.unwrap();
    assert!(state.triggers.tree().is_empty());
    // Add unique trigger
    state.add_trigger(one.clone(), now, None).await;
    // Nothing is really added
    assert!(state.triggers.tree().is_empty());
    // Will be called immediately after, it returns true
    assert!(state.decrement_trigger(&one, now, false).await);
}
#[tokio::test]
async fn state_trigger_has_after_add_remove_decrement() {
    // Exercises the trigger counter lifecycle for a filter that has three
    // actions (so each trigger starts with a count of 3, per the `Some(3)`
    // argument below — TODO confirm that argument is the action count):
    // - decrement_trigger returns true while a counter exists, false once
    //   the entry for that (patterns, time) pair is gone;
    // - reaching zero removes the entry entirely;
    // - the `exiting`-like flag (3rd argument = true) never decrements the
    //   last remaining count;
    // - entries at different times for the same patterns are independent;
    // - remove_trigger drops all entries for the patterns at once.
    let patterns = Pattern::new_map("az", "[a-z]+").unwrap();
    let filter = Filter::new_static(
        vec![
            Action::new(vec!["true"], None, false, "s1", "f1", "a1", &patterns, 0),
            Action::new(
                vec!["true"],
                Some("1s"),
                false,
                "s1",
                "f1",
                "a2",
                &patterns,
                0,
            ),
            Action::new(
                vec!["true"],
                Some("3s"),
                false,
                "s1",
                "f1",
                "a3",
                &patterns,
                0,
            ),
        ],
        vec!["test <az>"],
        Some(3),
        Some("2s"),
        "s1",
        "f1",
        Duplicate::default(),
        &patterns,
    );
    let one = vec!["one".into()];
    let now = now();
    let now_plus_1s = now + Time::from_secs(1);
    let mut db = TempDatabase::default().await;
    let mut state = State::new(filter, &mut db, now).await.unwrap();
    assert!(state.triggers.tree().is_empty());
    // Add unique trigger
    state.add_trigger(one.clone(), now, None).await;
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 3)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, false).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 2)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, false).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 1)].into())])
    );
    // Decrement → true
    // Third decrement reaches zero: the whole entry is removed.
    assert!(state.decrement_trigger(&one, now, false).await);
    assert!(state.triggers.tree().is_empty());
    // Decrement → false
    assert!(!state.decrement_trigger(&one, now, false).await);
    // Add unique trigger (but decrement exiting-like)
    state.add_trigger(one.clone(), now, None).await;
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 3)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, true).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 2)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, true).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 1)].into())])
    );
    // Decrement but exiting → true, does nothing
    // With exiting=true the last count is preserved (stays at 1).
    assert!(state.decrement_trigger(&one, now, true).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now, 1)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, false).await);
    assert!(state.triggers.tree().is_empty());
    // Decrement → false
    assert!(!state.decrement_trigger(&one, now, false).await);
    // Add trigger with neighbour
    // Two entries for the same patterns at different times coexist.
    state.add_trigger(one.clone(), now, None).await;
    state.add_trigger(one.clone(), now_plus_1s, None).await;
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())])
    );
    // Decrement → true
    // Only the entry at `now` is decremented; the neighbour is untouched.
    assert!(state.decrement_trigger(&one, now, false).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 2)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, false).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 1)].into())])
    );
    // Decrement → true
    assert!(state.decrement_trigger(&one, now, false).await);
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now_plus_1s, 3)].into())])
    );
    // Decrement → false
    // The entry at `now` is exhausted even though `now_plus_1s` remains.
    assert!(!state.decrement_trigger(&one, now, false).await);
    // Remove neighbour
    state.remove_trigger(&one).await;
    assert!(state.triggers.tree().is_empty());
    // Add two neighbour triggers
    state.add_trigger(one.clone(), now, None).await;
    state.add_trigger(one.clone(), now_plus_1s, None).await;
    assert_eq!(
        state.triggers.tree(),
        &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())])
    );
    // Remove them
    // remove_trigger drops every time entry for the patterns at once.
    state.remove_trigger(&one).await;
    assert!(state.triggers.tree().is_empty());
}
#[test]
fn test_to_matchtime() {
    // Checks the JSON -> MatchTime decoder:
    // a valid object needs both "m" (array of strings) and "t" (time value,
    // here given as a string of nanoseconds); anything else is an error.
    assert_eq!(
        to_matchtime(&Value::Object(Map::from_iter(
            BTreeMap::from([
                ("m".into(), ["plip", "ploup"].into()),
                ("t".into(), "12345678".into()),
            ])
            .into_iter()
        ))),
        Ok(MatchTime {
            m: vec!["plip".into(), "ploup".into()],
            t: Time::from_nanos(12345678),
        })
    );
    // Missing "t" field → error.
    assert!(
        to_matchtime(&Value::Object(Map::from_iter(
            BTreeMap::from([("m".into(), ["plip", "ploup"].into()),]).into_iter()
        )))
        .is_err()
    );
    // Missing "m" field → error.
    assert!(
        to_matchtime(&Value::Object(Map::from_iter(
            BTreeMap::from([("t".into(), 12345678.into()),]).into_iter()
        )))
        .is_err()
    );
    // "m" is a plain string instead of an array → error.
    assert!(
        to_matchtime(&Value::Object(Map::from_iter(
            BTreeMap::from([("m".into(), "ploup".into()), ("t".into(), 12345678.into()),])
                .into_iter()
        )))
        .is_err()
    );
    // "t" is an array instead of a scalar time value → error.
    assert!(
        to_matchtime(&Value::Object(Map::from_iter(
            BTreeMap::from([
                ("m".into(), ["plip", "ploup"].into()),
                ("t".into(), [1234567].into()),
            ])
            .into_iter()
        )))
        .is_err()
    );
}
}

File diff suppressed because it is too large Load diff

View file

@ -3,182 +3,122 @@ use std::{
error::Error,
path::PathBuf,
sync::{
Arc,
atomic::{AtomicBool, Ordering},
Arc,
},
};
use futures::future::join_all;
use reaction_plugin::shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken};
use chrono::Local;
use tokio::{
select,
signal::unix::{SignalKind, signal},
signal::unix::{signal, SignalKind},
sync::Semaphore,
};
use tracing::{debug, error, info};
use treedb::Database;
use tracing::{debug, info};
use crate::concepts::{Config, now};
use crate::{concepts::Config, treedb::Database};
use filter::FilterManager;
pub use filter::React;
use plugin::Plugins;
use socket::Socket;
pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken};
use socket::socket_manager;
use stream::StreamManager;
#[cfg(test)]
pub use filter::tests;
mod filter;
mod plugin;
mod shutdown;
mod socket;
mod stream;
mod utils;
pub async fn daemon(config_path: PathBuf, socket: PathBuf) -> i32 {
// Load config or quit
let config: &'static Config = Box::leak(Box::new(match Config::from_path(&config_path) {
Ok(config) => config,
Err(err) => {
error!("{err}");
return 1;
}
}));
pub async fn daemon(
config_path: PathBuf,
socket: PathBuf,
) -> Result<(), Box<dyn Error + Send + Sync>> {
let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?));
if !config.start() {
return Err("a start command failed, exiting.".into());
}
// Cancellation Token
let shutdown = ShutdownController::new();
// Cancel when we receive a quit signal
// Semaphore limiting action execution concurrency
let exec_limit = match config.concurrency() {
0 => None,
n => Some(Arc::new(Semaphore::new(n))),
};
// Open Database
let mut db = Database::open(config).await?;
// Filter managers
let now = Local::now();
let mut state = HashMap::new();
let mut stream_managers = Vec::new();
for stream in config.streams().values() {
let mut filter_managers = HashMap::new();
for filter in stream.filters().values() {
let manager =
FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now)?;
filter_managers.insert(filter, manager);
}
state.insert(stream, filter_managers.clone());
stream_managers.push(StreamManager::new(
stream,
filter_managers,
shutdown.token(),
)?);
}
drop(exec_limit);
// Start Stream managers
let mut stream_task_handles = Vec::new();
for stream_manager in stream_managers {
stream_task_handles.push(tokio::spawn(async move { stream_manager.start().await }));
}
// Run database task
let mut db_status_rx = {
let token = shutdown.token();
db.manager(token)
};
// Close streams when we receive a quit signal
let signal_received = Arc::new(AtomicBool::new(false));
if let Err(err) = handle_signals(shutdown.delegate(), signal_received.clone()) {
error!("{err}");
return 1;
}
handle_signals(shutdown.delegate(), signal_received.clone())?;
let mut db = None;
let mut config_started = false;
let mut daemon_err = false;
// Start the real daemon 👹
if let Err(err) = daemon_start(
config,
socket,
shutdown.token(),
&mut db,
&mut config_started,
)
.await
// Run socket task
{
error!("{err}");
daemon_err = true;
let socket = socket.to_owned();
let token = shutdown.token();
tokio::spawn(async move { socket_manager(config, socket, state, token).await });
}
// Release last db's sender
let mut db_status = None;
if let Some(db) = db {
db_status = Some(db.quit());
// Wait for all streams to quit
for task_handle in stream_task_handles {
let _ = task_handle.await;
}
debug!("Asking for all tasks to quit...");
shutdown.ask_shutdown();
debug!("Waiting for all tasks to quit...");
shutdown.wait_all_task_shutdown().await;
shutdown.wait_shutdown().await;
let mut stop_ok = true;
if config_started {
stop_ok = config.stop();
}
let db_status = db_status_rx.try_recv();
if daemon_err || !stop_ok {
return 1;
} else if let Some(mut db_status) = db_status
&& let Ok(Err(err)) = db_status.try_recv()
{
error!("database error: {}", err);
return 1;
let stop_ok = config.stop();
if let Ok(Err(err)) = db_status {
Err(format!("database error: {}", err).into())
} else if !signal_received.load(Ordering::SeqCst) {
error!("quitting because all streams finished");
return 1;
Err("quitting because all streams finished".into())
} else if !stop_ok {
Err("while executing stop command".into())
} else {
return 0;
Ok(())
}
}
async fn daemon_start(
config: &'static Config,
socket: PathBuf,
shutdown: ShutdownToken,
db: &mut Option<Database>,
config_started: &mut bool,
) -> Result<(), Box<dyn Error + Send + Sync>> {
let mut plugins = Plugins::new(config, shutdown.clone()).await?;
// Open Database
let (cancellation, task_tracker) = shutdown.clone().split();
let path = PathBuf::from(config.state_directory.clone());
*db = Some(Database::open(&path, cancellation, task_tracker).await?);
let (state, stream_managers) = {
// Semaphore limiting action execution concurrency
let exec_limit = match config.concurrency {
0 => None,
n => Some(Arc::new(Semaphore::new(n))),
};
// Filter managers
let now = now();
let mut state = HashMap::new();
let mut stream_managers = Vec::new();
for stream in config.streams.values() {
let mut filter_managers = HashMap::new();
for filter in stream.filters.values() {
let manager = FilterManager::new(
filter,
exec_limit.clone(),
shutdown.clone(),
db.as_mut().unwrap(),
&mut plugins,
now,
)
.await?;
filter_managers.insert(filter, manager);
}
state.insert(stream, filter_managers.clone());
stream_managers.push(
StreamManager::new(stream, filter_managers, shutdown.clone(), &mut plugins).await?,
);
}
(state, stream_managers)
};
// Open socket and run task
let socket = Socket::open(socket).await?;
socket.manager(config, state, shutdown.clone());
// all core systems started, we can run start commands
*config_started = true;
if !config.start() {
return Err("a start command failed, exiting.".into());
}
// Finish plugin setup
plugins.start().await?;
plugins.manager();
// Start Stream managers
let stream_task_handles = stream_managers.into_iter().filter_map(|stream_manager| {
let standalone = stream_manager.is_standalone();
let handle = tokio::spawn(async move { stream_manager.start().await });
// Only wait for standalone streams
if standalone { Some(handle) } else { None }
});
// Wait for all streams to quit
join_all(stream_task_handles).await;
Ok(())
}
fn handle_signals(
shutdown: ShutdownDelegate,
signal_received: Arc<AtomicBool>,

View file

@ -1,405 +0,0 @@
use std::{
collections::{BTreeMap, BTreeSet},
fmt::Display,
io,
ops::{Deref, DerefMut},
process::ExitStatus,
time::Duration,
};
use futures::{StreamExt, future::join_all};
use reaction_plugin::{
ActionConfig, ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamConfig, StreamImpl,
};
use remoc::Connect;
use tokio::{
process::{Child, ChildStderr},
time::timeout,
};
use tracing::{error, info};
use crate::{
concepts::{Action, Config, Plugin, Stream},
daemon::{ShutdownToken, stream::reader_to_stream, utils::kill_child},
};
/// Owns one spawned plugin process and the RPC client used to talk to it.
pub struct PluginManager {
    /// Handle to the spawned plugin child process.
    child: Child,
    /// Token used to react to a daemon-wide shutdown request.
    shutdown: ShutdownToken,
    /// Static configuration of this plugin.
    plugin: &'static Plugin,
    /// RPC client to the plugin (received during the initial handshake).
    plugin_info: PluginInfoClient,
    /// Stream type names this plugin exposes (from its manifest).
    streams: BTreeSet<String>,
    /// Action type names this plugin exposes (from its manifest).
    actions: BTreeSet<String>,
}
// Forward method calls directly to the plugin's RPC client, so callers can
// invoke `PluginInfoClient` methods on a `PluginManager`.
impl Deref for PluginManager {
    type Target = PluginInfoClient;
    fn deref(&self) -> &Self::Target {
        &self.plugin_info
    }
}
impl DerefMut for PluginManager {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.plugin_info
    }
}
impl PluginManager {
    /// Launches the plugin process, wires up stderr logging, performs the
    /// remoc handshake over the child's stdin/stdout, and validates the
    /// plugin's manifest version against ours.
    ///
    /// Returns an error string (with a systemd/run0 hint when relevant) if
    /// any step of the setup fails.
    async fn new(
        plugin: &'static Plugin,
        state_directory: &str,
        shutdown: ShutdownToken,
    ) -> Result<Self, String> {
        let mut child = plugin
            .launch(state_directory)
            .await
            .map_err(|err| systemd_error(plugin, "could not launch plugin", err))?;
        {
            // Forward the plugin's stderr lines into our own log output.
            let stderr = child.stderr.take().unwrap();
            tokio::spawn(async move { handle_stderr(stderr, plugin.name.clone()).await });
        }
        // The RPC channel rides on the child's stdio pipes.
        let stdin = child.stdin.take().unwrap();
        let stdout = child.stdout.take().unwrap();
        let (conn, _tx, mut rx): (
            _,
            remoc::rch::base::Sender<()>,
            remoc::rch::base::Receiver<PluginInfoClient>,
        ) = Connect::io(remoc::Cfg::default(), stdout, stdin)
            .await
            .map_err(|err| {
                systemd_error(plugin, "could not init communication with plugin", err)
            })?;
        // The connection future must be polled for the channel to make progress.
        tokio::spawn(conn);
        // First message from the plugin is its RPC client handle.
        let mut plugin_info = rx
            .recv()
            .await
            .map_err(|err| format!("could not retrieve initial information from plugin: {err}"))?
            .ok_or("could not retrieve initial information from plugin: no data")?;
        let manifest = plugin_info
            .manifest()
            .await
            .map_err(|err| format!("error while getting plugin {} manifest: {err}", plugin.name))?;
        // Reject plugins built against an incompatible protocol version.
        let my_hello = Hello::new();
        if let Err(hint) = Hello::is_compatible(&my_hello, &manifest.hello) {
            return Err(format!(
                "reaction can't handle plugin {} with incompatible version {}.{}: current version: {}.{}. {}",
                plugin.name,
                manifest.hello.version_major,
                manifest.hello.version_minor,
                my_hello.version_major,
                my_hello.version_minor,
                hint
            ));
        }
        Ok(Self {
            child,
            shutdown,
            plugin,
            plugin_info,
            streams: manifest.streams,
            actions: manifest.actions,
        })
    }
    /// Supervises the plugin process until it exits on its own or the daemon
    /// shuts down. On shutdown, politely asks the plugin to close and kills
    /// it if it does not comply within the grace period.
    async fn handle_child(mut self) {
        // Seconds the plugin gets to answer the close request before we kill it.
        const PLUGIN_STOP_GRACE_TIME: u64 = 15;
        // wait either for the child process to exit on its own or for the shutdown signal
        tokio::select! {
            status = self.child.wait() => {
                self.print_exit(status);
                return;
            }
            _ = self.shutdown.wait() => {}
        }
        match timeout(
            Duration::from_secs(PLUGIN_STOP_GRACE_TIME),
            self.plugin_info.close(),
        )
        .await
        {
            Ok(Ok(())) => (),
            Ok(Err(err)) => {
                error!("plugin {}: {err}", self.plugin.name);
            }
            // got timeout
            Err(_) => {
                error!(
                    "plugin {} did not respond to close request in time, killing",
                    self.plugin.name
                );
                kill_child(self.child, format!("plugin {}", self.plugin.name), 5).await;
            }
        }
    }
    /// Logs how the plugin process ended (exit code, signal, or wait error).
    fn print_exit(&self, status: io::Result<ExitStatus>) {
        match status {
            Ok(status) => match status.code() {
                Some(code) => {
                    error!(
                        "plugin {}: process exited. exit code: {}",
                        self.plugin.name, code
                    );
                }
                // No exit code available (e.g. terminated without one).
                None => {
                    error!("plugin {}: process exited.", self.plugin.name);
                }
            },
            Err(err) => {
                error!("plugin {}: process exited. {err}", self.plugin.name);
            }
        }
    }
}
/// Formats a plugin setup error, appending a run0/systemd troubleshooting
/// hint when the plugin is configured to be launched through systemd.
fn systemd_error(plugin: &Plugin, message: &str, err: impl Display) -> String {
    // Plain error when systemd launching is not involved.
    if !plugin.systemd {
        return format!("{message}: {err}");
    }
    // systemd launching goes through run0, which is a common failure point:
    // point the user at a quick sanity check and the relevant config knob.
    format!(
        "{message}: {err}. \
        `plugins.{0}.systemd` is set to true, so this may be an issue with systemd's run0. \
        please make sure `sudo run0 ls /` returns the same thing as `sudo ls /` as a test. \
        if run0 can't be found or doesn't output anything, set `plugins.{0}.systemd` to false.",
        plugin.name,
    )
}
async fn handle_stderr(stderr: ChildStderr, plugin_name: String) {
// read lines until shutdown
let lines = reader_to_stream(stderr);
tokio::pin!(lines);
loop {
match lines.next().await {
Some(Ok(line)) => {
// sad: I can't factorize this because the tracing::event! macro
// requires its log level to be a constant.
if line.starts_with("DEBUG ") {
tracing::debug!("plugin {plugin_name}: {}", line.split_at(6).1)
} else if line.starts_with("INFO ") {
tracing::info!("plugin {plugin_name}: {}", line.split_at(5).1)
} else if line.starts_with("WARN ") {
tracing::warn!("plugin {plugin_name}: {}", line.split_at(5).1)
} else if line.starts_with("ERROR ") {
tracing::error!("plugin {plugin_name}: {}", line.split_at(6).1)
} else {
// If there is no log level, we suppose it's an error (may be a panic or something)
tracing::error!("plugin {plugin_name}: {}", line)
}
}
Some(Err(err)) => {
tracing::error!("while trying to read plugin {plugin_name} stderr: {err}");
break;
}
None => break,
}
}
}
/// Registry of all loaded plugins plus the routing tables that map stream
/// and action type names to the plugin providing them, and stream/action
/// configuration names to the implementation handles returned by plugins.
#[derive(Default)]
pub struct Plugins {
    /// Loaded plugins
    plugins: BTreeMap<String, PluginManager>,
    /// stream_type to plugin name
    stream_to_plugin: BTreeMap<String, String>,
    /// action_type to plugin name
    action_to_plugin: BTreeMap<String, String>,
    /// plugin name to config list
    plugin_to_confs: BTreeMap<String, (Vec<&'static Stream>, Vec<&'static Action>)>,
    /// stream name to impl
    stream_to_impl: BTreeMap<String, StreamImpl>,
    /// action name to impl
    action_to_impl: BTreeMap<String, ActionImpl>,
}
impl Plugins {
    /// Launches every configured plugin, collects which stream/action types
    /// each one exposes, then pushes the matching stream/action configs to
    /// each plugin and stores the implementation handles they return.
    pub async fn new(config: &'static Config, shutdown: ShutdownToken) -> Result<Self, String> {
        let mut this = Self::default();
        for plugin in config.plugins.values() {
            let name = plugin.name.clone();
            this.load_plugin(plugin, &config.state_directory, shutdown.clone())
                .await
                // fix: the message previously carried a stray trailing ']'
                .map_err(|err| format!("plugin {name}: {err}"))?;
        }
        this.aggregate_plugin_configs(config)?;
        this.load_plugin_configs().await?;
        Ok(this)
    }
    /// Spawns one plugin and registers the stream/action type names it
    /// exposes, rejecting duplicates already claimed by another plugin.
    async fn load_plugin(
        &mut self,
        plugin: &'static Plugin,
        state_directory: &str,
        shutdown: ShutdownToken,
    ) -> Result<(), String> {
        let name = plugin.name.clone();
        let manager = PluginManager::new(plugin, state_directory, shutdown).await?;
        for stream in &manager.streams {
            // `insert` returns the previous owner: the shadowed `name` here is
            // the plugin that already registered this stream type.
            if let Some(name) = self.stream_to_plugin.insert(stream.clone(), name.clone()) {
                return Err(format!(
                    "plugin {name} already exposed a stream with type name '{stream}'",
                ));
            }
        }
        for action in &manager.actions {
            if let Some(name) = self.action_to_plugin.insert(action.clone(), name.clone()) {
                return Err(format!(
                    "plugin {name} already exposed an action with type name '{action}'",
                ));
            }
        }
        self.plugins.insert(name, manager);
        Ok(())
    }
    /// Walks the configuration and groups plugin-backed streams and actions
    /// by the plugin that provides their type, erroring (with a listing of
    /// what each plugin exposes) when no plugin provides a requested type.
    fn aggregate_plugin_configs(&mut self, config: &'static Config) -> Result<(), String> {
        for stream in config.streams.values() {
            if stream.is_plugin()
                && let Some(stream_type) = &stream.stream_type
            {
                let plugin_name = self.stream_to_plugin.get(stream_type).ok_or_else(|| {
                    display_plugin_exposed_types(&self.stream_to_plugin, "stream", stream_type)
                })?;
                let (streams, _) = self
                    .plugin_to_confs
                    .entry(plugin_name.to_owned())
                    .or_default();
                streams.push(stream);
            }
            for action in stream
                .filters
                .values()
                .flat_map(|filter| filter.actions.values())
            {
                if action.is_plugin()
                    && let Some(action_type) = &action.action_type
                {
                    let plugin_name = self.action_to_plugin.get(action_type).ok_or_else(|| {
                        display_plugin_exposed_types(&self.action_to_plugin, "action", action_type)
                    })?;
                    let (_, actions) = self
                        .plugin_to_confs
                        .entry(plugin_name.to_owned())
                        .or_default();
                    actions.push(action);
                }
            }
        }
        Ok(())
    }
    /// Sends each plugin its aggregated stream/action configs and records the
    /// returned implementation handles, keyed by stream/action name.
    async fn load_plugin_configs(&mut self) -> Result<(), String> {
        let plugin_to_confs = std::mem::take(&mut self.plugin_to_confs);
        for (plugin_name, (streams, actions)) in plugin_to_confs {
            let plugin = self
                .plugins
                .get_mut(&plugin_name)
                .ok_or_else(|| format!("could not find plugin {plugin_name}. this is a bug!"))?;
            // Keep the names aside: the impls come back in the same order as
            // the configs we send, so we zip them afterwards.
            let stream_names: Vec<String> =
                streams.iter().map(|stream| stream.name.clone()).collect();
            let action_names: Vec<String> =
                actions.iter().map(|action| action.to_string()).collect();
            let (stream_impls, action_impls) = plugin
                .load_config(
                    streams
                        .into_iter()
                        .map(Stream::to_stream_config)
                        .collect::<Result<Vec<StreamConfig>, String>>()?,
                    actions
                        .into_iter()
                        .map(Action::to_action_config)
                        .collect::<Result<Vec<ActionConfig>, String>>()?,
                )
                .await
                .map_err(|err| {
                    format!("plugin {plugin_name} is not happy with your config: {err}")
                })?;
            self.stream_to_impl
                .extend(stream_names.into_iter().zip(stream_impls));
            self.action_to_impl
                .extend(action_names.into_iter().zip(action_impls));
        }
        Ok(())
    }
    /// Hands out (and removes) the implementation for a configured stream.
    pub fn get_stream_impl(&mut self, stream_name: String) -> Option<StreamImpl> {
        self.stream_to_impl.remove(&stream_name)
    }
    /// Hands out (and removes) the implementation for a configured action.
    pub fn get_action_impl(&mut self, action_fullname: String) -> Option<ActionImpl> {
        self.action_to_impl.remove(&action_fullname)
    }
    /// Calls `start` on every plugin concurrently; fails with the first
    /// plugin error (newlines flattened so the message stays one line).
    pub async fn start(&mut self) -> Result<(), String> {
        // Finish setup of all plugins
        join_all(
            self.plugins
                .values_mut()
                .map(|plugin_manager| plugin_manager.start()),
        )
        .await
        // Convert Vec<Result<Result>> into Result
        .into_iter()
        .zip(self.plugins.values())
        .try_for_each(|(result, plugin_manager)| {
            result.map_err(|err| {
                format!(
                    "plugin {}: {}",
                    plugin_manager.plugin.name,
                    err.to_string().replace('\n', " ")
                )
            })
        })
    }
    /// Detaches a supervision task per plugin; consumes the registry.
    pub fn manager(self) {
        for plugin in self.plugins.into_values() {
            tokio::spawn(async move {
                plugin.handle_child().await;
            });
        }
    }
}
/// Logs, per plugin, the list of exposed type names of the given kind
/// (`name` is "stream" or "action" at the call sites), then returns an
/// error message for the unknown type `invalid`.
fn display_plugin_exposed_types(
    type_to_plugin: &BTreeMap<String, String>,
    name: &str,
    invalid: &str,
) -> String {
    // Invert the map: plugin -> every type name it provides.
    let mut plugin_to_types: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
    for (type_, plugin) in type_to_plugin.iter() {
        let types = plugin_to_types.entry(plugin.as_str()).or_default();
        types.push(type_.as_str());
    }
    // Help the user find the right type name in the logs.
    for (plugin, types) in plugin_to_types {
        let listing = types.join("', '");
        info!("Plugin {plugin} exposes those {name} types: '{listing}'");
    }
    format!("No plugin provides the {name} type: {invalid}")
}

91
src/daemon/shutdown.rs Normal file
View file

@ -0,0 +1,91 @@
use tokio::sync::mpsc;
use tokio_util::sync::{CancellationToken, WaitForCancellationFuture};
// Thanks to this article for inspiration
// https://www.wcygan.io/post/tokio-graceful-shutdown/
// TaskTracker now exists, but I don't know what I'd gain from using it instead:
// https://docs.rs/tokio-util/0.7.13/tokio_util/task/task_tracker/struct.TaskTracker.html
/// Permits to keep track of ongoing tasks and ask them to shutdown.
pub struct ShutdownController {
    /// Cancelled to broadcast the shutdown request to every [`ShutdownToken`].
    shutdown_notifyer: CancellationToken,
    /// Never used to send: a clone lives inside each [`ShutdownToken`] so the
    /// channel only closes once every token has been dropped.
    task_tracker: mpsc::Sender<()>,
    /// Resolves (recv returns `None`) once all sender clones are dropped,
    /// i.e. once every tracked task has finished.
    task_waiter: mpsc::Receiver<()>,
}
impl ShutdownController {
    /// Creates a controller with no outstanding tokens.
    // `new` takes no config, but a `Default` impl is deliberately not provided.
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        // Capacity 1 is enough: the channel is only used for its close
        // detection, nothing is ever sent through it.
        let (task_tracker, task_waiter) = mpsc::channel(1);
        Self {
            shutdown_notifyer: CancellationToken::new(),
            task_tracker,
            task_waiter,
        }
    }
    /// Ask for all tasks to quit
    pub fn ask_shutdown(&self) {
        self.shutdown_notifyer.cancel();
    }
    /// Wait for all tasks to quit.
    /// This task may return even without having called [`ShutdownController::ask_shutdown`]
    /// first, if all tasks quit by themselves.
    pub async fn wait_shutdown(mut self) {
        // Drop our own sender so only the tokens' clones keep the channel
        // open; `recv` then returns `None` once the last token is dropped.
        drop(self.task_tracker);
        self.task_waiter.recv().await;
    }
    /// Returns a new shutdown token, to be held by a task.
    pub fn token(&self) -> ShutdownToken {
        ShutdownToken::new(self.shutdown_notifyer.clone(), self.task_tracker.clone())
    }
    /// Returns a [`ShutdownDelegate`], which is able to ask for shutdown,
    /// without counting as a task that needs to be awaited.
    pub fn delegate(&self) -> ShutdownDelegate {
        ShutdownDelegate(self.shutdown_notifyer.clone())
    }
}
/// Permits to ask for shutdown, without counting as a task that needs to be awaited.
// Holds only the cancellation side, no tracker sender: dropping a delegate
// never delays [`ShutdownController::wait_shutdown`].
pub struct ShutdownDelegate(CancellationToken);
impl ShutdownDelegate {
    /// Ask for all tasks to quit
    pub fn ask_shutdown(&self) {
        self.0.cancel();
    }
}
/// Created by a [`ShutdownController`].
/// Serves two purposes:
///
/// - Wait for a shutdown request to happen.
/// - Keep track of the current task. While this token is held,
///   the [`ShutdownController::wait_shutdown`] will block.
#[derive(Clone)]
pub struct ShutdownToken {
    /// Shared cancellation flag; cancelled when shutdown is requested.
    shutdown_notifyer: CancellationToken,
    /// Held only for its drop semantics: while any clone is alive, the
    /// controller's receiver stays open and `wait_shutdown` blocks.
    _task_tracker: mpsc::Sender<()>,
}
impl ShutdownToken {
    fn new(shutdown_notifyer: CancellationToken, _task_tracker: mpsc::Sender<()>) -> Self {
        Self {
            shutdown_notifyer,
            _task_tracker,
        }
    }
    /// Returns a future that will resolve only when a shutdown request happened.
    pub fn wait(&self) -> WaitForCancellationFuture<'_> {
        self.shutdown_notifyer.cancelled()
    }
    /// Ask for all tasks to quit
    pub fn ask_shutdown(&self) {
        self.shutdown_notifyer.cancel();
    }
}

View file

@ -1,13 +1,15 @@
use std::{
collections::{BTreeMap, HashMap},
fs, io,
path::PathBuf,
process::exit,
sync::Arc,
};
use chrono::Local;
use futures::{SinkExt, StreamExt};
use reaction_plugin::shutdown::ShutdownToken;
use regex::Regex;
use tokio::{fs, net::UnixListener};
use tokio::net::UnixListener;
use tokio_util::{
bytes::Bytes,
codec::{Framed, LengthDelimitedCodec},
@ -15,35 +17,36 @@ use tokio_util::{
use tracing::{error, warn};
use crate::{
concepts::{Config, Filter, Pattern, Stream, now},
concepts::{Config, Filter, Pattern, Stream},
protocol::{ClientRequest, ClientStatus, DaemonResponse, Order},
};
use super::filter::FilterManager;
use super::{filter::FilterManager, shutdown::ShutdownToken};
async fn open_socket(path: PathBuf) -> Result<UnixListener, String> {
macro_rules! err_str {
($expression:expr) => {
$expression.map_err(|err| err.to_string())
};
}
macro_rules! err_str {
($expression:expr) => {
$expression.map_err(|err| err.to_string())
};
}
fn open_socket(path: PathBuf) -> Result<UnixListener, String> {
// First create all directories to the file
let dir = path
.parent()
.ok_or(format!("socket {path:?} has no parent directory"))?;
err_str!(fs::create_dir_all(dir).await)?;
err_str!(fs::create_dir_all(dir))?;
// Test if file exists
match fs::metadata(&path).await {
match fs::metadata(&path) {
Ok(meta) => {
if meta.file_type().is_dir() {
Err(format!("socket {path:?} is already a directory"))
} else {
warn!("socket {path:?} already exists: is the daemon already running? deleting.");
err_str!(fs::remove_file(&path).await)
err_str!(fs::remove_file(&path))
}
}
Err(err) => err_str!(match err.kind() {
std::io::ErrorKind::NotFound => Ok(()),
io::ErrorKind::NotFound => Ok(()),
_ => Err(err),
}),
}?;
@ -51,7 +54,7 @@ async fn open_socket(path: PathBuf) -> Result<UnixListener, String> {
err_str!(UnixListener::bind(path))
}
async fn handle_trigger_order(
fn handle_trigger_order(
stream_name: Option<String>,
filter_name: Option<String>,
patterns: BTreeMap<Arc<Pattern>, String>,
@ -78,7 +81,7 @@ async fn handle_trigger_order(
// Check stream existance
let filters = match shared_state
.iter()
.find(|(stream, _)| stream_name == stream.name)
.find(|(stream, _)| stream_name == stream.name())
{
Some((_, filters)) => filters,
None => {
@ -89,7 +92,7 @@ async fn handle_trigger_order(
// Check filter existance
let filter_manager = match filters
.iter()
.find(|(filter, _)| filter_name == filter.name)
.find(|(filter, _)| filter_name == filter.name())
{
Some((_, filter)) => filter,
None => {
@ -99,53 +102,60 @@ async fn handle_trigger_order(
}
};
match filter_manager.handle_trigger(patterns, now()).await {
let now = Local::now();
match filter_manager.handle_trigger(patterns, now) {
Ok(()) => DaemonResponse::Ok(()),
Err(err) => DaemonResponse::Err(err),
}
}
async fn handle_show_or_flush_order(
fn handle_show_or_flush_order(
stream_name: Option<String>,
filter_name: Option<String>,
patterns: BTreeMap<Arc<Pattern>, Regex>,
order: Order,
shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>,
) -> DaemonResponse {
let now = now();
let iter = shared_state
let now = Local::now();
let cs: ClientStatus = shared_state
.iter()
// stream filtering
.filter(|(stream, _)| {
stream_name.is_none() || stream_name.clone().is_some_and(|name| name == stream.name)
stream_name.is_none()
|| stream_name
.clone()
.is_some_and(|name| name == stream.name())
})
.fold(BTreeMap::new(), |mut acc, (stream, filter_manager)| {
let inner_map = filter_manager
.iter()
// filter filtering
.filter(|(filter, _)| {
filter_name.is_none()
|| filter_name
.clone()
.is_some_and(|name| name == filter.name())
})
// pattern filtering
.filter(|(filter, _)| {
patterns
.iter()
.all(|(pattern, _)| filter.patterns().get(pattern).is_some())
})
.map(|(filter, manager)| {
(
filter.name().to_owned(),
manager.handle_order(&patterns, order, now),
)
})
.collect();
acc.insert(stream.name().to_owned(), inner_map);
acc
});
let mut cs = ClientStatus::new();
for (stream, filter_manager) in iter {
let iter = filter_manager
.iter()
// filter filtering
.filter(|(filter, _)| {
filter_name.is_none() || filter_name.clone().is_some_and(|name| name == filter.name)
})
// pattern filtering
.filter(|(filter, _)| {
patterns
.iter()
.all(|(pattern, _)| filter.patterns.get(pattern).is_some())
});
let mut inner_map = BTreeMap::new();
for (filter, manager) in iter {
inner_map.insert(
filter.name.to_owned(),
manager.handle_order(&patterns, order, now).await,
);
}
cs.insert(stream.name.to_owned(), inner_map);
}
DaemonResponse::Order(cs)
}
async fn answer_order(
fn answer_order(
config: &'static Config,
shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>,
options: ClientRequest,
@ -166,7 +176,7 @@ async fn answer_order(
.map(|(name, reg)| {
// lookup pattern in config.patterns
config
.patterns
.patterns()
.iter()
// retrieve or Err
.find(|(pattern_name, _)| &name == *pattern_name)
@ -180,7 +190,7 @@ async fn answer_order(
};
if let Order::Trigger = options.order {
handle_trigger_order(stream_name, filter_name, patterns, shared_state).await
handle_trigger_order(stream_name, filter_name, patterns, shared_state)
} else {
let patterns = match patterns
.into_iter()
@ -188,7 +198,7 @@ async fn answer_order(
Ok(reg) => Ok((pattern, reg)),
Err(err) => Err(format!(
"pattern '{}' regex doesn't compile: {err}",
pattern.name
pattern.name()
)),
})
.collect::<Result<BTreeMap<Arc<Pattern>, Regex>, String>>()
@ -204,7 +214,6 @@ async fn answer_order(
options.order,
shared_state,
)
.await
}
}
@ -220,67 +229,57 @@ macro_rules! or_next {
};
}
pub struct Socket {
path: PathBuf,
socket: UnixListener,
}
pub async fn socket_manager(
config: &'static Config,
socket: PathBuf,
shared_state: HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>,
shutdown: ShutdownToken,
) {
let listener = match open_socket(socket.clone()) {
Ok(l) => l,
Err(err) => {
error!("while creating communication socket: {err}");
exit(1);
}
};
impl Socket {
pub async fn open(socket: PathBuf) -> Result<Self, String> {
Ok(Socket {
socket: open_socket(socket.clone())
.await
.map_err(|err| format!("while creating communication socket: {err}"))?,
path: socket,
})
}
pub fn manager(
self,
config: &'static Config,
shared_state: HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>,
shutdown: ShutdownToken,
) {
tokio::spawn(async move {
loop {
tokio::select! {
_ = shutdown.wait() => break,
try_conn = self.socket.accept() => {
match try_conn {
Ok((conn, _)) => {
let mut transport = Framed::new(conn, LengthDelimitedCodec::new());
// Decode
let received = transport.next().await;
let encoded_request = match received {
Some(r) => or_next!("while reading request", r),
None => {
error!("failed to answer client: client sent no request");
continue;
}
};
let request = or_next!(
"failed to decode request",
serde_json::from_slice(&encoded_request)
);
// Process
let response = answer_order(config, &shared_state, request).await;
// Encode
let encoded_response =
or_next!("failed to serialize response", serde_json::to_string::<DaemonResponse>(&response));
or_next!(
"failed to send response:",
transport.send(Bytes::from(encoded_response)).await
);
loop {
tokio::select! {
_ = shutdown.wait() => break,
try_conn = listener.accept() => {
match try_conn {
Ok((conn, _)) => {
let mut transport = Framed::new(conn, LengthDelimitedCodec::new());
// Decode
let received = transport.next().await;
let encoded_request = match received {
Some(r) => or_next!("while reading request", r),
None => {
error!("failed to answer client: client sent no request");
continue;
}
Err(err) => error!("failed to open connection from cli: {err}"),
}
};
let request = or_next!(
"failed to decode request",
serde_json::from_slice(&encoded_request)
);
// Process
let response = answer_order(config, &shared_state, request);
// Encode
let encoded_response =
or_next!("failed to serialize response", serde_json::to_string::<DaemonResponse>(&response));
or_next!(
"failed to send response:",
transport.send(Bytes::from(encoded_response)).await
);
}
Err(err) => error!("failed to open connection from cli: {err}"),
}
}
}
}
if let Err(err) = fs::remove_file(self.path).await {
error!("failed to remove socket: {}", err);
}
});
if let Err(err) = fs::remove_file(socket) {
error!("failed to remove socket: {}", err);
}
}

View file

@ -1,21 +1,26 @@
use std::{
collections::{BTreeSet, HashMap},
collections::{BTreeMap, BTreeSet, HashMap},
process::Stdio,
time::Duration,
};
use futures::{FutureExt, Stream as AsyncStream, StreamExt, future::join_all};
use reaction_plugin::{StreamImpl, shutdown::ShutdownToken};
use chrono::Local;
use futures::{FutureExt, Stream as AsyncStream, StreamExt};
use regex::RegexSet;
use tokio::{
io::{AsyncBufReadExt, BufReader},
process::{Child, ChildStderr, ChildStdout, Command},
time::sleep,
};
use tracing::{debug, error, info};
use tracing::{error, info, warn};
use crate::{
concepts::{Filter, Stream, Time, now},
daemon::{filter::FilterManager, plugin::Plugins, utils::kill_child},
concepts::{Filter, Stream},
daemon::filter::FilterManager,
};
use super::shutdown::ShutdownToken;
/// Converts bytes to line string, discarding invalid utf8 sequences and newlines at the end
fn to_line(data: &[u8]) -> String {
String::from_utf8_lossy(data)
@ -23,7 +28,7 @@ fn to_line(data: &[u8]) -> String {
.replace(std::char::REPLACEMENT_CHARACTER, "")
}
pub fn reader_to_stream(
fn reader_to_stream(
reader: impl tokio::io::AsyncRead + Unpin,
) -> impl AsyncStream<Item = Result<String, std::io::Error>> {
let buf_reader = BufReader::new(reader);
@ -44,119 +49,40 @@ pub fn reader_to_stream(
}
pub struct StreamManager {
compiled_regex_set: RegexSet,
regex_index_to_filter_manager: Vec<FilterManager>,
stream: &'static Stream,
stream_plugin: Option<StreamImpl>,
shutdown: ShutdownToken,
}
impl StreamManager {
pub async fn new(
pub fn new(
stream: &'static Stream,
filter_managers: HashMap<&'static Filter, FilterManager>,
shutdown: ShutdownToken,
plugins: &mut Plugins,
) -> Result<Self, String> {
let stream_plugin = if stream.is_plugin() {
Some(
plugins
.get_stream_impl(stream.name.clone())
.ok_or_else(|| {
format!(
"stream {} doesn't load a plugin. this is a bug!",
stream.name
)
})?,
)
} else {
None
};
let regex_index_to_filter_manager = stream
.regex_index_to_filter_name
) -> Result<Self, regex::Error> {
let all_regexes: BTreeMap<_, _> = filter_managers
.iter()
.map(|filter_name| {
filter_managers
.flat_map(|(filter, filter_manager)| {
filter
.regex()
.iter()
.find(|(filter, _)| filter_name == &filter.name)
.unwrap()
.1
.clone()
.map(|regex| (regex, filter_manager.clone()))
})
.collect();
debug!("successfully initialized stream {}", stream.name);
Ok(StreamManager {
regex_index_to_filter_manager,
compiled_regex_set: RegexSet::new(all_regexes.keys())?,
regex_index_to_filter_manager: all_regexes.into_values().collect(),
stream,
stream_plugin,
shutdown,
})
}
pub fn is_standalone(&self) -> bool {
match &self.stream_plugin {
Some(plugin) => plugin.standalone,
None => true,
}
}
pub async fn start(mut self) {
// First start FilterManagers persisted actions
let now = now();
join_all(
self.regex_index_to_filter_manager
.iter()
.map(|filter_manager| filter_manager.start(now)),
)
.await;
// Then start stream
info!("{}: start {:?}", self.stream.name, self.stream.cmd);
if self.stream_plugin.is_some() {
self.start_plugin().await
} else {
self.start_cmd().await
}
}
async fn start_plugin(&mut self) {
let mut plugin = self.stream_plugin.take().unwrap();
loop {
match plugin.stream.recv().await {
Ok(Some((line, time))) => {
self.handle_line(line, time.into()).await;
}
Err(err) => {
if err.is_final() {
error!(
"error reading from plugin stream {}: {}",
self.stream.name, err
);
return;
} else {
error!(
"temporary error reading from plugin stream {}: {}",
self.stream.name, err
);
}
}
Ok(None) => {
if !self.shutdown.is_shutdown() {
error!("stream {} has exited", self.stream.name);
}
return;
}
}
}
}
async fn start_cmd(&self) {
let mut child = match Command::new(&self.stream.cmd[0])
.args(&self.stream.cmd[1..])
pub async fn start(self) {
info!("{}: start {:?}", self.stream.name(), self.stream.cmd());
let mut child = match Command::new(&self.stream.cmd()[0])
.args(&self.stream.cmd()[1..])
.stdin(Stdio::null())
.stderr(Stdio::piped())
.stdout(Stdio::piped())
@ -164,7 +90,11 @@ impl StreamManager {
{
Ok(child) => child,
Err(err) => {
error!("could not execute stream {} cmd: {}", self.stream.name, err);
error!(
"could not execute stream {} cmd: {}",
self.stream.name(),
err
);
return;
}
};
@ -184,16 +114,55 @@ impl StreamManager {
}
async fn handle_child(&self, mut child: Child) {
const STREAM_PROCESS_GRACE_TIME_SEC: u64 = 15;
const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5;
// wait either for the child process to exit on its own or for the shutdown signal
futures::select! {
_ = child.wait().fuse() => {
error!("stream {} exited: its command returned.", self.stream.name);
error!("stream {} exited: its command returned.", self.stream.name());
return;
}
_ = self.shutdown.wait().fuse() => {}
}
kill_child(child, format!("stream {}", self.stream.name), 15).await;
// first, try to ask nicely the child process to exit
if let Some(pid) = child.id() {
let pid = nix::unistd::Pid::from_raw(pid as i32);
// the most likely error is that the process does not exist anymore
// but we still need to reclaim it with Child::wait
let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM);
futures::select! {
_ = child.wait().fuse() => {
return;
},
_ = sleep(Duration::from_secs(STREAM_PROCESS_GRACE_TIME_SEC)).fuse() => {},
}
} else {
warn!(
"could not get PID of child process for stream {}",
self.stream.name()
);
// still try to use tokio API to kill and reclaim the child process
}
// if that fails, or we cannot get the underlying PID, terminate the process.
// NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a
// syscall to a resource no-longer available (a notorious example is a read on a disconnected
// NFS share)
// as before, the only expected error is that the child process already terminated
// but we still need to reclaim it if that's the case.
let _ = child.start_kill();
futures::select! {
_ = child.wait().fuse() => {}
_ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => {
error!("child process of stream {} did not terminate", self.stream.name());
}
}
}
async fn handle_io(&self, child_stdout: ChildStdout, child_stderr: ChildStderr) {
@ -206,13 +175,16 @@ impl StreamManager {
loop {
match lines.next().await {
Some(Ok(line)) => {
let now = now();
self.handle_line(line, now).await;
let now = Local::now();
for manager in self.matching_filters(&line) {
manager.handle_line(&line, now);
}
}
Some(Err(err)) => {
error!(
"impossible to read output from stream {}: {}",
self.stream.name, err
self.stream.name(),
err
);
return;
}
@ -223,14 +195,8 @@ impl StreamManager {
}
}
async fn handle_line(&self, line: String, time: Time) {
for manager in self.matching_filters(&line) {
manager.handle_line(&line, time).await;
}
}
fn matching_filters(&self, line: &str) -> BTreeSet<&FilterManager> {
let matches = self.stream.compiled_regex_set.matches(line);
let matches = self.compiled_regex_set.matches(line);
matches
.into_iter()
.map(|match_| &self.regex_index_to_filter_manager[match_])

View file

@ -1,51 +0,0 @@
use std::time::Duration;
use tokio::{process::Child, time::timeout};
use tracing::{error, warn};
pub async fn kill_child(mut child: Child, context: String, grace_time_sec: u64) {
const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5;
// first, try to ask nicely the child process to exit
if let Some(pid) = child.id() {
let pid = nix::unistd::Pid::from_raw(pid as i32);
// the most likely error is that the process does not exist anymore
// but we still need to reclaim it with Child::wait
let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM);
if let Ok(_) = timeout(Duration::from_secs(grace_time_sec), child.wait()).await {
return;
}
} else {
warn!("could not get PID of child process for {context}");
// still try to use tokio API to kill and reclaim the child process
}
// if that fails, or we cannot get the underlying PID, terminate the process.
// NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a
// syscall to a resource no-longer available (a notorious example is a read on a disconnected
// NFS share)
// as before, the only expected error is that the child process already terminated
// but we still need to reclaim it if that's the case.
warn!("process for {context} didn't exit {grace_time_sec}s after SIGTERM, sending SIGKILL");
let _ = child.start_kill();
match timeout(
Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC),
child.wait(),
)
.await
{
Ok(_) => {}
Err(_) => match child.id() {
Some(id) => {
error!("child process of {context} did not terminate. PID: {id}");
}
None => {
error!("child process of {context} did not terminate");
}
},
}
}

View file

@ -1,4 +1,10 @@
#![warn(clippy::panic, clippy::todo, clippy::unimplemented, unsafe_code)]
#![warn(
clippy::panic,
clippy::todo,
clippy::unimplemented,
clippy::unwrap_used,
unsafe_code
)]
#![allow(clippy::upper_case_acronyms, clippy::mutable_key_type)]
// Allow unwrap in tests
#![cfg_attr(test, allow(clippy::unwrap_used))]
@ -9,3 +15,4 @@ pub mod concepts;
pub mod daemon;
pub mod protocol;
pub mod tests;
pub mod treedb;

View file

@ -7,6 +7,7 @@ use reaction::{
daemon::daemon,
protocol::Order,
};
use tracing::{error, Level};
#[tokio::main]
async fn main() {
@ -27,64 +28,68 @@ async fn main() {
let cli = Cli::parse();
if let SubCommand::Start {
loglevel,
config,
socket,
} = cli.command
{
let (is_daemon, level) = if let SubCommand::Start { loglevel, .. } = cli.command {
(true, loglevel)
} else {
(false, Level::DEBUG)
};
if is_daemon {
// Set log level
if let Err(err) = tracing_subscriber::fmt::fmt()
.without_time()
.with_target(false)
.with_ansi(std::io::stdout().is_terminal())
.with_max_level(loglevel)
.with_max_level(level)
// .with_max_level(Level::TRACE)
.try_init()
{
eprintln!("ERROR could not initialize logging: {err}");
exit(1);
}
exit(daemon(config, socket).await);
} else {
let result = match cli.command {
SubCommand::Show {
socket,
format,
limit,
patterns,
} => request(socket, format, limit, patterns, Order::Show).await,
SubCommand::Flush {
socket,
format,
limit,
patterns,
} => request(socket, format, limit, patterns, Order::Flush).await,
SubCommand::Trigger {
socket,
limit,
patterns,
} => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await,
SubCommand::TestRegex {
config,
regex,
line,
} => test_regex(config, regex, line),
SubCommand::TestConfig {
config,
format,
verbose,
} => test_config(config, format, verbose),
// Can't be daemon
_ => Ok(()),
};
match result {
Ok(()) => {
exit(0);
}
Err(err) => {
}
let result = match cli.command {
SubCommand::Start { config, socket, .. } => daemon(config, socket).await,
SubCommand::Show {
socket,
format,
limit,
patterns,
} => request(socket, format, limit, patterns, Order::Show).await,
SubCommand::Flush {
socket,
format,
limit,
patterns,
} => request(socket, format, limit, patterns, Order::Flush).await,
SubCommand::Trigger {
socket,
limit,
patterns,
} => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await,
SubCommand::TestRegex {
config,
regex,
line,
} => test_regex(config, regex, line),
SubCommand::TestConfig {
config,
format,
verbose,
} => test_config(config, format, verbose),
};
match result {
Ok(()) => {
exit(0);
}
Err(err) => {
if is_daemon {
error!("{err}");
} else {
eprintln!("ERROR {err}");
exit(1);
}
exit(1);
}
}
}

View file

@ -9,7 +9,7 @@ use std::{
use tempfile::TempDir;
use treedb::{Database, LoadedDB};
use crate::treedb::Database;
pub struct Fixture {
path: PathBuf,
@ -65,15 +65,7 @@ pub struct TempDatabase {
impl TempDatabase {
pub async fn default() -> Self {
let _tempdir = TempDir::new().unwrap();
let db = Database::from_dir(_tempdir.path(), None).await.unwrap();
TempDatabase { _tempdir, db }
}
pub async fn from_loaded_db(loaded_db: LoadedDB) -> Self {
let _tempdir = TempDir::new().unwrap();
let db = Database::from_dir(_tempdir.path(), Some(loaded_db))
.await
.unwrap();
let db = Database::from_dir(_tempdir.path()).await.unwrap();
TempDatabase { _tempdir, db }
}
}

View file

@ -1,12 +1,9 @@
use std::{
collections::{BTreeMap, BTreeSet},
time::Duration,
};
use std::collections::BTreeSet;
use chrono::DateTime;
use chrono::{DateTime, Local};
use serde_json::Value;
use crate::time::Time;
use crate::concepts::{Match, MatchTime, Time};
/// Tries to convert a [`Value`] into a [`String`]
pub fn to_string(val: &Value) -> Result<String, String> {
@ -18,40 +15,17 @@ pub fn to_u64(val: &Value) -> Result<u64, String> {
val.as_u64().ok_or("not a u64".into())
}
/// Old way of converting time: with chrono's serialization
fn old_string_to_time(val: &str) -> Result<Time, String> {
let time = DateTime::parse_from_rfc3339(val).map_err(|err| err.to_string())?;
Ok(Duration::new(time.timestamp() as u64, time.timestamp_subsec_nanos()).into())
}
/// New way of converting time: with our own implem
fn new_string_to_time(val: &str) -> Result<Time, String> {
let nanos: u128 = val.parse().map_err(|_| "not a number")?;
Ok(Duration::new(
(nanos / 1_000_000_000) as u64,
(nanos % 1_000_000_000) as u32,
)
.into())
}
/// Tries to convert a [`&str`] into a [`Time`]
fn string_to_time(val: &str) -> Result<Time, String> {
match new_string_to_time(val) {
Err(err) => match old_string_to_time(val) {
Err(_) => Err(err),
ok => ok,
},
ok => ok,
}
}
/// Tries to convert a [`Value`] into a [`Time`]
pub fn to_time(val: &Value) -> Result<Time, String> {
string_to_time(val.as_str().ok_or("not a string number")?)
Ok(
DateTime::parse_from_rfc3339(val.as_str().ok_or("not a number")?)
.map_err(|err| err.to_string())?
.with_timezone(&Local),
)
}
/// Tries to convert a [`Value`] into a [`Vec<String>`]
pub fn to_match(val: &Value) -> Result<Vec<String>, String> {
/// Tries to convert a [`Value`] into a [`Match`]
pub fn to_match(val: &Value) -> Result<Match, String> {
val.as_array()
.ok_or("not an array")?
.iter()
@ -59,6 +33,15 @@ pub fn to_match(val: &Value) -> Result<Vec<String>, String> {
.collect()
}
/// Tries to convert a [`Value`] into a [`MatchTime`]
pub fn to_matchtime(val: &Value) -> Result<MatchTime, String> {
let map = val.as_object().ok_or("not an object")?;
Ok(MatchTime {
m: to_match(map.get("m").ok_or("no m in object")?)?,
t: to_time(map.get("t").ok_or("no t in object")?)?,
})
}
/// Tries to convert a [`Value`] into a [`BTreeSet<Time>`]
pub fn to_timeset(val: &Value) -> Result<BTreeSet<Time>, String> {
val.as_array()
@ -68,19 +51,13 @@ pub fn to_timeset(val: &Value) -> Result<BTreeSet<Time>, String> {
.collect()
}
/// Tries to convert a [`Value`] into a [`BTreeMap<Time, u64>`]
pub fn to_timemap(val: &Value) -> Result<BTreeMap<Time, u64>, String> {
val.as_object()
.ok_or("not a map")?
.iter()
.map(|(key, value)| Ok((string_to_time(key)?, to_u64(value)?)))
.collect()
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use chrono::TimeZone;
use serde_json::Map;
use super::*;
#[test]
@ -108,12 +85,15 @@ mod tests {
#[test]
fn test_to_time() {
assert_eq!(to_time(&"123456".into()).unwrap(), Time::from_nanos(123456),);
assert_eq!(
to_time(&"1970-01-01T01:02:03.456+01:00".into()).unwrap(),
Local.timestamp_millis_opt(123456).unwrap(),
);
assert!(to_time(&(u64::MAX.into())).is_err());
assert!(to_time(&(["ploup"].into())).is_err());
assert!(to_time(&(true.into())).is_err());
// assert!(to_time(&(12345.into())).is_err());
assert!(to_time(&(12345.into())).is_err());
assert!(to_time(&(None::<String>.into())).is_err());
}
@ -135,14 +115,22 @@ mod tests {
#[test]
fn test_to_timeset() {
assert_eq!(
to_timeset(&Value::from([Value::from("123456789")])),
Ok(BTreeSet::from([Time::from_nanos(123456789)]))
to_timeset(&(["1970-01-01T01:20:34.567+01:00"].into())),
Ok(BTreeSet::from([Local
.timestamp_millis_opt(1234567)
.unwrap()]))
);
assert_eq!(
to_timeset(&Value::from([Value::from("8"), Value::from("123456")])),
to_timeset(
&([
"1970-01-01T01:00:00.008+01:00",
"1970-01-01T01:02:03.456+01:00"
]
.into())
),
Ok(BTreeSet::from([
Time::from_nanos(8),
Time::from_nanos(123456),
Local.timestamp_millis_opt(8).unwrap(),
Local.timestamp_millis_opt(123456).unwrap()
]))
);
assert!(to_timeset(&[Value::from("plip"), Value::from(10)].into()).is_err());
@ -155,34 +143,50 @@ mod tests {
}
#[test]
fn test_to_timemap() {
let time1 = 1234567;
let time1_t = Time::from_nanos(time1);
let time2 = 123456789;
let time2_t = Time::from_nanos(time2);
fn test_to_matchtime() {
assert_eq!(
to_timemap(&Value::from_iter([(time2.to_string(), 1)])),
Ok(BTreeMap::from([(time2_t, 1)]))
);
assert_eq!(
to_timemap(&Value::from_iter([
(time1.to_string(), 4),
(time2.to_string(), 0)
])),
Ok(BTreeMap::from([(time1_t.into(), 4), (time2_t.into(), 0)]))
to_matchtime(&Value::Object(Map::from_iter(
BTreeMap::from([
("m".into(), ["plip", "ploup"].into()),
("t".into(), "1970-01-01T04:25:45.678+01:00".into()),
])
.into_iter()
))),
Ok(MatchTime {
m: vec!["plip".into(), "ploup".into()],
t: Local.timestamp_millis_opt(12345678).unwrap(),
})
);
assert!(to_timemap(&Value::from_iter([("1-1", time2)])).is_err());
// assert!(to_timemap(&Value::from_iter([(time2.to_string(), time2)])).is_err());
assert!(to_timemap(&Value::from_iter([(time2)])).is_err());
assert!(to_timemap(&Value::from_iter([(1)])).is_err());
assert!(to_matchtime(&Value::Object(Map::from_iter(
BTreeMap::from([("m".into(), ["plip", "ploup"].into()),]).into_iter()
)))
.is_err());
assert!(to_timemap(&(["1970-01-01T01:20:34.567+01:00"].into())).is_err());
assert!(to_timemap(&([""].into())).is_err());
assert!(to_timemap(&(["ploup"].into())).is_err());
assert!(to_timemap(&(true.into())).is_err());
assert!(to_timemap(&(8.into())).is_err());
assert!(to_timemap(&(None::<String>.into())).is_err());
assert!(to_matchtime(&Value::Object(Map::from_iter(
BTreeMap::from([("t".into(), 12345678.into()),]).into_iter()
)))
.is_err());
assert!(to_matchtime(&Value::Object(Map::from_iter(
BTreeMap::from([("m".into(), "ploup".into()), ("t".into(), 12345678.into()),])
.into_iter()
)))
.is_err());
assert!(to_matchtime(&Value::Object(Map::from_iter(
BTreeMap::from([
("m".into(), ["plip", "ploup"].into()),
("t".into(), [1234567].into()),
])
.into_iter()
)))
.is_err());
assert!(to_timeset(&([""].into())).is_err());
assert!(to_timeset(&(["ploup"].into())).is_err());
assert!(to_timeset(&(true.into())).is_err());
assert!(to_timeset(&(8.into())).is_err());
assert!(to_timeset(&(None::<String>.into())).is_err());
}
}

View file

@ -17,93 +17,49 @@ use std::{
time::Duration,
};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use chrono::{Local, TimeDelta};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use tokio::{
fs::{File, rename},
fs::{rename, File},
sync::{mpsc, oneshot},
time::{MissedTickBehavior, interval},
time::{interval, MissedTickBehavior},
};
use tokio_util::{sync::CancellationToken, task::task_tracker::TaskTrackerToken};
use crate::{
concepts::{Config, Time},
daemon::ShutdownToken,
};
pub mod helpers;
// Database
use raw::{ReadDB, WriteDB};
use time::{Time, now};
pub mod helpers;
mod raw;
pub mod time;
/// Any order the Database can receive
enum Order {
Log(Entry),
OpenTree(OpenTree),
}
/// Entry sent from [`Tree`] to [`Database`]
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
struct Entry {
pub struct Entry {
pub tree: String,
pub key: Value,
pub value: Option<Value>,
pub expiry: Time,
}
/// Order to receive a tree from previous Database
pub struct OpenTree {
name: String,
resp: oneshot::Sender<Option<LoadedTree>>,
}
type LoadedTree = HashMap<Value, Value>;
pub type LoadedDB = HashMap<String, LoadedTree>;
pub type LoadedDB = HashMap<String, HashMap<Value, Value>>;
const DB_NAME: &str = "reaction.db";
const DB_NEW_NAME: &str = "reaction.new.db";
fn path_of(state_directory: &Path, name: &str) -> PathBuf {
if state_directory.as_os_str().is_empty() {
name.into()
} else {
PathBuf::from(state_directory).join(name)
}
}
pub type DatabaseErrorReceiver = oneshot::Receiver<Result<(), String>>;
/// Public-facing API for a treedb Database
pub struct Database {
entry_tx: Option<mpsc::Sender<Order>>,
error_rx: DatabaseErrorReceiver,
}
impl Database {
/// Open a new Database, whom task will start in the background.
/// You'll have to:
/// - drop all [`Tree`]s,
/// - call [`Self::quit`],
///
/// to have the Database properly quit.
///
/// You can wait for [`Self::quit`] returned channel to know how it went.
pub async fn open(
path_directory: &Path,
cancellation_token: CancellationToken,
task_tracker_token: TaskTrackerToken,
) -> Result<Database, IoError> {
let (manager, entry_tx) = DatabaseManager::open(path_directory).await?;
let error_rx = manager.manager(cancellation_token, task_tracker_token);
Ok(Self {
entry_tx: Some(entry_tx),
error_rx,
})
}
/// Permit to close DB's channel.
/// Without this function manually called, the DB can't close.
pub fn quit(self) -> DatabaseErrorReceiver {
self.error_rx
impl Config {
fn path_of(&self, name: &str) -> PathBuf {
if self.state_directory().is_empty() {
name.into()
} else {
PathBuf::from(self.state_directory()).join(name)
}
}
}
@ -111,9 +67,8 @@ impl Database {
// This would make more sense, as actual garbage collection is time-based
/// A [`Database`] logs all write operations on [`Tree`]s in a single file.
/// Logs are written asynchronously, so the write operations in RAM will block only when the
/// underlying channel is full.
struct DatabaseManager {
/// Logs are written asynchronously, so the write operations in RAM will never block.
pub struct Database {
/// Inner database
write_db: WriteDB,
/// [`Tree`]s loaded from disk
@ -124,7 +79,10 @@ struct DatabaseManager {
/// New database atomically replaces the old one when its writing is done.
new_path: PathBuf,
/// The receiver on [`Tree`] write operations
entry_rx: mpsc::Receiver<Order>,
entry_rx: mpsc::Receiver<Entry>,
/// The sender on [`Tree`] write operations.
/// Only used to clone new senders for new Trees.
entry_tx: mpsc::Sender<Entry>,
/// The interval at which the database must be flushed to kernel
flush_every: Duration,
/// The maximum bytes that must be written until the database is rotated
@ -133,37 +91,29 @@ struct DatabaseManager {
bytes_written: usize,
}
impl DatabaseManager {
pub async fn open(
path_directory: &Path,
) -> Result<(DatabaseManager, mpsc::Sender<Order>), IoError> {
let path = path_of(path_directory, DB_NAME);
let new_path = path_of(path_directory, DB_NEW_NAME);
impl Database {
pub async fn open(config: &Config) -> Result<Database, IoError> {
let path = config.path_of(DB_NAME);
let new_path = config.path_of(DB_NEW_NAME);
let (write_db, loaded_db) = rotate_db(&path, &new_path, true).await?;
let (entry_tx, entry_rx) = mpsc::channel(1000);
Ok((
DatabaseManager {
write_db,
loaded_db,
path,
new_path,
entry_rx,
flush_every: Duration::from_secs(2),
max_bytes: 20 * 1024 * 1024, // 20 MiB
bytes_written: 0,
},
Ok(Database {
write_db,
loaded_db,
path,
new_path,
entry_rx,
entry_tx,
))
flush_every: Duration::from_secs(2),
max_bytes: 20 * 1024 * 1024, // 20 MiB
bytes_written: 0,
})
}
pub fn manager(
mut self,
cancellation_token: CancellationToken,
_task_tracker_token: TaskTrackerToken,
) -> oneshot::Receiver<Result<(), String>> {
pub fn manager(mut self, shutdown: ShutdownToken) -> oneshot::Receiver<Result<(), String>> {
let (error_tx, error_rx) = oneshot::channel();
tokio::spawn(async move {
let mut interval = interval(self.flush_every);
@ -171,35 +121,24 @@ impl DatabaseManager {
// flush_every for the next tick, resulting in a relaxed interval.
// Hoping this will smooth IO pressure when under heavy load.
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
let mut status = loop {
let status = loop {
tokio::select! {
order = self.entry_rx.recv() => {
if let Err(err) = self.handle_order(order).await {
cancellation_token.cancel();
entry = self.entry_rx.recv() => {
if let Err(err) = self.handle_entry(entry).await {
shutdown.ask_shutdown();
break err;
}
}
_ = interval.tick() => {
if let Err(err) = self.flush().await {
cancellation_token.cancel();
shutdown.ask_shutdown();
break Some(err);
}
}
_ = cancellation_token.cancelled() => break None
_ = shutdown.wait() => break None
};
};
// Finish consuming received entries when shutdown asked
if status.is_none() {
loop {
let order = self.entry_rx.recv().await;
if let Err(err) = self.handle_order(order).await {
status = err;
break;
}
}
}
// Shutdown
let close_status = self
.close()
@ -217,39 +156,30 @@ impl DatabaseManager {
error_rx
}
/// Executes an order. Returns:
/// Write a received entry. Return:
/// - Err(Some) if there was an error,
/// - Err(None) is channel is closed,
/// - Ok(()) in general case.
async fn handle_order(&mut self, order: Option<Order>) -> Result<(), Option<String>> {
match order {
Some(Order::Log(entry)) => self.handle_entry(entry).await.map_err(Option::Some),
Some(Order::OpenTree(open_tree)) => {
self.handle_open_tree(open_tree);
Ok(())
}
None => Err(None),
}
}
/// Write a received entry.
async fn handle_entry(&mut self, entry: Entry) -> Result<(), String> {
match self.write_db.write_entry(&entry).await {
Ok(bytes_written) => {
self.bytes_written += bytes_written;
if self.bytes_written > self.max_bytes {
match self.rotate_db().await {
Ok(_) => {
self.bytes_written = 0;
Ok(())
async fn handle_entry(&mut self, entry: Option<Entry>) -> Result<(), Option<String>> {
match entry {
Some(entry) => match self.write_db.write_entry(&entry).await {
Ok(bytes_written) => {
self.bytes_written += bytes_written;
if self.bytes_written > self.max_bytes {
match self.rotate_db().await {
Ok(_) => {
self.bytes_written = 0;
Ok(())
}
Err(err) => Err(Some(format!("while rotating database: {err}"))),
}
Err(err) => Err(format!("while rotating database: {err}")),
} else {
Ok(())
}
} else {
Ok(())
}
}
Err(err) => Err(format!("while writing entry to database: {err}")),
Err(err) => Err(Some(format!("while writing entry to database: {err}"))),
},
None => Err(None),
}
}
@ -286,7 +216,7 @@ async fn rotate_db(
// No need to rotate the database when it is new,
// we return here
(true, ErrorKind::NotFound) => {
return Ok((WriteDB::new(File::create(path).await?), HashMap::default()));
return Ok((WriteDB::new(File::create(path).await?), HashMap::default()))
}
(_, _) => return Err(err),
},
@ -326,45 +256,29 @@ pub struct Tree<K: KeyType, V: ValueType> {
/// This property permits the database rotation to be `O(n)` in time and `O(1)` in RAM space,
/// `n` being the number of write operations from the last rotation plus the number of new
/// operations.
entry_timeout: Duration,
entry_timeout: TimeDelta,
/// The inner BTreeMap
tree: BTreeMap<K, V>,
/// The sender that permits to asynchronously send write operations to database
tx: mpsc::Sender<Order>,
tx: mpsc::Sender<Entry>,
}
impl Database {
/// Creates a new Tree with the given name and entry timeout.
/// Takes a closure (or regular function) that converts (Value, Value) JSON entries
/// into (K, V) typed entries.
/// Helpers for this closure can be found in the [`helpers`] module.
pub async fn open_tree<K: KeyType, V: ValueType, F>(
/// Helpers for this closure can be find in the [`helpers`] module.
pub fn open_tree<K: KeyType, V: ValueType, F>(
&mut self,
name: String,
entry_timeout: Duration,
entry_timeout: TimeDelta,
map_f: F,
) -> Result<Tree<K, V>, String>
where
F: Fn((Value, Value)) -> Result<(K, V), String>,
{
// Request the tree
let (tx, rx) = oneshot::channel();
let entry_tx = match self.entry_tx.clone() {
None => return Err("Database is closing".to_string()),
Some(entry_tx) => {
entry_tx
.send(Order::OpenTree(OpenTree {
name: name.clone(),
resp: tx,
}))
.await
.map_err(|_| "Database did not answer")?;
// Get a clone of the channel sender
entry_tx.clone()
}
};
// Load the tree from its JSON
let tree = if let Some(json_tree) = rx.await.map_err(|_| "Database did not respond")? {
let tree = if let Some(json_tree) = self.loaded_db.remove(&name) {
json_tree
.into_iter()
.map(map_f)
@ -376,25 +290,15 @@ impl Database {
id: name,
entry_timeout,
tree,
tx: entry_tx,
tx: self.entry_tx.clone(),
})
}
}
impl DatabaseManager {
/// Creates a new Tree with the given name and entry timeout.
/// Takes a closure (or regular function) that converts (Value, Value) JSON entries
/// into (K, V) typed entries.
/// Helpers for this closure can be found in the [`helpers`] module.
pub fn handle_open_tree(&mut self, open_tree: OpenTree) {
let _ = open_tree.resp.send(self.loaded_db.remove(&open_tree.name));
}
// TODO keep only tree names, and use it for next db rotation to remove associated entries
// Drops Trees that have not been loaded already
// pub fn drop_trees(&mut self) {
// self.loaded_db = HashMap::default();
// }
/// Drops Trees that have not been loaded already
pub fn drop_trees(&mut self) {
self.loaded_db = HashMap::default();
}
}
// Gives access to all read-only functions
@ -409,50 +313,45 @@ impl<K: KeyType, V: ValueType> Deref for Tree<K, V> {
// Reimplement write functions
impl<K: KeyType, V: ValueType> Tree<K, V> {
/// Log an [`Entry`] to the [`Database`]
async fn log(&mut self, k: &K, v: Option<&V>) {
let now = now();
fn log(&mut self, k: &K, v: Option<&V>) {
let e = Entry {
tree: self.id.clone(),
key: serde_json::to_value(k).expect("could not serialize key"),
value: v.map(|v| serde_json::to_value(v).expect("could not serialize value")),
expiry: now + self.entry_timeout,
expiry: Local::now() + self.entry_timeout,
};
let tx = self.tx.clone();
// FIXME what if send fails?
let _ = tx.send(Order::Log(e)).await;
tokio::spawn(async move {
let _ = tx.send(e).await;
});
}
/// Asynchronously persisted version of [`BTreeMap::insert`]
pub async fn insert(&mut self, key: K, value: V) -> Option<V> {
self.log(&key, Some(&value)).await;
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
self.log(&key, Some(&value));
self.tree.insert(key, value)
}
/// Asynchronously persisted version of [`BTreeMap::pop_first`]
pub async fn pop_first(&mut self) -> Option<(K, V)> {
match self.tree.pop_first() {
Some((key, value)) => {
self.log(&key, None).await;
Some((key, value))
}
None => None,
}
pub fn pop_first(&mut self) -> Option<(K, V)> {
self.tree.pop_first().map(|(key, value)| {
self.log(&key, None);
(key, value)
})
}
/// Asynchronously persisted version of [`BTreeMap::pop_last`]
pub async fn pop_last(&mut self) -> Option<(K, V)> {
match self.tree.pop_last() {
Some((key, value)) => {
self.log(&key, None).await;
Some((key, value))
}
None => None,
}
pub fn pop_last(&mut self) -> Option<(K, V)> {
self.tree.pop_last().map(|(key, value)| {
self.log(&key, None);
(key, value)
})
}
/// Asynchronously persisted version of [`BTreeMap::remove`]
pub async fn remove(&mut self, key: &K) -> Option<V> {
self.log(key, None).await;
pub fn remove(&mut self, key: &K) -> Option<V> {
self.log(key, None);
self.tree.remove(key)
}
@ -460,49 +359,22 @@ impl<K: KeyType, V: ValueType> Tree<K, V> {
/// Returning None removes the item if it existed before.
/// Asynchronously persisted.
/// *API design borrowed from [`fjall::WriteTransaction::fetch_update`].*
pub async fn fetch_update<F: FnMut(Option<V>) -> Option<V>>(
pub fn fetch_update<F: FnMut(Option<&V>) -> Option<V>>(
&mut self,
key: K,
mut f: F,
) -> Option<V> {
let old_value = self.get(&key).map(|v| v.to_owned());
let old_value = self.get(&key);
let new_value = f(old_value);
self.log(&key, new_value.as_ref()).await;
if old_value != new_value.as_ref() {
self.log(&key, new_value.as_ref());
}
if let Some(new_value) = new_value {
self.tree.insert(key, new_value)
} else {
self.tree.remove(&key)
}
}
#[cfg(any(test, feature = "test"))]
pub fn tree(&self) -> &BTreeMap<K, V> {
&self.tree
}
}
#[cfg(any(test, feature = "test"))]
impl DatabaseManager {
pub fn set_loaded_db(&mut self, loaded_db: LoadedDB) {
self.loaded_db = loaded_db;
}
}
#[cfg(any(test, feature = "test"))]
impl Database {
pub async fn from_dir(dir_path: &Path, loaded_db: Option<LoadedDB>) -> Result<Self, IoError> {
use tokio_util::task::TaskTracker;
let (mut manager, entry_tx) = DatabaseManager::open(dir_path).await?;
if let Some(loaded_db) = loaded_db {
manager.set_loaded_db(loaded_db)
}
let error_rx = manager.manager(CancellationToken::new(), TaskTracker::new().token());
Ok(Self {
entry_tx: Some(entry_tx),
error_rx,
})
}
}
#[cfg(test)]
@ -510,21 +382,68 @@ mod tests {
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
time::Duration,
io::Error as IoError,
path::Path,
};
use chrono::{Local, TimeDelta};
use serde_json::Value;
use tempfile::{NamedTempFile, TempDir};
use tokio::fs::File;
use tokio::fs::{write, File};
use super::{DB_NAME, Database, Entry, Time, helpers::*, now, raw::WriteDB, rotate_db};
use crate::concepts::Config;
use super::{
helpers::*, raw::WriteDB, rotate_db, Database, Entry, KeyType, LoadedDB, Tree, ValueType,
DB_NAME,
};
impl Database {
pub async fn from_dir(dir_path: &Path) -> Result<Self, IoError> {
let config_path = dir_path.join("reaction.jsonnet");
write(
&config_path,
format!(
"
{{
state_directory: {dir_path:?},
patterns: {{ pattern: {{ regex: \"prout\" }} }},
streams: {{ dummy: {{
cmd: [\"dummy\"],
filters: {{ dummy: {{
regex: [\"dummy\"],
actions: {{ dummy: {{
cmd: [\"dummy\"]
}} }}
}} }}
}} }}
}}
"
),
)
.await?;
let config = Config::from_path(&config_path).unwrap();
Database::open(&config).await
}
pub fn set_loaded_db(&mut self, loaded_db: LoadedDB) {
self.loaded_db = loaded_db;
}
}
impl<K: KeyType, V: ValueType> Tree<K, V> {
pub fn tree(&self) -> &BTreeMap<K, V> {
&self.tree
}
}
#[tokio::test]
async fn test_rotate_db() {
let now = now();
let now = Local::now();
let expired = now - Time::from_secs(2);
let valid = now + Time::from_secs(2);
let expired = now - TimeDelta::seconds(2);
let valid = now + TimeDelta::seconds(2);
let entries = [
Entry {
@ -622,16 +541,15 @@ mod tests {
#[tokio::test]
async fn test_open_tree() {
let now = now();
let now = Local::now();
let now2 = now + TimeDelta::milliseconds(2);
let now3 = now + TimeDelta::milliseconds(3);
let now2 = now + Time::from_millis(2);
let now3 = now + Time::from_millis(3);
let now_ms = now.to_rfc3339();
let now2_ms = now2.to_rfc3339();
let now3_ms = now3.to_rfc3339();
// let now_ms = now.as_nanos().to_string();
// let now2_ms = now2.as_nanos().to_string();
// let now3_ms = now3.as_nanos().to_string();
let valid = now + Time::from_secs(2);
let valid = now + TimeDelta::seconds(2);
let ip127 = vec!["127.0.0.1".to_string()];
let ip1 = vec!["1.1.1.1".to_string()];
@ -639,50 +557,44 @@ mod tests {
let entries = [
Entry {
tree: "time-match".into(),
key: now.as_nanos().to_string().into(),
key: now_ms.clone().into(),
value: Some(ip127.clone().into()),
expiry: valid,
},
Entry {
tree: "time-match".into(),
key: now2.as_nanos().to_string().into(),
key: now2_ms.clone().into(),
value: Some(ip127.clone().into()),
expiry: valid,
},
Entry {
tree: "time-match".into(),
key: now3.as_nanos().to_string().into(),
key: now3_ms.clone().into(),
value: Some(ip127.clone().into()),
expiry: valid,
},
Entry {
tree: "time-match".into(),
key: now2.as_nanos().to_string().into(),
key: now2_ms.clone().into(),
value: Some(ip127.clone().into()),
expiry: valid,
},
Entry {
tree: "match-timeset".into(),
key: ip127.clone().into(),
value: Some([Value::String(now.as_nanos().to_string())].into()),
value: Some([Value::String(now_ms)].into()),
expiry: valid,
},
Entry {
tree: "match-timeset".into(),
key: ip1.clone().into(),
value: Some([Value::String(now2.as_nanos().to_string())].into()),
value: Some([Value::String(now2_ms.clone())].into()),
expiry: valid,
},
Entry {
tree: "match-timeset".into(),
key: ip1.clone().into(),
value: Some(
[
Value::String(now2.as_nanos().to_string()),
Value::String(now3.as_nanos().to_string()),
]
.into(),
),
value: Some([Value::String(now2_ms.clone()), now3_ms.into()].into()),
expiry: valid,
},
];
@ -698,15 +610,14 @@ mod tests {
write_db.close().await.unwrap();
drop(write_db);
let mut database = Database::from_dir(dir_path, None).await.unwrap();
let mut database = Database::from_dir(dir_path).await.unwrap();
let time_match = database
.open_tree(
"time-match".into(),
Duration::from_secs(2),
TimeDelta::seconds(2),
|(key, value)| Ok((to_time(&key)?, to_match(&value)?)),
)
.await
.unwrap();
assert_eq!(
time_match.tree,
@ -720,10 +631,9 @@ mod tests {
let match_timeset = database
.open_tree(
"match-timeset".into(),
Duration::from_hours(2),
TimeDelta::hours(2),
|(key, value)| Ok((to_match(&key)?, to_timeset(&value)?)),
)
.await
.unwrap();
assert_eq!(
match_timeset.tree,
@ -736,10 +646,9 @@ mod tests {
let unknown_tree = database
.open_tree(
"unknown_tree".into(),
Duration::from_hours(2),
TimeDelta::hours(2),
|(key, value)| Ok((to_match(&key)?, to_timeset(&value)?)),
)
.await
.unwrap();
assert_eq!(unknown_tree.tree, BTreeMap::default());
}

View file

@ -1,9 +1,6 @@
use std::{
collections::HashMap,
io::Error as IoError,
time::{SystemTime, UNIX_EPOCH},
};
use std::{collections::HashMap, io::Error as IoError};
use chrono::{Local, TimeZone};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use thiserror::Error;
@ -13,8 +10,6 @@ use tokio::{
};
use tracing::error;
use crate::time::Time;
use super::{Entry, LoadedDB};
const DB_TREE_ID: u64 = 0;
@ -48,7 +43,7 @@ struct WriteEntry<'a> {
#[serde(rename = "v")]
pub value: &'a Option<Value>,
#[serde(rename = "e")]
pub expiry: u64,
pub expiry: i64,
}
/// Entry in custom database format, just read from database
@ -61,7 +56,7 @@ struct ReadEntry {
#[serde(rename = "v")]
pub value: Option<Value>,
#[serde(rename = "e")]
pub expiry: u64,
pub expiry: i64,
}
/// Permits to write entries in a database.
@ -114,7 +109,7 @@ impl WriteDB {
tree: tree_id,
key: &entry.key,
value: &entry.value,
expiry: entry.expiry.as_millis() as u64,
expiry: entry.expiry.timestamp_millis(),
})
.await
.map(|bytes_written| bytes_written + written)
@ -181,14 +176,12 @@ impl ReadDB {
Ok(Some(entry)) => {
// Add back in new DB
match write_db.write_entry(&entry).await {
Ok(_) => (),
Err(err) => match err {
SerdeOrIoError::IO(err) => return Err(err),
SerdeOrIoError::Serde(err) => error!(
"serde should be able to serialize an entry just deserialized: {err}"
),
},
}
Ok(_) => (),
Err(err) => match err {
SerdeOrIoError::IO(err) => return Err(err),
SerdeOrIoError::Serde(err) => error!("serde should be able to serialize an entry just deserialized: {err}"),
}
}
// Insert data in RAM
if load_db {
let map: &mut HashMap<Value, Value> =
@ -206,10 +199,7 @@ impl ReadDB {
}
async fn next(&mut self) -> Result<Option<Entry>, DatabaseError> {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_millis() as u64;
let now = Local::now().timestamp_millis();
// Loop until we get a non-special value
let raw_entry = loop {
self.buffer.clear();
@ -249,7 +239,7 @@ impl ReadDB {
tree: tree.to_owned(),
key: raw_entry.key,
value: raw_entry.value,
expiry: Time::from_millis(raw_entry.expiry),
expiry: Local.timestamp_millis_opt(raw_entry.expiry).unwrap(),
})),
None => Err(DatabaseError::MissingKeyId(raw_entry.tree)),
}
@ -260,22 +250,22 @@ impl ReadDB {
mod tests {
use std::collections::HashMap;
use chrono::{Local, TimeDelta, TimeZone};
use serde_json::Value;
use tempfile::NamedTempFile;
use tokio::fs::{File, read, write};
use tokio::fs::{read, write, File};
use crate::{
use crate::treedb::{
raw::{DatabaseError, ReadDB, WriteDB, DB_TREE_ID},
Entry,
raw::{DB_TREE_ID, DatabaseError, ReadDB, WriteDB},
time::{Time, now},
};
#[tokio::test]
async fn write_db_write_entry() {
let now = now();
let expired = now - Time::from_secs(2);
let expired_ts = expired.as_millis();
// let valid = now + Time::from_secs(2);
let now = Local::now();
let expired = now - TimeDelta::seconds(2);
let expired_ts = expired.timestamp_millis();
// let valid = now + TimeDelta::seconds(2);
// let valid_ts = valid.timestamp_millis();
let path = NamedTempFile::new().unwrap().into_temp_path();
@ -299,23 +289,21 @@ mod tests {
assert_eq!(
contents,
format!(
"{{\"t\":0,\"k\":1,\"v\":\"yooo\",\"e\":0}}\n{{\"t\":1,\"k\":\"key1\",\"v\":\"value1\",\"e\":{expired_ts}}}\n"
)
format!("{{\"t\":0,\"k\":1,\"v\":\"yooo\",\"e\":0}}\n{{\"t\":1,\"k\":\"key1\",\"v\":\"value1\",\"e\":{expired_ts}}}\n")
);
}
#[tokio::test]
async fn read_db_next() {
let now = now();
let now = Local::now();
let expired = now - Time::from_secs(2);
let expired_ts = expired.as_millis();
let expired = now - TimeDelta::seconds(2);
let expired_ts = expired.timestamp_millis();
let valid = now + Time::from_secs(2);
let valid_ts = valid.as_millis();
let valid = now + TimeDelta::seconds(2);
let valid_ts = valid.timestamp_millis();
// Truncate to millisecond precision
let valid = Time::new(valid.as_secs(), valid.subsec_millis() * 1_000_000);
let valid = Local.timestamp_millis_opt(valid_ts).unwrap();
let path = NamedTempFile::new().unwrap().into_temp_path();
@ -355,13 +343,13 @@ mod tests {
#[tokio::test]
async fn read_db_read() {
let now = now();
let now = Local::now();
let expired = now - Time::from_secs(2);
let expired_ts = expired.as_millis();
let expired = now - TimeDelta::seconds(2);
let expired_ts = expired.timestamp_millis();
let valid = now + Time::from_secs(2);
let valid_ts = valid.as_millis();
let valid = now + TimeDelta::seconds(2);
let valid_ts = valid.timestamp_millis();
let read_path = NamedTempFile::new().unwrap().into_temp_path();
let write_path = NamedTempFile::new().unwrap().into_temp_path();
@ -434,13 +422,13 @@ mod tests {
#[tokio::test]
async fn write_then_read_1000() {
// Generate entries
let now = now();
let now = Local::now();
let entries: Vec<_> = (0..1000)
.map(|i| Entry {
tree: format!("tree{}", i % 4),
key: format!("key{}", i % 10).into(),
value: Some(format!("value{}", i % 10).into()),
expiry: now + Time::from_secs(i % 4) - Time::from_secs(1),
expiry: now + TimeDelta::seconds((i % 4) - 1),
})
.collect();
@ -450,7 +438,7 @@ mod tests {
tree: format!("tree{}", i % 4),
key: format!("key{}", i % 10).into(),
value: None,
expiry: now + Time::from_secs(i % 4),
expiry: now + TimeDelta::seconds(i % 4),
})
.collect();

View file

@ -1,80 +0,0 @@
use std::error::Error;
use assert_cmd::cargo::cargo_bin_cmd;
#[test]
fn load_conf_directory() -> Result<(), Box<dyn Error>> {
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args([
"test-config",
"--verbose",
"--config",
"./tests/test-conf/conf-00.d",
]);
cmd.assert().success().stdout(
r#"Loaded the configuration from the following files in the directory ./tests/test-conf/conf-00.d in this order:
part.json
part.jsonnet
part.yaml
part.yml
concurrency: 1
state_directory: .
patterns:
mypat:
regex: FLAG
ipv4mask: null
ipv6mask: null
start:
- - echo
- start
stop:
- - echo
- stop
streams:
common:
cmd:
- cat
- access.log
filters:
from_jsonnet:
regex:
- ^<mypat>
duplicate: extend
actions:
ban:
cmd:
- ban
- <mypat>
unban:
cmd:
- unban
- <mypat>
after: 42s
from_yaml:
regex:
- ^'<mypat>'
duplicate: extend
actions:
print:
cmd:
- echo
- <mypat>
after: 1s
"#);
Ok(())
}
#[test]
fn example_configs_are_equal() {
let outputs = ["config/example.yml", "config/example.jsonnet"]
.map(|config_path| {
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["test-config", "--config", config_path]);
cmd.assert().success().get_output().stdout.clone()
})
.map(String::from_utf8)
.map(Result::unwrap);
assert_eq!(outputs[0], outputs[1]);
}

View file

@ -1,238 +0,0 @@
use std::{error::Error, path::Path, process::Stdio, thread::sleep, time::Duration};
use assert_cmd::cargo::cargo_bin_cmd;
use assert_fs::prelude::*;
use nix::sys::signal;
use predicates::prelude::predicate;
#[test]
#[ignore = "currently failing"] // FIXME
fn actions_delayed_and_on_exit() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-after.jsonnet"))?;
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.timeout(Duration::from_secs(5));
// Expected exit 1: all stream exited
cmd.assert().code(predicate::eq(1));
// Expect 9 lines of im, then de (appended after 1s), then la (appended on reaction exit).
const EXPECTED_MATCH: usize = 9;
const CATEGORIES: [&str; 3] = ["im", "de", "la"];
let mut expected = String::new();
for cat in &CATEGORIES {
for _ in 0..EXPECTED_MATCH {
expected += cat;
expected += "\n";
}
}
tmp_dir.child("log").assert(&expected);
Ok(())
}
#[test]
#[ignore = "long test (~15s)"]
fn kill_stream_on_exit() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-shutdown.jsonnet"))?;
let cmd = cargo_bin_cmd!("reaction");
let mut cmd = std::process::Command::new(cmd.get_program());
cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.stdin(std::process::Stdio::null());
cmd.stdout(std::process::Stdio::null());
cmd.stderr(std::process::Stdio::null());
let mut child = cmd.spawn()?;
let start = std::time::Instant::now();
// wait for reaction to start all its streams
sleep(std::time::Duration::from_millis(500));
let pid = nix::unistd::Pid::from_raw(child.id() as i32);
// stop reaction, ignore kill error (should only happen if the process already exited)
let _ = signal::kill(pid, signal::SIGINT);
// wait for reaction exit (it waits for all streams to exit, ~15s)
loop {
match child.try_wait()? {
None => {}
Some(status) => {
assert_eq!(
status.code(),
Some(0),
"Expect reaction to terminate with code 0"
);
break;
}
}
let elapsed = std::time::Instant::now() - start;
if elapsed > std::time::Duration::from_secs(20) {
// try to terminate reaction before ending the test
let _ = signal::kill(pid, signal::SIGKILL);
let _ = child.wait();
panic!("Test timed out");
}
}
// make sure the streams were correctly signaled
tmp_dir.child("log_term").assert("sigterm\n");
tmp_dir.child("log_kill").assert("sigterm\n");
Ok(())
}
#[test]
fn non_utf8_is_stripped() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-binary-input.jsonnet"))?;
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.timeout(std::time::Duration::from_secs(1));
// Expect exit code 1: all streams exited
cmd.assert().code(predicate::eq(1));
let expected = "received \"\x1babc \x05\"\n".repeat(3);
tmp_dir.child("log").assert(&expected);
Ok(())
}
#[test]
fn capture_streams_stderr() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-stream-stderr.jsonnet"))?;
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.timeout(std::time::Duration::from_secs(1));
// Expect exit code 1: all streams exited
cmd.assert().code(predicate::eq(1));
let mut expected = String::new();
for n in 1..=5 {
expected += &format!("{n}\n");
}
tmp_dir.child("log").assert(&expected);
Ok(())
}
#[test]
fn manualy_trigger_filter() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-trigger.jsonnet"))?;
// start daemon
let cmd = cargo_bin_cmd!("reaction");
let program = cmd.get_program();
let mut cmd = std::process::Command::new(program);
cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.stdin(Stdio::null());
cmd.stdout(Stdio::null());
cmd.stderr(Stdio::null());
let mut daemon = cmd.spawn()?;
let start = std::time::Instant::now();
// wait for socket to be created
loop {
std::thread::sleep(Duration::from_millis(10));
let c = tmp_dir.child("s");
if c.exists() {
break;
}
let elapsed = std::time::Instant::now() - start;
if elapsed > Duration::from_secs(1) {
let _ = daemon.kill();
let _ = daemon.wait();
panic!("Daemon did not create socket");
}
}
let socket = tmp_dir.child("s");
let socket_path = socket.path().to_str().unwrap();
// trigger event manually
let mut cmd_trigger = cargo_bin_cmd!("reaction");
cmd_trigger.current_dir(tmp_dir.path());
cmd_trigger.args(["trigger", "--socket", socket_path, "s1.f1", "num=95"]);
cmd_trigger.timeout(Duration::from_secs(1));
cmd_trigger.assert().success();
// wait for daemon exit
loop {
std::thread::sleep(Duration::from_millis(100));
if let Some(res) = daemon.try_wait()? {
assert_eq!(
res.code(),
Some(1),
"Expect exit code 1: All streams exited"
);
break;
}
let elapsed = std::time::Instant::now() - start;
if elapsed > Duration::from_secs(2) {
let _ = daemon.kill();
let _ = daemon.wait();
panic!("Daemon did not exit");
}
}
tmp_dir.child("log").assert("95\n");
Ok(())
}
#[test]
fn filter_regex_match_eol() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-eol-match.jsonnet"))?;
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.timeout(std::time::Duration::from_secs(1));
// Expect exit code 1: all streams exited
cmd.assert().code(predicate::eq(1));
let mut expected = String::new();
for i in 1..=5 {
expected += &format!("{i}\n");
}
tmp_dir.child("log").assert(&expected);
Ok(())
}

View file

@ -1,179 +0,0 @@
use std::{
env,
fs::File,
io::{IsTerminal, Read, Write},
time::Duration,
};
use tempfile::TempDir;
use tracing::Level;
use reaction::{cli::Format, client::request, daemon::daemon, protocol::Order};
use tokio::time::sleep;
fn file_with_contents(path: &str, contents: &str) {
let mut file = File::create(path).unwrap();
file.write_all(contents.as_bytes()).unwrap();
}
fn config_with_cmd(config_path: &str, cmd: &str) {
file_with_contents(
config_path,
&("
{
concurrency: 0,
patterns: {
ip: {
type: 'ip',
ipv6mask: 64,
},
},
streams: {
stream1: {
cmd: ['sh', '-c', '"
.to_owned()
+ cmd
+ "'],
filters: {
filter1: {
regex: ['ip <ip>'],
retry: 2,
retryperiod: '2s',
duplicate: 'rerun',
actions: {
// Don't mix code and data at home!
// You may permit arbitrary execution from vilains,
// if your regex is permissive enough.
// This is OK only for testing purposes.
ipv4_1: {
cmd: ['sh', '-c', 'echo <ip> >> ./ipv4.txt'],
ipv4only: true,
},
ipv4_2: {
cmd: ['sh', '-c', 'echo del <ip> >> ./ipv4.txt'],
ipv4only: true,
after: '30s',
onexit: false,
},
ipv6_1: {
cmd: ['sh', '-c', 'echo <ip> >> ./ipv6.txt'],
ipv6only: true,
},
ipv6_2: {
cmd: ['sh', '-c', 'echo del <ip> >> ./ipv6.txt'],
ipv6only: true,
after: '30s',
onexit: false,
},
all_1: {
cmd: ['sh', '-c', 'echo <ip> >> ./out.txt'],
},
all_2: {
cmd: ['sh', '-c', 'echo del <ip> >> ./out.txt'],
after: '30s',
onexit: false,
},
}
}
}
}
}
}"),
);
}
fn get_file_content(path: &str) -> String {
let mut out_txt = File::open(path).unwrap();
let mut contents = String::new();
out_txt.read_to_string(&mut contents).unwrap();
contents
}
#[tokio::test]
async fn ip() {
let dir = TempDir::new().unwrap();
env::set_current_dir(&dir).unwrap();
let config_path = "config.jsonnet";
let out_path = "./out.txt";
let ipv4_path = "./ipv4.txt";
let ipv6_path = "./ipv6.txt";
let socket_path = "./reaction.sock";
config_with_cmd(
config_path,
"for i in 1.2.3.4 204:31::1 5.5.5.5 1.2.3.4 204:31::1 5.5.5.5; do echo ip $i; sleep 0.01; done; sleep 0.15",
);
file_with_contents(out_path, "");
file_with_contents(ipv4_path, "");
file_with_contents(ipv6_path, "");
// Set the logger before running any code from the crate
tracing_subscriber::fmt::fmt()
.without_time()
.with_target(false)
.with_ansi(std::io::stdout().is_terminal())
.with_max_level(Level::DEBUG)
.try_init()
.unwrap();
// Run the daemon
let handle = tokio::spawn(async move { daemon(config_path.into(), socket_path.into()).await });
// Run the flushes
// We sleep for the time the echoes are finished + a bit (100ms)
let handle2 = tokio::spawn(async move {
sleep(Duration::from_millis(160)).await;
request(
socket_path.into(),
Format::JSON,
None,
vec![("ip".into(), "1.2.3.4".into())],
Order::Flush,
)
.await
});
let handle3 = tokio::spawn(async move {
sleep(Duration::from_millis(180)).await;
request(
socket_path.into(),
Format::JSON,
None,
vec![("ip".into(), "204:31::/64".into())],
Order::Flush,
)
.await
});
let (daemon_exit, flush1, flush2) = tokio::join!(handle, handle2, handle3);
assert!(daemon_exit.is_ok());
assert!(flush1.is_ok());
assert!(flush2.is_ok());
// tokio::time::sleep(Duration::from_secs(100)).await;
assert_eq!(
get_file_content(out_path).trim(),
[
"1.2.3.4",
"204:31::/64",
"5.5.5.5",
"del 1.2.3.4",
"del 204:31::/64"
]
.join("\n")
);
assert_eq!(
get_file_content(ipv4_path).trim(),
["1.2.3.4", "5.5.5.5", "del 1.2.3.4"].join("\n")
);
assert_eq!(
get_file_content(ipv6_path).trim(),
["204:31::/64", "del 204:31::/64"].join("\n")
);
}

View file

@ -41,23 +41,6 @@
},
},
},
f2: {
regex: [
"^can't found <num>$",
],
retry: 2,
retryperiod: '60s',
actions: {
damn: {
cmd: ['notify-send', 'you should not see that', 'ban <num>'],
},
undamn: {
cmd: ['notify-send', 'you should not see that', 'unban <num>'],
after: '3s',
onexit: true,
},
},
},
},
},
},

View file

@ -1,51 +0,0 @@
use std::{error::Error, path::Path, time::Duration};
use assert_cmd::cargo::cargo_bin_cmd;
use assert_fs::prelude::*;
use predicates::prelude::predicate;
#[test]
fn resume_action() -> Result<(), Box<dyn Error>> {
let tmp_dir = assert_fs::TempDir::new()?;
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-resume-action.jsonnet"))?;
// first run
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.timeout(Duration::from_secs(5));
// Expected exit 1: all stream exited
cmd.assert().code(predicate::eq(1));
// expect a single match from the stream command
let expected = ["starting", "start4 10.1.0.1", "stopping"].join("\n") + "\n";
tmp_dir.child("log").assert(&expected);
// second run, expect to resume action
let mut cmd = cargo_bin_cmd!("reaction");
cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]);
cmd.current_dir(tmp_dir.path());
cmd.timeout(Duration::from_secs(5));
// Expected exit 1: all stream exited
cmd.assert().code(predicate::eq(1));
let expected = [
"starting",
"start4 10.1.0.1", // from the stream command
"stopping",
"starting",
"start4 10.1.0.1", // previous action loaded from db
"stop4 10.1.0.1", // previous action lapses
"start4 10.1.0.1", // from the stream command
"stopping",
]
.join("\n")
+ "\n";
tmp_dir.child("log").assert(&expected);
Ok(())
}

View file

@ -1,127 +0,0 @@
use std::{fs::read_to_string, path::Path, thread, time::Duration};
use assert_cmd::{Command, cargo::cargo_bin_cmd};
use assert_fs::prelude::*;
const SECRET_KEY_A: &str = "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=";
const PUBLIC_KEY_A: &str = "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=";
const SECRET_KEY_B: &str = "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=";
const PUBLIC_KEY_B: &str = "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=";
// require UDP ports 9876-9879 to be free on 127.0.0.1
#[test]
#[ignore = "failing for now"]
fn plugin_cluster_same_startup() {
// First build reaction-plugin-cluster
Command::new("cargo")
.args(["build", "-p", "reaction-plugin-cluster"])
.unwrap();
let config = read_to_string("tests/test-conf/test-cluster.jsonnet").unwrap();
let config_a = config
.replace("PUBLIC_KEY", PUBLIC_KEY_B)
.replace("NODE", "A")
.replace("1234", "9876")
.replace("4321", "9877");
let config_b = config
.replace("PUBLIC_KEY", PUBLIC_KEY_A)
.replace("NODE", "B")
.replace("1234", "9877")
.replace("4321", "9876");
let output_a = vec![
"B a0 1", "B a0 2", "B a0 3", "B a0 4", "B b0 1", "B b0 2", "B b0 3", "B b0 4", "",
];
let output_b = vec![
"A a0 1", "A a0 2", "A a0 3", "A a0 4", "A b0 1", "A b0 2", "A b0 3", "A b0 4", "",
];
let a_handle = thread::spawn(|| launch_node(config_a, SECRET_KEY_A, output_a));
let b_handle = thread::spawn(|| launch_node(config_b, SECRET_KEY_B, output_b));
a_handle.join().unwrap();
b_handle.join().unwrap();
}
#[test]
#[ignore = "failing for now"]
fn plugin_cluster_different_startup() {
// First build reaction-plugin-cluster
Command::new("cargo")
.args(["build", "-p", "reaction-plugin-cluster"])
.unwrap();
let config = read_to_string("tests/test-conf/test-cluster.jsonnet").unwrap();
let config_a = config
.replace("PUBLIC_KEY", PUBLIC_KEY_B)
.replace("NODE", "A")
.replace("1234", "9878")
.replace("4321", "9879");
let config_b = config
.replace("PUBLIC_KEY", PUBLIC_KEY_A)
.replace("NODE", "B")
.replace("1234", "9879")
.replace("4321", "9878");
let output_a = vec![
"B a0 1", "B a0 2", "B a0 3", "B a0 4", "B b0 1", "B b0 2", "B b0 3", "B b0 4", "",
];
let output_b = vec![
"A a0 1", "A a0 2", "A a0 3", "A a0 4", "A b0 1", "A b0 2", "A b0 3", "A b0 4", "",
];
let a_handle = thread::spawn(|| launch_node(config_a, SECRET_KEY_A, output_a));
let b_handle = thread::spawn(|| {
thread::sleep(Duration::from_secs(2));
launch_node(config_b, SECRET_KEY_B, output_b);
});
// thread::sleep(Duration::from_secs(60));
a_handle.join().unwrap();
b_handle.join().unwrap();
}
fn launch_node(config: String, my_secret: &'static str, expected_output: Vec<&'static str>) {
let tmp_dir = assert_fs::TempDir::new().unwrap();
// Write node config
tmp_dir.child("config.jsonnet").write_str(&config).unwrap();
tmp_dir
.child("plugin_data/cluster/secret_key_s1.txt")
.write_str(my_secret)
.unwrap();
// Copy cluster plugin
tmp_dir
.child("./target/debug/reaction-plugin-cluster")
.write_file(Path::new("./target/debug/reaction-plugin-cluster"))
.unwrap();
let output = cargo_bin_cmd!("reaction")
.args([
"start",
"--socket",
"./s",
"--config",
"./config.jsonnet",
"-l",
"DEBUG",
])
.current_dir(tmp_dir.path())
.timeout(Duration::from_secs(5))
.output()
.unwrap();
println!(
"command output:\n{}",
String::from_utf8(output.stdout).unwrap()
);
// Expected output
tmp_dir.child("log").assert(expected_output.join("\n"));
}

View file

@ -1,40 +0,0 @@
use std::{path::Path, time::Duration};
use assert_cmd::{Command, cargo::cargo_bin_cmd};
use assert_fs::prelude::*;
use predicates::prelude::predicate;
#[test]
fn plugin_virtual() {
// First build reaction-plugin-virtual
Command::new("cargo")
.args(["build", "-p", "reaction-plugin-virtual"])
.unwrap();
let tmp_dir = assert_fs::TempDir::new().unwrap();
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/test-conf/test-virtual.jsonnet"))
.unwrap();
// Copy virtual plugin
tmp_dir
.child("./target/debug/reaction-plugin-virtual")
.write_file(Path::new("./target/debug/reaction-plugin-virtual"))
.unwrap();
cargo_bin_cmd!("reaction")
.args(["start", "--socket", "./s", "--config", "./config.jsonnet"])
.current_dir(tmp_dir.path())
.timeout(Duration::from_secs(5))
// Expected exit 1: all stream exited
.assert()
.code(predicate::eq(1));
// Expected output
let output = [
"a0 1", "a0 2", "a0 3", "a0 4", "b0 1", "b0 2", "b0 3", "b0 4", "",
];
tmp_dir.child("log").assert(output.join("\n"));
tmp_dir.child("log").write_str("").unwrap();
}

View file

@ -36,7 +36,6 @@ fn config_with_cmd(config_path: &str, cmd: &str) {
regex: ['here is <num>'],
retry: 2,
retryperiod: '2s',
duplicate: 'rerun',
actions: {
// Don't mix code and data at home!
// You may permit arbitrary execution from vilains,
@ -129,11 +128,8 @@ async fn simple() {
let (daemon_exit, flush1, flush2) = tokio::join!(handle, handle2, handle3);
assert!(daemon_exit.is_ok());
assert!(daemon_exit.unwrap() == 1);
assert!(flush1.is_ok());
assert!(flush1.unwrap().is_ok());
assert!(flush2.is_ok());
assert!(flush2.unwrap().is_ok());
assert_eq!(
// 24 is encountered for the second time, then
@ -156,14 +152,18 @@ async fn simple() {
config_with_cmd(
config_path,
"sleep 0.02; for i in 12 24 36 56 67; do echo here is $i; sleep 0.01; done",
"for i in 12 24 36 56 67; do echo here is $i; sleep 0.01; done",
);
file_with_contents(out_path, "");
file_with_contents(oneshot_path, "");
let daemon_exit = daemon(config_path.into(), socket_path.into()).await;
assert!(daemon_exit == 1);
assert!(daemon_exit.is_err());
assert_eq!(
daemon_exit.unwrap_err().to_string(),
"quitting because all streams finished"
);
// 36 trigger from DB
// 12 trigger from DB
@ -188,4 +188,69 @@ async fn simple() {
get_file_content(oneshot_path).trim(),
"oneshot 12\noneshot 67".to_owned().trim()
);
// Third part of the test
// Check we can capture both stdout and stderr from spawned processes
// New directory to avoid to load the database from previous tests
let dir = TempDir::new().unwrap();
env::set_current_dir(&dir).unwrap();
// echo numbers twice, once on stdout, once on stderr
config_with_cmd(
config_path,
"for i in 1 2 3 4 5 6 7 8 9; do echo here is $i; echo here is $i 1>&2; sleep 0.01; done",
);
file_with_contents(out_path, "");
let daemon_exit = daemon(config_path.into(), socket_path.into()).await;
assert!(daemon_exit.is_err());
assert_eq!(
daemon_exit.unwrap_err().to_string(),
"quitting because all streams finished"
);
// make sure all numbers appear in the output
assert_eq!(
get_file_content(out_path).trim(),
"1\n2\n3\n4\n5\n6\n7\n8\n9".to_owned()
);
// Fourth part of the test
// Check the trigger function
// New directory to avoid to load the database from previous tests
let dir = TempDir::new().unwrap();
env::set_current_dir(&dir).unwrap();
// No thing from stream
config_with_cmd(config_path, "sleep 0.1");
file_with_contents(out_path, "");
// Run the daemon
let handle = tokio::spawn(async move { daemon(config_path.into(), socket_path.into()).await });
// Run the trigger
// We sleep a bit to wait for reaction to start
let handle2 = tokio::spawn(async move {
sleep(Duration::from_millis(20)).await;
request(
socket_path.into(),
Format::JSON,
Some("stream1.filter1".into()),
vec![("num".into(), "95".into())],
Order::Trigger,
)
.await
});
let (daemon_exit, trigger) = tokio::join!(handle, handle2);
assert!(daemon_exit.is_ok());
assert!(trigger.is_ok());
// make sure the trigger number is in the output
assert_eq!(get_file_content(out_path).trim(), "95".to_owned());
}

View file

@ -1,46 +0,0 @@
local echo(message, before='true') = [
'sh',
'-c',
before + '; echo ' + message + ' >> ./log',
];
{
patterns: {
num: {
regex: '[0-9]+',
},
},
start: [
echo('start 1'),
echo('start 2'),
],
stop: [
echo('stop 1'),
echo('stop 2'),
],
streams: {
s1: {
cmd: ['sh', '-c', 'seq 2 | while read i; do echo runtime $i; sleep 0.1; done'],
filters: {
f1: {
duplicate: 'rerun',
regex: [
'^runtime <num>$',
],
actions: {
one: {
cmd: echo('runtime <num>'),
},
two: {
cmd: echo('after', before='sleep 0.2'),
after: '5m',
onexit: true,
},
},
},
},
},
},
}

View file

@ -1,90 +0,0 @@
use std::{path::Path, time::Duration};
use assert_cmd::cargo::cargo_bin_cmd;
use assert_fs::{TempDir, prelude::*};
use predicates::prelude::predicate;
#[test]
fn start_stop() {
let tmp_dir = assert_fs::TempDir::new().unwrap();
run_reaction(&tmp_dir);
// Expected output
let output = [
"start 1",
"start 2",
"runtime 1",
"runtime 2",
// no order required because they'll be awaken all together on exit
"after",
"after",
"stop 1",
"stop 2",
"",
];
tmp_dir.child("log").assert(output.join("\n"));
tmp_dir.child("log").write_str("").unwrap();
println!(
"DATABASE:\n{}",
std::fs::read_to_string(tmp_dir.child("reaction.db")).unwrap()
);
// Second run
run_reaction(&tmp_dir);
// Expected output
// (one of them)
let outputs = [
[
"start 1",
"start 2",
"runtime 1",
"runtime 2",
"runtime 1",
"runtime 2",
// no order required because they'll be awaken all together on exit
"after",
"after",
"after",
"after",
"stop 1",
"stop 2",
"",
],
[
"start 1",
"start 2",
"runtime 2",
"runtime 1",
"runtime 1",
"runtime 2",
// no order required because they'll be awaken all together on exit
"after",
"after",
"after",
"after",
"stop 1",
"stop 2",
"",
],
];
let contents = std::fs::read_to_string(tmp_dir.child("log")).unwrap();
assert!(contents == outputs[0].join("\n") || contents == outputs[1].join("\n"));
}
fn run_reaction(tmp_dir: &TempDir) {
tmp_dir
.child("config.jsonnet")
.write_file(Path::new("tests/start_stop.jsonnet"))
.unwrap();
cargo_bin_cmd!("reaction")
.args(["start", "--socket", "./s", "--config", "./config.jsonnet"])
.current_dir(tmp_dir.path())
.timeout(Duration::from_secs(5))
// Expected exit 1: all stream exited
.assert()
.code(predicate::eq(1));
}

View file

@ -1,7 +1,5 @@
local log(cat) = [
// NOTE: do not log the ID as it would be out of order since all cat commands
// are executed at once
'sh', '-c', 'echo "' + cat + '" >>log',
'sh', '-c', 'echo "' + cat + ' <id>" >>log',
];
{
patterns: {
@ -11,7 +9,7 @@ local log(cat) = [
},
streams: {
idle: {
cmd: ['sh', '-c', 'for n in $(seq 9); do echo $n; done; sleep 2'],
cmd: ['sh', '-c', 'for n in 1 1 3 2 3 1 2 2 3; do echo $n; done; sleep 2'],
filters: {
filt1: {
regex: [
@ -36,3 +34,4 @@ local log(cat) = [
},
},
}

View file

@ -10,7 +10,7 @@
},
streams: {
binary: {
cmd: ['sh', '-c', 'for n in $(seq 3); do printf "\\n\\x1babc\\xe2 \\x05"; done; printf "\\n"; sleep 0.2'],
cmd: ['sh', '-c', 'for n in 123 456 987; do printf "\\n\\x1b$n\\xe2 \\x05"; sleep 0.5; done; printf "\\n"; sleep 0.2'],
filters: {
filt1: {
regex: [
@ -18,7 +18,7 @@
],
actions: {
act: {
cmd: ['sh', '-c', 'echo \'received "<id>"\' >>log'],
cmd: ['echo', 'received "<id>"'],
},
},
},

Some files were not shown because too many files have changed in this diff Show more