From 02f13a263eef6408f6734803565fbc715e916180 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 001/241] fix release --- Cargo.lock | 4 ++-- release.py | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a3ec5b..4cfb347 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -826,7 +826,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.0.0-rc2" +version = "2.0.0" dependencies = [ "chrono", "clap", diff --git a/release.py b/release.py index 0f63cf8..e1082de 100644 --- a/release.py +++ b/release.py @@ -33,12 +33,12 @@ def main(): sys.exit(1) # Ask user - # if input(f"We will create a release for tag {tag}. Do you want to continue? (y/n) ") != "y": - # print("exiting.") - # sys.exit(1) + if input(f"We will create a release for tag {tag}. Do you want to continue? 
(y/n) ") != "y": + print("exiting.") + sys.exit(1) # Git push - # run_command(["git", "push", "--tags"]) + run_command(["git", "push", "--tags"]) # Minisign password cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True) @@ -270,10 +270,11 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ conn = http.client.HTTPSConnection("framagit.org") conn.request("POST", "/api/v4/projects/90566/releases", body=body, headers=headers) response = conn.getresponse() + body = json.loads(response.read()) if response.status != 201: print( - f"sending message failed: status: {response.status}, reason: {response.reason}" + f"sending message failed: status: {response.status}, reason: {response.reason}, message: {body.message}" ) sys.exit(1) From b53044323c28b508e703fbca43e14133fadeff1d Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 002/241] Add small doc for C helpers --- helpers_c/README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 helpers_c/README.md diff --git a/helpers_c/README.md b/helpers_c/README.md new file mode 100644 index 0000000..2407f19 --- /dev/null +++ b/helpers_c/README.md @@ -0,0 +1,12 @@ +# C helpers + +Those helpers permit to handle IPv4 & IPv6 at the same time, waiting for [#79](https://framagit.org/ppom/reaction/-/issues/79) to be addressed. 
+ +Compilation: + +```bash +# Produces nft46 binary +gcc -o nft46 nft46.c +# Produces ip46tables binary +gcc -o ip46tables ip46tables.c +``` From 3beca6d7a55d89aa10ea00a2d7897347195c56d2 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 003/241] Document state_directory Fixes #71 --- config/example.jsonnet | 5 +++++ config/example.yml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/config/example.jsonnet b/config/example.jsonnet index 6d84230..7c2acc2 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -38,6 +38,11 @@ local banFor(time) = { }, }, + // where the state (database) must be read + // defaults to . which means reaction's working directory. + // The systemd service starts reaction in /var/lib/reaction. + state_directory: '.', + // if set to a positive number → max number of concurrent actions // if set to a negative number → no limit // if not specified or set to 0 → defaults to the number of CPUs on the system diff --git a/config/example.yml b/config/example.yml index 759f597..a9e585e 100644 --- a/config/example.yml +++ b/config/example.yml @@ -15,6 +15,11 @@ definitions: # ip46tables is a minimal C program (only POSIX dependencies) present as a subdirectory. # it permits to handle both ipv4/iptables and ipv6/ip6tables commands +# where the state (database) must be read +# defaults to . which means reaction's working directory. +# The systemd service starts reaction in /var/lib/reaction. +state_directory: . 
+ # if set to a positive number → max number of concurrent actions # if set to a negative number → no limit # if not specified or set to 0 → defaults to the number of CPUs on the system From 8543fead54504beb6c0e1392c9ec3cee1706b62c Mon Sep 17 00:00:00 2001 From: Martin Date: Thu, 5 Jun 2025 16:33:17 +0200 Subject: [PATCH 004/241] Fix makefile install remove duplicated /man/man1 otherwise, get an error during installation install: target '/usr/local/share/man/man1/man/man1/': No such file or directory Use -D to create missing directory --- packaging/Makefile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packaging/Makefile b/packaging/Makefile index 259e393..1a16acd 100644 --- a/packaging/Makefile +++ b/packaging/Makefile @@ -5,18 +5,18 @@ SYSTEMDDIR ?= /etc/systemd install: install -m755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR) - install -m644 reaction*.1 $(DESTDIR)$(MANDIR)/man/man1/ - install -m644 reaction.bash $(DESTDIR)/share/bash-completion/completions/reaction - install -m644 reaction.fish $(DESTDIR)/share/fish/completions/ - install -m644 _reaction $(DESTDIR)/share/zsh/vendor-completions/ + install -m644 reaction*.1 $(DESTDIR)$(MANDIR)/ + install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction + install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction + install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction install -m644 reaction.service $(SYSTEMDDIR)/system/reaction.service remove: rm -f $(DESTDIR)$(BINDIR)/bin/reaction rm -f $(DESTDIR)$(BINDIR)/bin/nft46 rm -f $(DESTDIR)$(BINDIR)/bin/ip46tables - rm -f $(DESTDIR)$(MANDIR)/man/man1/reaction*.1 - rm -f $(DESTDIR)/share/bash-completion/completions/reaction - rm -f $(DESTDIR)/share/fish/completions/ - rm -f $(DESTDIR)/share/zsh/vendor-completions/ + rm -f $(DESTDIR)$(MANDIR)/reaction*.1 + rm -f $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction + rm -f 
$(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/ + rm -f $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/ rm -f $(SYSTEMDDIR)/system/reaction.service From 74280d0f45934405afc57199c5f833be40140a32 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 005/241] Fix completions filenames and their removal --- packaging/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/Makefile b/packaging/Makefile index 1a16acd..8e36539 100644 --- a/packaging/Makefile +++ b/packaging/Makefile @@ -7,7 +7,7 @@ install: install -m755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR) install -m644 reaction*.1 $(DESTDIR)$(MANDIR)/ install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction - install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction + install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction install -m644 reaction.service $(SYSTEMDDIR)/system/reaction.service @@ -17,6 +17,6 @@ remove: rm -f $(DESTDIR)$(BINDIR)/bin/ip46tables rm -f $(DESTDIR)$(MANDIR)/reaction*.1 rm -f $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction - rm -f $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/ - rm -f $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/ + rm -f $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish + rm -f $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction rm -f $(SYSTEMDDIR)/system/reaction.service From f63502759fb99805d1e70e3bab50f95000579714 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 006/241] make official release only with --publish flag --- release.py | 112 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 68 insertions(+), 44 deletions(-) diff --git a/release.py b/release.py index e1082de..f844bd9 100644 --- 
a/release.py +++ b/release.py @@ -1,5 +1,6 @@ #!/usr/bin/env nix-shell #!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign cargo-cross rustup cargo-deb +import argparse import http.client import json import os @@ -19,6 +20,15 @@ def run_command(args, **kwargs): def main(): + parser = argparse.ArgumentParser(description="create a reaction release") + parser.add_argument( + "-p", + "--publish", + action="store_true", + help="publish a release. else build only", + ) + args = parser.parse_args() + # Git tag cmd = run_command( ["git", "tag", "--sort=-creatordate"], capture_output=True, text=True @@ -33,28 +43,35 @@ def main(): sys.exit(1) # Ask user - if input(f"We will create a release for tag {tag}. Do you want to continue? (y/n) ") != "y": + if ( + args.publish + and input( + f"We will create a release for tag {tag}. Do you want to continue? (y/n) " + ) + != "y" + ): print("exiting.") sys.exit(1) - # Git push - run_command(["git", "push", "--tags"]) + if args.publish: + # Git push + run_command(["git", "push", "--tags"]) - # Minisign password - cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True) - minisign_password = cmd.stdout + # Minisign password + cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True) + minisign_password = cmd.stdout - # Create directory - run_command( - [ - "ssh", - "akesi", - # "-J", "pica01", - "mkdir", - "-p", - f"/var/www/static/reaction/releases/{tag}/", - ] - ) + # Create directory + run_command( + [ + "ssh", + "akesi", + # "-J", "pica01", + "mkdir", + "-p", + f"/var/www/static/reaction/releases/{tag}/", + ] + ) architectures = { "x86_64-unknown-linux-musl": "amd64", @@ -161,31 +178,35 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service os.chdir(root_dir) - # Sign - run_command( - ["minisign", "-Sm", deb_path, tar_path], text=True, input=minisign_password - ) - deb_sig = f"{deb_path}.minisig" - tar_sig = 
f"{tar_path}.minisig" + if args.publish: + # Sign + run_command( + ["minisign", "-Sm", deb_path, tar_path], + text=True, + input=minisign_password, + ) + deb_sig = f"{deb_path}.minisig" + tar_sig = f"{tar_path}.minisig" - # Push - run_command( - [ - "rsync", - "-az", # "-e", "ssh -J pica01", - tar_path, - tar_sig, - deb_path, - deb_sig, - f"akesi:/var/www/static/reaction/releases/{tag}/", - ] - ) - all_files.extend([tar_path, tar_sig, deb_path, deb_sig]) + # Push + run_command( + [ + "rsync", + "-az", # "-e", "ssh -J pica01", + tar_path, + tar_sig, + deb_path, + deb_sig, + f"akesi:/var/www/static/reaction/releases/{tag}/", + ] + ) - # Instructions + all_files.extend([tar_path, tar_sig, deb_path, deb_sig]) - instructions.append( - f""" + # Instructions + + instructions.append( + f""" ## Tar installation ({architectures[architecture]} linux) ```bash @@ -197,10 +218,10 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{tar_name} \\ && sudo make install ``` """.strip() - ) + ) - instructions.append( - f""" + instructions.append( + f""" ## Debian installation ({architectures[architecture]} linux) ```bash @@ -211,7 +232,10 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ && sudo apt install ./{deb_name} ``` """.strip() - ) + ) + + if not args.publish: + return # Release cmd = run_command( @@ -274,7 +298,7 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ if response.status != 201: print( - f"sending message failed: status: {response.status}, reason: {response.reason}, message: {body.message}" + f"sending message failed: status: {response.status}, reason: {response.reason}, message: {body.message}" ) sys.exit(1) From 388d4dac90fdb953d6bf61b6d1b8ed64f3c572d0 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 007/241] Fix tarball Makefile, release.py - Makefile creates missing directories - release.py puts tarballs & debs in local/ directory when not publishing --- packaging/Makefile 
| 12 ++++++------ release.py | 35 ++++++++++++++++++++++++----------- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/packaging/Makefile b/packaging/Makefile index 8e36539..4727ac8 100644 --- a/packaging/Makefile +++ b/packaging/Makefile @@ -4,12 +4,12 @@ MANDIR = $(PREFIX)/share/man/man1 SYSTEMDDIR ?= /etc/systemd install: - install -m755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR) - install -m644 reaction*.1 $(DESTDIR)$(MANDIR)/ - install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction - install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish - install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction - install -m644 reaction.service $(SYSTEMDDIR)/system/reaction.service + install -Dm755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR) + install -Dm644 reaction*.1 -t $(DESTDIR)$(MANDIR)/ + install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction + install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish + install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction + install -Dm644 reaction.service $(SYSTEMDDIR)/system/reaction.service remove: rm -f $(DESTDIR)$(BINDIR)/bin/reaction diff --git a/release.py b/release.py index f844bd9..219c568 100644 --- a/release.py +++ b/release.py @@ -20,6 +20,7 @@ def run_command(args, **kwargs): def main(): + # CLI arguments parser = argparse.ArgumentParser(description="create a reaction release") parser.add_argument( "-p", @@ -29,6 +30,8 @@ def main(): ) args = parser.parse_args() + root_dir = os.getcwd() + # Git tag cmd = run_command( ["git", "tag", "--sort=-creatordate"], capture_output=True, text=True @@ -72,14 +75,21 @@ def main(): f"/var/www/static/reaction/releases/{tag}/", ] ) + else: + # Prepare directory for tarball and deb file. 
+ # We must do a `cargo clean` before each build, + # So we have to move them out of `target/` + local_dir = os.path.join(root_dir, "local") + try: + os.mkdir(local_dir) + except FileExistsError: + pass architectures = { "x86_64-unknown-linux-musl": "amd64", "aarch64-unknown-linux-musl": "arm64", } - root_dir = os.getcwd() - all_files = [] instructions = [ @@ -107,7 +117,7 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service """.strip(), ] - for architecture in architectures.keys(): + for (architecture_rs, architecture_pretty) in architectures.items(): # Cargo clean run_command(["cargo", "clean"]) @@ -117,7 +127,7 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service "rustup", "toolchain", "install", - f"stable-{architecture}", + f"stable-{architecture_rs}", "--force-non-host", # I know, I know! "--profile", "minimal", @@ -125,20 +135,20 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service ) # Build - run_command(["cross", "build", "--release", "--target", architecture]) + run_command(["cross", "build", "--release", "--target", architecture_rs]) # Build .deb cmd = run_command( - ["cargo-deb", f"--target={architecture}", "--no-build", "--no-strip"] + ["cargo-deb", f"--target={architecture_rs}", "--no-build", "--no-strip"] ) - deb_dir = os.path.join("./target", architecture, "debian") + deb_dir = os.path.join("./target", architecture_rs, "debian") deb_name = [f for f in os.listdir(deb_dir) if f.endswith(".deb")][0] deb_path = os.path.join(deb_dir, deb_name) # Archive - files_path = os.path.join("./target", architecture, "release") - pkg_name = f"reaction-{tag}-{architectures[architecture]}" + files_path = os.path.join("./target", architecture_rs, "release") + pkg_name = f"reaction-{tag}-{architecture_pretty}" tar_name = f"{pkg_name}.tar.gz" tar_path = os.path.join(files_path, tar_name) @@ -207,7 +217,7 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service instructions.append( f""" -## Tar installation 
({architectures[architecture]} linux) +## Tar installation ({architecture_pretty} linux) ```bash curl -O https://static.ppom.me/reaction/releases/{tag}/{tar_name} \\ @@ -222,7 +232,7 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{tar_name} \\ instructions.append( f""" -## Debian installation ({architectures[architecture]} linux) +## Debian installation ({architecture_pretty} linux) ```bash curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ @@ -233,6 +243,9 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ ``` """.strip() ) + else: + # Copy + run_command(["cp", tar_path, deb_path, local_dir]) if not args.publish: return From 8f5511b4150fe06970fa8fe56b172b9ab72696bf Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Jun 2025 12:00:00 +0200 Subject: [PATCH 008/241] v2.0.1 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4cfb347..18784da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -826,7 +826,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.0.0" +version = "2.0.1" dependencies = [ "chrono", "clap", diff --git a/Cargo.toml b/Cargo.toml index af27a21..37baee8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.0.0" +version = "2.0.1" edition = "2021" authors = ["ppom "] license = "AGPL-3.0" From e642620ae3825ca9e8496a6245ba6d375ecf4bea Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Jun 2025 12:00:00 +0200 Subject: [PATCH 009/241] Cross-compile C binaries too --- build.rs | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/build.rs b/build.rs index f7de30e..39c0dca 100644 --- a/build.rs +++ b/build.rs @@ -1,5 +1,5 @@ use std::{ - env::var_os, + env::{var, var_os}, io::{self, ErrorKind}, path::Path, process, @@ -10,14 +10,35 @@ use clap_complete::shells; // SubCommand defined here include!("src/cli.rs"); -fn 
compile_helper(name: &str, out_dir: &Path) -> io::Result<()> { - process::Command::new("gcc") - .args([ - &format!("helpers_c/{name}.c"), - "-o", - out_dir.join(name).to_str().expect("could not join path"), - ]) - .spawn()?; +fn gcc() -> String { + // TARGET looks like aarch64-unknown-linux-musl + match var("TARGET") { + Ok(target) => { + // We're looking for an environment variable looking like + // CC_aarch64_unknown_linux_musl + let target = target.replace("-", "_"); + var(format!("CC_{}", target.replace("-", "_"))).ok() + } + Err(_) => None, + } + .unwrap_or("gcc".into()) +} + +fn compile_helper(cc: &str, name: &str, out_dir: &Path) -> io::Result<()> { + let mut args = vec![ + format!("helpers_c/{name}.c"), + "-o".into(), + out_dir + .join(name) + .to_str() + .expect("could not join path") + .to_owned(), + ]; + // We can build static executables in cross environment + if cc != "gcc" { + args.push("-static".into()); + } + process::Command::new(cc).args(args).spawn()?; Ok(()) } @@ -26,8 +47,10 @@ fn main() -> io::Result<()> { let out_dir = PathBuf::from(var_os("OUT_DIR").ok_or(ErrorKind::NotFound)?).join("../../.."); // Compile C helpers - compile_helper("ip46tables", &out_dir)?; - compile_helper("nft46", &out_dir)?; + let cc = gcc(); + println!("CC is: {}", cc); + compile_helper(&cc, "ip46tables", &out_dir)?; + compile_helper(&cc, "nft46", &out_dir)?; // Build CLI let cli = clap::Command::new("reaction"); From 2e9e7a2a7b584b38a4d0b573a866206f2b73c17e Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Jun 2025 12:00:00 +0200 Subject: [PATCH 010/241] Remove old go codebase --- go.old/README.md | 4 - go.old/app/client.go | 393 ----------------------------------- go.old/app/daemon.go | 454 ----------------------------------------- go.old/app/example.yml | 108 ---------- go.old/app/main.go | 230 --------------------- go.old/app/persist.go | 264 ------------------------ go.old/app/pipe.go | 81 -------- go.old/app/startup.go | 178 ---------------- go.old/app/types.go | 
200 ------------------ go.old/go.mod | 10 - go.old/go.sum | 9 - go.old/logger/log.go | 80 -------- go.old/reaction.go | 13 -- 13 files changed, 2024 deletions(-) delete mode 100644 go.old/README.md delete mode 100644 go.old/app/client.go delete mode 100644 go.old/app/daemon.go delete mode 100644 go.old/app/example.yml delete mode 100644 go.old/app/main.go delete mode 100644 go.old/app/persist.go delete mode 100644 go.old/app/pipe.go delete mode 100644 go.old/app/startup.go delete mode 100644 go.old/app/types.go delete mode 100644 go.old/go.mod delete mode 100644 go.old/go.sum delete mode 100644 go.old/logger/log.go delete mode 100644 go.old/reaction.go diff --git a/go.old/README.md b/go.old/README.md deleted file mode 100644 index bc27779..0000000 --- a/go.old/README.md +++ /dev/null @@ -1,4 +0,0 @@ -This is the old Go codebase of reaction, ie. all 0.x and 1.x versions. -This codebase most probably won't be updated. - -Development now continues in Rust for reaction 2.x. diff --git a/go.old/app/client.go b/go.old/app/client.go deleted file mode 100644 index ad61bea..0000000 --- a/go.old/app/client.go +++ /dev/null @@ -1,393 +0,0 @@ -package app - -import ( - "bufio" - "encoding/gob" - "encoding/json" - "fmt" - "net" - "os" - "regexp" - "slices" - "strings" - "time" - - "framagit.org/ppom/reaction/logger" - "sigs.k8s.io/yaml" -) - -const ( - Info = 0 - Flush = 1 -) - -type Request struct { - Request int - Flush PSF -} - -type Response struct { - Err error - // Config Conf - Matches MatchesMap - Actions ActionsMap -} - -func SendAndRetrieve(data Request) Response { - conn, err := net.Dial("unix", *SocketPath) - if err != nil { - logger.Fatalln("Error opening connection to daemon:", err) - } - defer conn.Close() - - err = gob.NewEncoder(conn).Encode(data) - if err != nil { - logger.Fatalln("Can't send message:", err) - } - - var response Response - err = gob.NewDecoder(conn).Decode(&response) - if err != nil { - logger.Fatalln("Invalid answer from daemon:", err) - } - 
return response -} - -type PatternStatus struct { - Matches int `json:"matches,omitempty"` - Actions map[string][]string `json:"actions,omitempty"` -} -type MapPatternStatus map[Match]*PatternStatus -type MapPatternStatusFlush MapPatternStatus - -type ClientStatus map[string]map[string]MapPatternStatus -type ClientStatusFlush ClientStatus - -func (mps MapPatternStatusFlush) MarshalJSON() ([]byte, error) { - for _, v := range mps { - return json.Marshal(v) - } - return []byte(""), nil -} - -func (csf ClientStatusFlush) MarshalJSON() ([]byte, error) { - ret := make(map[string]map[string]MapPatternStatusFlush) - for k, v := range csf { - ret[k] = make(map[string]MapPatternStatusFlush) - for kk, vv := range v { - ret[k][kk] = MapPatternStatusFlush(vv) - } - } - return json.Marshal(ret) -} - -func pfMatches(streamName string, filterName string, regexes map[string]*regexp.Regexp, match Match, filter *Filter) bool { - // Check stream and filter match - if streamName != "" && streamName != filter.Stream.Name { - return false - } - if filterName != "" && filterName != filter.Name { - return false - } - // Check that all user requested patterns are in this filter - var nbMatched int - var localMatches = match.Split() - // For each pattern of this filter - for i, pattern := range filter.Pattern { - // Check that this pattern has user requested name - if reg, ok := regexes[pattern.Name]; ok { - // Check that the PF.p[i] matches user requested pattern - if reg.MatchString(localMatches[i]) { - nbMatched++ - } - } - } - if len(regexes) != nbMatched { - return false - } - // All checks passed - return true -} - -func addMatchToCS(cs ClientStatus, pf PF, times map[time.Time]struct{}) { - patterns, streamName, filterName := pf.P, pf.F.Stream.Name, pf.F.Name - if cs[streamName] == nil { - cs[streamName] = make(map[string]MapPatternStatus) - } - if cs[streamName][filterName] == nil { - cs[streamName][filterName] = make(MapPatternStatus) - } - cs[streamName][filterName][patterns] = 
&PatternStatus{len(times), nil} -} - -func addActionToCS(cs ClientStatus, pa PA, times map[time.Time]struct{}) { - patterns, streamName, filterName, actionName := pa.P, pa.A.Filter.Stream.Name, pa.A.Filter.Name, pa.A.Name - if cs[streamName] == nil { - cs[streamName] = make(map[string]MapPatternStatus) - } - if cs[streamName][filterName] == nil { - cs[streamName][filterName] = make(MapPatternStatus) - } - if cs[streamName][filterName][patterns] == nil { - cs[streamName][filterName][patterns] = new(PatternStatus) - } - ps := cs[streamName][filterName][patterns] - if ps.Actions == nil { - ps.Actions = make(map[string][]string) - } - for then := range times { - ps.Actions[actionName] = append(ps.Actions[actionName], then.Format(time.DateTime)) - } -} - -func printClientStatus(cs ClientStatus, format string) { - var text []byte - var err error - if format == "json" { - text, err = json.MarshalIndent(cs, "", " ") - } else { - text, err = yaml.Marshal(cs) - } - if err != nil { - logger.Fatalln("Failed to convert daemon binary response to text format:", err) - } - - fmt.Println(strings.ReplaceAll(string(text), "\\0", " ")) -} - -func compileKVPatterns(kvpatterns []string) map[string]*regexp.Regexp { - var regexes map[string]*regexp.Regexp - regexes = make(map[string]*regexp.Regexp) - for _, p := range kvpatterns { - // p syntax already checked in Main - key, value, found := strings.Cut(p, "=") - if !found { - logger.Printf(logger.ERROR, "Bad argument: no `=` in %v", p) - logger.Fatalln("Patterns must be prefixed by their name (e.g. 
ip=1.1.1.1)") - } - if regexes[key] != nil { - logger.Fatalf("Bad argument: same pattern name provided multiple times: %v", key) - } - compiled, err := regexp.Compile(fmt.Sprintf("^%v$", value)) - if err != nil { - logger.Fatalf("Bad argument: Could not compile: `%v`: %v", value, err) - } - regexes[key] = compiled - } - return regexes -} - -func ClientShow(format, stream, filter string, kvpatterns []string) { - response := SendAndRetrieve(Request{Info, PSF{}}) - if response.Err != nil { - logger.Fatalln("Received error from daemon:", response.Err) - } - - cs := make(ClientStatus) - - var regexes map[string]*regexp.Regexp - - if len(kvpatterns) != 0 { - regexes = compileKVPatterns(kvpatterns) - } - - var found bool - - // Painful data manipulation - for pf, times := range response.Matches { - // Check this PF is not empty - if len(times) == 0 { - continue - } - if !pfMatches(stream, filter, regexes, pf.P, pf.F) { - continue - } - addMatchToCS(cs, pf, times) - found = true - } - - // Painful data manipulation - for pa, times := range response.Actions { - // Check this PF is not empty - if len(times) == 0 { - continue - } - if !pfMatches(stream, filter, regexes, pa.P, pa.A.Filter) { - continue - } - addActionToCS(cs, pa, times) - found = true - } - - if !found { - logger.Println(logger.WARN, "No matching stream.filter items found. 
This does not mean it doesn't exist, maybe it just didn't receive any match.") - os.Exit(1) - } - - printClientStatus(cs, format) - - os.Exit(0) -} - -// TODO : Show values we just flushed - for now we got no details : -/* - * % ./reaction flush -l ssh.failedlogin login=".*t" - * ssh: - * failedlogin: - * actions: - * unban: - * - "2024-04-30 15:27:28" - * - "2024-04-30 15:27:28" - * - "2024-04-30 15:27:28" - * - "2024-04-30 15:27:28" - * - */ -func ClientFlush(format, streamName, filterName string, patterns []string) { - requestedPatterns := compileKVPatterns(patterns) - - // Remember which Filters are compatible with the query - filterCompatibility := make(map[SF]bool) - isCompatible := func(filter *Filter) bool { - sf := SF{filter.Stream.Name, filter.Name} - compatible, ok := filterCompatibility[sf] - - // already tested - if ok { - return compatible - } - - for k := range requestedPatterns { - if -1 == slices.IndexFunc(filter.Pattern, func(pattern *Pattern) bool { - return pattern.Name == k - }) { - filterCompatibility[sf] = false - return false - } - } - filterCompatibility[sf] = true - return true - } - - // match functions - kvMatch := func(filter *Filter, filterPatterns []string) bool { - // For each user requested pattern - for k, v := range requestedPatterns { - // Find its index on the Filter.Pattern - for i, pattern := range filter.Pattern { - if k == pattern.Name { - // Test the match - if !v.MatchString(filterPatterns[i]) { - return false - } - } - } - } - return true - } - - var found bool - fullMatch := func(filter *Filter, match Match) bool { - // Test if we limit by stream - if streamName == "" || filter.Stream.Name == streamName { - // Test if we limit by filter - if filterName == "" || filter.Name == filterName { - found = true - filterPatterns := match.Split() - return isCompatible(filter) && kvMatch(filter, filterPatterns) - } - } - return false - } - - response := SendAndRetrieve(Request{Info, PSF{}}) - if response.Err != nil { - 
logger.Fatalln("Received error from daemon:", response.Err) - } - - commands := make([]PSF, 0) - - cs := make(ClientStatus) - - for pf, times := range response.Matches { - if fullMatch(pf.F, pf.P) { - commands = append(commands, PSF{pf.P, pf.F.Stream.Name, pf.F.Name}) - addMatchToCS(cs, pf, times) - } - } - - for pa, times := range response.Actions { - if fullMatch(pa.A.Filter, pa.P) { - commands = append(commands, PSF{pa.P, pa.A.Filter.Stream.Name, pa.A.Filter.Name}) - addActionToCS(cs, pa, times) - } - } - - if !found { - logger.Println(logger.WARN, "No matching stream.filter items found. This does not mean it doesn't exist, maybe it just didn't receive any match.") - os.Exit(1) - } - - for _, psf := range commands { - response := SendAndRetrieve(Request{Flush, psf}) - if response.Err != nil { - logger.Fatalln("Received error from daemon:", response.Err) - } - } - - printClientStatus(cs, format) - os.Exit(0) -} - -func TestRegex(confFilename, regex, line string) { - conf := parseConf(confFilename) - - // Code close to app/startup.go - var usedPatterns []*Pattern - for _, pattern := range conf.Patterns { - if strings.Contains(regex, pattern.nameWithBraces) { - usedPatterns = append(usedPatterns, pattern) - regex = strings.Replace(regex, pattern.nameWithBraces, pattern.Regex, 1) - } - } - reg, err := regexp.Compile(regex) - if err != nil { - logger.Fatalln("ERROR the specified regex is invalid: %v", err) - os.Exit(1) - } - - // Code close to app/daemon.go - match := func(line string) { - var ignored bool - if matches := reg.FindStringSubmatch(line); matches != nil { - if usedPatterns != nil { - var result []string - for _, p := range usedPatterns { - match := matches[reg.SubexpIndex(p.Name)] - result = append(result, match) - if !p.notAnIgnore(&match) { - ignored = true - } - } - if !ignored { - fmt.Printf("\033[32mmatching\033[0m %v: %v\n", WithBrackets(result), line) - } else { - fmt.Printf("\033[33mignore matching\033[0m %v: %v\n", WithBrackets(result), line) - 
} - } else { - fmt.Printf("\033[32mmatching\033[0m [%v]:\n", line) - } - } else { - fmt.Printf("\033[31mno match\033[0m: %v\n", line) - } - } - - if line != "" { - match(line) - } else { - logger.Println(logger.INFO, "no second argument: reading from stdin") - scanner := bufio.NewScanner(os.Stdin) - for scanner.Scan() { - match(scanner.Text()) - } - } -} diff --git a/go.old/app/daemon.go b/go.old/app/daemon.go deleted file mode 100644 index d3bf51b..0000000 --- a/go.old/app/daemon.go +++ /dev/null @@ -1,454 +0,0 @@ -package app - -import ( - "bufio" - "os" - "os/exec" - "os/signal" - "strings" - "sync" - "syscall" - "time" - - "framagit.org/ppom/reaction/logger" -) - -// Executes a command and channel-send its stdout -func cmdStdout(commandline []string) chan *string { - lines := make(chan *string) - - go func() { - cmd := exec.Command(commandline[0], commandline[1:]...) - stdout, err := cmd.StdoutPipe() - if err != nil { - logger.Fatalln("couldn't open stdout on command:", err) - } - if err := cmd.Start(); err != nil { - logger.Fatalln("couldn't start command:", err) - } - defer stdout.Close() - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - line := scanner.Text() - lines <- &line - logger.Println(logger.DEBUG, "stdout:", line) - } - close(lines) - }() - - return lines -} - -func runCommands(commands [][]string, moment string) bool { - ok := true - for _, command := range commands { - cmd := exec.Command(command[0], command[1:]...) 
- cmd.WaitDelay = time.Minute - - logger.Printf(logger.INFO, "%v command: run %v\n", moment, command) - - if err := cmd.Start(); err != nil { - logger.Printf(logger.ERROR, "%v command: run %v: %v", moment, command, err) - ok = false - } else { - err := cmd.Wait() - if err != nil { - logger.Printf(logger.ERROR, "%v command: run %v: %v", moment, command, err) - ok = false - } - } - } - return ok -} - -func (p *Pattern) notAnIgnore(match *string) bool { - for _, regex := range p.compiledIgnoreRegex { - if regex.MatchString(*match) { - return false - } - } - - for _, ignore := range p.Ignore { - if ignore == *match { - return false - } - } - return true -} - -// Whether one of the filter's regexes is matched on a line -func (f *Filter) match(line *string) Match { - for _, regex := range f.compiledRegex { - - if matches := regex.FindStringSubmatch(*line); matches != nil { - if f.Pattern != nil { - var result []string - for _, p := range f.Pattern { - match := matches[regex.SubexpIndex(p.Name)] - if p.notAnIgnore(&match) { - result = append(result, match) - } - } - if len(result) == len(f.Pattern) { - logger.Printf(logger.INFO, "%s.%s: match %s", f.Stream.Name, f.Name, WithBrackets(result)) - return JoinMatch(result) - } - } else { - logger.Printf(logger.INFO, "%s.%s: match [.]\n", f.Stream.Name, f.Name) - // No pattern, so this match will never actually be used - return "." 
- } - } - } - return "" -} - -func (f *Filter) sendActions(match Match, at time.Time) { - for _, a := range f.Actions { - actionsC <- PAT{match, a, at.Add(a.afterDuration)} - } -} - -func (a *Action) exec(match Match) { - defer wgActions.Done() - - var computedCommand []string - - if a.Filter.Pattern != nil { - computedCommand = make([]string, 0, len(a.Cmd)) - matches := match.Split() - - for _, item := range a.Cmd { - for i, p := range a.Filter.Pattern { - item = strings.ReplaceAll(item, p.nameWithBraces, matches[i]) - } - computedCommand = append(computedCommand, item) - } - } else { - computedCommand = a.Cmd - } - - logger.Printf(logger.INFO, "%s.%s.%s: run %s\n", a.Filter.Stream.Name, a.Filter.Name, a.Name, computedCommand) - - cmd := exec.Command(computedCommand[0], computedCommand[1:]...) - - if ret := cmd.Run(); ret != nil { - logger.Printf(logger.ERROR, "%s.%s.%s: run %s, code %s\n", a.Filter.Stream.Name, a.Filter.Name, a.Name, computedCommand, ret) - } -} - -func ActionsManager(concurrency int) { - // concurrency init - execActionsC := make(chan PA) - if concurrency > 0 { - for i := 0; i < concurrency; i++ { - go func() { - var pa PA - for { - pa = <-execActionsC - pa.A.exec(pa.P) - } - }() - } - } else { - go func() { - var pa PA - for { - pa = <-execActionsC - go func(pa PA) { - pa.A.exec(pa.P) - }(pa) - } - }() - } - execAction := func(a *Action, p Match) { - wgActions.Add(1) - execActionsC <- PA{p, a} - } - - // main - pendingActionsC := make(chan PAT) - for { - select { - case pat := <-actionsC: - pa := PA{pat.P, pat.A} - pattern, action, then := pat.P, pat.A, pat.T - now := time.Now() - // check if must be executed now - if then.Compare(now) <= 0 { - execAction(action, pattern) - } else { - if actions[pa] == nil { - actions[pa] = make(map[time.Time]struct{}) - } - actions[pa][then] = struct{}{} - go func(insidePat PAT, insideNow time.Time) { - time.Sleep(insidePat.T.Sub(insideNow)) - pendingActionsC <- insidePat - }(pat, now) - } - case pat := 
<-pendingActionsC: - pa := PA{pat.P, pat.A} - pattern, action, then := pat.P, pat.A, pat.T - if actions[pa] != nil { - delete(actions[pa], then) - execAction(action, pattern) - } - case fo := <-flushToActionsC: - for pa := range actions { - if fo.S == pa.A.Filter.Stream.Name && - fo.F == pa.A.Filter.Name && - fo.P == pa.P { - for range actions[pa] { - execAction(pa.A, pa.P) - } - delete(actions, pa) - break - } - } - case _, _ = <-stopActions: - for pa := range actions { - if pa.A.OnExit { - for range actions[pa] { - execAction(pa.A, pa.P) - } - } - } - wgActions.Done() - return - } - } -} - -func MatchesManager() { - var fo PSF - var pft PFT - end := false - - for !end { - select { - case fo = <-flushToMatchesC: - matchesManagerHandleFlush(fo) - case fo, ok := <-startupMatchesC: - if !ok { - end = true - } else { - _ = matchesManagerHandleMatch(fo) - } - } - } - - for { - select { - case fo = <-flushToMatchesC: - matchesManagerHandleFlush(fo) - case pft = <-matchesC: - - entry := LogEntry{pft.T, 0, pft.P, pft.F.Stream.Name, pft.F.Name, 0, false} - - entry.Exec = matchesManagerHandleMatch(pft) - - logsC <- entry - } - } -} - -func matchesManagerHandleFlush(fo PSF) { - matchesLock.Lock() - for pf := range matches { - if fo.S == pf.F.Stream.Name && - fo.F == pf.F.Name && - fo.P == pf.P { - delete(matches, pf) - break - } - } - matchesLock.Unlock() -} - -func matchesManagerHandleMatch(pft PFT) bool { - matchesLock.Lock() - defer matchesLock.Unlock() - - filter, patterns, then := pft.F, pft.P, pft.T - pf := PF{pft.P, pft.F} - - if filter.Retry > 1 { - // make sure map exists - if matches[pf] == nil { - matches[pf] = make(map[time.Time]struct{}) - } - // add new match - matches[pf][then] = struct{}{} - // remove match when expired - go func(pf PF, then time.Time) { - time.Sleep(then.Sub(time.Now()) + filter.retryDuration) - matchesLock.Lock() - if matches[pf] != nil { - // FIXME replace this and all similar occurences - // by clear() when switching to go 1.21 - 
delete(matches[pf], then) - } - matchesLock.Unlock() - }(pf, then) - } - - if filter.Retry <= 1 || len(matches[pf]) >= filter.Retry { - delete(matches, pf) - filter.sendActions(patterns, then) - return true - } - return false -} - -func StreamManager(s *Stream, endedSignal chan *Stream) { - defer wgStreams.Done() - logger.Printf(logger.INFO, "%s: start %s\n", s.Name, s.Cmd) - - lines := cmdStdout(s.Cmd) - for { - select { - case line, ok := <-lines: - if !ok { - endedSignal <- s - return - } - for _, filter := range s.Filters { - if match := filter.match(line); match != "" { - matchesC <- PFT{match, filter, time.Now()} - } - } - case _, _ = <-stopStreams: - return - } - } - -} - -var actions ActionsMap -var matches MatchesMap -var matchesLock sync.Mutex - -var stopStreams chan bool -var stopActions chan bool -var wgActions sync.WaitGroup -var wgStreams sync.WaitGroup - -/* - - ↓ -StreamManager onstartup:matches - ↓ ↓ ↑ - matches→ MatchesManager →logs→ DatabaseManager ←· - ↑ ↓ ↑ - ↑ actions→ ActionsManager ↑ - ↑ ↑ ↑ -SocketManager →flushes→→→→→→→→→→·→→→→→→→→→→→→→→→→· - ↑ - -*/ - -// DatabaseManager → MatchesManager -var startupMatchesC chan PFT - -// StreamManager → MatchesManager -var matchesC chan PFT - -// MatchesManager → DatabaseManager -var logsC chan LogEntry - -// MatchesManager → ActionsManager -var actionsC chan PAT - -// SocketManager, DatabaseManager → MatchesManager -var flushToMatchesC chan PSF - -// SocketManager → ActionsManager -var flushToActionsC chan PSF - -// SocketManager → DatabaseManager -var flushToDatabaseC chan LogEntry - -func Daemon(confFilename string) { - conf := parseConf(confFilename) - - startupMatchesC = make(chan PFT) - matchesC = make(chan PFT) - logsC = make(chan LogEntry) - actionsC = make(chan PAT) - flushToMatchesC = make(chan PSF) - flushToActionsC = make(chan PSF) - flushToDatabaseC = make(chan LogEntry) - stopActions = make(chan bool) - stopStreams = make(chan bool) - actions = make(ActionsMap) - matches = make(MatchesMap) 
- - _ = runCommands(conf.Start, "start") - - go DatabaseManager(conf) - go MatchesManager() - go ActionsManager(conf.Concurrency) - - // Ready to start - - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - - endSignals := make(chan *Stream) - nbStreamsInExecution := len(conf.Streams) - - for _, stream := range conf.Streams { - wgStreams.Add(1) - go StreamManager(stream, endSignals) - } - - go SocketManager(conf) - - for { - select { - case finishedStream := <-endSignals: - logger.Printf(logger.ERROR, "%s stream finished", finishedStream.Name) - nbStreamsInExecution-- - if nbStreamsInExecution == 0 { - quit(conf, false) - } - case <-sigs: - // Trap endSignals, which may cause a deadlock otherwise - go func() { - ok := true - for ok { - _, ok = <-endSignals - } - }() - logger.Printf(logger.INFO, "Received SIGINT/SIGTERM, exiting") - quit(conf, true) - } - } -} - -func quit(conf *Conf, graceful bool) { - // send stop to StreamManager·s - close(stopStreams) - logger.Println(logger.INFO, "Waiting for Streams to finish...") - wgStreams.Wait() - // ActionsManager calls wgActions.Done() when it has launched all pending actions - wgActions.Add(1) - // send stop to ActionsManager - close(stopActions) - // stop all actions - logger.Println(logger.INFO, "Waiting for Actions to finish...") - wgActions.Wait() - // run stop commands - stopOk := runCommands(conf.Stop, "stop") - // delete pipe - err := os.Remove(*SocketPath) - if err != nil { - logger.Println(logger.ERROR, "Failed to remove socket:", err) - } - - if !stopOk || !graceful { - os.Exit(1) - } - os.Exit(0) -} diff --git a/go.old/app/example.yml b/go.old/app/example.yml deleted file mode 100644 index 759f597..0000000 --- a/go.old/app/example.yml +++ /dev/null @@ -1,108 +0,0 @@ ---- -# This example configuration file is a good starting point, but you're -# strongly encouraged to take a look at the full documentation: https://reaction.ppom.me -# -# This file is using the 
well-established YAML configuration language. -# Note that the more powerful JSONnet configuration language is also supported -# and that the documentation uses JSONnet - -# definitions are just a place to put chunks of conf you want to reuse in another place -# using YAML anchors `&name` and pointers `*name` -# definitions are not readed by reaction -definitions: - - &iptablesban [ 'ip46tables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP' ] - - &iptablesunban [ 'ip46tables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP' ] -# ip46tables is a minimal C program (only POSIX dependencies) present as a subdirectory. -# it permits to handle both ipv4/iptables and ipv6/ip6tables commands - -# if set to a positive number → max number of concurrent actions -# if set to a negative number → no limit -# if not specified or set to 0 → defaults to the number of CPUs on the system -concurrency: 0 - -# patterns are substitued in regexes. -# when a filter performs an action, it replaces the found pattern -patterns: - ip: - # reaction regex syntax is defined here: https://github.com/google/re2/wiki/Syntax - # simple version: regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})' - regex: 
'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))' - ignore: - - 127.0.0.1 - - ::1 - # Patterns can be ignored based on regexes, it will try to match the whole string detected by the pattern - # ignoreregex: - # - '10\.0\.[0-9]{1,3}\.[0-9]{1,3}' - -# Those commands will be executed in order at start, before everything else -start: - - [ 'ip46tables', '-w', '-N', 'reaction' ] - - [ 'ip46tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ] - -# Those commands will be executed in order at stop, after everything else -stop: - - [ 'ip46tables', '-w,', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w,', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-F', 'reaction' ] - - [ 'ip46tables', '-w', '-X', 'reaction' ] - -# streams are commands -# they are run and their ouptut is captured -# *example:* `tail -f /var/log/nginx/access.log` -# their output will be used by one or more filters -streams: - # streams have a user-defined name - ssh: - # note that if the command is not in environment's `PATH` - # its full path must be given. 
- cmd: [ 'journalctl', '-n0', '-fu', 'sshd.service' ] - # filters run actions when they match regexes on a stream - filters: - # filters have a user-defined name - failedlogin: - # reaction's regex syntax is defined here: https://github.com/google/re2/wiki/Syntax - regex: - # is predefined in the patterns section - # ip's regex is inserted in the following regex - - 'authentication failure;.*rhost=' - - 'Failed password for .* from ' - - 'Connection (reset|closed) by (authenticating|invalid) user .* ' - # if retry and retryperiod are defined, - # the actions will only take place if a same pattern is - # found `retry` times in a `retryperiod` interval - retry: 3 - # format is defined here: https://pkg.go.dev/time#ParseDuration - retryperiod: 6h - # actions are run by the filter when regexes are matched - actions: - # actions have a user-defined name - ban: - # YAML substitutes *reference by the value anchored at &reference - cmd: *iptablesban - unban: - cmd: *iptablesunban - # if after is defined, the action will not take place immediately, but after a specified duration - # same format as retryperiod - after: 48h - # let's say reaction is quitting. does it run all those pending commands which had an `after` duration set? - # if you want reaction to run those pending commands before exiting, you can set this: - # onexit: true - # (defaults to false) - # here it is not useful because we will flush and delete the chain containing the bans anyway - # (with the stop commands) - -# persistence -# tldr; when an `after` action is set in a filter, such filter acts as a 'jail', -# which is persisted after reboots. -# -# when a filter is triggered, there are 2 flows: -# -# if none of its actions have an `after` directive set: -# no action will be replayed. 
-# -# else (if at least one action has an `after` directive set): -# if reaction stops while `after` actions are pending: -# and reaction starts again while those actions would still be pending: -# reaction executes the past actions (actions without after or with then+after < now) -# and plans the execution of future actions (actions with then+after > now) diff --git a/go.old/app/main.go b/go.old/app/main.go deleted file mode 100644 index e5bfb4c..0000000 --- a/go.old/app/main.go +++ /dev/null @@ -1,230 +0,0 @@ -package app - -import ( - _ "embed" - "flag" - "fmt" - "os" - "strings" - - "framagit.org/ppom/reaction/logger" -) - -func addStringFlag(names []string, defvalue string, f *flag.FlagSet) *string { - var value string - for _, name := range names { - f.StringVar(&value, name, defvalue, "") - } - return &value -} - -func addBoolFlag(names []string, f *flag.FlagSet) *bool { - var value bool - for _, name := range names { - f.BoolVar(&value, name, false, "") - } - return &value -} - -var SocketPath *string - -func addSocketFlag(f *flag.FlagSet) *string { - return addStringFlag([]string{"s", "socket"}, "/run/reaction/reaction.sock", f) -} - -func addConfFlag(f *flag.FlagSet) *string { - return addStringFlag([]string{"c", "config"}, "", f) -} - -func addFormatFlag(f *flag.FlagSet) *string { - return addStringFlag([]string{"f", "format"}, "yaml", f) -} - -func addLimitFlag(f *flag.FlagSet) *string { - return addStringFlag([]string{"l", "limit"}, "", f) -} - -func addLevelFlag(f *flag.FlagSet) *string { - return addStringFlag([]string{"l", "loglevel"}, "INFO", f) -} - -func subCommandParse(f *flag.FlagSet, maxRemainingArgs int) { - help := addBoolFlag([]string{"h", "help"}, f) - f.Parse(os.Args[2:]) - if *help { - basicUsage() - os.Exit(0) - } - // -1 = no limit to remaining args - if maxRemainingArgs > -1 && len(f.Args()) > maxRemainingArgs { - fmt.Printf("ERROR unrecognized argument(s): %v\n", f.Args()[maxRemainingArgs:]) - basicUsage() - os.Exit(1) - } -} - -func 
basicUsage() { - const ( - bold = "\033[1m" - reset = "\033[0m" - ) - fmt.Print( - bold + `reaction help` + reset + ` - # print this help message - -` + bold + `reaction start` + reset + ` - # start the daemon - - # options: - -c/--config CONFIG_FILE # configuration file in json, jsonnet or yaml format (required) - -l/--loglevel LEVEL # minimum log level to show, in DEBUG, INFO, WARN, ERROR, FATAL - # (default: INFO) - -s/--socket SOCKET # path to the client-daemon communication socket - # (default: /run/reaction/reaction.sock) - -` + bold + `reaction example-conf` + reset + ` - # print a configuration file example - -` + bold + `reaction show` + reset + ` [NAME=PATTERN...] - # show current matches and which actions are still to be run for the specified PATTERN regexe(s) - # (e.g know what is currenly banned) - - reaction show - reaction show "ip=192.168.1.1" - reaction show "ip=192\.168\..*" login=root - - # options: - -s/--socket SOCKET # path to the client-daemon communication socket - -f/--format yaml|json # (default: yaml) - -l/--limit STREAM[.FILTER] # only show items related to this STREAM (or STREAM.FILTER) - -` + bold + `reaction flush` + reset + ` NAME=PATTERN [NAME=PATTERN...] 
- # remove currently active matches and run currently pending actions for the specified PATTERN regexe(s) - # (then show flushed matches and actions) - - reaction flush "ip=192.168.1.1" - reaction flush "ip=192\.168\..*" login=root - - # options: - -s/--socket SOCKET # path to the client-daemon communication socket - -f/--format yaml|json # (default: yaml) - -l/--limit STREAM.FILTER # flush only items related to this STREAM.FILTER - -` + bold + `reaction test-regex` + reset + ` REGEX LINE # test REGEX against LINE -cat FILE | ` + bold + `reaction test-regex` + reset + ` REGEX # test REGEX against each line of FILE - - # options: - -c/--config CONFIG_FILE # configuration file in json, jsonnet or yaml format - # optional: permits to use configured patterns like in regex - -` + bold + `reaction version` + reset + ` - # print version information - -see usage examples, service configurations and good practices -on the ` + bold + `wiki` + reset + `: https://reaction.ppom.me -`) -} - -//go:embed example.yml -var exampleConf string - -func Main(version string) { - if len(os.Args) <= 1 { - logger.Fatalln("No argument provided. 
Try `reaction help`") - basicUsage() - os.Exit(1) - } - f := flag.NewFlagSet(os.Args[1], flag.ExitOnError) - switch os.Args[1] { - case "help", "-h", "-help", "--help": - basicUsage() - - case "version", "-v", "--version": - fmt.Printf("reaction version %v\n", version) - - case "example-conf": - subCommandParse(f, 0) - fmt.Print(exampleConf) - - case "start": - SocketPath = addSocketFlag(f) - confFilename := addConfFlag(f) - logLevel := addLevelFlag(f) - subCommandParse(f, 0) - if *confFilename == "" { - logger.Fatalln("no configuration file provided") - basicUsage() - os.Exit(1) - } - logLevelType := logger.FromString(*logLevel) - if logLevelType == logger.UNKNOWN { - logger.Fatalf("Log Level %v not recognized", logLevel) - basicUsage() - os.Exit(1) - } - logger.SetLogLevel(logLevelType) - Daemon(*confFilename) - - case "show": - SocketPath = addSocketFlag(f) - queryFormat := addFormatFlag(f) - limit := addLimitFlag(f) - subCommandParse(f, -1) - if *queryFormat != "yaml" && *queryFormat != "json" { - logger.Fatalln("only yaml and json formats are supported") - } - stream, filter := "", "" - if *limit != "" { - splitSF := strings.Split(*limit, ".") - stream = splitSF[0] - if len(splitSF) == 2 { - filter = splitSF[1] - } else if len(splitSF) > 2 { - logger.Fatalln("-l/--limit: only one . separator is supported") - } - } - ClientShow(*queryFormat, stream, filter, f.Args()) - - case "flush": - SocketPath = addSocketFlag(f) - queryFormat := addFormatFlag(f) - limit := addLimitFlag(f) - subCommandParse(f, -1) - if *queryFormat != "yaml" && *queryFormat != "json" { - logger.Fatalln("only yaml and json formats are supported") - } - if len(f.Args()) == 0 { - logger.Fatalln("subcommand flush takes at least one TARGET argument") - } - stream, filter := "", "" - if *limit != "" { - splitSF := strings.Split(*limit, ".") - stream = splitSF[0] - if len(splitSF) == 2 { - filter = splitSF[1] - } else if len(splitSF) > 2 { - logger.Fatalln("-l/--limit: only one . 
separator is supported") - } - } - ClientFlush(*queryFormat, stream, filter, f.Args()) - - case "test-regex": - // socket not needed, no interaction with the daemon - confFilename := addConfFlag(f) - subCommandParse(f, 2) - if *confFilename == "" { - logger.Println(logger.WARN, "no configuration file provided. Can't make use of registered patterns.") - } - if f.Arg(0) == "" { - logger.Fatalln("subcommand test-regex takes at least one REGEX argument") - basicUsage() - os.Exit(1) - } - TestRegex(*confFilename, f.Arg(0), f.Arg(1)) - - default: - logger.Fatalf("subcommand %v not recognized. Try `reaction help`", os.Args[1]) - basicUsage() - os.Exit(1) - } -} diff --git a/go.old/app/persist.go b/go.old/app/persist.go deleted file mode 100644 index 78e78a1..0000000 --- a/go.old/app/persist.go +++ /dev/null @@ -1,264 +0,0 @@ -package app - -import ( - "encoding/gob" - "errors" - "io" - "os" - "time" - - "framagit.org/ppom/reaction/logger" -) - -const ( - logDBName = "./reaction-matches.db" - logDBNewName = "./reaction-matches.new.db" - flushDBName = "./reaction-flushes.db" -) - -func openDB(path string) (bool, *ReadDB) { - file, err := os.Open(path) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - logger.Printf(logger.WARN, "No DB found at %s. 
It's ok if this is the first time reaction is running.\n", path) - return true, nil - } - logger.Fatalln("Failed to open DB:", err) - } - return false, &ReadDB{file, gob.NewDecoder(file)} -} - -func createDB(path string) *WriteDB { - file, err := os.Create(path) - if err != nil { - logger.Fatalln("Failed to create DB:", err) - } - return &WriteDB{file, gob.NewEncoder(file)} -} - -func DatabaseManager(c *Conf) { - logDB, flushDB := c.RotateDB(true) - close(startupMatchesC) - c.manageLogs(logDB, flushDB) -} - -func (c *Conf) manageLogs(logDB *WriteDB, flushDB *WriteDB) { - cpt := 0 - writeSF2int := make(map[SF]int) - writeCpt := 1 - for { - select { - case entry := <-flushToDatabaseC: - flushDB.enc.Encode(entry) - case entry := <-logsC: - encodeOrFatal(logDB.enc, entry, writeSF2int, &writeCpt) - cpt++ - // let's say 100 000 entries ~ 10 MB - if cpt == 500_000 { - cpt = 0 - logger.Printf(logger.INFO, "Rotating database...") - logDB.file.Close() - flushDB.file.Close() - logDB, flushDB = c.RotateDB(false) - logger.Printf(logger.INFO, "Rotated database") - } - } - } -} - -func (c *Conf) RotateDB(startup bool) (*WriteDB, *WriteDB) { - var ( - doesntExist bool - err error - logReadDB *ReadDB - flushReadDB *ReadDB - logWriteDB *WriteDB - flushWriteDB *WriteDB - ) - doesntExist, logReadDB = openDB(logDBName) - if doesntExist { - return createDB(logDBName), createDB(flushDBName) - } - doesntExist, flushReadDB = openDB(flushDBName) - if doesntExist { - logger.Println(logger.WARN, "Strange! 
No flushes db, opening /dev/null instead") - doesntExist, flushReadDB = openDB("/dev/null") - if doesntExist { - logger.Fatalln("Opening dummy /dev/null failed") - } - } - - logWriteDB = createDB(logDBNewName) - - rotateDB(c, logReadDB.dec, flushReadDB.dec, logWriteDB.enc, startup) - - err = logReadDB.file.Close() - if err != nil { - logger.Fatalln("Failed to close old DB:", err) - } - - // It should be ok to rename an open file - err = os.Rename(logDBNewName, logDBName) - if err != nil { - logger.Fatalln("Failed to replace old DB with new one:", err) - } - - err = os.Remove(flushDBName) - if err != nil && !errors.Is(err, os.ErrNotExist) { - logger.Fatalln("Failed to delete old DB:", err) - } - - flushWriteDB = createDB(flushDBName) - return logWriteDB, flushWriteDB -} - -func rotateDB(c *Conf, logDec *gob.Decoder, flushDec *gob.Decoder, logEnc *gob.Encoder, startup bool) { - // This mapping is a space optimization feature - // It permits to compress stream+filter to a small number (which is a byte in gob) - // We do this only for matches, not for flushes - readSF2int := make(map[int]SF) - writeSF2int := make(map[SF]int) - writeCounter := 1 - // This extra code is made to warn only one time for each non-existant filter - discardedEntries := make(map[SF]int) - malformedEntries := 0 - defer func() { - for sf, t := range discardedEntries { - if t > 0 { - logger.Printf(logger.WARN, "info discarded %v times from the DBs: stream/filter not found: %s.%s\n", t, sf.S, sf.F) - } - } - if malformedEntries > 0 { - logger.Printf(logger.WARN, "%v malformed entries discarded from the DBs\n", malformedEntries) - } - }() - - // pattern, stream, fitler → last flush - flushes := make(map[*PSF]time.Time) - for { - var entry LogEntry - var filter *Filter - // decode entry - err := flushDec.Decode(&entry) - if err != nil { - if err == io.EOF { - break - } - malformedEntries++ - continue - } - - // retrieve related filter - if entry.Stream != "" || entry.Filter != "" { - if stream := 
c.Streams[entry.Stream]; stream != nil { - filter = stream.Filters[entry.Filter] - } - if filter == nil { - discardedEntries[SF{entry.Stream, entry.Filter}]++ - continue - } - } - - // store - flushes[&PSF{entry.Pattern, entry.Stream, entry.Filter}] = entry.T - } - - lastTimeCpt := int64(0) - now := time.Now() - for { - var entry LogEntry - var filter *Filter - - // decode entry - err := logDec.Decode(&entry) - if err != nil { - if err == io.EOF { - break - } - malformedEntries++ - continue - } - - // retrieve related stream & filter - if entry.Stream == "" && entry.Filter == "" { - sf, ok := readSF2int[entry.SF] - if !ok { - discardedEntries[SF{"", ""}]++ - continue - } - entry.Stream = sf.S - entry.Filter = sf.F - } - if stream := c.Streams[entry.Stream]; stream != nil { - filter = stream.Filters[entry.Filter] - } - if filter == nil { - discardedEntries[SF{entry.Stream, entry.Filter}]++ - continue - } - if entry.SF != 0 { - readSF2int[entry.SF] = SF{entry.Stream, entry.Filter} - } - - // check if number of patterns is in sync - if len(entry.Pattern.Split()) != len(filter.Pattern) { - continue - } - - // check if it hasn't been flushed - lastGlobalFlush := flushes[&PSF{entry.Pattern, "", ""}].Unix() - lastLocalFlush := flushes[&PSF{entry.Pattern, entry.Stream, entry.Filter}].Unix() - entryTime := entry.T.Unix() - if lastLocalFlush > entryTime || lastGlobalFlush > entryTime { - continue - } - - // restore time - if entry.T.IsZero() { - entry.T = time.Unix(entry.S, lastTimeCpt) - } - lastTimeCpt++ - - // store matches - if !entry.Exec && entry.T.Add(filter.retryDuration).Unix() > now.Unix() { - if startup { - startupMatchesC <- PFT{entry.Pattern, filter, entry.T} - } - - encodeOrFatal(logEnc, entry, writeSF2int, &writeCounter) - } - - // replay executions - if entry.Exec && entry.T.Add(*filter.longuestActionDuration).Unix() > now.Unix() { - if startup { - flushToMatchesC <- PSF{entry.Pattern, entry.Stream, entry.Filter} - filter.sendActions(entry.Pattern, entry.T) - 
} - - encodeOrFatal(logEnc, entry, writeSF2int, &writeCounter) - } - } -} - -func encodeOrFatal(enc *gob.Encoder, entry LogEntry, writeSF2int map[SF]int, writeCounter *int) { - // Stream/Filter reduction - sf, ok := writeSF2int[SF{entry.Stream, entry.Filter}] - if ok { - entry.SF = sf - entry.Stream = "" - entry.Filter = "" - } else { - entry.SF = *writeCounter - writeSF2int[SF{entry.Stream, entry.Filter}] = *writeCounter - *writeCounter++ - } - // Time reduction - if !entry.T.IsZero() { - entry.S = entry.T.Unix() - entry.T = time.Time{} - } - err := enc.Encode(entry) - if err != nil { - logger.Fatalln("Failed to write to new DB:", err) - } -} diff --git a/go.old/app/pipe.go b/go.old/app/pipe.go deleted file mode 100644 index 0d5b4ac..0000000 --- a/go.old/app/pipe.go +++ /dev/null @@ -1,81 +0,0 @@ -package app - -import ( - "encoding/gob" - "errors" - "net" - "os" - "path" - "time" - - "framagit.org/ppom/reaction/logger" -) - -func createOpenSocket() net.Listener { - err := os.MkdirAll(path.Dir(*SocketPath), 0755) - if err != nil { - logger.Fatalln("Failed to create socket directory") - } - _, err = os.Stat(*SocketPath) - if err == nil { - logger.Println(logger.WARN, "socket", SocketPath, "already exists: Is the daemon already running? 
Deleting.") - err = os.Remove(*SocketPath) - if err != nil { - logger.Fatalln("Failed to remove socket:", err) - } - } - ln, err := net.Listen("unix", *SocketPath) - if err != nil { - logger.Fatalln("Failed to create socket:", err) - } - return ln -} - -// Handle connections -//func SocketManager(streams map[string]*Stream) { -func SocketManager(conf *Conf) { - ln := createOpenSocket() - defer ln.Close() - for { - conn, err := ln.Accept() - if err != nil { - logger.Println(logger.ERROR, "Failed to open connection from cli:", err) - continue - } - go func(conn net.Conn) { - defer conn.Close() - var request Request - var response Response - - err := gob.NewDecoder(conn).Decode(&request) - if err != nil { - logger.Println(logger.ERROR, "Invalid Message from cli:", err) - return - } - - switch request.Request { - case Info: - // response.Config = *conf - response.Matches = matches - response.Actions = actions - case Flush: - le := LogEntry{time.Now(), 0, request.Flush.P, request.Flush.S, request.Flush.F, 0, false} - - flushToMatchesC <- request.Flush - flushToActionsC <- request.Flush - flushToDatabaseC <- le - - default: - logger.Println(logger.ERROR, "Invalid Message from cli: unrecognised command type") - response.Err = errors.New("unrecognised command type") - return - } - - err = gob.NewEncoder(conn).Encode(response) - if err != nil { - logger.Println(logger.ERROR, "Can't respond to cli:", err) - return - } - }(conn) - } -} diff --git a/go.old/app/startup.go b/go.old/app/startup.go deleted file mode 100644 index d8cf167..0000000 --- a/go.old/app/startup.go +++ /dev/null @@ -1,178 +0,0 @@ -package app - -import ( - "encoding/json" - "fmt" - "os" - "regexp" - "runtime" - "slices" - "strings" - "time" - - "framagit.org/ppom/reaction/logger" - - "github.com/google/go-jsonnet" -) - -func (c *Conf) setup() { - if c.Concurrency == 0 { - c.Concurrency = runtime.NumCPU() - } - - // Assure we iterate through c.Patterns map in reproductible order - sortedPatternNames := 
make([]string, 0, len(c.Patterns)) - for k := range c.Patterns { - sortedPatternNames = append(sortedPatternNames, k) - } - slices.Sort(sortedPatternNames) - - for _, patternName := range sortedPatternNames { - pattern := c.Patterns[patternName] - pattern.Name = patternName - pattern.nameWithBraces = fmt.Sprintf("<%s>", pattern.Name) - - if pattern.Regex == "" { - logger.Fatalf("Bad configuration: pattern's regex %v is empty!", patternName) - } - - compiled, err := regexp.Compile(fmt.Sprintf("^%v$", pattern.Regex)) - if err != nil { - logger.Fatalf("Bad configuration: pattern %v: %v", patternName, err) - } - pattern.Regex = fmt.Sprintf("(?P<%s>%s)", patternName, pattern.Regex) - for _, ignore := range pattern.Ignore { - if !compiled.MatchString(ignore) { - logger.Fatalf("Bad configuration: pattern ignore '%v' doesn't match pattern %v! It should be fixed or removed.", ignore, pattern.nameWithBraces) - } - } - - // Compile ignore regexes - for _, regex := range pattern.IgnoreRegex { - // Enclose the regex to make sure that it matches the whole detected string - compiledRegex, err := regexp.Compile("^" + regex + "$") - if err != nil { - logger.Fatalf("Bad configuration: in ignoreregex of pattern %s: %v", pattern.Name, err) - } - - pattern.compiledIgnoreRegex = append(pattern.compiledIgnoreRegex, *compiledRegex) - } - } - - if len(c.Streams) == 0 { - logger.Fatalln("Bad configuration: no streams configured!") - } - for streamName := range c.Streams { - - stream := c.Streams[streamName] - stream.Name = streamName - - if strings.Contains(stream.Name, ".") { - logger.Fatalf("Bad configuration: character '.' 
is not allowed in stream names: '%v'", stream.Name) - } - - if len(stream.Filters) == 0 { - logger.Fatalf("Bad configuration: no filters configured in %v", stream.Name) - } - for filterName := range stream.Filters { - - filter := stream.Filters[filterName] - filter.Stream = stream - filter.Name = filterName - - if strings.Contains(filter.Name, ".") { - logger.Fatalf("Bad configuration: character '.' is not allowed in filter names: '%v'", filter.Name) - } - // Parse Duration - if filter.RetryPeriod == "" { - if filter.Retry > 1 { - logger.Fatalf("Bad configuration: retry but no retryperiod in %v.%v", stream.Name, filter.Name) - } - } else { - retryDuration, err := time.ParseDuration(filter.RetryPeriod) - if err != nil { - logger.Fatalf("Bad configuration: Failed to parse retry time in %v.%v: %v", stream.Name, filter.Name, err) - } - filter.retryDuration = retryDuration - } - - if len(filter.Regex) == 0 { - logger.Fatalf("Bad configuration: no regexes configured in %v.%v", stream.Name, filter.Name) - } - // Compute Regexes - // Look for Patterns inside Regexes - for _, regex := range filter.Regex { - // iterate through patterns in reproductible order - for _, patternName := range sortedPatternNames { - pattern := c.Patterns[patternName] - if strings.Contains(regex, pattern.nameWithBraces) { - if !slices.Contains(filter.Pattern, pattern) { - filter.Pattern = append(filter.Pattern, pattern) - } - regex = strings.Replace(regex, pattern.nameWithBraces, pattern.Regex, 1) - } - } - compiledRegex, err := regexp.Compile(regex) - if err != nil { - logger.Fatalf("Bad configuration: regex of filter %s.%s: %v", stream.Name, filter.Name, err) - } - filter.compiledRegex = append(filter.compiledRegex, *compiledRegex) - } - - if len(filter.Actions) == 0 { - logger.Fatalln("Bad configuration: no actions configured in", stream.Name, ".", filter.Name) - } - for actionName := range filter.Actions { - - action := filter.Actions[actionName] - action.Filter = filter - action.Name = 
actionName - - if strings.Contains(action.Name, ".") { - logger.Fatalln("Bad configuration: character '.' is not allowed in action names", action.Name) - } - // Parse Duration - if action.After != "" { - afterDuration, err := time.ParseDuration(action.After) - if err != nil { - logger.Fatalln("Bad configuration: Failed to parse after time in ", stream.Name, ".", filter.Name, ".", action.Name, ":", err) - } - action.afterDuration = afterDuration - } else if action.OnExit { - logger.Fatalln("Bad configuration: Cannot have `onexit: true` without an `after` directive in", stream.Name, ".", filter.Name, ".", action.Name) - } - if filter.longuestActionDuration == nil || filter.longuestActionDuration.Milliseconds() < action.afterDuration.Milliseconds() { - filter.longuestActionDuration = &action.afterDuration - } - } - } - } -} - -func parseConf(filename string) *Conf { - - data, err := os.Open(filename) - if err != nil { - logger.Fatalln("Failed to read configuration file:", err) - } - - var conf Conf - if filename[len(filename)-4:] == ".yml" || filename[len(filename)-5:] == ".yaml" { - err = jsonnet.NewYAMLToJSONDecoder(data).Decode(&conf) - if err != nil { - logger.Fatalln("Failed to parse yaml configuration file:", err) - } - } else { - var jsondata string - jsondata, err = jsonnet.MakeVM().EvaluateFile(filename) - if err == nil { - err = json.Unmarshal([]byte(jsondata), &conf) - } - if err != nil { - logger.Fatalln("Failed to parse json configuration file:", err) - } - } - - conf.setup() - return &conf -} diff --git a/go.old/app/types.go b/go.old/app/types.go deleted file mode 100644 index b49b075..0000000 --- a/go.old/app/types.go +++ /dev/null @@ -1,200 +0,0 @@ -package app - -import ( - "bytes" - "encoding/gob" - "fmt" - "os" - "regexp" - "strings" - "time" -) - -type Conf struct { - Concurrency int `json:"concurrency"` - Patterns map[string]*Pattern `json:"patterns"` - Streams map[string]*Stream `json:"streams"` - Start [][]string `json:"start"` - Stop [][]string 
`json:"stop"` -} - -type Pattern struct { - Regex string `json:"regex"` - Ignore []string `json:"ignore"` - - IgnoreRegex []string `json:"ignoreregex"` - compiledIgnoreRegex []regexp.Regexp `json:"-"` - - Name string `json:"-"` - nameWithBraces string `json:"-"` -} - -// Stream, Filter & Action structures must never be copied. -// They're always referenced through pointers - -type Stream struct { - Name string `json:"-"` - - Cmd []string `json:"cmd"` - Filters map[string]*Filter `json:"filters"` -} -type LilStream struct { - Name string -} - -func (s *Stream) GobEncode() ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(LilStream{s.Name}) - return buf.Bytes(), err -} - -func (s *Stream) GobDecode(b []byte)(error) { - var ls LilStream - dec := gob.NewDecoder(bytes.NewReader(b)) - err := dec.Decode(&ls) - s.Name = ls.Name - return err -} - -type Filter struct { - Stream *Stream `json:"-"` - Name string `json:"-"` - - Regex []string `json:"regex"` - compiledRegex []regexp.Regexp `json:"-"` - Pattern []*Pattern `json:"-"` - - Retry int `json:"retry"` - RetryPeriod string `json:"retryperiod"` - retryDuration time.Duration `json:"-"` - - Actions map[string]*Action `json:"actions"` - longuestActionDuration *time.Duration -} - -// those small versions are needed to prevent infinite recursion in gob because of -// data cycles: Stream <-> Filter, Filter <-> Action -type LilFilter struct { - Stream *Stream - Name string - Pattern []*Pattern -} - -func (f *Filter) GobDecode(b []byte)(error) { - var lf LilFilter - dec := gob.NewDecoder(bytes.NewReader(b)) - err := dec.Decode(&lf) - f.Stream = lf.Stream - f.Name = lf.Name - f.Pattern = lf.Pattern - return err -} - -func (f *Filter) GobEncode() ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(LilFilter{f.Stream, f.Name, f.Pattern}) - return buf.Bytes(), err -} - -type Action struct { - Filter *Filter `json:"-"` - Name string `json:"-"` - - Cmd 
[]string `json:"cmd"` - - After string `json:"after"` - afterDuration time.Duration `json:"-"` - - OnExit bool `json:"onexit"` -} -type LilAction struct { - Filter *Filter - Name string -} - -func (a *Action) GobEncode() ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(LilAction{a.Filter, a.Name}) - return buf.Bytes(), err -} - -func (a *Action) GobDecode(b []byte)(error) { - var la LilAction - dec := gob.NewDecoder(bytes.NewReader(b)) - err := dec.Decode(&la) - a.Filter = la.Filter - a.Name = la.Name - return err -} - -type LogEntry struct { - T time.Time - S int64 - Pattern Match - Stream, Filter string - SF int - Exec bool -} - -type ReadDB struct { - file *os.File - dec *gob.Decoder -} - -type WriteDB struct { - file *os.File - enc *gob.Encoder -} - -type MatchesMap map[PF]map[time.Time]struct{} -type ActionsMap map[PA]map[time.Time]struct{} - -// This is a "\x00" Joined string -// which contains all matches on a line. -type Match string - -func (m *Match) Split() []string { - return strings.Split(string(*m), "\x00") -} -func JoinMatch(mm []string) Match { - return Match(strings.Join(mm, "\x00")) -} -func WithBrackets(mm []string) string { - var b strings.Builder - for _, match := range mm { - fmt.Fprintf(&b, "[%s]", match) - } - return b.String() -} - -// Helper structs made to carry information -// Stream, Filter -type SF struct{ S, F string } - -// Pattern, Stream, Filter -type PSF struct { - P Match - S, F string -} - -type PF struct { - P Match - F *Filter -} -type PFT struct { - P Match - F *Filter - T time.Time -} -type PA struct { - P Match - A *Action -} -type PAT struct { - P Match - A *Action - T time.Time -} diff --git a/go.old/go.mod b/go.old/go.mod deleted file mode 100644 index d61ddc3..0000000 --- a/go.old/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module framagit.org/ppom/reaction - -go 1.21 - -require ( - github.com/google/go-jsonnet v0.20.0 - sigs.k8s.io/yaml v1.1.0 -) - -require gopkg.in/yaml.v2 v2.4.0 
// indirect diff --git a/go.old/go.sum b/go.old/go.sum deleted file mode 100644 index a4ae243..0000000 --- a/go.old/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g= -github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/go.old/logger/log.go b/go.old/logger/log.go deleted file mode 100644 index 9692545..0000000 --- a/go.old/logger/log.go +++ /dev/null @@ -1,80 +0,0 @@ -package logger - -import "log" - -type Level int - -const ( - UNKNOWN = Level(-1) - DEBUG = Level(1) - INFO = Level(2) - WARN = Level(3) - ERROR = Level(4) - FATAL = Level(5) -) - -func (l Level) String() string { - switch l { - case DEBUG: - return "DEBUG " - case INFO: - return "INFO " - case WARN: - return "WARN " - case ERROR: - return "ERROR " - case FATAL: - return "FATAL " - default: - return "????? 
" - } -} - -func FromString(s string) Level { - switch s { - case "DEBUG": - return DEBUG - case "INFO": - return INFO - case "WARN": - return WARN - case "ERROR": - return ERROR - case "FATAL": - return FATAL - default: - return UNKNOWN - } -} - -var LogLevel Level = 2 - -func SetLogLevel(level Level) { - LogLevel = level -} - -func Println(level Level, args ...any) { - if level >= LogLevel { - newargs := make([]any, 0) - newargs = append(newargs, level) - newargs = append(newargs, args...) - log.Println(newargs...) - } -} - -func Printf(level Level, format string, args ...any) { - if level >= LogLevel { - log.Printf(level.String()+format, args...) - } -} - -func Fatalln(args ...any) { - newargs := make([]any, 0) - newargs = append(newargs, FATAL) - newargs = append(newargs, args...) - log.Fatalln(newargs...) -} - -func Fatalf(format string, args ...any) { - log.Fatalf(FATAL.String()+format, args...) -} diff --git a/go.old/reaction.go b/go.old/reaction.go deleted file mode 100644 index 37d6746..0000000 --- a/go.old/reaction.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "framagit.org/ppom/reaction/app" -) - -func main() { - app.Main(version) -} - -var ( - version = "v1.4.2" -) From c04168d4dc2f2a4ac2b98c621cac9d4136da0bf2 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Jun 2025 12:00:00 +0200 Subject: [PATCH 011/241] Fix outdated links in README --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2abaef9..7893259 100644 --- a/README.md +++ b/README.md @@ -33,9 +33,10 @@ reaction does not have all the features of the honorable fail2ban, but it's ~10x YAML and [JSONnet](https://jsonnet.org/) (more powerful) are supported. both are extensions of JSON, so JSON is transitively supported. 
-- See [reaction.yml](./app/example.yml) or [reaction.jsonnet](./config/example.jsonnet) for a fully explained reference -- See [server.jsonnet](./config/server.jsonnet) for a real-world configuration -- See [reaction.example.service](./config/reaction.example.service) for a systemd service file +- See [reaction.yml](./config/example.yml) or [reaction.jsonnet](./config/example.jsonnet) for a fully explained reference +- See the [wiki](https://reaction.ppom.me) for multiple examples, security recommendations and FAQ. +- See [server.jsonnet](https://reaction.ppom.me/configurations/ppom/server.jsonnet.html) for a real-world configuration +- See [reaction.service](./config/reaction.service) for a systemd service file - This minimal example shows what's needed to prevent brute force attacks on an ssh server (please read at least the [Security](https://reaction.ppom.me/security.html) part of the wiki before starting 🆙):
From cc38c55fdb874bb70b9c16cc72bb3db691f4cd48 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Jun 2025 12:00:00 +0200 Subject: [PATCH 012/241] Add test-config subcommand to README --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7893259..8d56c4f 100644 --- a/README.md +++ b/README.md @@ -149,9 +149,10 @@ If you don't know where to start reaction, `/var/lib/reaction` should be a sane ### CLI - `reaction start` runs the server -- `reaction show` show pending actions (ie. bans) +- `reaction show` show pending actions (ie. current bans) - `reaction flush` permits to run pending actions (ie. clear bans) - `reaction test-regex` permits to test regexes +- `reaction test-config` shows loaded configuration - `reaction help` for full usage. ### `ip46tables` From 5bccdb5ba7037811b36e66cb940d2a9f1f260bb0 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 10 Jun 2025 12:00:00 +0200 Subject: [PATCH 013/241] Add oneshot option for actions Fixes #92 --- config/example.jsonnet | 6 ++++++ config/example.yml | 5 +++++ src/concepts/action.rs | 9 ++++++++- src/daemon/filter.rs | 31 +++++++++++++++++++++++++------ tests/simple.rs | 30 ++++++++++++++++++++++++++---- 5 files changed, 70 insertions(+), 11 deletions(-) diff --git a/config/example.jsonnet b/config/example.jsonnet index 7c2acc2..53fb825 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -114,6 +114,12 @@ local banFor(time) = { // here it is not useful because we will flush and delete the chain containing the bans anyway // (with the stop commands) }, + mail: { + cmd: ['sendmail', '...', ''], + // some commands, such as alerting commands, are "oneshot". + // this means they'll be run only once, and won't be executed again when reaction is restarted + oneshot: true, + }, }, // or use the banFor function defined at the beginning! 
// actions: banFor('48h'), diff --git a/config/example.yml b/config/example.yml index a9e585e..f708597 100644 --- a/config/example.yml +++ b/config/example.yml @@ -96,6 +96,11 @@ streams: # (defaults to false) # here it is not useful because we will flush and delete the chain containing the bans anyway # (with the stop commands) + mail: + cmd: ['sendmail', '...', ''] + # some commands, such as alerting commands, are "oneshot". + # this means they'll be run only once, and won't be executed again when reaction is restarted + oneshot: true # persistence # tldr; when an `after` action is set in a filter, such filter acts as a 'jail', diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 51f80e8..e4e0fc9 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -8,7 +8,7 @@ use tokio::process::Command; use super::parse_duration::*; use super::{Match, Pattern}; -#[derive(Clone, Debug, Default, Deserialize , Serialize)] +#[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct Action { cmd: Vec, @@ -25,6 +25,8 @@ pub struct Action { skip_serializing_if = "is_false" )] on_exit: bool, + #[serde(default = "set_false", skip_serializing_if = "is_false")] + oneshot: bool, #[serde(skip)] patterns: Arc>>, @@ -57,6 +59,10 @@ impl Action { self.on_exit } + pub fn oneshot(&self) -> bool { + self.oneshot + } + pub fn setup( &mut self, stream_name: &str, @@ -206,6 +212,7 @@ pub mod tests { after: None, after_duration: None, on_exit: false, + oneshot: false, patterns: Arc::new(BTreeSet::default()), } } diff --git a/src/daemon/filter.rs b/src/daemon/filter.rs index 0d6a5f9..a13ec51 100644 --- a/src/daemon/filter.rs +++ b/src/daemon/filter.rs @@ -100,7 +100,7 @@ impl FilterManager { if exec { state.remove_match(&m); state.add_trigger(m.clone(), now); - self.schedule_exec(m, now, now, &mut state); + self.schedule_exec(m, now, now, &mut state, false); } exec @@ -199,11 +199,24 @@ impl FilterManager { cs.into_iter().map(|(k, 
v)| (k.join(" "), v)).collect() } - /// Schedule execution for a given Action and Match. + /// Schedule execution for a given Match. /// We check first if the trigger is still here /// because pending actions can be flushed. - fn schedule_exec(&self, m: Match, t: Time, now: Time, state: &mut MutexGuard) { - for action in self.filter.actions().values() { + fn schedule_exec( + &self, + m: Match, + t: Time, + now: Time, + state: &mut MutexGuard, + startup: bool, + ) { + for action in self + .filter + .actions() + .values() + // On startup, skip oneshot actions + .filter(|action| !startup || !action.oneshot()) + { let exec_time = t + action.after_duration().unwrap_or_default(); let m = m.clone(); @@ -240,7 +253,13 @@ impl FilterManager { fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) { let longuest_action_duration = self.filter.longuest_action_duration(); - let number_of_actions = self.filter.actions().len(); + let number_of_actions = self + .filter + .actions() + .values() + // On startup, skip oneshot actions + .filter(|action| !action.oneshot()) + .count(); #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = self.state.lock().unwrap(); @@ -256,7 +275,7 @@ impl FilterManager { // Insert back the upcoming times state.triggers.insert(mt.clone(), number_of_actions as u64); // Schedule the upcoming times - self.schedule_exec(mt.m, mt.t, now, &mut state); + self.schedule_exec(mt.m, mt.t, now, &mut state, true); } else { state.triggers.remove(&mt); } diff --git a/tests/simple.rs b/tests/simple.rs index 70595be..0b168e3 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -49,6 +49,10 @@ fn config_with_cmd(config_path: &str, cmd: &str) { after: '30s', onexit: false, }, + action_oneshot: { + cmd: ['sh', '-c', 'echo oneshot >> ./oneshot.txt'], + oneshot: true, + }, } } } @@ -72,6 +76,7 @@ async fn simple() { let config_path = "config.jsonnet"; let out_path = "./out.txt"; + let oneshot_path = "./oneshot.txt"; let socket_path = 
"./reaction.sock"; config_with_cmd( @@ -80,6 +85,7 @@ async fn simple() { ); file_with_contents(out_path, ""); + file_with_contents(oneshot_path, ""); // Set the logger before running any code from the crate tracing_subscriber::fmt::fmt() @@ -134,6 +140,12 @@ async fn simple() { "24\n36\n12\ndel 24".to_owned().trim() ); + // oneshot actions are also executed + assert_eq!( + get_file_content(oneshot_path).trim(), + "oneshot 24\noneshot 36\noneshot 12".to_owned().trim() + ); + // Second part of the test // We test that persistence worked as intended // Both for matches and for flushes @@ -144,6 +156,7 @@ async fn simple() { ); file_with_contents(out_path, ""); + file_with_contents(oneshot_path, ""); let daemon_exit = daemon(config_path.into(), socket_path.into()).await; assert!(daemon_exit.is_err()); @@ -152,10 +165,10 @@ async fn simple() { "quitting because all streams finished" ); - // 36 from DB - // 12 from DB - // 12 from DB + new match - // 67 from DB + new match + // 36 trigger from DB + // 12 trigger from DB + // 12 match from DB + new match + // 67 match from DB + new match let content = get_file_content(out_path).trim().to_owned(); let scenario1 = "36\n12\n12\n67".trim().to_owned(); let scenario2 = "12\n36\n12\n67".trim().to_owned(); @@ -167,6 +180,15 @@ async fn simple() { scenario2 ); + // triggers from the DB aren't executed for oneshot actions + // only for new triggers + // 12 match from DB + new match + // 67 match from DB + new match + assert_eq!( + get_file_content(oneshot_path).trim(), + "oneshot 12\noneshot 67".to_owned().trim() + ); + // Third part of the test // Check we can capture both stdout and stderr from spawned processes From 0d9fc4701666ba540269c5ee83663ac3d52dd583 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 10 Jun 2025 12:00:00 +0200 Subject: [PATCH 014/241] Update duration format documentation As it's no longer Go's format --- config/example.jsonnet | 12 ++++++++++-- config/example.yml | 10 +++++++++- src/concepts/parse_duration.rs 
| 12 ++++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/config/example.jsonnet b/config/example.jsonnet index 53fb825..d77d87a 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -94,7 +94,15 @@ local banFor(time) = { // the actions will only take place if a same pattern is // found `retry` times in a `retryperiod` interval retry: 3, - // format is defined here: https://pkg.go.dev/time#ParseDuration + // format is defined as follows: + // - whitespace between the integer and unit is optional + // - integer must be positive (>= 0) + // - unit can be one of: + // - ms / millis / millisecond / milliseconds + // - s / sec / secs / second / seconds + // - m / min / mins / minute / minutes + // - h / hour / hours + // - d / day / days retryperiod: '6h', // actions are run by the filter when regexes are matched actions: { @@ -106,7 +114,7 @@ local banFor(time) = { cmd: iptables(['-D', 'reaction', '-s', '', '-j', 'DROP']), // if after is defined, the action will not take place immediately, but after a specified duration // same format as retryperiod - after: '48h', + after: '2 days', // let's say reaction is quitting. does it run all those pending commands which had an `after` duration set? 
// if you want reaction to run those pending commands before exiting, you can set this: // onexit: true, diff --git a/config/example.yml b/config/example.yml index f708597..7bae87a 100644 --- a/config/example.yml +++ b/config/example.yml @@ -77,7 +77,15 @@ streams: # the actions will only take place if a same pattern is # found `retry` times in a `retryperiod` interval retry: 3 - # format is defined here: https://pkg.go.dev/time#ParseDuration + # format is defined as follows: + # - whitespace between the integer and unit is optional + # - integer must be positive (>= 0) + # - unit can be one of: + # - ms / millis / millisecond / milliseconds + # - s / sec / secs / second / seconds + # - m / min / mins / minute / minutes + # - h / hour / hours + # - d / day / days retryperiod: 6h # actions are run by the filter when regexes are matched actions: diff --git a/src/concepts/parse_duration.rs b/src/concepts/parse_duration.rs index b5b5760..7121632 100644 --- a/src/concepts/parse_duration.rs +++ b/src/concepts/parse_duration.rs @@ -1,5 +1,17 @@ use chrono::TimeDelta; +/// Parses the &str argument as a Duration +/// Returns Ok(TimeDelta) if successful, or Err(String). +/// +/// Format is defined as follows: ` ` +/// - whitespace between the integer and unit is optional +/// - integer must be positive (>= 0) +/// - unit can be one of: +/// - `ms` / `millis` / `millisecond` / `milliseconds` +/// - `s` / `sec` / `secs` / `second` / `seconds` +/// - `m` / `min` / `mins` / `minute` / `minutes` +/// - `h` / `hour` / `hours` +/// - `d` / `day` / `days` pub fn parse_duration(d: &str) -> Result { let d_trimmed = d.trim(); let chars = d_trimmed.as_bytes(); From ff84a31a7d5aabb314c49b1ebc9eca469c762f82 Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 15 Jun 2025 12:00:00 +0200 Subject: [PATCH 015/241] build: use CC env var if available. 
defaults to cc instead of gcc --- build.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/build.rs b/build.rs index 39c0dca..0bfa592 100644 --- a/build.rs +++ b/build.rs @@ -10,9 +10,9 @@ use clap_complete::shells; // SubCommand defined here include!("src/cli.rs"); -fn gcc() -> String { +fn cc() -> String { // TARGET looks like aarch64-unknown-linux-musl - match var("TARGET") { + let cc = match var("TARGET") { Ok(target) => { // We're looking for an environment variable looking like // CC_aarch64_unknown_linux_musl @@ -20,8 +20,17 @@ fn gcc() -> String { var(format!("CC_{}", target.replace("-", "_"))).ok() } Err(_) => None, + }; + match cc { + Some(cc) => Some(cc), + // Else we're looking for CC environment variable + None => match var("CC") { + Ok(cc) => Some(cc), + Err(_) => None, + }, } - .unwrap_or("gcc".into()) + // Else we use `cc` + .unwrap_or("cc".into()) } fn compile_helper(cc: &str, name: &str, out_dir: &Path) -> io::Result<()> { @@ -35,7 +44,7 @@ fn compile_helper(cc: &str, name: &str, out_dir: &Path) -> io::Result<()> { .to_owned(), ]; // We can build static executables in cross environment - if cc != "gcc" { + if cc.ends_with("-gcc") { args.push("-static".into()); } process::Command::new(cc).args(args).spawn()?; @@ -47,7 +56,7 @@ fn main() -> io::Result<()> { let out_dir = PathBuf::from(var_os("OUT_DIR").ok_or(ErrorKind::NotFound)?).join("../../.."); // Compile C helpers - let cc = gcc(); + let cc = cc(); println!("CC is: {}", cc); compile_helper(&cc, "ip46tables", &out_dir)?; compile_helper(&cc, "nft46", &out_dir)?; From 9963ef4192b9c9e28539f08ac0c4a0e1483529d2 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 17 Jun 2025 12:00:00 +0200 Subject: [PATCH 016/241] Improve error message for retry < 2. 
Fixes #125 --- src/concepts/filter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index b487e8a..9eabf91 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -128,7 +128,7 @@ impl Filter { } if self.retry.is_some_and(|r| r < 2) { - return Err("retry has been specified but is < 2".into()); + return Err("retry must be >= 2. Remove 'retry' and 'retryperiod' to trigger at the first occurence.".into()); } if let Some(retry_period) = &self.retry_period { From 0ff8fda6073ed4fe2ae95785ec8ba11d3c2b239a Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 17 Jun 2025 12:00:00 +0200 Subject: [PATCH 017/241] cargo fmt, cargo clippy --all-targets --- build.rs | 5 +---- src/concepts/config.rs | 9 ++------- src/concepts/filter.rs | 2 +- src/daemon/filter.rs | 6 +++--- src/daemon/filter/tests.rs | 4 ++-- src/daemon/shutdown.rs | 1 + src/daemon/stream.rs | 1 + src/lib.rs | 1 - src/main.rs | 6 +----- src/treedb/helpers.rs | 8 ++++---- src/treedb/mod.rs | 24 +++++++++++++----------- src/treedb/raw.rs | 22 +++++----------------- 12 files changed, 34 insertions(+), 55 deletions(-) diff --git a/build.rs b/build.rs index 0bfa592..97c433e 100644 --- a/build.rs +++ b/build.rs @@ -24,10 +24,7 @@ fn cc() -> String { match cc { Some(cc) => Some(cc), // Else we're looking for CC environment variable - None => match var("CC") { - Ok(cc) => Some(cc), - Err(_) => None, - }, + None => var("CC").ok(), } // Else we use `cc` .unwrap_or("cc".into()) diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 9853bbe..904a382 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -102,7 +102,6 @@ impl Config { self.start.append(&mut other.start); self.stop.append(&mut other.stop); - if !(self.state_directory == dot() || other.state_directory == dot() || self.state_directory == other.state_directory) @@ -779,9 +778,7 @@ mod tests { {{STREAMS}} }"#, ); - let cfg_oth = parse_config_json( - r#"{}"#, - ); + 
let cfg_oth = parse_config_json(r#"{}"#); let res = cfg_org.merge(cfg_oth); assert!(res.is_ok()); @@ -813,9 +810,7 @@ mod tests { {{STREAMS}} }"#, ); - let cfg_oth = parse_config_json( - r#"{ }"#, - ); + let cfg_oth = parse_config_json(r#"{ }"#); let res = cfg_org.merge(cfg_oth); assert!(res.is_ok()); diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 9eabf91..b89dff2 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -64,7 +64,7 @@ impl Filter { Filter { stream_name: stream_name.into(), name: filter_name.into(), - patterns: Arc::new(patterns.into_iter().map(|p| Arc::new(p)).collect()), + patterns: Arc::new(patterns.into_iter().map(Arc::new).collect()), ..Filter::default() } } diff --git a/src/daemon/filter.rs b/src/daemon/filter.rs index a13ec51..430b5a6 100644 --- a/src/daemon/filter.rs +++ b/src/daemon/filter.rs @@ -267,7 +267,7 @@ impl FilterManager { let cloned_triggers = state .triggers .iter() - .map(|(k, v)| (k.clone(), v.clone())) + .map(|(k, v)| (k.clone(), *v)) .collect::>(); for (mt, remaining) in cloned_triggers.into_iter() { @@ -387,7 +387,7 @@ impl State { fn remove_match(&mut self, m: &Match) { if let Some(set) = self.matches.get(m) { for t in set { - self.ordered_times.remove(&t); + self.ordered_times.remove(t); } self.matches.remove(m); } @@ -433,7 +433,7 @@ impl State { // unwrap: we just checked in the condition that first is_some let (t, m) = { let (t, m) = self.ordered_times.first_key_value().unwrap(); - (t.clone(), m.clone()) + (*t, m.clone()) }; self.ordered_times.remove(&t); if let Some(set) = self.matches.get(&m) { diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 5a03eaf..fc92177 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -360,7 +360,7 @@ async fn one_db_match_one_runtime_match_one_action() { let now1s = now - TimeDelta::seconds(1); db.set_loaded_db(HashMap::from([( - filter_ordered_times_db_name(&filter), + filter_ordered_times_db_name(filter), 
HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), )])); @@ -423,7 +423,7 @@ async fn one_outdated_db_match() { let now1s = now - TimeDelta::milliseconds(1001); db.set_loaded_db(HashMap::from([( - filter_ordered_times_db_name(&filter), + filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), )])); diff --git a/src/daemon/shutdown.rs b/src/daemon/shutdown.rs index a59e351..a311ad1 100644 --- a/src/daemon/shutdown.rs +++ b/src/daemon/shutdown.rs @@ -14,6 +14,7 @@ pub struct ShutdownController { } impl ShutdownController { + #[allow(clippy::new_without_default)] pub fn new() -> Self { let (task_tracker, task_waiter) = mpsc::channel(1); Self { diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index f78662c..b8e0ea8 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -17,6 +17,7 @@ use crate::{ use super::shutdown::ShutdownToken; +#[allow(clippy::type_complexity)] fn lines_to_stream( mut lines: Lines, ) -> futures::stream::PollFn< diff --git a/src/lib.rs b/src/lib.rs index 2e7d5af..3619f1a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,7 +6,6 @@ unsafe_code )] #![allow(clippy::upper_case_acronyms, clippy::mutable_key_type)] - // Allow unwrap in tests #![cfg_attr(test, allow(clippy::unwrap_used))] diff --git a/src/main.rs b/src/main.rs index 7379fa9..53428b7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -50,11 +50,7 @@ async fn main() { } let result = match cli.command { - SubCommand::Start { - config, - socket, - .. - } => daemon(config, socket).await, + SubCommand::Start { config, socket, .. } => daemon(config, socket).await, SubCommand::Show { socket, format, diff --git a/src/treedb/helpers.rs b/src/treedb/helpers.rs index 636aea1..7833957 100644 --- a/src/treedb/helpers.rs +++ b/src/treedb/helpers.rs @@ -28,8 +28,8 @@ pub fn to_time(val: &Value) -> Result { pub fn to_match(val: &Value) -> Result { val.as_array() .ok_or("not an array")? 
- .into_iter() - .map(|v| to_string(v)) + .iter() + .map(to_string) .collect() } @@ -46,8 +46,8 @@ pub fn to_matchtime(val: &Value) -> Result { pub fn to_timeset(val: &Value) -> Result, String> { val.as_array() .ok_or("not an array")? - .into_iter() - .map(|v| to_time(v)) + .iter() + .map(to_time) .collect() } diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index a12f227..8809169 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -166,10 +166,13 @@ impl Database { Ok(bytes_written) => { self.bytes_written += bytes_written; if self.bytes_written > self.max_bytes { - self.rotate_db() - .await - .and_then(|_| Ok(self.bytes_written = 0)) - .map_err(|err| Some(format!("while rotating database: {err}"))) + match self.rotate_db().await { + Ok(_) => { + self.bytes_written = 0; + Ok(()) + } + Err(err) => Err(Some(format!("while rotating database: {err}"))), + } } else { Ok(()) } @@ -279,8 +282,7 @@ impl Database { json_tree .into_iter() .map(map_f) - .collect::, String>>() - .unwrap() + .collect::, String>>()? 
} else { BTreeMap::default() }; @@ -349,7 +351,7 @@ impl Tree { /// Asynchronously persisted version of [`BTreeMap::remove`] pub fn remove(&mut self, key: &K) -> Option { - self.log(&key, None); + self.log(key, None); self.tree.remove(key) } @@ -580,19 +582,19 @@ mod tests { Entry { tree: "match-timeset".into(), key: ip127.clone().into(), - value: Some([Value::String(now_ms.into())].into()), + value: Some([Value::String(now_ms)].into()), expiry: valid, }, Entry { tree: "match-timeset".into(), key: ip1.clone().into(), - value: Some([Value::String(now2_ms.clone().into())].into()), + value: Some([Value::String(now2_ms.clone())].into()), expiry: valid, }, Entry { tree: "match-timeset".into(), key: ip1.clone().into(), - value: Some([Value::String(now2_ms.clone().into()), now3_ms.into()].into()), + value: Some([Value::String(now2_ms.clone()), now3_ms.into()].into()), expiry: valid, }, ]; @@ -608,7 +610,7 @@ mod tests { write_db.close().await.unwrap(); drop(write_db); - let mut database = Database::from_dir(&dir_path).await.unwrap(); + let mut database = Database::from_dir(dir_path).await.unwrap(); let time_match = database .open_tree( diff --git a/src/treedb/raw.rs b/src/treedb/raw.rs index c2042ff..587497a 100644 --- a/src/treedb/raw.rs +++ b/src/treedb/raw.rs @@ -185,7 +185,7 @@ impl ReadDB { Ok(_) => (), Err(err) => match err { SerdeOrIoError::IO(err) => return Err(err), - SerdeOrIoError::Serde(err) => panic!("serde should be able to serialize an entry just deserialized: {err}"), + SerdeOrIoError::Serde(err) => error!("serde should be able to serialize an entry just deserialized: {err}"), } } // Insert data in RAM @@ -379,22 +379,10 @@ mod tests { }) ); - assert!(match read_db.next().await { - Err(DatabaseError::Serde(_)) => true, - _ => false, - }); - assert!(match read_db.next().await { - Err(DatabaseError::Serde(_)) => true, - _ => false, - }); - assert!(match read_db.next().await { - Err(DatabaseError::Serde(_)) => true, - _ => false, - }); - assert!(match 
read_db.next().await { - Err(DatabaseError::MissingKeyId(3)) => true, - _ => false, - }); + matches!(read_db.next().await, Err(DatabaseError::Serde(_))); + matches!(read_db.next().await, Err(DatabaseError::Serde(_))); + matches!(read_db.next().await, Err(DatabaseError::Serde(_))); + matches!(read_db.next().await, Err(DatabaseError::MissingKeyId(3))); assert!(read_db.next().await.unwrap().is_none()); } From 731ad6ddfd2b49f24a00f63241559be96ecf90c5 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 21 Jun 2025 12:00:00 +0200 Subject: [PATCH 018/241] Simplify parse_duration tests by using appropriate units --- src/concepts/parse_duration.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/concepts/parse_duration.rs b/src/concepts/parse_duration.rs index 7121632..f42db64 100644 --- a/src/concepts/parse_duration.rs +++ b/src/concepts/parse_duration.rs @@ -56,16 +56,10 @@ mod tests { assert_eq!(parse_duration("1s"), Ok(TimeDelta::seconds(1))); assert_eq!(parse_duration("12s"), Ok(TimeDelta::seconds(12))); assert_eq!(parse_duration(" 12 secs "), Ok(TimeDelta::seconds(12))); - assert_eq!(parse_duration("2m"), Ok(TimeDelta::seconds(2 * 60))); - assert_eq!( - parse_duration("6 hours"), - Ok(TimeDelta::seconds(6 * 60 * 60)) - ); - assert_eq!(parse_duration("1d"), Ok(TimeDelta::seconds(24 * 60 * 60))); - assert_eq!( - parse_duration("365d"), - Ok(TimeDelta::seconds(365 * 24 * 60 * 60)) - ); + assert_eq!(parse_duration("2m"), Ok(TimeDelta::minutes(2))); + assert_eq!(parse_duration("6 hours"), Ok(TimeDelta::hours(6))); + assert_eq!(parse_duration("1d"), Ok(TimeDelta::days(1))); + assert_eq!(parse_duration("365d"), Ok(TimeDelta::days(365))); assert!(parse_duration("d 3").is_err()); assert!(parse_duration("d3").is_err()); From b0c307a9d24c2328f545e73a3400e2a35b7f257b Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 17 Jun 2025 12:00:00 +0200 Subject: [PATCH 019/241] WIP trigger command --- src/cli.rs | 18 +++++ src/client/show_flush.rs | 1 + 
src/concepts/filter.rs | 45 +++++++++++ src/daemon/filter.rs | 12 +++ src/daemon/socket.rs | 160 ++++++++++++++++++++++++++++++--------- src/main.rs | 7 +- src/protocol.rs | 2 + 7 files changed, 207 insertions(+), 38 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 6f79c2c..c4d1f94 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -83,6 +83,24 @@ Then prints the flushed matches and actions." patterns: Vec<(String, String)>, }, + /// Trigger a target in reaction (e.g. ban) + #[command( + long_about = "Trigger actions and remove currently active matches for the specified PATTERNS in the specified STREAM.FILTER. (e.g. ban)" + )] + Trigger { + /// path to the client-daemon communication socket + #[clap(short = 's', long, default_value = "/run/reaction/reaction.sock")] + socket: PathBuf, + + /// STREAM.FILTER to trigger + #[clap(value_name = "STREAM.FILTER")] + limit: Option, + + /// PATTERNs to trigger on (e.g. ip=1.2.3.4) + #[clap(value_parser = parse_named_regex, value_name = "NAME=PATTERN")] + patterns: Vec<(String, String)>, + }, + /// Test a regex #[command( name = "test-regex", diff --git a/src/client/show_flush.rs b/src/client/show_flush.rs index ec321ab..986a26e 100644 --- a/src/client/show_flush.rs +++ b/src/client/show_flush.rs @@ -78,6 +78,7 @@ pub async fn request( DaemonResponse::Err(err) => Err(format!( "failed to communicate to daemon: error response: {err}" )), + DaemonResponse::Ok(_) => Ok(()), }?; Ok(()) } diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index b89dff2..19c64a7 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -223,6 +223,51 @@ impl Filter { } None } + + pub fn get_match_from_patterns( + &self, + patterns: BTreeMap, String>, + ) -> Result { + // Check pattern length + if patterns.len() != self.patterns().len() { + return Err(format!( + "{} patterns specified, while the {}.{} filter has {} pattern: ({})", + patterns.len(), + self.stream_name(), + self.name(), + self.patterns().len(), + self.patterns() + 
.iter() + .map(|pattern| pattern.name().clone()) + .reduce(|acc, pattern| acc + ", " + &pattern) + .unwrap_or("".into()), + )); + } + + for (pattern, _) in &patterns { + if self.patterns.get(pattern).is_none() { + return Err(format!( + "pattern {} is not present in the filter {}.{}", + pattern.name(), + self.stream_name, + self.name + )); + } + } + + for pattern in self.patterns.iter() { + if patterns.get(pattern).is_none() { + return Err(format!( + "pattern {} is missing, because it's in the filter {}.{}", + pattern.name(), + self.stream_name, + self.name + )); + } + } + + Ok(patterns.into_values().collect()) + } } impl Display for Filter { diff --git a/src/daemon/filter.rs b/src/daemon/filter.rs index 430b5a6..17d9558 100644 --- a/src/daemon/filter.rs +++ b/src/daemon/filter.rs @@ -106,6 +106,18 @@ impl FilterManager { exec } + pub fn handle_trigger(&self, patterns: BTreeMap, String>, now: Time) -> Result<(), String> { + let match_ = self.filter.get_match_from_patterns(patterns)?; + + #[allow(clippy::unwrap_used)] // propagating panics is ok + let mut state = self.state.lock().unwrap(); + state.remove_match(&match_); + state.add_trigger(match_.clone(), now); + self.schedule_exec(match_, now, now, &mut state, false); + + Ok(()) + } + pub fn handle_order( &self, patterns: &BTreeMap, Regex>, diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 178d7f6..29010f5 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -18,7 +18,7 @@ use tracing::{error, warn}; use crate::{ concepts::{Config, Filter, Pattern, Stream}, - protocol::{ClientRequest, ClientStatus, DaemonResponse}, + protocol::{ClientRequest, ClientStatus, DaemonResponse, Order}, }; use super::{filter::FilterManager, shutdown::ShutdownToken}; @@ -54,40 +54,68 @@ fn open_socket(path: PathBuf) -> Result { err_str!(UnixListener::bind(path)) } -fn answer_order( - config: &'static Config, +fn handle_trigger_order( + stream_name: Option, + filter_name: Option, + patterns: BTreeMap, String>, 
shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, - options: ClientRequest, -) -> Result { - // Compute options - let (stream_name, filter_name) = match options.stream_filter { - Some(sf) => match sf.split_once(".") { - Some((s, f)) => (Some(s.to_string()), Some(f.to_string())), - None => (Some(sf), None), - }, - None => (None, None), +) -> DaemonResponse { + // Check names existence + let (stream_name, filter_name) = match (stream_name, filter_name) { + (Some(s), Some(p)) => (s, p), + _ => { + return DaemonResponse::Err( + "trigger must target a filter, e.g. `reaction trigger mystream.myfilter ...`" + .into(), + ); + } }; - // Compute the Vec<(pattern_name: String, regex: String)> into a BTreeMap, Regex> - let patterns = options - .patterns - .into_iter() - .map(|(name, reg)| { - // lookup pattern in config.patterns - config - .patterns() - .iter() - // retrieve or Err - .find(|(pattern_name, _)| &name == *pattern_name) - .ok_or_else(|| format!("pattern '{name}' doesn't exist")) - // compile Regex or Err - .and_then(|(_, pattern)| match Regex::new(®) { - Ok(reg) => Ok((pattern.clone(), reg)), - Err(err) => Err(format!("pattern '{name}' regex doesn't compile: {err}")), - }) - }) - .collect::, Regex>, String>>()?; + // Check patterns existence + if patterns.is_empty() { + return DaemonResponse::Err( + "trigger must specify patterns, e.g. `reaction trigger ... 
ip=1.2.3.4`".into(), + ); + } + // Check stream existance + let filters = match shared_state + .iter() + .find(|(stream, _)| stream_name == stream.name()) + { + Some((_, filters)) => filters, + None => { + return DaemonResponse::Err(format!("stream {stream_name} doesn't exist")); + } + }; + + // Check filter existance + let filter_manager = match filters + .iter() + .find(|(filter, _)| filter_name == filter.name()) + { + Some((_, filter)) => filter, + None => { + return DaemonResponse::Err(format!( + "filter {stream_name}.{filter_name} doesn't exist" + )); + } + }; + + let now = Local::now(); + match filter_manager.handle_trigger(patterns, now) { + Ok(()) => DaemonResponse::Ok(()), + Err(err) => DaemonResponse::Err(err), + } +} + +fn handle_show_or_flush_order( + stream_name: Option, + filter_name: Option, + patterns: BTreeMap, Regex>, + order: Order, + shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, +) -> DaemonResponse { let now = Local::now(); let cs: ClientStatus = shared_state .iter() @@ -117,15 +145,76 @@ fn answer_order( .map(|(filter, manager)| { ( filter.name().to_owned(), - manager.handle_order(&patterns, options.order, now), + manager.handle_order(&patterns, order, now), ) }) .collect(); acc.insert(stream.name().to_owned(), inner_map); acc }); + DaemonResponse::Order(cs) +} - Ok(cs) +fn answer_order( + config: &'static Config, + shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, + options: ClientRequest, +) -> DaemonResponse { + // Compute options + let (stream_name, filter_name) = match options.stream_filter { + Some(sf) => match sf.split_once(".") { + Some((s, f)) => (Some(s.to_string()), Some(f.to_string())), + None => (Some(sf), None), + }, + None => (None, None), + }; + + // Compute the Vec<(pattern_name: String, regex: String)> into a BTreeMap, String> + let patterns = match options + .patterns + .into_iter() + .map(|(name, reg)| { + // lookup pattern in config.patterns + config + 
.patterns() + .iter() + // retrieve or Err + .find(|(pattern_name, _)| &name == *pattern_name) + .ok_or_else(|| format!("pattern '{name}' doesn't exist")) + .and_then(|(_, pattern)| Ok((pattern.clone(), reg))) + }) + .collect::, String>, String>>() + { + Ok(p) => p, + Err(err) => return DaemonResponse::Err(err), + }; + + if let Order::Trigger = options.order { + handle_trigger_order(stream_name, filter_name, patterns, shared_state) + } else { + let patterns = match patterns + .into_iter() + .map(|(pattern, reg)| match Regex::new(®) { + Ok(reg) => Ok((pattern, reg)), + Err(err) => Err(format!( + "pattern '{}' regex doesn't compile: {err}", + pattern.name() + )), + }) + .collect::, Regex>, String>>() + { + Ok(p) => p, + Err(err) => return DaemonResponse::Err(err), + }; + + handle_show_or_flush_order( + stream_name, + filter_name, + patterns, + options.order, + shared_state, + ) + } } macro_rules! or_next { @@ -175,10 +264,7 @@ pub async fn socket_manager( serde_json::from_slice(&encoded_request) ); // Process - let response = match answer_order(config, &shared_state, request) { - Ok(res) => DaemonResponse::Order(res), - Err(err) => DaemonResponse::Err(err), - }; + let response = answer_order(config, &shared_state, request); // Encode let encoded_response = or_next!("failed to serialize response", serde_json::to_string::(&response)); diff --git a/src/main.rs b/src/main.rs index 53428b7..91e55a7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,7 @@ use std::{io::IsTerminal, process::exit}; use clap::Parser; use reaction::{ - cli::{Cli, SubCommand}, + cli::{Cli, Format, SubCommand}, client::{request, test_config, test_regex}, daemon::daemon, protocol::Order, @@ -63,6 +63,11 @@ async fn main() { limit, patterns, } => request(socket, format, limit, patterns, Order::Flush).await, + SubCommand::Trigger { + socket, + limit, + patterns, + } => request(socket, Format::JSON, limit, patterns, Order::Trigger).await, SubCommand::TestRegex { config, regex, diff --git 
a/src/protocol.rs b/src/protocol.rs index be6f926..9dff86e 100644 --- a/src/protocol.rs +++ b/src/protocol.rs @@ -13,6 +13,7 @@ use serde::{ pub enum Order { Show, Flush, + Trigger, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -24,6 +25,7 @@ pub struct ClientRequest { #[derive(Serialize, Deserialize)] pub enum DaemonResponse { + Ok(()), Order(ClientStatus), Err(String), } From ff8ea60ce609dc3f990ba19ee05edaa790546491 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 21 Jun 2025 12:00:00 +0200 Subject: [PATCH 020/241] WIP trigger command and ignoreregex performance improvement - ignoreregex is now a RegexSet for improved performance - Vec::clear() replaced by new Vec to really free RAM --- src/client/test_regex.rs | 2 +- src/concepts/filter.rs | 114 +++++++++++++++++++++++++++++++++---- src/concepts/pattern.rs | 104 +++++++++++++++++++++------------ src/daemon/filter/tests.rs | 12 ++++ 4 files changed, 184 insertions(+), 48 deletions(-) diff --git a/src/client/test_regex.rs b/src/client/test_regex.rs index c698235..8eb01bb 100644 --- a/src/client/test_regex.rs +++ b/src/client/test_regex.rs @@ -45,7 +45,7 @@ pub fn test_regex( for pattern in used_patterns.iter() { if let Some(match_) = matches.name(pattern.name()) { result.push(match_.as_str().to_string()); - if !pattern.not_an_ignore(match_.as_str()) { + if pattern.is_ignore(match_.as_str()) { ignored = true; } } diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 19c64a7..fa3f291 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -177,7 +177,7 @@ impl Filter { self.compiled_regex.push(compiled); first = false; } - self.regex.clear(); + self.regex = Vec::default(); self.patterns = Arc::new(new_patterns); if self.actions.is_empty() { @@ -203,10 +203,10 @@ impl Filter { if !self.patterns.is_empty() { let mut result = Match::new(); for pattern in self.patterns.as_ref() { - // if the pattern is in an optional part of the regex, there may be no - // captured group for it. 
+ // if the pattern is in an optional part of the regex, + // there may be no captured group for it. if let Some(match_) = matches.name(pattern.name()) { - if pattern.not_an_ignore(match_.as_str()) { + if !pattern.is_ignore(match_.as_str()) { result.push(match_.as_str().to_string()); } } @@ -224,6 +224,8 @@ impl Filter { None } + /// Test that the patterns map conforms to the filter's patterns. + /// Then returns a corresponding [`Match`]. pub fn get_match_from_patterns( &self, patterns: BTreeMap, String>, @@ -244,7 +246,7 @@ impl Filter { )); } - for (pattern, _) in &patterns { + for (pattern, match_) in &patterns { if self.patterns.get(pattern).is_none() { return Err(format!( "pattern {} is not present in the filter {}.{}", @@ -253,6 +255,22 @@ impl Filter { self.name )); } + + if !pattern.is_match(match_) { + return Err(format!( + "string '{}' doesn't match pattern {}", + match_, + pattern.name(), + )); + } + + if pattern.is_ignore(match_) { + return Err(format!( + "string '{}' is explcitly ignored by pattern {}", + match_, + pattern.name(), + )); + } } for pattern in self.patterns.iter() { @@ -350,7 +368,7 @@ impl Filter { pub mod tests { use crate::concepts::action::tests::{ok_action, ok_action_with_after}; use crate::concepts::pattern::tests::{ - boubou_pattern_with_ignore, default_pattern, ok_pattern_with_ignore, + boubou_pattern_with_ignore, default_pattern, number_pattern, ok_pattern_with_ignore, }; use super::*; @@ -595,17 +613,24 @@ pub mod tests { let name = "name".to_string(); let mut filter; - // make a Patterns - let mut patterns = Patterns::new(); - let mut pattern = ok_pattern_with_ignore(); pattern.setup(&name).unwrap(); - patterns.insert(name.clone(), pattern.clone().into()); + let pattern = Arc::new(pattern); let boubou_name = "boubou".to_string(); let mut boubou = boubou_pattern_with_ignore(); boubou.setup(&boubou_name).unwrap(); - patterns.insert(boubou_name.clone(), boubou.clone().into()); + let boubou = Arc::new(boubou); + + let patterns = 
Patterns::from([ + (name.clone(), pattern.clone()), + (boubou_name.clone(), boubou.clone()), + ]); + + let number_name = "number".to_string(); + let mut number_pattern = number_pattern(); + number_pattern.setup(&number_name).unwrap(); + let number_pattern = Arc::new(number_pattern); // one simple regex filter = Filter::default(); @@ -617,6 +642,33 @@ pub mod tests { assert_eq!(filter.get_match("youpi b youpi"), None); assert_eq!(filter.get_match("insert here"), None); + // Ok + assert_eq!( + filter.get_match_from_patterns(BTreeMap::from([(pattern.clone(), "b".into())])), + Ok(vec!("b".into())) + ); + // Doesn't match + assert!(filter + .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())])) + .is_err()); + // Ignored match + assert!(filter + .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())])) + .is_err()); + // Bad pattern + assert!(filter + .get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())])) + .is_err()); + // Bad number of patterns + assert!(filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (boubou.clone(), "bou".into()), + ])) + .is_err()); + // Bad number of patterns + assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err()); + // two patterns in one regex filter = Filter::default(); filter.actions.insert(name.clone(), ok_action()); @@ -631,6 +683,46 @@ pub mod tests { assert_eq!(filter.get_match("insert a here and bouboubou there"), None); assert_eq!(filter.get_match("insert b here and boubou there"), None); + // Ok + assert_eq!( + filter.get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (boubou.clone(), "bou".into()), + ])), + Ok(vec!("bou".into(), "b".into())) + ); + // Doesn't match + assert!(filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "abc".into()), + (boubou.clone(), "bou".into()), + ])) + .is_err()); + // Ignored match + assert!(filter + .get_match_from_patterns(BTreeMap::from([ + 
(pattern.clone(), "b".into()), + (boubou.clone(), "boubou".into()), + ])) + .is_err()); + // Bad pattern + assert!(filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (number_pattern.clone(), "1".into()), + ])) + .is_err()); + // Bad number of patterns + assert!(filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (boubou.clone(), "bou".into()), + (number_pattern.clone(), "1".into()), + ])) + .is_err()); + // Bad number of patterns + assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err()); + // multiple regexes with same pattern filter = Filter::default(); filter.actions.insert(name.clone(), ok_action()); diff --git a/src/concepts/pattern.rs b/src/concepts/pattern.rs index 53acbb7..0dbd04f 100644 --- a/src/concepts/pattern.rs +++ b/src/concepts/pattern.rs @@ -1,6 +1,6 @@ use std::cmp::Ordering; -use regex::Regex; +use regex::{Regex, RegexSet}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, Serialize)] @@ -15,7 +15,7 @@ pub struct Pattern { #[serde(default, rename = "ignoreregex", skip_serializing_if = "Vec::is_empty")] ignore_regex: Vec, #[serde(skip)] - compiled_ignore_regex: Vec, + compiled_ignore_regex: RegexSet, #[serde(skip)] name: String, @@ -31,10 +31,6 @@ impl Pattern { ..Pattern::default() } } - pub fn setup(&mut self, name: &str) -> Result<(), String> { - self._setup(name) - .map_err(|msg| format!("pattern {}: {}", name, msg)) - } pub fn name(&self) -> &String { &self.name @@ -43,7 +39,11 @@ impl Pattern { &self.name_with_braces } - pub fn _setup(&mut self, name: &str) -> Result<(), String> { + pub fn setup(&mut self, name: &str) -> Result<(), String> { + self._setup(name) + .map_err(|msg| format!("pattern {}: {}", name, msg)) + } + fn _setup(&mut self, name: &str) -> Result<(), String> { self.name = name.to_string(); self.name_with_braces = format!("<{}>", name); @@ -57,7 +57,7 @@ impl Pattern { if self.regex.is_empty() { return Err("regex is 
empty".into()); } - let compiled = Regex::new(&format!("^{}$", self.regex)).map_err(|err| err.to_string())?; + let compiled = self.compiled()?; self.regex = format!("(?P<{}>{})", self.name, self.regex); @@ -70,29 +70,55 @@ impl Pattern { } } - for ignore_regex in &self.ignore_regex { - let compiled_ignore = Regex::new(&format!("^{}$", ignore_regex)) - .map_err(|err| format!("ignoreregex '{}': {}", ignore_regex, err))?; - - self.compiled_ignore_regex.push(compiled_ignore); - } - self.ignore_regex.clear(); + self.compiled_ignore_regex = + match RegexSet::new(self.ignore_regex.iter().map(|regex| format!("^{}$", regex))) { + Ok(set) => set, + Err(err) => { + // Recompile regexes one by one to display a more specific error + for ignore_regex in &self.ignore_regex { + Regex::new(&format!("^{}$", ignore_regex)) + .map_err(|err| format!("ignoreregex '{}': {}", ignore_regex, err))?; + } + // Here we should have returned an error already. + // Returning a more generic error if not (which shouldn't happen). + return Err(format!("ignoreregex: {}", err)); + } + }; + self.ignore_regex = Vec::default(); Ok(()) } - pub fn not_an_ignore(&self, match_: &str) -> bool { - for regex in &self.compiled_ignore_regex { - if regex.is_match(match_) { - return false; - } + /// Returns the pattern's regex compiled standalone. + /// It's not kept as a field of the [`Pattern`] struct + /// because it's only used during setup and for the `trigger` manual command. + /// + /// *Yes, I know, avoiding a few bytes of memory is certainly a bad idea.* + /// *I'm open to discussion.* + fn compiled(&self) -> Result { + Regex::new(&format!("^{}$", self.regex)).map_err(|err| err.to_string()) + } + + /// Whether the provided string is a match for this pattern or not. + /// + /// Doesn't take into account ignore and ignore_regex: + /// use [`Self::is_ignore`] to access this information. 
+ pub fn is_match(&self, match_: &str) -> bool { + match self.compiled() { + Ok(regex) => regex.is_match(match_), + // Should not happen, this function should be called only after + // [`Pattern::setup`] + Err(_) => false, } - for ignore in &self.ignore { - if ignore == match_ { - return false; - } - } - true + } + + /// Whether the provided string is ignored by the ignore or ignoreregex + /// fields of this pattern. + /// + /// Can be used in combination with [`Self::is_match`]. + pub fn is_ignore(&self, match_: &str) -> bool { + self.ignore.iter().any(|ignore| ignore == match_) + || self.compiled_ignore_regex.is_match(match_) } } @@ -145,7 +171,7 @@ pub mod tests { regex: "".into(), ignore: Vec::new(), ignore_regex: Vec::new(), - compiled_ignore_regex: Vec::new(), + compiled_ignore_regex: RegexSet::default(), name: "".into(), name_with_braces: "".into(), } @@ -170,6 +196,12 @@ pub mod tests { pattern } + pub fn number_pattern() -> Pattern { + let mut pattern = ok_pattern(); + pattern.regex = "[0-1]+".to_string(); + pattern + } + #[test] fn setup_missing_information() { let mut pattern; @@ -245,7 +277,7 @@ pub mod tests { } #[test] - fn not_an_ignore() { + fn is_ignore() { let mut pattern; // ignore ok @@ -257,13 +289,13 @@ pub mod tests { pattern.ignore_regex.push("[de]".into()); pattern.setup("name").unwrap(); - assert!(!pattern.not_an_ignore("a")); - assert!(!pattern.not_an_ignore("b")); - assert!(!pattern.not_an_ignore("c")); - assert!(!pattern.not_an_ignore("d")); - assert!(!pattern.not_an_ignore("e")); - assert!(pattern.not_an_ignore("f")); - assert!(pattern.not_an_ignore("g")); - assert!(pattern.not_an_ignore("h")); + assert!(pattern.is_ignore("a")); + assert!(pattern.is_ignore("b")); + assert!(pattern.is_ignore("c")); + assert!(pattern.is_ignore("d")); + assert!(pattern.is_ignore("e")); + assert!(!pattern.is_ignore("f")); + assert!(!pattern.is_ignore("g")); + assert!(!pattern.is_ignore("h")); } } diff --git a/src/daemon/filter/tests.rs 
b/src/daemon/filter/tests.rs index fc92177..f0a950a 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -431,3 +431,15 @@ async fn one_outdated_db_match() { let bed = bed.part2(filter, now, Some(db)).await; bed.assert_empty_trees(); } + +#[tokio::test] +async fn flush() { + // TODO multiple tests +} + +#[tokio::test] +async fn trigger() { + // TODO multiple tests +} + +// TODO test State functions From fad9ce11668ad70526bbc86b6ef5ffb592a91c6b Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 22 Jun 2025 12:00:00 +0200 Subject: [PATCH 021/241] Add unit tests to FilterManager::handle_trigger --- src/concepts/filter.rs | 1 + src/daemon/filter/tests.rs | 156 ++++++++++++++++++++++++++++++++++++- 2 files changed, 155 insertions(+), 2 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index fa3f291..3303220 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -689,6 +689,7 @@ pub mod tests { (pattern.clone(), "b".into()), (boubou.clone(), "bou".into()), ])), + // Reordered by pattern name Ok(vec!("bou".into(), "b".into())) ); // Doesn't match diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index f0a950a..a72f26a 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -45,6 +45,7 @@ impl TestBed { TestBed2 { _out_path: self._out_path, out_file: self.out_file, + az_patterns: self.az_patterns, now, manager: FilterManager::new( filter, @@ -65,6 +66,7 @@ struct TestBed2 { pub semaphore: Arc, pub now: Time, pub manager: FilterManager, + pub az_patterns: Patterns, } impl TestBed2 { @@ -438,8 +440,158 @@ async fn flush() { } #[tokio::test] -async fn trigger() { - // TODO multiple tests +async fn trigger_unmatched_pattern() { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + ), + Action::new( + vec!["sh", "-c", 
&format!("echo a2 >> {}", &bed.out_file)], + Some("200ms"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + ), + ], + vec!["test "], + Some(2), + Some("1s"), + "test", + "test", + &bed.az_patterns, + ); + + let now = Local::now(); + let one = vec!["one".to_string()]; + let bed = bed.part2(filter, now, None).await; + + bed.manager + .handle_trigger( + // az_pattern: "one" + bed.az_patterns + .values() + .cloned() + .map(|pattern| (pattern, one[0].clone())) + .collect(), + now, + ) + .unwrap(); + + // the action executes + tokio::time::sleep(Duration::from_millis(40)).await; + + // No matches, one action registered + { + let state = bed.manager.state.lock().unwrap(); + assert!(state.matches.is_empty()); + assert!(state.ordered_times.is_empty()); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([( + MatchTime { + m: one.clone(), + t: now, + }, + 1 + )]) + ); + } + assert_eq!( + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action" + ); +} + +#[tokio::test] +async fn trigger_matched_pattern() { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + ), + Action::new( + vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], + Some("200ms"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + ), + ], + vec!["test "], + Some(2), + Some("1s"), + "test", + "test", + &bed.az_patterns, + ); + + let now = Local::now(); + let now1s = now - TimeDelta::milliseconds(10); + let one = vec!["one".to_string()]; + + let mut db = TempDatabase::default().await; + db.set_loaded_db(HashMap::from([( + filter_ordered_times_db_name(&filter), + HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), + )])); + let bed = bed.part2(filter, now, Some(db)).await; + + bed.manager + .handle_trigger( + // az_pattern: "one" + bed.az_patterns + 
.values() + .cloned() + .map(|pattern| (pattern, one[0].clone())) + .collect(), + now, + ) + .unwrap(); + + // the action executes + tokio::time::sleep(Duration::from_millis(40)).await; + + // No matches, one action registered + { + let state = bed.manager.state.lock().unwrap(); + assert!(state.matches.is_empty()); + assert!(state.ordered_times.is_empty()); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([( + MatchTime { + m: one.clone(), + t: now, + }, + 1 + )]) + ); + } + assert_eq!( + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action" + ); } // TODO test State functions From 4cb69fb0d40146dcb0dd2d80feba6e3f8a6fb2a3 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 23 Jun 2025 12:00:00 +0200 Subject: [PATCH 022/241] Add test for trigger command --- tests/simple.rs | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/tests/simple.rs b/tests/simple.rs index 0b168e3..cee6f53 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -216,4 +216,48 @@ async fn simple() { get_file_content(out_path).trim(), "1\n2\n3\n4\n5\n6\n7\n8\n9".to_owned() ); + + // Fourth part of the test + // Check the trigger function + + // New directory to avoid to load the database from previous tests + let dir = TempDir::new().unwrap(); + env::set_current_dir(&dir).unwrap(); + + // No thing from stream + config_with_cmd( + config_path, + "sleep 0.1", + ); + + file_with_contents(out_path, ""); + + // Run the daemon + let handle = tokio::spawn(async move { daemon(config_path.into(), socket_path.into()).await }); + + // Run the trigger + + // We sleep a bit to wait for reaction to start + let handle2 = tokio::spawn(async move { + sleep(Duration::from_millis(20)).await; + request( + socket_path.into(), + Format::JSON, + Some("stream1.filter1".into()), + vec![("num".into(), "95".into())], + Order::Trigger, + ) + .await + }); + + let (daemon_exit, trigger) = tokio::join!(handle, handle2); + 
assert!(daemon_exit.is_ok()); + assert!(trigger.is_ok()); + + // make sure the trigger number is in the output + assert_eq!( + get_file_content(out_path).trim(), + "95".to_owned() + ); + } From 39ae570ae55aea98b805b6160e4ad61b9d56a3a4 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 23 Jun 2025 12:00:00 +0200 Subject: [PATCH 023/241] rename file --- src/daemon/{filter.rs => filter/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/daemon/{filter.rs => filter/mod.rs} (100%) diff --git a/src/daemon/filter.rs b/src/daemon/filter/mod.rs similarity index 100% rename from src/daemon/filter.rs rename to src/daemon/filter/mod.rs From 529e40acd4b8027bf1ed984949e3865ba4b90266 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 23 Jun 2025 12:00:00 +0200 Subject: [PATCH 024/241] move State into its own file This permit to reduce filter/mod.rs file size --- src/daemon/filter/mod.rs | 173 +++---------------------------------- src/daemon/filter/state.rs | 163 ++++++++++++++++++++++++++++++++++ src/daemon/filter/tests.rs | 2 +- 3 files changed, 176 insertions(+), 162 deletions(-) create mode 100644 src/daemon/filter/state.rs diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 17d9558..a67d335 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -1,8 +1,10 @@ #[cfg(test)] mod tests; +mod state; + use std::{ - collections::{BTreeMap, BTreeSet}, + collections::BTreeMap, process::Stdio, sync::{Arc, Mutex, MutexGuard}, }; @@ -12,14 +14,13 @@ use tokio::sync::Semaphore; use tracing::{error, info}; use crate::{ - concepts::{Action, Filter, Match, MatchTime, Pattern, Time}, + concepts::{Action, Filter, Match, Pattern, Time}, protocol::{Order, PatternStatus}, - treedb::{ - helpers::{to_match, to_matchtime, to_time, to_u64}, - Database, Tree, - }, + treedb::Database, }; +use state::State; + use super::shutdown::ShutdownToken; /// Responsible for handling all runtime logic dedicated to a [`Filter`]. 
@@ -106,7 +107,11 @@ impl FilterManager { exec } - pub fn handle_trigger(&self, patterns: BTreeMap, String>, now: Time) -> Result<(), String> { + pub fn handle_trigger( + &self, + patterns: BTreeMap, String>, + now: Time, + ) -> Result<(), String> { let match_ = self.filter.get_match_from_patterns(patterns)?; #[allow(clippy::unwrap_used)] // propagating panics is ok @@ -320,157 +325,3 @@ fn exec_now(exec_limit: &Option>, action: &'static Action, m: Mat } }); } - -fn filter_ordered_times_db_name(filter: &Filter) -> String { - format!( - "filter_ordered_times_{}.{}", - filter.stream_name(), - filter.name() - ) -} - -fn filter_triggers_db_name(filter: &Filter) -> String { - format!("filter_triggers_{}.{}", filter.stream_name(), filter.name()) -} - -/// Internal state of a [`FilterManager`]. -/// Holds all data on current matches and triggers. -struct State { - /// the Filter managed - filter: &'static Filter, - /// Has the filter at least an action with an after directive? - has_after: bool, - /// Saves all the current Matches for this Filter - /// Has duplicate values for a key - /// Not persisted - matches: BTreeMap>, - /// Alternative view of the current Matches for O(1) cleaning of old Matches - /// without added async Tasks to remove them - /// Persisted - ordered_times: Tree, - /// Saves all the current Triggers for this Filter - /// Persisted - triggers: Tree, -} - -impl State { - fn new( - filter: &'static Filter, - has_after: bool, - db: &mut Database, - now: Time, - ) -> Result { - let mut this = Self { - filter, - has_after, - matches: BTreeMap::new(), - ordered_times: db.open_tree( - filter_ordered_times_db_name(filter), - filter.retry_duration().unwrap_or_default(), - |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), - )?, - triggers: db.open_tree( - filter_triggers_db_name(filter), - filter.retry_duration().unwrap_or_default(), - |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), - )?, - }; - this.clear_past_matches(now); - 
this.load_matches_from_ordered_times(); - Ok(this) - } - - fn add_match(&mut self, m: Match, t: Time) { - let set = self.matches.entry(m.clone()).or_default(); - set.insert(t); - self.ordered_times.insert(t, m); - } - - fn add_trigger(&mut self, m: Match, t: Time) { - // We record triggered filters only when there is an action with an `after` directive - if self.has_after { - // Add the (Match, Time) to the triggers map - self.triggers - .insert(MatchTime { m, t }, self.filter.actions().len() as u64); - } - } - - // Completely remove a Match from the matches - fn remove_match(&mut self, m: &Match) { - if let Some(set) = self.matches.get(m) { - for t in set { - self.ordered_times.remove(t); - } - self.matches.remove(m); - } - } - - /// Completely remove a Match from the triggers - fn remove_trigger(&mut self, m: &Match, t: &Time) { - self.triggers.remove(&MatchTime { - m: m.clone(), - t: *t, - }); - } - - /// Returns whether we should still execute an action for this (Match, Time) trigger - fn decrement_trigger(&mut self, m: &Match, t: Time) -> bool { - // We record triggered filters only when there is an action with an `after` directive - if self.has_after { - let mut exec_needed = false; - let mt = MatchTime { m: m.clone(), t }; - let count = self.triggers.get(&mt); - if let Some(count) = count { - exec_needed = true; - if *count <= 1 { - self.triggers.remove(&mt); - } else { - self.triggers.insert(mt, count - 1); - } - } - exec_needed - } else { - true - } - } - - fn clear_past_matches(&mut self, now: Time) { - let retry_duration = self.filter.retry_duration().unwrap_or_default(); - while self - .ordered_times - .first_key_value() - .is_some_and(|(t, _)| *t + retry_duration < now) - { - #[allow(clippy::unwrap_used)] - // unwrap: we just checked in the condition that first is_some - let (t, m) = { - let (t, m) = self.ordered_times.first_key_value().unwrap(); - (*t, m.clone()) - }; - self.ordered_times.remove(&t); - if let Some(set) = self.matches.get(&m) { - let 
mut set = set.clone(); - set.remove(&t); - if set.is_empty() { - self.matches.remove(&m); - } else { - self.matches.insert(m, set); - } - } - } - } - - fn get_times(&self, m: &Match) -> usize { - match self.matches.get(m) { - Some(vec) => vec.len(), - None => 0, - } - } - - fn load_matches_from_ordered_times(&mut self) { - for (t, m) in self.ordered_times.iter() { - let set = self.matches.entry(m.clone()).or_default(); - set.insert(*t); - } - } -} diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs new file mode 100644 index 0000000..2883d44 --- /dev/null +++ b/src/daemon/filter/state.rs @@ -0,0 +1,163 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use crate::{ + concepts::{Filter, Match, MatchTime, Time}, + treedb::{ + helpers::{to_match, to_matchtime, to_time, to_u64}, + Database, Tree, + }, +}; + +pub fn filter_ordered_times_db_name(filter: &Filter) -> String { + format!( + "filter_ordered_times_{}.{}", + filter.stream_name(), + filter.name() + ) +} + +pub fn filter_triggers_db_name(filter: &Filter) -> String { + format!("filter_triggers_{}.{}", filter.stream_name(), filter.name()) +} + +/// Internal state of a [`FilterManager`]. +/// Holds all data on current matches and triggers. +pub struct State { + /// the Filter managed + filter: &'static Filter, + /// Has the filter at least an action with an after directive? 
+ has_after: bool, + /// Saves all the current Matches for this Filter + /// Has duplicate values for a key + /// Not persisted + pub matches: BTreeMap>, + /// Alternative view of the current Matches for O(1) cleaning of old Matches + /// without added async Tasks to remove them + /// Persisted + pub ordered_times: Tree, + /// Saves all the current Triggers for this Filter + /// Persisted + pub triggers: Tree, +} + +impl State { + pub fn new( + filter: &'static Filter, + has_after: bool, + db: &mut Database, + now: Time, + ) -> Result { + let mut this = Self { + filter, + has_after, + matches: BTreeMap::new(), + ordered_times: db.open_tree( + filter_ordered_times_db_name(filter), + filter.retry_duration().unwrap_or_default(), + |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), + )?, + triggers: db.open_tree( + filter_triggers_db_name(filter), + filter.retry_duration().unwrap_or_default(), + |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), + )?, + }; + this.clear_past_matches(now); + this.load_matches_from_ordered_times(); + Ok(this) + } + + pub fn add_match(&mut self, m: Match, t: Time) { + let set = self.matches.entry(m.clone()).or_default(); + set.insert(t); + self.ordered_times.insert(t, m); + } + + pub fn add_trigger(&mut self, m: Match, t: Time) { + // We record triggered filters only when there is an action with an `after` directive + if self.has_after { + // Add the (Match, Time) to the triggers map + self.triggers + .insert(MatchTime { m, t }, self.filter.actions().len() as u64); + } + } + + // Completely remove a Match from the matches + pub fn remove_match(&mut self, m: &Match) { + if let Some(set) = self.matches.get(m) { + for t in set { + self.ordered_times.remove(&t); + } + self.matches.remove(m); + } + } + + /// Completely remove a Match from the triggers + pub fn remove_trigger(&mut self, m: &Match, t: &Time) { + self.triggers.remove(&MatchTime { + m: m.clone(), + t: *t, + }); + } + + /// Returns whether we should still execute an 
action for this (Match, Time) trigger + pub fn decrement_trigger(&mut self, m: &Match, t: Time) -> bool { + // We record triggered filters only when there is an action with an `after` directive + if self.has_after { + let mut exec_needed = false; + let mt = MatchTime { m: m.clone(), t }; + let count = self.triggers.get(&mt); + if let Some(count) = count { + exec_needed = true; + if *count <= 1 { + self.triggers.remove(&mt); + } else { + self.triggers.insert(mt, count - 1); + } + } + exec_needed + } else { + true + } + } + + pub fn clear_past_matches(&mut self, now: Time) { + let retry_duration = self.filter.retry_duration().unwrap_or_default(); + while self + .ordered_times + .first_key_value() + .is_some_and(|(t, _)| *t + retry_duration < now) + { + #[allow(clippy::unwrap_used)] + // unwrap: we just checked in the condition that first is_some + let (t, m) = { + let (t, m) = self.ordered_times.first_key_value().unwrap(); + (t.clone(), m.clone()) + }; + self.ordered_times.remove(&t); + if let Some(set) = self.matches.get(&m) { + let mut set = set.clone(); + set.remove(&t); + if set.is_empty() { + self.matches.remove(&m); + } else { + self.matches.insert(m, set); + } + } + } + } + + pub fn get_times(&self, m: &Match) -> usize { + match self.matches.get(m) { + Some(vec) => vec.len(), + None => 0, + } + } + + fn load_matches_from_ordered_times(&mut self) { + for (t, m) in self.ordered_times.iter() { + let set = self.matches.entry(m.clone()).or_default(); + set.insert(*t); + } + } +} diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index a72f26a..464b9d3 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -9,7 +9,7 @@ use chrono::{Local, TimeDelta}; use tempfile::TempPath; use tokio::sync::Semaphore; -use super::{filter_ordered_times_db_name, FilterManager, React}; +use super::{state::filter_ordered_times_db_name, FilterManager, React}; use crate::{ concepts::{Action, Filter, MatchTime, Pattern, Patterns, Time}, 
daemon::shutdown::ShutdownController, From d4ffae8489e873e69249c7a0285a0acf8f0a7955 Mon Sep 17 00:00:00 2001 From: Baptiste Careil Date: Sat, 21 Jun 2025 15:46:37 +0200 Subject: [PATCH 025/241] Fix #126: make config evaluation order predictable --- src/concepts/config.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 904a382..22780c9 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -212,8 +212,8 @@ impl Config { fn _from_dir_raw(path: &Path) -> Result<(Self, Vec), String> { let dir = std::fs::read_dir(path) .map_err(|e| format!("Error accessing directory {}: {e}", path.display()))?; - let mut cfg: Option = None; - let mut read_cfg_fname = vec![]; + // sorts files by name + let mut cfg_files = BTreeMap::new(); for f in dir { let f = f.map_err(|e| format!("Error while reading directory {}: {e}", path.display()))?; @@ -261,8 +261,8 @@ impl Config { } }; - let cfg_format = match Self::_extension_to_format(ext) { - Ok(fmt) => fmt, + match Self::_extension_to_format(ext) { + Ok(fmt) => cfg_files.insert(fname.to_string(), (fpath, fmt)), Err(_) => { // silently ignore files without an expected extension debug!( @@ -272,10 +272,12 @@ impl Config { continue; } }; + } - let cfg_part = Self::_load_file(&fpath, cfg_format) + let mut cfg: Option = None; + for (fname, (fpath, fmt)) in &cfg_files { + let cfg_part = Self::_load_file(fpath, *fmt) .map_err(|e| format!("While reading {fname} in {}: {e}", path.display()))?; - read_cfg_fname.push(fname.to_string()); if let Some(mut cfg_agg) = cfg.take() { cfg_agg.merge(cfg_part)?; @@ -286,7 +288,7 @@ impl Config { } if let Some(cfg) = cfg { - Ok((cfg, read_cfg_fname)) + Ok((cfg, cfg_files.into_keys().collect())) } else { Err(format!( "No valid configuration files found in {}", @@ -328,6 +330,7 @@ impl Config { } } +#[derive(Clone, Copy)] enum Format { Yaml, Json, From d12a61c14ab22af2e5ad760534d6208b76769344 Mon Sep 17 00:00:00 
2001 From: Baptiste Careil Date: Sat, 21 Jun 2025 17:13:04 +0200 Subject: [PATCH 026/241] Fix #124: discard invalid utf8 sequences from input streams --- src/daemon/stream.rs | 50 ++++++++++++++++++++++++++++----- tests/test-binary-input.jsonnet | 28 ++++++++++++++++++ 2 files changed, 71 insertions(+), 7 deletions(-) create mode 100644 tests/test-binary-input.jsonnet diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index b8e0ea8..dd76eb6 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, process::Stdio, task::Poll, time::Duration}; use chrono::Local; use futures::{FutureExt, StreamExt}; use tokio::{ - io::{AsyncBufReadExt, BufReader, Lines}, + io::{AsyncBufReadExt, BufReader}, pin, process::{Child, ChildStderr, ChildStdout, Command}, time::sleep, @@ -17,16 +17,52 @@ use crate::{ use super::shutdown::ShutdownToken; +/** Converts bytes to string, discarding invalid utf8 sequences +*/ +fn to_string(data: &[u8]) -> String { + let res = String::from_utf8_lossy(data); + res.to_string() + .replace(std::char::REPLACEMENT_CHARACTER, "") +} + #[allow(clippy::type_complexity)] -fn lines_to_stream( - mut lines: Lines, +fn lines_to_stream( + mut lines: BufReader, ) -> futures::stream::PollFn< impl FnMut(&mut std::task::Context) -> Poll>>, > { + let mut at_eof = false; + let mut buffer = vec![]; futures::stream::poll_fn(move |cx| { - let nl = lines.next_line(); + if at_eof { + // reached EOF earlier, avoid calling read again + return Poll::Ready(None); + } + // Try to read until LF or EOF. 
If interrupted, buffer might contain data, in which case + // new data will be happened to it + let nl = lines.read_until(0x0a, &mut buffer); pin!(nl); - futures::Future::poll(nl, cx).map(Result::transpose) + match futures::Future::poll(nl, cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(Ok(0)) => { + if buffer.is_empty() { + // at eof + Poll::Ready(None) + } else { + // reached eof with data in the buffer + at_eof = true; + let line = to_string(&buffer); + buffer.clear(); + Poll::Ready(Some(Ok(line))) + } + } + Poll::Ready(Ok(_)) => { + let line = to_string(&buffer); + buffer.clear(); + Poll::Ready(Some(Ok(line))) + } + Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), + } }) } @@ -119,8 +155,8 @@ async fn handle_io( child_stderr: ChildStderr, filter_managers: HashMap<&'static Filter, FilterManager>, ) { - let lines_stdout = lines_to_stream(BufReader::new(child_stdout).lines()); - let lines_stderr = lines_to_stream(BufReader::new(child_stderr).lines()); + let lines_stdout = lines_to_stream(BufReader::new(child_stdout)); + let lines_stderr = lines_to_stream(BufReader::new(child_stderr)); // aggregate outputs, will end when both streams end let mut lines = futures::stream::select(lines_stdout, lines_stderr); diff --git a/tests/test-binary-input.jsonnet b/tests/test-binary-input.jsonnet new file mode 100644 index 0000000..adbb883 --- /dev/null +++ b/tests/test-binary-input.jsonnet @@ -0,0 +1,28 @@ +/* Test that non-utf8 characters are stripped from the commands' output + * + * ASCII characters such as \x1b, \x05 are kept as is. 
+ */ +{ + patterns: { + id: { + regex: @'.+', + }, + }, + streams: { + binary: { + cmd: ['sh', '-c', 'for n in 123 456 987; do printf "\\n\\x1b$n\\xe2 \\x05"; sleep 0.5; done; printf "\\n"; sleep 0.2'], + filters: { + filt1: { + regex: [ + @'', + ], + actions: { + act: { + cmd: ['echo', 'received ""'], + }, + }, + }, + }, + }, + }, +} From 55ed7b9c5f32e23241d162ab5b62cd6fe87e694f Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 23 Jun 2025 12:00:00 +0200 Subject: [PATCH 027/241] Amend heavy-load test - Add wrapper script - Add non-matching lines - Put two filters on the same stream, where either one of them matches --- bench/heavy-load.sh | 11 +++++++++++ bench/heavy-load.yml | 17 +++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) create mode 100644 bench/heavy-load.sh diff --git a/bench/heavy-load.sh b/bench/heavy-load.sh new file mode 100644 index 0000000..0abf296 --- /dev/null +++ b/bench/heavy-load.sh @@ -0,0 +1,11 @@ +set -e + +rm -f reaction.db +cargo build --release +sudo systemd-run --wait \ + -p User="$(id -nu)" \ + -p MemoryAccounting=yes \ + -p IOAccounting=yes \ + -p WorkingDirectory="$(pwd)" \ + -p Environment=PATH=/run/current-system/sw/bin/ \ + sh -c "for i in 1 2; do ./target/release/reaction start -c ./bench/heavy-load.yml -l ERROR -s ./reaction.sock; done" diff --git a/bench/heavy-load.yml b/bench/heavy-load.yml index 8ccfc2a..f6ba1a4 100644 --- a/bench/heavy-load.yml +++ b/bench/heavy-load.yml @@ -15,7 +15,7 @@ streams: tailDown1: cmd: [ 'sh', '-c', 'sleep 2; seq 10001 | while read i; do echo found $i; done' ] filters: - find: + find1: regex: - '^found ' retry: 9 @@ -28,9 +28,9 @@ streams: after: 1m onexit: false tailDown2: - cmd: [ 'sh', '-c', 'sleep 2; seq 1000100 | while read i; do echo found $i; done' ] + cmd: [ 'sh', '-c', 'sleep 2; seq 1000100 | while read i; do echo found $i; echo trouvé $i; done' ] filters: - find: + find2: regex: - '^found ' retry: 480 @@ -43,9 +43,9 @@ streams: after: 1m onexit: false tailDown3: - cmd: [ 
'sh', '-c', 'sleep 2; seq 1000100 | while read i; do echo found $i; done' ] + cmd: [ 'sh', '-c', 'sleep 2; seq 1000100 | while read i; do echo found $i; echo trouvé $i; done' ] filters: - find: + find3: regex: - '^found ' retry: 480 @@ -57,12 +57,9 @@ streams: cmd: [ 'sleep', '0.0' ] after: 1m onexit: false - tailDown4: - cmd: [ 'sh', '-c', 'sleep 2; seq 1000100 | while read i; do echo found $i; done' ] - filters: - find: + find4: regex: - - '^found ' + - '^trouvé ' retry: 480 retryperiod: 6m actions: From ad6b0faa30c1af84360f66074a917b4bf6cda10a Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 23 Jun 2025 12:00:00 +0200 Subject: [PATCH 028/241] Performance: Use a RegexSet for all regexes of a Stream StreamManager is now a struct that has its own RegexSet created from all the regexes inside its Filters. Instead of calling FilterManager::handle_line on all its FilterManagers, resulting in m*n regex passes, it matches on all the regexes with its RegexSet. It then only calls FilterManager::handle_line on matching Filters. This should increase performance in those cases: - Streams with a lot of filters or a lot of regexes - Filters that match a small proportion of their Stream lines This may decrease performance when most of lines are matched by all Filters of a Stream. 
--- src/concepts/filter.rs | 8 +- src/daemon/filter/mod.rs | 18 +++ src/daemon/mod.rs | 20 ++-- src/daemon/stream.rs | 245 +++++++++++++++++++++++---------------- 4 files changed, 183 insertions(+), 108 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 3303220..b338a89 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -89,6 +89,10 @@ impl Filter { self.longuest_action_duration } + pub fn regex(&self) -> &Vec { + &self.regex + } + pub fn actions(&self) -> &BTreeMap { &self.actions } @@ -144,6 +148,7 @@ impl Filter { } let mut new_patterns = BTreeSet::new(); + let mut new_regex = Vec::new(); let mut first = true; for regex in &self.regex { let mut regex_buf = regex.clone(); @@ -172,12 +177,13 @@ impl Filter { )); } regex_buf = regex_buf.replacen(pattern.name_with_braces(), &pattern.regex, 1); + new_regex.push(regex_buf.clone()); } let compiled = Regex::new(®ex_buf).map_err(|err| err.to_string())?; self.compiled_regex.push(compiled); first = false; } - self.regex = Vec::default(); + self.regex = new_regex; self.patterns = Arc::new(new_patterns); if self.actions.is_empty() { diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index a67d335..918bd7d 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -325,3 +325,21 @@ fn exec_now(exec_limit: &Option>, action: &'static Action, m: Mat } }); } + +impl PartialEq for FilterManager { + fn eq(&self, other: &Self) -> bool { + self.filter == other.filter + } +} +impl Eq for FilterManager {} + +impl Ord for FilterManager { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.filter.cmp(other.filter) + } +} +impl PartialOrd for FilterManager { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index ed5294b..ec1ee47 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -20,7 +20,7 @@ use crate::{concepts::Config, treedb::Database}; use filter::FilterManager; 
pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; use socket::socket_manager; -use stream::stream_manager; +use stream::StreamManager; mod filter; mod shutdown; @@ -37,8 +37,6 @@ pub async fn daemon( return Err("a start command failed, exiting.".into()); } - let mut stream_task_handles = Vec::new(); - // Cancellation Token let shutdown = ShutdownController::new(); @@ -54,6 +52,7 @@ pub async fn daemon( // Filter managers let now = Local::now(); let mut state = HashMap::new(); + let mut stream_managers = Vec::new(); for stream in config.streams().values() { let mut filter_managers = HashMap::new(); for filter in stream.filters().values() { @@ -63,13 +62,20 @@ pub async fn daemon( } state.insert(stream, filter_managers.clone()); - let token = shutdown.token(); - stream_task_handles.push(tokio::spawn(async move { - stream_manager(stream, filter_managers, token).await - })); + stream_managers.push(StreamManager::new( + stream, + filter_managers, + shutdown.token(), + )?); } drop(exec_limit); + // Start Stream managers + let mut stream_task_handles = Vec::new(); + for stream_manager in stream_managers { + stream_task_handles.push(tokio::spawn(async move { stream_manager.start().await })); + } + // Run database task let mut db_status_rx = { let token = shutdown.token(); diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index dd76eb6..f819009 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -1,7 +1,13 @@ -use std::{collections::HashMap, process::Stdio, task::Poll, time::Duration}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + process::Stdio, + task::Poll, + time::Duration, +}; use chrono::Local; use futures::{FutureExt, StreamExt}; +use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, pin, @@ -66,118 +72,157 @@ fn lines_to_stream( }) } -pub async fn stream_manager( +pub struct StreamManager { + compiled_regex_set: RegexSet, + regex_index_to_filter_manager: Vec, stream: &'static Stream, - 
filter_managers: HashMap<&'static Filter, FilterManager>, shutdown: ShutdownToken, -) { - info!("{}: start {:?}", stream.name(), stream.cmd()); - let mut child = match Command::new(&stream.cmd()[0]) - .args(&stream.cmd()[1..]) - .stdin(Stdio::null()) - .stderr(Stdio::piped()) - .stdout(Stdio::piped()) - .spawn() - { - Ok(child) => child, - Err(err) => { - error!("could not execute stream {} cmd: {}", stream.name(), err); - return; - } - }; - - // keep stdout/stderr before moving child to handle_child - #[allow(clippy::unwrap_used)] - // we know there is an stdout because we asked for Stdio::piped() - let child_stdout = child.stdout.take().unwrap(); - #[allow(clippy::unwrap_used)] - // we know there is an stderr because we asked for Stdio::piped() - let child_stderr = child.stderr.take().unwrap(); - - tokio::join!( - handle_child(stream.name(), child, shutdown), - handle_io(stream.name(), child_stdout, child_stderr, filter_managers) - ); } -async fn handle_child(stream_name: &'static str, mut child: Child, shutdown: ShutdownToken) { - const STREAM_PROCESS_GRACE_TIME_SEC: u64 = 15; - const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5; +impl StreamManager { + pub fn new( + stream: &'static Stream, + filter_managers: HashMap<&'static Filter, FilterManager>, + shutdown: ShutdownToken, + ) -> Result { + let all_regexes: BTreeMap<_, _> = filter_managers + .iter() + .flat_map(|(filter, filter_manager)| { + filter + .regex() + .iter() + .map(|regex| (regex, filter_manager.clone())) + }) + .collect(); - // wait either for the child process to exit on its own or for the shutdown signal - futures::select! 
{ - _ = child.wait().fuse() => { - error!("stream {stream_name} exited: its command returned."); - return; - } - _ = shutdown.wait().fuse() => {} + Ok(StreamManager { + compiled_regex_set: RegexSet::new(all_regexes.keys())?, + regex_index_to_filter_manager: all_regexes.into_values().collect(), + stream, + shutdown, + }) } - // first, try to ask nicely the child process to exit - if let Some(pid) = child.id() { - let pid = nix::unistd::Pid::from_raw(pid as i32); - - // the most likely error is that the process does not exist anymore - // but we still need to reclaim it with Child::wait - let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM); - - futures::select! { - _ = child.wait().fuse() => { - return; - }, - _ = sleep(Duration::from_secs(STREAM_PROCESS_GRACE_TIME_SEC)).fuse() => {}, - } - } else { - warn!("could not get PID of child process for stream {stream_name}"); - // still try to use tokio API to kill and reclaim the child process - } - - // if that fails, or we cannot get the underlying PID, terminate the process. - // NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a - // syscall to a resource no-longer available (a notorious example is a read on a disconnected - // NFS share) - - // as before, the only expected error is that the child process already terminated - // but we still need to reclaim it if that's the case. - let _ = child.start_kill(); - - futures::select! 
{ - _ = child.wait().fuse() => {} - _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => { - error!("child process of stream {stream_name} did not terminate"); - } - } -} - -async fn handle_io( - stream_name: &'static str, - child_stdout: ChildStdout, - child_stderr: ChildStderr, - filter_managers: HashMap<&'static Filter, FilterManager>, -) { - let lines_stdout = lines_to_stream(BufReader::new(child_stdout)); - let lines_stderr = lines_to_stream(BufReader::new(child_stderr)); - // aggregate outputs, will end when both streams end - let mut lines = futures::stream::select(lines_stdout, lines_stderr); - - loop { - match lines.next().await { - Some(Ok(line)) => { - let now = Local::now(); - for manager in filter_managers.values() { - manager.handle_line(&line, now); - } - } - Some(Err(err)) => { + pub async fn start(self) { + info!("{}: start {:?}", self.stream.name(), self.stream.cmd()); + let mut child = match Command::new(&self.stream.cmd()[0]) + .args(&self.stream.cmd()[1..]) + .stdin(Stdio::null()) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + { + Ok(child) => child, + Err(err) => { error!( - "impossible to read output from stream {}: {}", - stream_name, err + "could not execute stream {} cmd: {}", + self.stream.name(), + err ); return; } - None => { + }; + + // keep stdout/stderr before moving child to handle_child + #[allow(clippy::unwrap_used)] + // we know there is an stdout because we asked for Stdio::piped() + let child_stdout = child.stdout.take().unwrap(); + #[allow(clippy::unwrap_used)] + // we know there is an stderr because we asked for Stdio::piped() + let child_stderr = child.stderr.take().unwrap(); + + tokio::join!( + self.handle_child(child), + self.handle_io(child_stdout, child_stderr), + ); + } + + async fn handle_child(&self, mut child: Child) { + const STREAM_PROCESS_GRACE_TIME_SEC: u64 = 15; + const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5; + + // wait either for the child process to exit on 
its own or for the shutdown signal + futures::select! { + _ = child.wait().fuse() => { + error!("stream {} exited: its command returned.", self.stream.name()); return; } + _ = self.shutdown.wait().fuse() => {} + } + + // first, try to ask nicely the child process to exit + if let Some(pid) = child.id() { + let pid = nix::unistd::Pid::from_raw(pid as i32); + + // the most likely error is that the process does not exist anymore + // but we still need to reclaim it with Child::wait + let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM); + + futures::select! { + _ = child.wait().fuse() => { + return; + }, + _ = sleep(Duration::from_secs(STREAM_PROCESS_GRACE_TIME_SEC)).fuse() => {}, + } + } else { + warn!( + "could not get PID of child process for stream {}", + self.stream.name() + ); + // still try to use tokio API to kill and reclaim the child process + } + + // if that fails, or we cannot get the underlying PID, terminate the process. + // NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a + // syscall to a resource no-longer available (a notorious example is a read on a disconnected + // NFS share) + + // as before, the only expected error is that the child process already terminated + // but we still need to reclaim it if that's the case. + let _ = child.start_kill(); + + futures::select! 
{ + _ = child.wait().fuse() => {} + _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => { + error!("child process of stream {} did not terminate", self.stream.name()); + } } } + + async fn handle_io(&self, child_stdout: ChildStdout, child_stderr: ChildStderr) { + let lines_stdout = lines_to_stream(BufReader::new(child_stdout)); + let lines_stderr = lines_to_stream(BufReader::new(child_stderr)); + // aggregate outputs, will end when both streams end + let mut lines = futures::stream::select(lines_stdout, lines_stderr); + + loop { + match lines.next().await { + Some(Ok(line)) => { + let now = Local::now(); + for manager in self.matching_filters(&line) { + manager.handle_line(&line, now); + } + } + Some(Err(err)) => { + error!( + "impossible to read output from stream {}: {}", + self.stream.name(), + err + ); + return; + } + None => { + return; + } + } + } + } + + fn matching_filters(&self, line: &str) -> BTreeSet<&FilterManager> { + let matches = self.compiled_regex_set.matches(line); + matches + .into_iter() + .map(|match_| &self.regex_index_to_filter_manager[match_]) + .collect() + } } From 283d1867b8f341496452e75333ce776ed2d9c8ea Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 23 Jun 2025 12:00:00 +0200 Subject: [PATCH 029/241] Benchmark: Add real-life configuration file and benchmark wrapper Performance on this real-life configuration: Before last commit: Service runtime: 2min 22.669s CPU time consumed: 3min 44.299s Memory peak: 50.7M (swap: 0B) With last commit: Service runtime: 7.569s CPU time consumed: 21.998s Memory peak: 105.6M (swap: 0B) --- bench/bench.sh | 24 ++++++++ bench/heavy-load.sh | 11 ---- bench/heavy-load.yml | 3 + bench/nginx.yml | 130 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 157 insertions(+), 11 deletions(-) create mode 100755 bench/bench.sh delete mode 100644 bench/heavy-load.sh create mode 100644 bench/nginx.yml diff --git a/bench/bench.sh b/bench/bench.sh new file mode 100755 index 
0000000..59d4894 --- /dev/null +++ b/bench/bench.sh @@ -0,0 +1,24 @@ +set -e + +if test "$(realpath "$PWD")" != "$(realpath "$(dirname "$0")/..")" +then + echo "You must be in reaction root directory" + exit 1 +fi + +if test ! -f "$1" +then + # shellcheck disable=SC2016 + echo '$1 must be a configuration file (most probably in ./bench)' + exit 1 +fi + +rm -f reaction.db +cargo build --release +sudo systemd-run --wait \ + -p User="$(id -nu)" \ + -p MemoryAccounting=yes \ + -p IOAccounting=yes \ + -p WorkingDirectory="$(pwd)" \ + -p Environment=PATH=/run/current-system/sw/bin/ \ + sh -c "for i in 1 2; do ./target/release/reaction start -c '$1' -l ERROR -s ./reaction.sock; done" diff --git a/bench/heavy-load.sh b/bench/heavy-load.sh deleted file mode 100644 index 0abf296..0000000 --- a/bench/heavy-load.sh +++ /dev/null @@ -1,11 +0,0 @@ -set -e - -rm -f reaction.db -cargo build --release -sudo systemd-run --wait \ - -p User="$(id -nu)" \ - -p MemoryAccounting=yes \ - -p IOAccounting=yes \ - -p WorkingDirectory="$(pwd)" \ - -p Environment=PATH=/run/current-system/sw/bin/ \ - sh -c "for i in 1 2; do ./target/release/reaction start -c ./bench/heavy-load.yml -l ERROR -s ./reaction.sock; done" diff --git a/bench/heavy-load.yml b/bench/heavy-load.yml index f6ba1a4..73c7a3e 100644 --- a/bench/heavy-load.yml +++ b/bench/heavy-load.yml @@ -1,6 +1,9 @@ --- # This configuration permits to test reaction's performance # under a very high load +# +# It keeps regexes super simple, to avoid benchmarking the `regex` crate, +# and benchmark reaction's internals instead. 
concurrency: 32 patterns: diff --git a/bench/nginx.yml b/bench/nginx.yml new file mode 100644 index 0000000..a328d88 --- /dev/null +++ b/bench/nginx.yml @@ -0,0 +1,130 @@ +# This is an extract of a real life configuration +# +# It reads an nginx's access.log in the following format: +# log_format '$remote_addr - $remote_user [$time_local] ' +# '$host ' +# '"$request" $status $bytes_sent ' +# '"$http_referer" "$http_user_agent"'; +# +# I can't make my access.log public for obvious privacy reasons. +# +# On the opposite of heavy-load.yml, this test is closer to real-life regex complexity. +# +# It has been created to test the performance improvements of +# the previous commit: ad6b0faa30c1af84360f66074a917b4bf6cda10a +# +# On this test, most lines don't match anything, so most time is spent matching regexes. + +concurrency: 0 +patterns: + ip: + ignore: + - 192.168.1.253 + - 10.1.1.1 + - 10.1.1.5 + - 10.1.1.4 + - 127.0.0.1 + - ::1 + regex: (?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])) + untilEOL: + regex: .*$ +streams: + nginx: + cmd: + - cat + - /tmp/access.log + filters: + directusFailedLogin: + actions: + ban: + cmd: + - sleep + - 0.01 + unban: + after: 4h + cmd: + - sleep + - 0.01 + regex: + - ^ .* "POST 
/repertoire/auth/login HTTP/..." 401 [0-9]+ .https://babos.land + - ^ .* "POST /pompeani.art/auth/login HTTP/..." 401 [0-9]+ .https://edit.ppom.me + - ^ .* "POST /leborddeleau/auth/login HTTP/..." 401 [0-9]+ .https://edit.ppom.me + - ^ .* "POST /5eroue/auth/login HTTP/..." 401 [0-9]+ .https://edit.ppom.me + - ^ .* "POST /edit/auth/login HTTP/..." 401 [0-9]+ .https://edit.ppom.me + - ^ .* "POST /auth/login HTTP/..." 401 [0-9]+ .https://edit.ppom.fr + retry: 6 + retryperiod: 4h + gptbot: + actions: + ban: + cmd: + - sleep + - 0.01 + unban: + after: 4h + cmd: + - sleep + - 0.01 + regex: + - ^.*"[^"]*AI2Bot[^"]*"$ + - ^.*"[^"]*Amazonbot[^"]*"$ + - ^.*"[^"]*Applebot[^"]*"$ + - ^.*"[^"]*Applebot-Extended[^"]*"$ + - ^.*"[^"]*Bytespider[^"]*"$ + - ^.*"[^"]*CCBot[^"]*"$ + - ^.*"[^"]*ChatGPT-User[^"]*"$ + - ^.*"[^"]*ClaudeBot[^"]*"$ + - ^.*"[^"]*Diffbot[^"]*"$ + - ^.*"[^"]*DuckAssistBot[^"]*"$ + - ^.*"[^"]*FacebookBot[^"]*"$ + - ^.*"[^"]*GPTBot[^"]*"$ + - ^.*"[^"]*Google-Extended[^"]*"$ + - ^.*"[^"]*Kangaroo Bot[^"]*"$ + - ^.*"[^"]*Meta-ExternalAgent[^"]*"$ + - ^.*"[^"]*Meta-ExternalFetcher[^"]*"$ + - ^.*"[^"]*OAI-SearchBot[^"]*"$ + - ^.*"[^"]*PerplexityBot[^"]*"$ + - ^.*"[^"]*Timpibot[^"]*"$ + - ^.*"[^"]*Webzio-Extended[^"]*"$ + - ^.*"[^"]*YouBot[^"]*"$ + - ^.*"[^"]*omgili[^"]*"$ + slskd-failedLogin: + actions: + ban: + cmd: + - sleep + - 0.01 + unban: + after: 4h + cmd: + - sleep + - 0.01 + regex: + - ^ .* "POST /slskd/api/v0/session HTTP/..." 401 [0-9]+ .https://ppom.me + - ^ .* "POST /kiosque/api/v0/session HTTP/..." 
401 [0-9]+ .https://babos.land + retry: 3 + retryperiod: 1h + suspectRequests: + actions: + ban: + cmd: + - sleep + - 0.01 + unban: + after: 4h + cmd: + - sleep + - 0.01 + regex: + - ^ .*"GET /(?:[^/" ]*/)*wp-login\.php + - ^ .*"GET /(?:[^/" ]*/)*wp-includes + - '^ .*"GET /(?:[^/" ]*/)*\.env ' + - '^ .*"GET /(?:[^/" ]*/)*config\.json ' + - '^ .*"GET /(?:[^/" ]*/)*info\.php ' + - '^ .*"GET /(?:[^/" ]*/)*owa/auth/logon.aspx ' + - '^ .*"GET /(?:[^/" ]*/)*auth.html ' + - '^ .*"GET /(?:[^/" ]*/)*auth1.html ' + - '^ .*"GET /(?:[^/" ]*/)*password.txt ' + - '^ .*"GET /(?:[^/" ]*/)*passwords.txt ' + - '^ .*"GET /(?:[^/" ]*/)*dns-query ' + - '^ .*"GET /(?:[^/" ]*/)*\.git/ ' From 35862d32faa8ba3aa4646521e5bda357c48b8574 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 24 Jun 2025 12:00:00 +0200 Subject: [PATCH 030/241] Fix trigger command - Force STREAM.FILTER one the command line - Fix typo --- src/cli.rs | 2 +- src/concepts/filter.rs | 4 ++-- src/main.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index c4d1f94..fd00aab 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -94,7 +94,7 @@ Then prints the flushed matches and actions." /// STREAM.FILTER to trigger #[clap(value_name = "STREAM.FILTER")] - limit: Option, + limit: String, /// PATTERNs to trigger on (e.g. 
ip=1.2.3.4) #[clap(value_parser = parse_named_regex, value_name = "NAME=PATTERN")] diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index b338a89..0917286 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -264,7 +264,7 @@ impl Filter { if !pattern.is_match(match_) { return Err(format!( - "string '{}' doesn't match pattern {}", + "'{}' doesn't match pattern {}", match_, pattern.name(), )); @@ -272,7 +272,7 @@ impl Filter { if pattern.is_ignore(match_) { return Err(format!( - "string '{}' is explcitly ignored by pattern {}", + "'{}' is explicitly ignored by pattern {}", match_, pattern.name(), )); diff --git a/src/main.rs b/src/main.rs index 91e55a7..a6e22c6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -67,7 +67,7 @@ async fn main() { socket, limit, patterns, - } => request(socket, Format::JSON, limit, patterns, Order::Trigger).await, + } => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await, SubCommand::TestRegex { config, regex, From 6a778f3d017031a7da28c4b6db420c255a8fc507 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 24 Jun 2025 12:00:00 +0200 Subject: [PATCH 031/241] cargo fmt, cargo clippy --all-targets --- src/concepts/filter.rs | 2 +- src/daemon/filter/state.rs | 4 ++-- src/daemon/filter/tests.rs | 2 +- src/daemon/socket.rs | 2 +- tests/simple.rs | 11 ++--------- 5 files changed, 7 insertions(+), 14 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 0917286..8f13413 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -280,7 +280,7 @@ impl Filter { } for pattern in self.patterns.iter() { - if patterns.get(pattern).is_none() { + if !patterns.contains_key(pattern) { return Err(format!( "pattern {} is missing, because it's in the filter {}.{}", pattern.name(), diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 2883d44..469d2c6 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -86,7 +86,7 @@ impl State { pub fn 
remove_match(&mut self, m: &Match) { if let Some(set) = self.matches.get(m) { for t in set { - self.ordered_times.remove(&t); + self.ordered_times.remove(t); } self.matches.remove(m); } @@ -132,7 +132,7 @@ impl State { // unwrap: we just checked in the condition that first is_some let (t, m) = { let (t, m) = self.ordered_times.first_key_value().unwrap(); - (t.clone(), m.clone()) + (*t, m.clone()) }; self.ordered_times.remove(&t); if let Some(set) = self.matches.get(&m) { diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 464b9d3..cc9947b 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -551,7 +551,7 @@ async fn trigger_matched_pattern() { let mut db = TempDatabase::default().await; db.set_loaded_db(HashMap::from([( - filter_ordered_times_db_name(&filter), + filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), )])); let bed = bed.part2(filter, now, Some(db)).await; diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 29010f5..388d954 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -181,7 +181,7 @@ fn answer_order( // retrieve or Err .find(|(pattern_name, _)| &name == *pattern_name) .ok_or_else(|| format!("pattern '{name}' doesn't exist")) - .and_then(|(_, pattern)| Ok((pattern.clone(), reg))) + .map(|(_, pattern)| (pattern.clone(), reg)) }) .collect::, String>, String>>() { diff --git a/tests/simple.rs b/tests/simple.rs index cee6f53..dcbca3e 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -225,10 +225,7 @@ async fn simple() { env::set_current_dir(&dir).unwrap(); // No thing from stream - config_with_cmd( - config_path, - "sleep 0.1", - ); + config_with_cmd(config_path, "sleep 0.1"); file_with_contents(out_path, ""); @@ -255,9 +252,5 @@ async fn simple() { assert!(trigger.is_ok()); // make sure the trigger number is in the output - assert_eq!( - get_file_content(out_path).trim(), - "95".to_owned() - ); - + 
assert_eq!(get_file_content(out_path).trim(), "95".to_owned()); } From 78056b6fc5e6030d02353fd36799f9231126f4a0 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 24 Jun 2025 12:00:00 +0200 Subject: [PATCH 032/241] src/client/request.rs rename and ARCHITECTURE.md update --- ARCHITECTURE.md | 28 ++++++++++++++---------- src/client/mod.rs | 4 ++-- src/client/{show_flush.rs => request.rs} | 0 3 files changed, 18 insertions(+), 14 deletions(-) rename src/client/{show_flush.rs => request.rs} (100%) diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index b8782ab..f43d2ad 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -8,16 +8,16 @@ Here is a high-level overview of the codebase. - `build.rs`: permits to create shell completions and man pages on build. - `Cargo.toml`, `Cargo.lock`: manifest and dependencies. -- `config`: example / test configuration files. Look at its git history to discover more. +- `config/`: example / test configuration files. Look at its git history to discover more. - `Makefile`: Makefile. Resumes useful commands. -- `packaging`: Files useful for .deb and .tar generation. +- `packaging/`: Files useful for .deb and .tar generation. - `release.py`: Build process for a release. Handles cross-compilation, .tar and .deb generation. ## Main source code -- `helpers_c`: C helpers. I wish to have special IP support in reaction and get rid of them. See #79 and #116. -- `tests`: Integration tests. For now they test basic reaction runtime behavior, persistance, and client-daemon communication. -- `src`: The source code, here we go! +- `helpers_c/`: C helpers. I wish to have special IP support in reaction and get rid of them. See #79 and #116. +- `tests/`: Integration tests. For now they test basic reaction runtime behavior, persistance, and client-daemon communication. +- `src/`: The source code, here we go! ### Top-level files @@ -26,23 +26,24 @@ Here is a high-level overview of the codebase. 
- `src/cli.rs`: Command-line arguments - `src/tests.rs`: Test utilities -### `src/concepts` +### `src/concepts/` reaction really is about its configuration, which is at the center of the code. There is one file for each of its concepts: configuration, streams, filters, actions, patterns. -### `src/protocol` +### `src/protocol/` Low-level serialization/deserialization and client-daemon protocol messages. Shared by the client and daemon's socket. Also used by daemon's database. -### `src/client` +### `src/client/` -Client code: `reaction show`, `reaction flush`, `reaction test-regex`. +Client code: `reaction show`, `reaction flush`, `reaction trigger`, `reaction test-regex`. -- `show_flush.rs`: `show` & `flush` commands. +- `request.rs`: commands requiring client/server communication: `show`, `flush` & `trigger`. +- `test_config.rs`: `test-config` command. - `test_regex.rs`: `test-regex` command. ### `src/daemon` @@ -53,15 +54,18 @@ This code has async code, to handle input streams and communication with clients - `mod.rs`: daemon main function. Initializes all tasks, handles synchronization and quitting, etc. - `stream.rs`: Stream managers: start the stream `cmd` and dispatch its stdout lines to its Filter managers. -- `filter.rs`: Filter managers: handle lines, persistance, store matches and trigger actions. This is the main piece of runtime logic. +- `filter/`: Filter managers: handle lines, persistance, store matches and trigger actions. This is the main piece of runtime logic. + - `mod.rs`: High-level logic + - `state.rs`: Inner state operations - `socket.rs`: The socket task, responsible for communication with clients. +- `shutdown.rs`: Logic for passing shutdown signal across all tasks ### `src/tree` Persistence layer. This is a database highly adapted to reaction workload, making reaction faster than when used with general purpose key-value databases -(heed, sled and fjall crates ahve been tested). +(heed, sled and fjall crates have been tested). 
Its design is explained in the comments of its files: - `mod.rs`: main database code, with its two API structs: Tree and Database. diff --git a/src/client/mod.rs b/src/client/mod.rs index 6db6585..6cd410e 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -1,7 +1,7 @@ -mod show_flush; +mod request; mod test_config; mod test_regex; -pub use show_flush::request; +pub use request::request; pub use test_config::test_config; pub use test_regex::test_regex; diff --git a/src/client/show_flush.rs b/src/client/request.rs similarity index 100% rename from src/client/show_flush.rs rename to src/client/request.rs From 3f3236cafb51385d67c17ab5779dd4d3292d07ae Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 24 Jun 2025 12:00:00 +0200 Subject: [PATCH 033/241] v2.1.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18784da..135870a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -826,7 +826,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.0.1" +version = "2.1.0" dependencies = [ "chrono", "clap", diff --git a/Cargo.toml b/Cargo.toml index 37baee8..1f34b57 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.0.1" +version = "2.1.0" edition = "2021" authors = ["ppom "] license = "AGPL-3.0" From 359957c58ce664d99f4f7ebcf6ca7d7112a82b3b Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 24 Jun 2025 12:00:00 +0200 Subject: [PATCH 034/241] README: Add trigger command --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8d56c4f..d6224dc 100644 --- a/README.md +++ b/README.md @@ -149,8 +149,9 @@ If you don't know where to start reaction, `/var/lib/reaction` should be a sane ### CLI - `reaction start` runs the server -- `reaction show` show pending actions (ie. current bans) +- `reaction show` show pending actions (ie. show current bans) - `reaction flush` permits to run pending actions (ie. 
clear bans) +- `reaction trigger` permits to manually trigger a filter (ie. run custom ban) - `reaction test-regex` permits to test regexes - `reaction test-config` shows loaded configuration - `reaction help` for full usage. From 39bf662296fc9f75bb13f4c3ddccb51f8cbd78c4 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 28 Jun 2025 12:00:00 +0200 Subject: [PATCH 035/241] Fix example configs - Fix comma issues - Fix regex syntax doc - Add ssh regexes --- config/example.jsonnet | 6 ++++-- config/example.yml | 12 +++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/config/example.jsonnet b/config/example.jsonnet index d77d87a..41a6507 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -28,7 +28,7 @@ local banFor(time) = { // when a filter performs an action, it replaces the found pattern patterns: { ip: { - // reaction regex syntax is defined here: https://github.com/google/re2/wiki/Syntax + // reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax // jsonnet's @'string' is for verbatim strings // simple version: regex: @'(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})', regex: @'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))', @@ -82,13 +82,15 @@ 
local banFor(time) = { filters: { // filters have a user-defined name failedlogin: { - // reaction's regex syntax is defined here: https://github.com/google/re2/wiki/Syntax + // reaction's regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax regex: [ // is predefined in the patterns section // ip's regex is inserted in the following regex @'authentication failure;.*rhost=', @'Failed password for .* from ', + @'Invalid user .* from ', @'Connection (reset|closed) by (authenticating|invalid) user .* ', + @'banner exchange: Connection from port [0-9]*: invalid format', ], // if retry and retryperiod are defined, // the actions will only take place if a same pattern is diff --git a/config/example.yml b/config/example.yml index 7bae87a..4bdf39e 100644 --- a/config/example.yml +++ b/config/example.yml @@ -29,7 +29,7 @@ concurrency: 0 # when a filter performs an action, it replaces the found pattern patterns: ip: - # reaction regex syntax is defined here: https://github.com/google/re2/wiki/Syntax + # reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax # simple version: regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})' regex: 
'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))' ignore: @@ -47,8 +47,8 @@ start: # Those commands will be executed in order at stop, after everything else stop: - - [ 'ip46tables', '-w,', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w,', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'ip46tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'ip46tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] - [ 'ip46tables', '-w', '-F', 'reaction' ] - [ 'ip46tables', '-w', '-X', 'reaction' ] @@ -66,13 +66,15 @@ streams: filters: # filters have a user-defined name failedlogin: - # reaction's regex syntax is defined here: https://github.com/google/re2/wiki/Syntax + # reaction's regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax regex: # is predefined in the patterns section # ip's regex is inserted in the following regex - 'authentication failure;.*rhost=' - 'Failed password for .* from ' + - 'Invalid user .* from ' - 'Connection (reset|closed) by (authenticating|invalid) user .* ' + - 'banner exchange: Connection from port [0-9]*: invalid format' # if retry and retryperiod are defined, # the actions will only take place if a same pattern is # found 
`retry` times in a `retryperiod` interval @@ -97,7 +99,7 @@ streams: cmd: *iptablesunban # if after is defined, the action will not take place immediately, but after a specified duration # same format as retryperiod - after: 48h + after: '2 days' # let's say reaction is quitting. does it run all those pending commands which had an `after` duration set? # if you want reaction to run those pending commands before exiting, you can set this: # onexit: true From 5bf67860f4fba3079884e0f37ddd614215b2aae0 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 7 Jul 2025 12:00:00 +0200 Subject: [PATCH 036/241] Fix Filter::regex for StreamManager::compiled_regex_set regexes were pushed multiple times, with pattern names not completely replaced by their corresponding regexes. Now only pushed when pattern replacement is finished. --- src/concepts/filter.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 8f13413..15792ef 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -177,8 +177,8 @@ impl Filter { )); } regex_buf = regex_buf.replacen(pattern.name_with_braces(), &pattern.regex, 1); - new_regex.push(regex_buf.clone()); } + new_regex.push(regex_buf.clone()); let compiled = Regex::new(®ex_buf).map_err(|err| err.to_string())?; self.compiled_regex.push(compiled); first = false; From bba113b6ab17079212dcbbdf85b296941cad4171 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 7 Jul 2025 12:00:00 +0200 Subject: [PATCH 037/241] Remove newline at the end of stream lines Bug introduced by !24 which kept trailing `\n` and fed it to filters. Thus regexes ending with `$` couldn't match anymore. 
Fixes #128 --- src/daemon/stream.rs | 15 ++++++------ tests/notif-no-pattern.jsonnet | 42 ++++++++++++++++++++++++++++++++++ tests/notif.jsonnet | 2 +- 3 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 tests/notif-no-pattern.jsonnet diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index f819009..fdc5253 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -23,11 +23,10 @@ use crate::{ use super::shutdown::ShutdownToken; -/** Converts bytes to string, discarding invalid utf8 sequences -*/ -fn to_string(data: &[u8]) -> String { - let res = String::from_utf8_lossy(data); - res.to_string() +/// Converts bytes to line string, discarding invalid utf8 sequences and newlines at the end +fn to_line(data: &[u8]) -> String { + String::from_utf8_lossy(data) + .trim_end_matches('\n') .replace(std::char::REPLACEMENT_CHARACTER, "") } @@ -46,7 +45,7 @@ fn lines_to_stream( } // Try to read until LF or EOF. If interrupted, buffer might contain data, in which case // new data will be happened to it - let nl = lines.read_until(0x0a, &mut buffer); + let nl = lines.read_until(b'\n', &mut buffer); pin!(nl); match futures::Future::poll(nl, cx) { Poll::Pending => Poll::Pending, @@ -57,13 +56,13 @@ fn lines_to_stream( } else { // reached eof with data in the buffer at_eof = true; - let line = to_string(&buffer); + let line = to_line(&buffer); buffer.clear(); Poll::Ready(Some(Ok(line))) } } Poll::Ready(Ok(_)) => { - let line = to_string(&buffer); + let line = to_line(&buffer); buffer.clear(); Poll::Ready(Some(Ok(line))) } diff --git a/tests/notif-no-pattern.jsonnet b/tests/notif-no-pattern.jsonnet new file mode 100644 index 0000000..df8bec2 --- /dev/null +++ b/tests/notif-no-pattern.jsonnet @@ -0,0 +1,42 @@ +{ + patterns: { + num: { + regex: '[0-9]+', + ignore: ['1'], + // ignoreregex: ['2.?'], + }, + }, + + start: [ + ['echo', 'coucou'], + ], + + stop: [ + ['echo', 'byebye'], + ], + + streams: { + s1: { + cmd: ['sh', '-c', 'seq 20 | while read i; do 
echo found $((i % 5)); sleep 1; done'], + filters: { + f1: { + regex: [ + '^found [0-9]+$', + ], + retry: 4, + retryperiod: '60s', + actions: { + damn: { + cmd: ['notify-send', 'first stream', 'found!'], + }, + undamn: { + cmd: ['notify-send', 'first stream', 'unfound'], + after: '3s', + onexit: true, + }, + }, + }, + }, + }, + }, +} diff --git a/tests/notif.jsonnet b/tests/notif.jsonnet index 6e99367..2e46471 100644 --- a/tests/notif.jsonnet +++ b/tests/notif.jsonnet @@ -22,7 +22,7 @@ streams: { s1: { - cmd: ['sh', '-c', "seq 20 | tr ' ' '\n' | while read i; do echo found $((i % 5)); sleep 1; done"], + cmd: ['sh', '-c', 'seq 20 | while read i; do echo found $((i % 5)); sleep 1; done'], filters: { f1: { regex: [ From 5d9f2ceb6a68871c62734e164f5e89398482b3c1 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 7 Jul 2025 12:00:00 +0200 Subject: [PATCH 038/241] v2.1.1 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 135870a..7340980 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -826,7 +826,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.1.0" +version = "2.1.1" dependencies = [ "chrono", "clap", diff --git a/Cargo.toml b/Cargo.toml index 1f34b57..f620943 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.1.0" +version = "2.1.1" edition = "2021" authors = ["ppom "] license = "AGPL-3.0" From 28f136f491e9ce18eb8493e12c8e73d257d5d17d Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 8 Jul 2025 12:00:00 +0200 Subject: [PATCH 039/241] README update project status update (rust rewrite ok) contributing: separate ideas & code --- README.md | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index d6224dc..9a9a704 100644 --- a/README.md +++ b/README.md @@ -4,24 +4,24 @@ A daemon that scans program outputs for repeated patterns, and takes action. 
A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors. -🚧 This program hasn't received external audit. however, it already works well on my servers 🚧 +🚧 This program hasn't received external security audit. However, it already works well on my servers 🚧 -## Current project status +## Rust rewrite -reaction just reached v2.0.0-rc2 version, which is a complete rust rewrite of reaction. -It's in feature parity with the Go version, and breaking changes should be small. +reaction v2.x is a complete Rust rewrite of reaction. +It's in feature parity with the Go version, v1.x, which is now deprecated. -See https://reaction.ppom.me/migrate-to-v2.html +See https://blog.ppom.me/en-reaction-v2. ## Rationale -I was using the honorable fail2ban since quite a long time, but i was a bit frustrated by its cpu consumption +I was using the honorable fail2ban since quite a long time, but i was a bit frustrated by its CPU consumption and all its heavy default configuration. In my view, a security-oriented program should be simple to configure and an always-running daemon should be implemented in a fast*er* language. -reaction does not have all the features of the honorable fail2ban, but it's ~10x faster and has more manageable configuration. +reaction does not have all the features of the honorable fail2ban, but it's more than 10x faster and has more manageable configuration. [📽️ quick french name explanation 😉](https://u.ppom.me/reaction.webm) @@ -219,7 +219,16 @@ make install_systemd - [NGI's Diversity and Inclusion Guide](https://nlnet.nl/NGI0/bestpractices/DiversityAndInclusionGuide-v4.pdf) I'll do my best to maintain a safe contribution place, as free as possible from discrimination and elitism. + +### Ideas + +Please take a look at issues which have the "Opinion Welcome 👀" label! +*Your opinion is welcome.* + Your ideas are welcome in the issues. + +### Code + Contributions are welcome. 
For any substantial feature, please file an issue first, to be assured that we agree on the feature, and to avoid unnecessary work. From e0609e3c3ea51c56e797e5d45f6e54a6c6cc461a Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 8 Jul 2025 12:00:00 +0200 Subject: [PATCH 040/241] Move rewrite section --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 9a9a704..1c4fbae 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,6 @@ A common usage is to scan ssh and webserver logs, and to ban hosts that cause mu 🚧 This program hasn't received external security audit. However, it already works well on my servers 🚧 -## Rust rewrite - -reaction v2.x is a complete Rust rewrite of reaction. -It's in feature parity with the Go version, v1.x, which is now deprecated. - -See https://blog.ppom.me/en-reaction-v2. - ## Rationale I was using the honorable fail2ban since quite a long time, but i was a bit frustrated by its CPU consumption @@ -28,6 +21,13 @@ reaction does not have all the features of the honorable fail2ban, but it's more [🇬🇧 in-depth blog article](https://blog.ppom.me/en-reaction) / [🇫🇷 french version](https://blog.ppom.me/fr-reaction) +## Rust rewrite + +reaction v2.x is a complete Rust rewrite of reaction. +It's in feature parity with the Go version, v1.x, which is now deprecated. + +See https://blog.ppom.me/en-reaction-v2. + ## Configuration YAML and [JSONnet](https://jsonnet.org/) (more powerful) are supported. 
From d880f7338b16c423d9be940e00f2dd4194c30caf Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 11 Jul 2025 12:00:00 +0200 Subject: [PATCH 041/241] Get rid of low-level async with Poll use futures::stream::try_unfold to create a Stream from an async closure --- src/daemon/stream.rs | 62 ++++++++++++++------------------------------ 1 file changed, 20 insertions(+), 42 deletions(-) diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index fdc5253..3737177 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -1,16 +1,14 @@ use std::{ collections::{BTreeMap, BTreeSet, HashMap}, process::Stdio, - task::Poll, time::Duration, }; use chrono::Local; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, Stream as AsyncStream, StreamExt}; use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, - pin, process::{Child, ChildStderr, ChildStdout, Command}, time::sleep, }; @@ -30,45 +28,24 @@ fn to_line(data: &[u8]) -> String { .replace(std::char::REPLACEMENT_CHARACTER, "") } -#[allow(clippy::type_complexity)] -fn lines_to_stream( - mut lines: BufReader, -) -> futures::stream::PollFn< - impl FnMut(&mut std::task::Context) -> Poll>>, -> { - let mut at_eof = false; - let mut buffer = vec![]; - futures::stream::poll_fn(move |cx| { - if at_eof { - // reached EOF earlier, avoid calling read again - return Poll::Ready(None); - } - // Try to read until LF or EOF. 
If interrupted, buffer might contain data, in which case - // new data will be happened to it - let nl = lines.read_until(b'\n', &mut buffer); - pin!(nl); - match futures::Future::poll(nl, cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(Ok(0)) => { - if buffer.is_empty() { - // at eof - Poll::Ready(None) - } else { - // reached eof with data in the buffer - at_eof = true; - let line = to_line(&buffer); - buffer.clear(); - Poll::Ready(Some(Ok(line))) - } - } - Poll::Ready(Ok(_)) => { +fn reader_to_stream( + reader: impl tokio::io::AsyncRead + Unpin, +) -> impl AsyncStream> { + let buf_reader = BufReader::new(reader); + let buffer = vec![]; + futures::stream::try_unfold( + (buf_reader, buffer), + |(mut buf_reader, mut buffer)| async move { + let nl = buf_reader.read_until(b'\n', &mut buffer).await?; + if nl > 0 { let line = to_line(&buffer); buffer.clear(); - Poll::Ready(Some(Ok(line))) + Ok(Some((line, (buf_reader, buffer)))) + } else { + Ok(None) } - Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))), - } - }) + }, + ) } pub struct StreamManager { @@ -189,10 +166,11 @@ impl StreamManager { } async fn handle_io(&self, child_stdout: ChildStdout, child_stderr: ChildStderr) { - let lines_stdout = lines_to_stream(BufReader::new(child_stdout)); - let lines_stderr = lines_to_stream(BufReader::new(child_stderr)); + let lines_stdout = reader_to_stream(child_stdout); + let lines_stderr = reader_to_stream(child_stderr); // aggregate outputs, will end when both streams end - let mut lines = futures::stream::select(lines_stdout, lines_stderr); + let lines = futures::stream::select(lines_stdout, lines_stderr); + tokio::pin!(lines); loop { match lines.next().await { From fd0dc918244865d1fecfe1882948bfe5f2a24c6d Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 11 Jul 2025 12:00:00 +0200 Subject: [PATCH 042/241] Get rid of useless Buffer wrapper for Vec Write is already implemented on Vec --- src/treedb/raw.rs | 55 +++++------------------------------------------ 1 file changed, 5 
insertions(+), 50 deletions(-) diff --git a/src/treedb/raw.rs b/src/treedb/raw.rs index 587497a..ded00b2 100644 --- a/src/treedb/raw.rs +++ b/src/treedb/raw.rs @@ -1,7 +1,4 @@ -use std::{ - collections::HashMap, - io::{Error as IoError, Write}, -}; +use std::{collections::HashMap, io::Error as IoError}; use chrono::{Local, TimeZone}; use serde::{Deserialize, Serialize}; @@ -68,7 +65,7 @@ pub struct WriteDB { file: BufWriter, names: HashMap, next_id: u64, - buffer: Buffer, + buffer: Vec, } impl WriteDB { @@ -80,7 +77,7 @@ impl WriteDB { // names: HashMap::from([(DB_TREE_NAME.into(), DB_TREE_ID)]), names: HashMap::default(), next_id: 1, - buffer: Buffer::new(), + buffer: Vec::default(), } } @@ -121,11 +118,8 @@ impl WriteDB { async fn _write_entry(&mut self, raw_entry: &WriteEntry<'_>) -> Result { self.buffer.clear(); serde_json::to_writer(&mut self.buffer, &raw_entry)?; - self.buffer.push("\n".as_bytes()); - self.file - .write(self.buffer.as_ref()) - .await - .map_err(|err| err.into()) + self.buffer.push(b'\n'); + Ok(self.file.write(self.buffer.as_ref()).await?) } /// Flushes the inner [`tokio::io::BufWriter`] @@ -252,45 +246,6 @@ impl ReadDB { } } -/// This [`String`] buffer implements [`Write`] to permit allocation reuse. -/// Using [`serde_json::to_string`] allocates for every entry. -/// This Buffer permits to use [`serde_json::to_writer`] instead. 
-struct Buffer { - b: Vec, -} - -impl AsRef> for Buffer { - fn as_ref(&self) -> &Vec { - &self.b - } -} - -impl Buffer { - fn new() -> Self { - Buffer { b: Vec::new() } - } - - /// Truncates the buffer without touching its capacity - fn clear(&mut self) { - self.b.clear() - } - - fn push(&mut self, buf: &[u8]) { - self.b.extend_from_slice(buf); - } -} - -impl Write for Buffer { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - self.push(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } -} - #[cfg(test)] mod tests { use std::collections::HashMap; From b62f085e5100ff24a202cf1af0894b451e128923 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 14 Jul 2025 12:00:00 +0200 Subject: [PATCH 043/241] Fix trigger persistance Triggers were only persisted for retry duration, instead of longuest action duration. As retry is often shorter than after, this would make reaction forget most triggers on restart. entry_timeout is now set to longuest_action_duration. Cherry picked from the duplicate branch. 
--- src/daemon/filter/state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 469d2c6..5cb98ce 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -58,7 +58,7 @@ impl State { )?, triggers: db.open_tree( filter_triggers_db_name(filter), - filter.retry_duration().unwrap_or_default(), + filter.longuest_action_duration(), |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), )?, }; From 4ddaf6c195348f8b63877fd5ec2dafacf6026e63 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 14 Jul 2025 12:00:00 +0200 Subject: [PATCH 044/241] v2.1.2 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7340980..cb1fd54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -826,7 +826,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.1.1" +version = "2.1.2" dependencies = [ "chrono", "clap", diff --git a/Cargo.toml b/Cargo.toml index f620943..a97181a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.1.1" +version = "2.1.2" edition = "2021" authors = ["ppom "] license = "AGPL-3.0" From 881fc76bf9eefc73c8a5394d72b070a5b24c0492 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 28 Jun 2025 12:00:00 +0200 Subject: [PATCH 045/241] WIP duplicates - new duplicate option - change triggers Tree structure to keep O(log(n)) querying: now we need to know if a match already has a trigger. 
- triggers migration - triggers adaptations in State & FilterManager --- config/example.yml | 2 + src/concepts/filter.rs | 18 +++++++ src/concepts/mod.rs | 2 +- src/daemon/filter/mod.rs | 41 ++++++++++----- src/daemon/filter/state.rs | 104 +++++++++++++++++++++++++++++-------- src/treedb/helpers.rs | 23 +++++--- src/treedb/mod.rs | 8 ++- 7 files changed, 151 insertions(+), 47 deletions(-) diff --git a/config/example.yml b/config/example.yml index 4bdf39e..74c0cde 100644 --- a/config/example.yml +++ b/config/example.yml @@ -89,6 +89,8 @@ streams: # - h / hour / hours # - d / day / days retryperiod: 6h + # duplicates! + duplicate: rerun # actions are run by the filter when regexes are matched actions: # actions have a user-defined name diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 15792ef..7f87f66 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -14,6 +14,17 @@ use tracing::info; use super::parse_duration; use super::{Action, Match, Pattern, Patterns}; +#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)] +pub enum Duplicate { + #[default] + #[serde(rename = "extend")] + Extend, + #[serde(rename = "ignore")] + Ignore, + #[serde(rename = "rerun")] + Rerun, +} + // Only names are serialized // Only computed fields are not deserialized #[derive(Clone, Debug, Default, Deserialize, Serialize)] @@ -37,6 +48,9 @@ pub struct Filter { #[serde(skip)] retry_duration: Option, + #[serde(default)] + duplicate: Duplicate, + actions: BTreeMap, #[serde(skip)] @@ -93,6 +107,10 @@ impl Filter { &self.regex } + pub fn duplicate(&self) -> Duplicate { + self.duplicate + } + pub fn actions(&self) -> &BTreeMap { &self.actions } diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index a163286..b4a785e 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -7,7 +7,7 @@ mod stream; pub use action::Action; pub use config::{Config, Patterns}; -pub use filter::Filter; +pub use filter::{Duplicate, Filter}; use 
parse_duration::parse_duration; pub use pattern::Pattern; use serde::{Deserialize, Serialize}; diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 918bd7d..117bc30 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -14,7 +14,7 @@ use tokio::sync::Semaphore; use tracing::{error, info}; use crate::{ - concepts::{Action, Filter, Match, Pattern, Time}, + concepts::{Action, Duplicate, Filter, Match, Pattern, Time}, protocol::{Order, PatternStatus}, treedb::Database, }; @@ -89,7 +89,11 @@ impl FilterManager { let mut state = self.state.lock().unwrap(); state.clear_past_matches(now); - let exec = match self.filter.retry() { + if let Duplicate::Ignore = self.filter.duplicate() { + if state.triggers.contains_key(&m) {} + } + + let trigger = match self.filter.retry() { None => true, Some(retry) => { state.add_match(m.clone(), now); @@ -98,6 +102,11 @@ impl FilterManager { } }; + let exec = match self.filter.duplicate() { + Duplicate::Rerun => true, + Duplicate::Extend | Duplicate::Ignore => false, + }; + if exec { state.remove_match(&m); state.add_trigger(m.clone(), now); @@ -179,12 +188,12 @@ impl FilterManager { .triggers .keys() // match filtering - .filter(|match_| is_match(&match_.m)) + .filter(|match_| is_match(&match_)) // clone necessary to drop all references to State .cloned() .collect::>(); - for mt in cloned_triggers.into_iter() { + for m in cloned_triggers.into_iter() { // mutable State required here // Remove the match from the triggers if let Order::Flush = order { @@ -276,7 +285,7 @@ impl FilterManager { .values() // On startup, skip oneshot actions .filter(|action| !action.oneshot()) - .count(); + .count() as u64; #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = self.state.lock().unwrap(); @@ -284,17 +293,23 @@ impl FilterManager { let cloned_triggers = state .triggers .iter() - .map(|(k, v)| (k.clone(), *v)) + .map(|(k, v)| (k.clone(), v.clone())) .collect::>(); - for (mt, remaining) in 
cloned_triggers.into_iter() { - if remaining > 0 && mt.t + longuest_action_duration > now { - // Insert back the upcoming times - state.triggers.insert(mt.clone(), number_of_actions as u64); - // Schedule the upcoming times - self.schedule_exec(mt.m, mt.t, now, &mut state, true); + for (m, map) in cloned_triggers.into_iter() { + let mut new_map = BTreeMap::default(); + for (t, remaining) in map.into_iter() { + if remaining > 0 && t + longuest_action_duration > now { + // Insert back the upcoming times + new_map.insert(t, number_of_actions); + // Schedule the upcoming times + self.schedule_exec(m.clone(), t, now, &mut state, true); + } + } + if new_map.is_empty() { + state.triggers.remove(&m); } else { - state.triggers.remove(&mt); + state.triggers.insert(m, new_map); } } } diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 5cb98ce..f72b9c0 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -3,7 +3,7 @@ use std::collections::{BTreeMap, BTreeSet}; use crate::{ concepts::{Filter, Match, MatchTime, Time}, treedb::{ - helpers::{to_match, to_matchtime, to_time, to_u64}, + helpers::{to_match, to_matchtime, to_time, to_timemap, to_u64}, Database, Tree, }, }; @@ -16,10 +16,18 @@ pub fn filter_ordered_times_db_name(filter: &Filter) -> String { ) } -pub fn filter_triggers_db_name(filter: &Filter) -> String { +pub fn filter_triggers_old_db_name(filter: &Filter) -> String { format!("filter_triggers_{}.{}", filter.stream_name(), filter.name()) } +pub fn filter_triggers_db_name(filter: &Filter) -> String { + format!( + "filter_triggers2_{}.{}", + filter.stream_name(), + filter.name() + ) +} + /// Internal state of a [`FilterManager`]. /// Holds all data on current matches and triggers. 
pub struct State { @@ -37,7 +45,7 @@ pub struct State { pub ordered_times: Tree, /// Saves all the current Triggers for this Filter /// Persisted - pub triggers: Tree, + pub triggers: Tree>, } impl State { @@ -47,20 +55,40 @@ impl State { db: &mut Database, now: Time, ) -> Result { + let ordered_times = db.open_tree( + filter_ordered_times_db_name(filter), + filter.retry_duration().unwrap_or_default(), + |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), + )?; + let mut triggers = db.open_tree( + filter_triggers_db_name(filter), + filter.retry_duration().unwrap_or_default(), + |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)), + )?; + if triggers.is_empty() { + let old_triggers = db.open_tree( + filter_triggers_old_db_name(filter), + filter.retry_duration().unwrap_or_default(), + |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), + )?; + for (mt, n) in old_triggers.iter() { + triggers.fetch_update(mt.m.clone(), |map| { + Some(match map { + None => [(mt.t, *n)].into(), + Some(mut map) => { + map.insert(mt.t, *n); + map + } + }) + }); + } + } let mut this = Self { filter, has_after, matches: BTreeMap::new(), - ordered_times: db.open_tree( - filter_ordered_times_db_name(filter), - filter.retry_duration().unwrap_or_default(), - |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), - )?, - triggers: db.open_tree( - filter_triggers_db_name(filter), - filter.longuest_action_duration(), - |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), - )?, + ordered_times, + triggers, }; this.clear_past_matches(now); this.load_matches_from_ordered_times(); @@ -77,8 +105,16 @@ impl State { // We record triggered filters only when there is an action with an `after` directive if self.has_after { // Add the (Match, Time) to the triggers map - self.triggers - .insert(MatchTime { m, t }, self.filter.actions().len() as u64); + let n = self.filter.actions().len() as u64; + self.triggers.fetch_update(m, |map| { + Some(match map { + None => [(t, n)].into(), + 
Some(mut value) => { + value.insert(t, n); + value + } + }) + }); } } @@ -94,9 +130,19 @@ impl State { /// Completely remove a Match from the triggers pub fn remove_trigger(&mut self, m: &Match, t: &Time) { - self.triggers.remove(&MatchTime { - m: m.clone(), - t: *t, + // self.triggers.remove(&MatchTime { + // m: m.clone(), + // t: *t, + // }); + self.triggers.fetch_update(m.clone(), |map| { + map.and_then(|mut map| { + map.remove(t); + if map.is_empty() { + None + } else { + Some(map) + } + }) }); } @@ -106,13 +152,27 @@ impl State { if self.has_after { let mut exec_needed = false; let mt = MatchTime { m: m.clone(), t }; - let count = self.triggers.get(&mt); + let count = self.triggers.get(&mt.m).and_then(|map| map.get(&mt.t)).cloned(); if let Some(count) = count { exec_needed = true; - if *count <= 1 { - self.triggers.remove(&mt); + if count <= 1 { + self.triggers.fetch_update(mt.m, |map| { + map.and_then(|mut map| { + map.remove(&mt.t); + if map.is_empty() { + None + } else { + Some(map) + } + }) + }); } else { - self.triggers.insert(mt, count - 1); + self.triggers.fetch_update(mt.m, |map| { + map.and_then(|mut map| { + map.insert(mt.t, count - 1); + Some(map) + }) + }); } } exec_needed diff --git a/src/treedb/helpers.rs b/src/treedb/helpers.rs index 7833957..ae4db1d 100644 --- a/src/treedb/helpers.rs +++ b/src/treedb/helpers.rs @@ -1,4 +1,4 @@ -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; use chrono::{DateTime, Local}; use serde_json::Value; @@ -15,13 +15,15 @@ pub fn to_u64(val: &Value) -> Result { val.as_u64().ok_or("not a u64".into()) } +fn string_to_time(val: &str) -> Result { + Ok(DateTime::parse_from_rfc3339(val) + .map_err(|err| err.to_string())? + .with_timezone(&Local)) +} + /// Tries to convert a [`Value`] into a [`Time`] pub fn to_time(val: &Value) -> Result { - Ok( - DateTime::parse_from_rfc3339(val.as_str().ok_or("not a number")?) - .map_err(|err| err.to_string())? 
- .with_timezone(&Local), - ) + Ok(string_to_time(val.as_str().ok_or("not a number")?)?) } /// Tries to convert a [`Value`] into a [`Match`] @@ -51,6 +53,15 @@ pub fn to_timeset(val: &Value) -> Result, String> { .collect() } +/// Tries to convert a [`Value`] into a [`BTreeMap`] +pub fn to_timemap(val: &Value) -> Result, String> { + val.as_object() + .ok_or("not a map")? + .iter() + .map(|(key, value)| Ok((string_to_time(key)?, to_u64(value)?))) + .collect() +} + #[cfg(test)] mod tests { use std::collections::BTreeMap; diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index 8809169..f012cf4 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -359,16 +359,14 @@ impl Tree { /// Returning None removes the item if it existed before. /// Asynchronously persisted. /// *API design borrowed from [`fjall::WriteTransaction::fetch_update`].* - pub fn fetch_update) -> Option>( + pub fn fetch_update) -> Option>( &mut self, key: K, mut f: F, ) -> Option { - let old_value = self.get(&key); + let old_value = self.remove(&key); let new_value = f(old_value); - if old_value != new_value.as_ref() { - self.log(&key, new_value.as_ref()); - } + self.log(&key, new_value.as_ref()); if let Some(new_value) = new_value { self.tree.insert(key, new_value) } else { From 2cebb733b54025a8ba0bb51e4dc3b7b2867cb7ef Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 28 Jun 2025 12:00:00 +0200 Subject: [PATCH 046/241] WIP duplicates - change remove_trigger to remove all triggers for a Match - schedule_exec will take only_after boolean --- TODO | 9 ++++++ src/concepts/filter.rs | 2 +- src/daemon/filter/mod.rs | 62 +++++++++++++++++++++----------------- src/daemon/filter/state.rs | 23 +++++--------- 4 files changed, 51 insertions(+), 45 deletions(-) create mode 100644 TODO diff --git a/TODO b/TODO new file mode 100644 index 0000000..10bc482 --- /dev/null +++ b/TODO @@ -0,0 +1,9 @@ +Test what happens when a Filter pattern set changes (I think it's shitty) + +fix trigger only after when extend +move 
match logging from concepts/filter to daemon/filter + +test new treedb::helpers +test different duplicates modes +test migration +document new option diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 7f87f66..d79be4f 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -14,7 +14,7 @@ use tracing::info; use super::parse_duration; use super::{Action, Match, Pattern, Patterns}; -#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum Duplicate { #[default] #[serde(rename = "extend")] diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 117bc30..420d692 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -89,8 +89,11 @@ impl FilterManager { let mut state = self.state.lock().unwrap(); state.clear_past_matches(now); - if let Duplicate::Ignore = self.filter.duplicate() { - if state.triggers.contains_key(&m) {} + let already_triggered = state.triggers.contains_key(&m); + + // if duplicate: ignore and already triggered, skip + if already_triggered && Duplicate::Ignore == self.filter.duplicate() { + return false; } let trigger = match self.filter.retry() { @@ -102,18 +105,19 @@ impl FilterManager { } }; - let exec = match self.filter.duplicate() { - Duplicate::Rerun => true, - Duplicate::Extend | Duplicate::Ignore => false, - }; - - if exec { + if trigger { state.remove_match(&m); state.add_trigger(m.clone(), now); - self.schedule_exec(m, now, now, &mut state, false); + if already_triggered && Duplicate::Extend == self.filter.duplicate() { + state.remove_trigger(&m); + // TODO only schedule after actions + self.schedule_exec(m, now, now, &mut state, false, only_after); + } else { + self.schedule_exec(m, now, now, &mut state, false); + } } - exec + trigger } pub fn handle_trigger( @@ -194,29 +198,31 @@ impl FilterManager { .collect::>(); for m in cloned_triggers.into_iter() { - // mutable State required here - // 
Remove the match from the triggers + let map = state.triggers.get(&m).unwrap().clone(); + if let Order::Flush = order { - // delete specific (Match, Time) tuple - state.remove_trigger(&mt.m, &mt.t); + state.remove_trigger(&m); } - let m = mt.m.clone(); - let pattern_status = cs.entry(m).or_default(); + for (t, remaining) in map { + if remaining > 0 { + let pattern_status = cs.entry(m.clone()).or_default(); - for action in self.filter.actions().values() { - let action_time = mt.t + action.after_duration().unwrap_or_default(); - if action_time > now { - // Insert action - pattern_status - .actions - .entry(action.name().into()) - .or_default() - .push(action_time.to_rfc3339().chars().take(19).collect()); + for action in self.filter.actions().values() { + let action_time = t + action.after_duration().unwrap_or_default(); + if action_time > now { + // Insert action + pattern_status + .actions + .entry(action.name().into()) + .or_default() + .push(action_time.to_rfc3339().chars().take(19).collect()); - // Execute the action early - if let Order::Flush = order { - exec_now(&self.exec_limit, action, mt.m.clone()); + // Execute the action early + if let Order::Flush = order { + exec_now(&self.exec_limit, action, m.clone()); + } + } } } } diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index f72b9c0..c26b6d8 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -129,21 +129,8 @@ impl State { } /// Completely remove a Match from the triggers - pub fn remove_trigger(&mut self, m: &Match, t: &Time) { - // self.triggers.remove(&MatchTime { - // m: m.clone(), - // t: *t, - // }); - self.triggers.fetch_update(m.clone(), |map| { - map.and_then(|mut map| { - map.remove(t); - if map.is_empty() { - None - } else { - Some(map) - } - }) - }); + pub fn remove_trigger(&mut self, m: &Match) { + self.triggers.remove(m); } /// Returns whether we should still execute an action for this (Match, Time) trigger @@ -152,7 +139,11 @@ impl State { if 
self.has_after { let mut exec_needed = false; let mt = MatchTime { m: m.clone(), t }; - let count = self.triggers.get(&mt.m).and_then(|map| map.get(&mt.t)).cloned(); + let count = self + .triggers + .get(&mt.m) + .and_then(|map| map.get(&mt.t)) + .cloned(); if let Some(count) = count { exec_needed = true; if count <= 1 { From 22384a2cb47d323e8ef80860d48d873154a2552b Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 7 Jul 2025 12:00:00 +0200 Subject: [PATCH 047/241] rename React::Exec to React::Trigger --- src/daemon/filter/mod.rs | 8 ++++++-- src/daemon/filter/tests.rs | 8 ++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 420d692..5e629d5 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -41,11 +41,15 @@ pub struct FilterManager { state: Arc>, } +/// The react to a line handling. #[derive(Debug, PartialEq, Eq)] pub enum React { + /// This line doesn't match NoMatch, + /// This line matches, but no execution is triggered Match, - Exec, + /// This line matches, and an execution is triggered + Trigger, } #[allow(clippy::unwrap_used)] @@ -75,7 +79,7 @@ impl FilterManager { pub fn handle_line(&self, line: &str, now: Time) -> React { if let Some(match_) = self.filter.get_match(line) { if self.handle_match(match_, now) { - React::Exec + React::Trigger } else { React::Match } diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index cc9947b..077c64c 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -160,7 +160,7 @@ async fn three_matches_then_action_then_delayed_action() { // Third match, exec let _block = bed.semaphore.acquire().await.unwrap(); - assert_eq!(bed.manager.handle_line("test one", now2s), React::Exec); + assert_eq!(bed.manager.handle_line("test one", now2s), React::Trigger); { let state = bed.manager.state.lock().unwrap(); assert!( @@ -251,7 +251,7 @@ async fn one_match_one_action() { bed.assert_empty_trees(); // match - 
assert_eq!(bed.manager.handle_line("test one", now), React::Exec); + assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); bed.assert_empty_trees(); // the action executes @@ -295,7 +295,7 @@ async fn one_match_one_delayed_action() { // Match let one = vec!["one".to_string()]; - assert_eq!(bed.manager.handle_line("test one", now), React::Exec); + assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); { let state = bed.manager.state.lock().unwrap(); assert!(state.matches.is_empty(), "matches stay empty"); @@ -385,7 +385,7 @@ async fn one_db_match_one_runtime_match_one_action() { } // match - assert_eq!(bed.manager.handle_line("test one", now), React::Exec); + assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); bed.assert_empty_trees(); // the action executes tokio::time::sleep(Duration::from_millis(40)).await; From d9842c23407ec7ee53ff851c8d3434f7d08ecb68 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 7 Jul 2025 12:00:00 +0200 Subject: [PATCH 048/241] Duplicate::Extend: Re-Trigger only after actions - implement schedule_exec's only_after --- TODO | 3 +-- src/daemon/filter/mod.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/TODO b/TODO index 10bc482..80e7509 100644 --- a/TODO +++ b/TODO @@ -1,6 +1,5 @@ -Test what happens when a Filter pattern set changes (I think it's shitty) +Test what happens when a Filter's pattern Set changes (I think it's shitty) -fix trigger only after when extend move match logging from concepts/filter to daemon/filter test new treedb::helpers diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 5e629d5..07991b5 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -114,10 +114,9 @@ impl FilterManager { state.add_trigger(m.clone(), now); if already_triggered && Duplicate::Extend == self.filter.duplicate() { state.remove_trigger(&m); - // TODO only schedule after actions - self.schedule_exec(m, now, now, &mut state, false, 
only_after); + self.schedule_exec(m, now, now, &mut state, false, true); } else { - self.schedule_exec(m, now, now, &mut state, false); + self.schedule_exec(m, now, now, &mut state, false, false); } } @@ -135,7 +134,7 @@ impl FilterManager { let mut state = self.state.lock().unwrap(); state.remove_match(&match_); state.add_trigger(match_.clone(), now); - self.schedule_exec(match_, now, now, &mut state, false); + self.schedule_exec(match_, now, now, &mut state, false, false); Ok(()) } @@ -245,6 +244,7 @@ impl FilterManager { now: Time, state: &mut MutexGuard, startup: bool, + only_after: bool, ) { for action in self .filter @@ -252,6 +252,8 @@ impl FilterManager { .values() // On startup, skip oneshot actions .filter(|action| !startup || !action.oneshot()) + // If only_after, keep only after actions + .filter(|action| !only_after || action.after_duration().is_some()) { let exec_time = t + action.after_duration().unwrap_or_default(); let m = m.clone(); @@ -313,7 +315,7 @@ impl FilterManager { // Insert back the upcoming times new_map.insert(t, number_of_actions); // Schedule the upcoming times - self.schedule_exec(m.clone(), t, now, &mut state, true); + self.schedule_exec(m.clone(), t, now, &mut state, true, false); } } if new_map.is_empty() { From 270a1a9bdffa051efefd754c1ad0fad062d6a060 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 10 Jul 2025 12:00:00 +0200 Subject: [PATCH 049/241] Duplicate: Fix tests, more tests --- src/daemon/filter/tests.rs | 42 ++++++-------------------------------- src/treedb/helpers.rs | 35 ++++++++++++++++++++++++++++++- tests/simple.rs | 1 + 3 files changed, 41 insertions(+), 37 deletions(-) diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 077c64c..44a4a02 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -11,7 +11,7 @@ use tokio::sync::Semaphore; use super::{state::filter_ordered_times_db_name, FilterManager, React}; use crate::{ - concepts::{Action, Filter, MatchTime, Pattern, 
Patterns, Time}, + concepts::{Action, Filter, Pattern, Patterns, Time}, daemon::shutdown::ShutdownController, tests::TempDatabase, }; @@ -173,13 +173,7 @@ async fn three_matches_then_action_then_delayed_action() { ); assert_eq!( state.triggers.tree(), - &BTreeMap::from([( - MatchTime { - m: one.clone(), - t: now2s - }, - 1 - )]), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]), // 1 and not 2 because the decrement_trigger() doesn't wait for the semaphore "triggers now contain the triggered match with 1 action left" ); @@ -191,13 +185,7 @@ async fn three_matches_then_action_then_delayed_action() { // Check first action assert_eq!( bed.manager.state.lock().unwrap().triggers.tree(), - &BTreeMap::from([( - MatchTime { - m: one.clone(), - t: now2s - }, - 1 - )]), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]), "triggers still contain the triggered match with 1 action left" ); assert_eq!( @@ -302,13 +290,7 @@ async fn one_match_one_delayed_action() { assert!(state.ordered_times.is_empty(), "ordered_times stay empty"); assert_eq!( state.triggers.tree(), - &BTreeMap::from([( - MatchTime { - m: one.clone(), - t: now, - }, - 1 - )]), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), "triggers still contain the triggered match with 1 action left" ); } @@ -497,13 +479,7 @@ async fn trigger_unmatched_pattern() { assert!(state.ordered_times.is_empty()); assert_eq!( state.triggers.tree(), - &BTreeMap::from([( - MatchTime { - m: one.clone(), - t: now, - }, - 1 - )]) + &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), ); } assert_eq!( @@ -578,13 +554,7 @@ async fn trigger_matched_pattern() { assert!(state.ordered_times.is_empty()); assert_eq!( state.triggers.tree(), - &BTreeMap::from([( - MatchTime { - m: one.clone(), - t: now, - }, - 1 - )]) + &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), ); } assert_eq!( diff --git a/src/treedb/helpers.rs b/src/treedb/helpers.rs index ae4db1d..f7afbeb 100644 --- 
a/src/treedb/helpers.rs +++ b/src/treedb/helpers.rs @@ -23,7 +23,7 @@ fn string_to_time(val: &str) -> Result { /// Tries to convert a [`Value`] into a [`Time`] pub fn to_time(val: &Value) -> Result { - Ok(string_to_time(val.as_str().ok_or("not a number")?)?) + Ok(string_to_time(val.as_str().ok_or("not a datetime")?)?) } /// Tries to convert a [`Value`] into a [`Match`] @@ -200,4 +200,37 @@ mod tests { assert!(to_timeset(&(8.into())).is_err()); assert!(to_timeset(&(None::.into())).is_err()); } + + #[test] + fn test_to_timemap() { + let time1 = "2025-07-10T12:35:00.000+02:00"; + let time1_t = DateTime::parse_from_rfc3339(time1) + .unwrap() + .with_timezone(&Local); + let time2 = "2026-08-11T12:36:01.000+02:00"; + let time2_t = DateTime::parse_from_rfc3339(time2) + .unwrap() + .with_timezone(&Local); + + assert_eq!( + to_timemap(&Value::from_iter([(time2, 1)])), + Ok(BTreeMap::from([(time2_t, 1)])) + ); + assert_eq!( + to_timemap(&Value::from_iter([(time1, 4), (time2, 0)])), + Ok(BTreeMap::from([(time1_t, 4), (time2_t, 0)])) + ); + + assert!(to_timemap(&Value::from_iter([("1", time2)])).is_err()); + assert!(to_timemap(&Value::from_iter([(time2, time2)])).is_err()); + assert!(to_timemap(&Value::from_iter([(time2)])).is_err()); + assert!(to_timemap(&Value::from_iter([(1)])).is_err()); + + assert!(to_timemap(&(["1970-01-01T01:20:34.567+01:00"].into())).is_err()); + assert!(to_timemap(&([""].into())).is_err()); + assert!(to_timemap(&(["ploup"].into())).is_err()); + assert!(to_timemap(&(true.into())).is_err()); + assert!(to_timemap(&(8.into())).is_err()); + assert!(to_timemap(&(None::.into())).is_err()); + } } diff --git a/tests/simple.rs b/tests/simple.rs index dcbca3e..1e7905e 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -36,6 +36,7 @@ fn config_with_cmd(config_path: &str, cmd: &str) { regex: ['here is '], retry: 2, retryperiod: '2s', + duplicate: 'rerun', actions: { // Don't mix code and data at home! 
// You may permit arbitrary execution from vilains, From 81e5fb4c42b11b9e1ebbf9d45c4799089a83e205 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 11 Jul 2025 12:00:00 +0200 Subject: [PATCH 050/241] add State tests and fix trigger persistance Triggers were only persisted for retry duration, instead of longuest action duration. As retry is often shorter than after, this would make reaction forget most triggers on restart. entry_timeout is now set to longuest_action_duration. --- src/daemon/filter/mod.rs | 1 - src/daemon/filter/state.rs | 269 +++++++++++++++++++++++++++++++++++-- src/treedb/mod.rs | 2 +- 3 files changed, 261 insertions(+), 11 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 07991b5..4de3526 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -67,7 +67,6 @@ impl FilterManager { shutdown, state: Arc::new(Mutex::new(State::new( filter, - !filter.longuest_action_duration().is_zero(), db, now, )?)), diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index c26b6d8..a67039d 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -42,6 +42,23 @@ pub struct State { /// Alternative view of the current Matches for O(1) cleaning of old Matches /// without added async Tasks to remove them /// Persisted + /// + /// I'm pretty confident that Time will always be unique, because it has enough precision. 
+ /// See this code that gives different times, even in a minimal loop: + /// ```rust + /// use chrono::{Local}; + /// + /// fn main() { + /// let mut res = vec![]; + /// for _ in 0..10 { + /// let now = Local::now(); + /// res.push(format!("Now: {now}")); + /// } + /// for s in res { + /// println!("{s}"); + /// } + /// } + /// ``` pub ordered_times: Tree, /// Saves all the current Triggers for this Filter /// Persisted @@ -49,12 +66,7 @@ pub struct State { } impl State { - pub fn new( - filter: &'static Filter, - has_after: bool, - db: &mut Database, - now: Time, - ) -> Result { + pub fn new(filter: &'static Filter, db: &mut Database, now: Time) -> Result { let ordered_times = db.open_tree( filter_ordered_times_db_name(filter), filter.retry_duration().unwrap_or_default(), @@ -62,13 +74,13 @@ impl State { )?; let mut triggers = db.open_tree( filter_triggers_db_name(filter), - filter.retry_duration().unwrap_or_default(), + filter.longuest_action_duration(), |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)), )?; if triggers.is_empty() { let old_triggers = db.open_tree( filter_triggers_old_db_name(filter), - filter.retry_duration().unwrap_or_default(), + filter.longuest_action_duration(), |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), )?; for (mt, n) in old_triggers.iter() { @@ -85,7 +97,7 @@ impl State { } let mut this = Self { filter, - has_after, + has_after: !filter.longuest_action_duration().is_zero(), matches: BTreeMap::new(), ordered_times, triggers, @@ -212,3 +224,242 @@ impl State { } } } + +#[cfg(test)] +mod tests { + use std::collections::{BTreeMap, HashMap}; + + use chrono::{DateTime, Local, TimeDelta}; + + use crate::{ + concepts::{filter_tests::ok_filter, Action, Filter, Pattern}, + daemon::filter::state::State, + tests::TempDatabase, + }; + + // Tests `new`, `clear_past_matches` and `load_matches_from_ordered_times` + #[tokio::test] + async fn state_new() { + let patterns = Pattern::new_map("az", "[a-z]+").unwrap(); + let filter = 
Filter::new_static( + vec![Action::new( + vec!["true"], + None, + false, + "s1", + "f1", + "a1", + &patterns, + )], + vec!["test "], + Some(3), + Some("2s"), + "s1", + "f1", + &patterns, + ); + + let now = DateTime::parse_from_rfc3339("2025-07-10T12:35:00.000+00:00") + .unwrap() + .with_timezone(&Local); + let now_plus_1m = now + TimeDelta::minutes(1); + let now_plus_1m01 = now_plus_1m + TimeDelta::seconds(1); + let now_less_1m = now - TimeDelta::minutes(1); + let now_less_1s = now - TimeDelta::seconds(1); + let now_less_4s = now - TimeDelta::seconds(4); + let now_less_5s = now - TimeDelta::seconds(5); + + let mut db = TempDatabase::default().await; + db.set_loaded_db(HashMap::from([( + "filter_ordered_times_s1.f1".into(), + HashMap::from([ + // Will stay + (now_plus_1m.to_rfc3339().into(), ["one"].into()), + (now_plus_1m01.to_rfc3339().into(), ["one"].into()), + (now_less_1s.to_rfc3339().into(), ["two"].into()), // stays because retry: 2s + // Will get cleaned + (now_less_4s.to_rfc3339().into(), ["two"].into()), + (now_less_5s.to_rfc3339().into(), ["three"].into()), + (now_less_1m.to_rfc3339().into(), ["two"].into()), + ]), + )])); + + let state = State::new(filter, &mut db, now).unwrap(); + + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([ + (now_less_1s, vec!["two".into()]), + (now_plus_1m, vec!["one".into()]), + (now_plus_1m01, vec!["one".into()]), + ]) + ); + assert_eq!( + state.matches, + BTreeMap::from([ + (vec!["one".into()], [now_plus_1m, now_plus_1m01].into()), + (vec!["two".into()], [now_less_1s].into()), + ]) + ); + } + + #[tokio::test] + async fn state_match_add_remove() { + let filter = Box::leak(Box::new(ok_filter())); + + let one = vec!["one".into()]; + + let now = DateTime::parse_from_rfc3339("2025-07-10T12:35:00.000+00:00") + .unwrap() + .with_timezone(&Local); + let now_less_1s = now - TimeDelta::seconds(1); + let now_less_4s = now - TimeDelta::seconds(4); + + let mut db = TempDatabase::default().await; + let mut state = 
State::new(filter, &mut db, now).unwrap(); + + assert!(state.ordered_times.tree().is_empty()); + assert!(state.matches.is_empty()); + + // Add non-previously added match + state.add_match(one.clone(), now_less_1s); + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([(now_less_1s, one.clone()),]) + ); + assert_eq!( + state.matches, + BTreeMap::from([(one.clone(), [now_less_1s].into())]) + ); + + // Add previously added match + state.add_match(one.clone(), now_less_4s); + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([(now_less_1s, one.clone()), (now_less_4s, one.clone())]) + ); + assert_eq!( + state.matches, + BTreeMap::from([(one.clone(), [now_less_1s, now_less_4s].into())]) + ); + + // Remove added match + state.remove_match(&one); + assert!(state.ordered_times.tree().is_empty()); + assert!(state.matches.is_empty()); + } + + #[tokio::test] + async fn state_trigger_no_after_add_remove_decrement() { + let filter = Box::leak(Box::new(ok_filter())); + + let one = vec!["one".into()]; + let now = Local::now(); + + let mut db = TempDatabase::default().await; + let mut state = State::new(filter, &mut db, now).unwrap(); + + assert!(state.triggers.tree().is_empty()); + + // Add unique trigger + state.add_trigger(one.clone(), now); + // Nothing is really added + assert!(state.triggers.tree().is_empty()); + + // Will be called immediately after, it returns true + assert!(state.decrement_trigger(&one, now)); + } + + #[tokio::test] + async fn state_trigger_has_after_add_remove_decrement() { + let patterns = Pattern::new_map("az", "[a-z]+").unwrap(); + let filter = Filter::new_static( + vec![ + Action::new(vec!["true"], None, false, "s1", "f1", "a1", &patterns), + Action::new(vec!["true"], Some("1s"), false, "s1", "f1", "a2", &patterns), + Action::new(vec!["true"], Some("3s"), false, "s1", "f1", "a3", &patterns), + ], + vec!["test "], + Some(3), + Some("2s"), + "s1", + "f1", + &patterns, + ); + + let one = vec!["one".into()]; + let now = Local::now(); + 
let now_plus_1s = now + TimeDelta::seconds(1); + + let mut db = TempDatabase::default().await; + let mut state = State::new(filter, &mut db, now).unwrap(); + + assert!(state.triggers.tree().is_empty()); + + // Add unique trigger + state.add_trigger(one.clone(), now); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 3)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 2)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 1)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now)); + assert!(state.triggers.tree().is_empty()); + // Decrement → false + assert!(!state.decrement_trigger(&one, now)); + + // Add trigger with neighbour + state.add_trigger(one.clone(), now); + state.add_trigger(one.clone(), now_plus_1s); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 2)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 1)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now_plus_1s, 3)].into())]) + ); + // Decrement → false + assert!(!state.decrement_trigger(&one, now)); + // Remove neighbour + state.remove_trigger(&one); + assert!(state.triggers.tree().is_empty()); + + // Add two neighbour triggers + state.add_trigger(one.clone(), now); + state.add_trigger(one.clone(), now_plus_1s); + assert_eq!( + state.triggers.tree(), + 
&BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) + ); + // Remove them + state.remove_trigger(&one); + assert!(state.triggers.tree().is_empty()); + } +} diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index f012cf4..a69f84e 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -267,7 +267,7 @@ impl Database { /// Creates a new Tree with the given name and entry timeout. /// Takes a closure (or regular function) that converts (Value, Value) JSON entries /// into (K, V) typed entries. - /// Helpers for this closure can be find in the [`helpers`] module. + /// Helpers for this closure can be found in the [`helpers`] module. pub fn open_tree( &mut self, name: String, From 6f346ff37187528c296db403eb28fa2fb1c6895e Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 14 Jul 2025 12:00:00 +0200 Subject: [PATCH 051/241] Test existing FilterManager tests for each Duplicate enum --- src/concepts/filter.rs | 4 + src/daemon/filter/state.rs | 4 +- src/daemon/filter/tests.rs | 833 +++++++++++++++++++------------------ 3 files changed, 433 insertions(+), 408 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index d79be4f..99d83c6 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -354,6 +354,7 @@ impl Filter { retry_period: Option<&str>, stream_name: &str, name: &str, + duplicate: Duplicate, config_patterns: &Patterns, ) -> Self { let mut filter = Self { @@ -361,6 +362,7 @@ impl Filter { regex: regex.into_iter().map(|s| s.into()).collect(), retry, retry_period: retry_period.map(|s| s.into()), + duplicate, ..Default::default() }; filter.setup(stream_name, name, config_patterns).unwrap(); @@ -374,6 +376,7 @@ impl Filter { retry_period: Option<&str>, stream_name: &str, name: &str, + duplicate: Duplicate, config_patterns: &Patterns, ) -> &'static Self { Box::leak(Box::new(Self::new( @@ -383,6 +386,7 @@ impl Filter { retry_period, stream_name, name, + duplicate, config_patterns, ))) } diff --git a/src/daemon/filter/state.rs 
b/src/daemon/filter/state.rs index a67039d..f5d9692 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -232,7 +232,7 @@ mod tests { use chrono::{DateTime, Local, TimeDelta}; use crate::{ - concepts::{filter_tests::ok_filter, Action, Filter, Pattern}, + concepts::{filter_tests::ok_filter, Action, Duplicate, Filter, Pattern}, daemon::filter::state::State, tests::TempDatabase, }; @@ -256,6 +256,7 @@ mod tests { Some("2s"), "s1", "f1", + Duplicate::default(), &patterns, ); @@ -384,6 +385,7 @@ mod tests { Some("2s"), "s1", "f1", + Duplicate::default(), &patterns, ); diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 44a4a02..5d095d3 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -11,7 +11,7 @@ use tokio::sync::Semaphore; use super::{state::filter_ordered_times_db_name, FilterManager, React}; use crate::{ - concepts::{Action, Filter, Pattern, Patterns, Time}, + concepts::{Action, Duplicate, Filter, Pattern, Patterns, Time}, daemon::shutdown::ShutdownController, tests::TempDatabase, }; @@ -83,10 +83,142 @@ impl TestBed2 { #[tokio::test] async fn three_matches_then_action_then_delayed_action() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![ - Action::new( + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + ), + Action::new( + vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], + Some("100ms"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + ), + ], + vec!["test "], + Some(3), + Some("2s"), + "test", + "test", + dup, + &bed.az_patterns, + ); + + let bed = bed.part2(filter, Local::now(), None).await; + + let now = bed.now; + let now1s = bed.now + TimeDelta::seconds(1); + let now2s = bed.now + TimeDelta::seconds(2); + + // No 
match + assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); + bed.assert_empty_trees(); + + // First match + let one = vec!["one".to_string()]; + assert_eq!(bed.manager.handle_line("test one", now), React::Match); + { + let state = bed.manager.state.lock().unwrap(); + assert_eq!( + state.matches, + BTreeMap::from([(one.clone(), BTreeSet::from([now]))]), + "the match has been added to matches" + ); + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([(now, one.clone())]), + "the match has been added to ordered_times" + ); + assert!(state.triggers.is_empty(), "triggers is still empty"); + } + + // Second match + assert_eq!(bed.manager.handle_line("test one", now1s), React::Match); + { + let state = bed.manager.state.lock().unwrap(); + assert_eq!( + state.matches, + BTreeMap::from([(one.clone(), BTreeSet::from([now, now1s]))]), + "a second match is present in matches" + ); + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([(now, one.clone()), (now1s, one.clone())]), + "a second match is present in ordered_times" + ); + assert!(state.triggers.is_empty(), "triggers is still empty"); + } + + // Third match, exec + let _block = bed.semaphore.acquire().await.unwrap(); + assert_eq!(bed.manager.handle_line("test one", now2s), React::Trigger); + { + let state = bed.manager.state.lock().unwrap(); + assert!( + state.matches.is_empty(), + "matches are emptied after trigger" + ); + assert!( + state.ordered_times.is_empty(), + "ordered_times are emptied after trigger" + ); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]), + // 1 and not 2 because the decrement_trigger() doesn't wait for the semaphore + "triggers now contain the triggered match with 1 action left" + ); + } + drop(_block); + + // Now the first action executes + tokio::time::sleep(Duration::from_millis(40)).await; + // Check first action + assert_eq!( + bed.manager.state.lock().unwrap().triggers.tree(), + 
&BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]), + "triggers still contain the triggered match with 1 action left" + ); + assert_eq!( + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the first action" + ); + + // Now the second action executes + tokio::time::sleep(Duration::from_millis(100)).await; + // Check second action + assert!( + bed.manager.state.lock().unwrap().triggers.is_empty(), + "triggers are empty again" + ); + assert_eq!( + "a1 one\na2 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the 2 actions" + ); + + bed.assert_empty_trees(); + } +} + +#[tokio::test] +async fn one_match_one_action() { + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![Action::new( vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], None, false, @@ -94,326 +226,209 @@ async fn three_matches_then_action_then_delayed_action() { "test", "a1", &bed.az_patterns, - ), - Action::new( - vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], - Some("100ms"), - false, - "test", - "test", - "a2", - &bed.az_patterns, - ), - ], - vec!["test "], - Some(3), - Some("2s"), - "test", - "test", - &bed.az_patterns, - ); - - let bed = bed.part2(filter, Local::now(), None).await; - - let now = bed.now; - let now1s = bed.now + TimeDelta::seconds(1); - let now2s = bed.now + TimeDelta::seconds(2); - - // No match - assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); - bed.assert_empty_trees(); - - // First match - let one = vec!["one".to_string()]; - assert_eq!(bed.manager.handle_line("test one", now), React::Match); - { - let state = bed.manager.state.lock().unwrap(); - assert_eq!( - state.matches, - BTreeMap::from([(one.clone(), BTreeSet::from([now]))]), - "the match has been added to matches" - ); - assert_eq!( - state.ordered_times.tree(), - &BTreeMap::from([(now, 
one.clone())]), - "the match has been added to ordered_times" - ); - assert!(state.triggers.is_empty(), "triggers is still empty"); - } - - // Second match - assert_eq!(bed.manager.handle_line("test one", now1s), React::Match); - { - let state = bed.manager.state.lock().unwrap(); - assert_eq!( - state.matches, - BTreeMap::from([(one.clone(), BTreeSet::from([now, now1s]))]), - "a second match is present in matches" - ); - assert_eq!( - state.ordered_times.tree(), - &BTreeMap::from([(now, one.clone()), (now1s, one.clone())]), - "a second match is present in ordered_times" - ); - assert!(state.triggers.is_empty(), "triggers is still empty"); - } - - // Third match, exec - let _block = bed.semaphore.acquire().await.unwrap(); - assert_eq!(bed.manager.handle_line("test one", now2s), React::Trigger); - { - let state = bed.manager.state.lock().unwrap(); - assert!( - state.matches.is_empty(), - "matches are emptied after trigger" - ); - assert!( - state.ordered_times.is_empty(), - "ordered_times are emptied after trigger" - ); - assert_eq!( - state.triggers.tree(), - &BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]), - // 1 and not 2 because the decrement_trigger() doesn't wait for the semaphore - "triggers now contain the triggered match with 1 action left" - ); - } - drop(_block); - - // Now the first action executes - tokio::time::sleep(Duration::from_millis(40)).await; - // Check first action - assert_eq!( - bed.manager.state.lock().unwrap().triggers.tree(), - &BTreeMap::from([(one.clone(), BTreeMap::from([(now2s, 1)]))]), - "triggers still contain the triggered match with 1 action left" - ); - assert_eq!( - "a1 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the first action" - ); - - // Now the second action executes - tokio::time::sleep(Duration::from_millis(100)).await; - // Check second action - assert!( - bed.manager.state.lock().unwrap().triggers.is_empty(), - "triggers are empty again" - ); - assert_eq!( 
- "a1 one\na2 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the 2 actions" - ); - - bed.assert_empty_trees(); -} - -#[tokio::test] -async fn one_match_one_action() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![Action::new( - vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + )], + vec!["test "], + None, None, - false, "test", "test", - "a1", + dup, &bed.az_patterns, - )], - vec!["test "], - None, - None, - "test", - "test", - &bed.az_patterns, - ); + ); - let bed = bed.part2(filter, Local::now(), None).await; - let now = bed.now; + let bed = bed.part2(filter, Local::now(), None).await; + let now = bed.now; - // No match - assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); - bed.assert_empty_trees(); + // No match + assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); + bed.assert_empty_trees(); - // match - assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); - bed.assert_empty_trees(); + // match + assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); + bed.assert_empty_trees(); - // the action executes - tokio::time::sleep(Duration::from_millis(40)).await; - assert_eq!( - "a1 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the first action" - ); + // the action executes + tokio::time::sleep(Duration::from_millis(40)).await; + assert_eq!( + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the first action" + ); - bed.assert_empty_trees(); + bed.assert_empty_trees(); + } } #[tokio::test] async fn one_match_one_delayed_action() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![Action::new( - vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], - Some("100ms"), - false, + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = 
Filter::new_static( + vec![Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + Some("100ms"), + false, + "test", + "test", + "a1", + &bed.az_patterns, + )], + vec!["test "], + None, + None, "test", "test", - "a1", + dup, &bed.az_patterns, - )], - vec!["test "], - None, - None, - "test", - "test", - &bed.az_patterns, - ); - - let bed = bed.part2(filter, Local::now(), None).await; - let now = bed.now; - - // No match - assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); - bed.assert_empty_trees(); - - // Match - let one = vec!["one".to_string()]; - assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); - { - let state = bed.manager.state.lock().unwrap(); - assert!(state.matches.is_empty(), "matches stay empty"); - assert!(state.ordered_times.is_empty(), "ordered_times stay empty"); - assert_eq!( - state.triggers.tree(), - &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), - "triggers still contain the triggered match with 1 action left" ); + + let bed = bed.part2(filter, Local::now(), None).await; + let now = bed.now; + + // No match + assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); + bed.assert_empty_trees(); + + // Match + let one = vec!["one".to_string()]; + assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); + { + let state = bed.manager.state.lock().unwrap(); + assert!(state.matches.is_empty(), "matches stay empty"); + assert!(state.ordered_times.is_empty(), "ordered_times stay empty"); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), + "triggers still contain the triggered match with 1 action left" + ); + } + assert_eq!( + "", + &read_to_string(&bed.out_file).unwrap(), + "the output file is empty" + ); + + // The action executes + tokio::time::sleep(Duration::from_millis(140)).await; + assert!( + bed.manager.state.lock().unwrap().triggers.is_empty(), + "triggers are empty again" + ); + assert_eq!( + 
"a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action" + ); + + bed.assert_empty_trees(); } - assert_eq!( - "", - &read_to_string(&bed.out_file).unwrap(), - "the output file is empty" - ); - - // The action executes - tokio::time::sleep(Duration::from_millis(140)).await; - assert!( - bed.manager.state.lock().unwrap().triggers.is_empty(), - "triggers are empty again" - ); - assert_eq!( - "a1 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the action" - ); - - bed.assert_empty_trees(); } #[tokio::test] async fn one_db_match_one_runtime_match_one_action() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![Action::new( - vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], - None, - false, + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + )], + vec!["test "], + Some(2), + Some("2s"), "test", "test", - "a1", + dup, &bed.az_patterns, - )], - vec!["test "], - Some(2), - Some("2s"), - "test", - "test", - &bed.az_patterns, - ); - - let mut db = TempDatabase::default().await; - - // Pre-add match - let now = Local::now(); - let one = vec!["one".to_string()]; - let now1s = now - TimeDelta::seconds(1); - - db.set_loaded_db(HashMap::from([( - filter_ordered_times_db_name(filter), - HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])); - - // Finish setup - let bed = bed.part2(filter, now, Some(db)).await; - - { - let state = bed.manager.state.lock().unwrap(); - assert_eq!( - state.matches, - BTreeMap::from([(one.clone(), BTreeSet::from([now1s]))]), - "the match previously added to matches" ); + + let mut db = TempDatabase::default().await; + + // Pre-add match + let now = Local::now(); + let one = 
vec!["one".to_string()]; + let now1s = now - TimeDelta::seconds(1); + + db.set_loaded_db(HashMap::from([( + filter_ordered_times_db_name(filter), + HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), + )])); + + // Finish setup + let bed = bed.part2(filter, now, Some(db)).await; + + { + let state = bed.manager.state.lock().unwrap(); + assert_eq!( + state.matches, + BTreeMap::from([(one.clone(), BTreeSet::from([now1s]))]), + "the match previously added to matches" + ); + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([(now1s, one.clone())]), + "the match previously added to matches" + ); + assert!(state.triggers.is_empty(), "triggers stay empty"); + } + + // match + assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); + bed.assert_empty_trees(); + // the action executes + tokio::time::sleep(Duration::from_millis(40)).await; assert_eq!( - state.ordered_times.tree(), - &BTreeMap::from([(now1s, one.clone())]), - "the match previously added to matches" + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action" ); - assert!(state.triggers.is_empty(), "triggers stay empty"); } - - // match - assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); - bed.assert_empty_trees(); - // the action executes - tokio::time::sleep(Duration::from_millis(40)).await; - assert_eq!( - "a1 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the action" - ); } #[tokio::test] async fn one_outdated_db_match() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![Action::new( - vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], - None, - false, + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + 
&bed.az_patterns, + )], + vec!["test "], + Some(2), + Some("1s"), "test", "test", - "a1", + dup, &bed.az_patterns, - )], - vec!["test "], - Some(2), - Some("1s"), - "test", - "test", - &bed.az_patterns, - ); + ); - let mut db = TempDatabase::default().await; + let mut db = TempDatabase::default().await; - // Pre-add match - let now = Local::now(); - let one = vec!["one".to_string()]; - let now1s = now - TimeDelta::milliseconds(1001); + // Pre-add match + let now = Local::now(); + let one = vec!["one".to_string()]; + let now1s = now - TimeDelta::milliseconds(1001); - db.set_loaded_db(HashMap::from([( - filter_ordered_times_db_name(filter), - HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])); + db.set_loaded_db(HashMap::from([( + filter_ordered_times_db_name(filter), + HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), + )])); - // Finish setup - let bed = bed.part2(filter, now, Some(db)).await; - bed.assert_empty_trees(); + // Finish setup + let bed = bed.part2(filter, now, Some(db)).await; + bed.assert_empty_trees(); + } } #[tokio::test] @@ -423,145 +438,149 @@ async fn flush() { #[tokio::test] async fn trigger_unmatched_pattern() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![ - Action::new( - vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], - None, - false, - "test", - "test", - "a1", - &bed.az_patterns, - ), - Action::new( - vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], - Some("200ms"), - false, - "test", - "test", - "a2", - &bed.az_patterns, - ), - ], - vec!["test "], - Some(2), - Some("1s"), - "test", - "test", - &bed.az_patterns, - ); + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + ), + Action::new( + vec!["sh", "-c", &format!("echo a2 >> 
{}", &bed.out_file)], + Some("200ms"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + ), + ], + vec!["test "], + Some(2), + Some("1s"), + "test", + "test", + dup, + &bed.az_patterns, + ); - let now = Local::now(); - let one = vec!["one".to_string()]; - let bed = bed.part2(filter, now, None).await; + let now = Local::now(); + let one = vec!["one".to_string()]; + let bed = bed.part2(filter, now, None).await; - bed.manager - .handle_trigger( - // az_pattern: "one" - bed.az_patterns - .values() - .cloned() - .map(|pattern| (pattern, one[0].clone())) - .collect(), - now, - ) - .unwrap(); + bed.manager + .handle_trigger( + // az_pattern: "one" + bed.az_patterns + .values() + .cloned() + .map(|pattern| (pattern, one[0].clone())) + .collect(), + now, + ) + .unwrap(); - // the action executes - tokio::time::sleep(Duration::from_millis(40)).await; + // the action executes + tokio::time::sleep(Duration::from_millis(40)).await; - // No matches, one action registered - { - let state = bed.manager.state.lock().unwrap(); - assert!(state.matches.is_empty()); - assert!(state.ordered_times.is_empty()); + // No matches, one action registered + { + let state = bed.manager.state.lock().unwrap(); + assert!(state.matches.is_empty()); + assert!(state.ordered_times.is_empty()); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), + ); + } assert_eq!( - state.triggers.tree(), - &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action" ); } - assert_eq!( - "a1 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the action" - ); } #[tokio::test] async fn trigger_matched_pattern() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![ - Action::new( - vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], - None, - false, - "test", - "test", - "a1", - 
&bed.az_patterns, - ), - Action::new( - vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], - Some("200ms"), - false, - "test", - "test", - "a2", - &bed.az_patterns, - ), - ], - vec!["test "], - Some(2), - Some("1s"), - "test", - "test", - &bed.az_patterns, - ); + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + ), + Action::new( + vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], + Some("200ms"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + ), + ], + vec!["test "], + Some(2), + Some("1s"), + "test", + "test", + dup, + &bed.az_patterns, + ); - let now = Local::now(); - let now1s = now - TimeDelta::milliseconds(10); - let one = vec!["one".to_string()]; + let now = Local::now(); + let now1s = now - TimeDelta::milliseconds(10); + let one = vec!["one".to_string()]; - let mut db = TempDatabase::default().await; - db.set_loaded_db(HashMap::from([( - filter_ordered_times_db_name(filter), - HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])); - let bed = bed.part2(filter, now, Some(db)).await; + let mut db = TempDatabase::default().await; + db.set_loaded_db(HashMap::from([( + filter_ordered_times_db_name(filter), + HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), + )])); + let bed = bed.part2(filter, now, Some(db)).await; - bed.manager - .handle_trigger( - // az_pattern: "one" - bed.az_patterns - .values() - .cloned() - .map(|pattern| (pattern, one[0].clone())) - .collect(), - now, - ) - .unwrap(); + bed.manager + .handle_trigger( + // az_pattern: "one" + bed.az_patterns + .values() + .cloned() + .map(|pattern| (pattern, one[0].clone())) + .collect(), + now, + ) + .unwrap(); - // the action executes - tokio::time::sleep(Duration::from_millis(40)).await; + // the action 
executes + tokio::time::sleep(Duration::from_millis(40)).await; - // No matches, one action registered - { - let state = bed.manager.state.lock().unwrap(); - assert!(state.matches.is_empty()); - assert!(state.ordered_times.is_empty()); + // No matches, one action registered + { + let state = bed.manager.state.lock().unwrap(); + assert!(state.matches.is_empty()); + assert!(state.ordered_times.is_empty()); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), + ); + } assert_eq!( - state.triggers.tree(), - &BTreeMap::from([(one.clone(), BTreeMap::from([(now, 1)]))]), + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action" ); } - assert_eq!( - "a1 one\n", - &read_to_string(&bed.out_file).unwrap(), - "the output file contains the result of the action" - ); } - -// TODO test State functions From d8db2a1745bfe0f388a762fe030ef750c1275a26 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 14 Jul 2025 12:00:00 +0200 Subject: [PATCH 052/241] Add extensive test on Duplicate and fix related bug --- src/daemon/filter/mod.rs | 15 ++--- src/daemon/filter/tests.rs | 116 +++++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 10 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 4de3526..e57d751 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -65,11 +65,7 @@ impl FilterManager { filter, exec_limit, shutdown, - state: Arc::new(Mutex::new(State::new( - filter, - db, - now, - )?)), + state: Arc::new(Mutex::new(State::new(filter, db, now)?)), }; this.clear_past_triggers_and_schedule_future_actions(now); Ok(this) @@ -110,13 +106,12 @@ impl FilterManager { if trigger { state.remove_match(&m); - state.add_trigger(m.clone(), now); - if already_triggered && Duplicate::Extend == self.filter.duplicate() { + let extend = already_triggered && Duplicate::Extend == self.filter.duplicate(); + if extend { 
state.remove_trigger(&m); - self.schedule_exec(m, now, now, &mut state, false, true); - } else { - self.schedule_exec(m, now, now, &mut state, false, false); } + state.add_trigger(m.clone(), now); + self.schedule_exec(m, now, now, &mut state, false, extend); } trigger diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 5d095d3..c0c8afa 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -584,3 +584,119 @@ async fn trigger_matched_pattern() { ); } } + +#[tokio::test] +async fn multiple_triggers() { + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + ), + Action::new( + vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], + Some("200ms"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + ), + ], + vec!["test "], + Some(2), + Some("1s"), + "test", + "test", + dup, + &bed.az_patterns, + ); + + let bed = bed.part2(filter, Local::now(), None).await; + + assert_eq!( + bed.manager.handle_line("test one", Local::now()), + React::Match, + "Duplicate: {dup:?}" + ); + assert_eq!( + bed.manager.handle_line("test one", Local::now()), + React::Trigger, + "Duplicate: {dup:?}" + ); + + // Wait for first action to execute + tokio::time::sleep(Duration::from_millis(50)).await; + + assert_eq!( + "a1 one\n", + &read_to_string(&bed.out_file).unwrap(), + "Duplicate: {dup:?}" + ); + + tokio::time::sleep(Duration::from_millis(50)).await; + + assert_eq!( + bed.manager.handle_line("test one", Local::now()), + match dup { + Duplicate::Ignore => React::Match, + _ => React::Match, + }, + "Duplicate: {dup:?}" + ); + + assert_eq!( + bed.manager.handle_line("test one", Local::now()), + match dup { + Duplicate::Ignore => React::Match, + _ => React::Trigger, + }, + "Duplicate: {dup:?}" + ); + + // Wait for 
second action to execute + tokio::time::sleep(Duration::from_millis(50)).await; + + assert_eq!( + &read_to_string(&bed.out_file).unwrap(), + match dup { + Duplicate::Rerun => "a1 one\na1 one\n", + _ => "a1 one\n", + }, + "Duplicate: {dup:?}" + ); + + // Wait for first after action to execute + tokio::time::sleep(Duration::from_millis(100)).await; + + assert_eq!( + &read_to_string(&bed.out_file).unwrap(), + match dup { + Duplicate::Rerun => "a1 one\na1 one\na2 one\n", + Duplicate::Extend => "a1 one\n", + Duplicate::Ignore => "a1 one\na2 one\n", + }, + "Duplicate: {dup:?}" + ); + + // Wait for second after action to execute + tokio::time::sleep(Duration::from_millis(100)).await; + + assert_eq!( + &read_to_string(&bed.out_file).unwrap(), + match dup { + Duplicate::Rerun => "a1 one\na1 one\na2 one\na2 one\n", + Duplicate::Extend => "a1 one\na2 one\n", + Duplicate::Ignore => "a1 one\na2 one\n", + }, + "Duplicate: {dup:?}" + ); + } +} From 6b970e74c5ab9f96effe08ae4c08098afb4aa8ce Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 14 Jul 2025 12:00:00 +0200 Subject: [PATCH 053/241] Update configuration reference --- config/example.jsonnet | 13 +++++++++++++ config/example.yml | 15 +++++++++++++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/config/example.jsonnet b/config/example.jsonnet index 41a6507..ab06bd7 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -78,6 +78,7 @@ local banFor(time) = { // note that if the command is not in environment's `PATH` // its full path must be given. 
cmd: ['journalctl', '-n0', '-fu', 'sshd.service'], + // filters run actions when they match regexes on a stream filters: { // filters have a user-defined name @@ -92,6 +93,7 @@ local banFor(time) = { @'Connection (reset|closed) by (authenticating|invalid) user .* ', @'banner exchange: Connection from port [0-9]*: invalid format', ], + // if retry and retryperiod are defined, // the actions will only take place if a same pattern is // found `retry` times in a `retryperiod` interval @@ -106,12 +108,22 @@ local banFor(time) = { // - h / hour / hours // - d / day / days retryperiod: '6h', + + // duplicate specify how to handle matches after an action has already been taken. + // 3 options are possible: + // - extend (default): update the pending actions' time, so they run later + // - ignore: don't do anything, ignore the match + // - rerun: run the actions again. so we may have the same pending actions multiple times. + // (this was the default before 2.2.0) + // duplicate: extend + // actions are run by the filter when regexes are matched actions: { // actions have a user-defined name ban: { cmd: iptables(['-A', 'reaction', '-s', '', '-j', 'DROP']), }, + unban: { cmd: iptables(['-D', 'reaction', '-s', '', '-j', 'DROP']), // if after is defined, the action will not take place immediately, but after a specified duration @@ -124,6 +136,7 @@ local banFor(time) = { // here it is not useful because we will flush and delete the chain containing the bans anyway // (with the stop commands) }, + mail: { cmd: ['sendmail', '...', ''], // some commands, such as alerting commands, are "oneshot". diff --git a/config/example.yml b/config/example.yml index 74c0cde..0d5f9fd 100644 --- a/config/example.yml +++ b/config/example.yml @@ -62,6 +62,7 @@ streams: # note that if the command is not in environment's `PATH` # its full path must be given. 
cmd: [ 'journalctl', '-n0', '-fu', 'sshd.service' ] + # filters run actions when they match regexes on a stream filters: # filters have a user-defined name @@ -75,6 +76,7 @@ streams: - 'Invalid user .* from ' - 'Connection (reset|closed) by (authenticating|invalid) user .* ' - 'banner exchange: Connection from port [0-9]*: invalid format' + # if retry and retryperiod are defined, # the actions will only take place if a same pattern is # found `retry` times in a `retryperiod` interval @@ -89,14 +91,22 @@ streams: # - h / hour / hours # - d / day / days retryperiod: 6h - # duplicates! - duplicate: rerun + + # duplicate specify how to handle matches after an action has already been taken. + # 3 options are possible: + # - extend (default): update the pending actions' time, so they run later + # - ignore: don't do anything, ignore the match + # - rerun: run the actions again. so we may have the same pending actions multiple times. + # (this was the default before 2.2.0) + # duplicate: extend + # actions are run by the filter when regexes are matched actions: # actions have a user-defined name ban: # YAML substitutes *reference by the value anchored at &reference cmd: *iptablesban + unban: cmd: *iptablesunban # if after is defined, the action will not take place immediately, but after a specified duration @@ -108,6 +118,7 @@ streams: # (defaults to false) # here it is not useful because we will flush and delete the chain containing the bans anyway # (with the stop commands) + mail: cmd: ['sendmail', '...', ''] # some commands, such as alerting commands, are "oneshot". From ea0452f62cd730d03565c4f8d753789c2d206aca Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 4 Aug 2025 12:00:00 +0200 Subject: [PATCH 054/241] Fix components starting order Now Database and Socket components are created before start commands are executed. So in case of error, start commands are not executed. 
Also socket syscalls are now async instead of blocking, for better integration with the async runtime. New start order: - DB - Socket - Start commands - Streams --- src/daemon/mod.rs | 78 ++++++++++++++++++------------------- src/daemon/socket.rs | 93 ++++++++++++++++++++++---------------------- 2 files changed, 84 insertions(+), 87 deletions(-) diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index ec1ee47..78abb41 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -33,42 +33,51 @@ pub async fn daemon( ) -> Result<(), Box> { let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?)); - if !config.start() { - return Err("a start command failed, exiting.".into()); - } - // Cancellation Token let shutdown = ShutdownController::new(); - // Semaphore limiting action execution concurrency - let exec_limit = match config.concurrency() { - 0 => None, - n => Some(Arc::new(Semaphore::new(n))), - }; - // Open Database let mut db = Database::open(config).await?; - // Filter managers - let now = Local::now(); - let mut state = HashMap::new(); - let mut stream_managers = Vec::new(); - for stream in config.streams().values() { - let mut filter_managers = HashMap::new(); - for filter in stream.filters().values() { - let manager = - FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now)?; - filter_managers.insert(filter, manager); - } - state.insert(stream, filter_managers.clone()); + let (state, stream_managers) = { + // Semaphore limiting action execution concurrency + let exec_limit = match config.concurrency() { + 0 => None, + n => Some(Arc::new(Semaphore::new(n))), + }; - stream_managers.push(StreamManager::new( - stream, - filter_managers, - shutdown.token(), - )?); + // Filter managers + let now = Local::now(); + let mut state = HashMap::new(); + let mut stream_managers = Vec::new(); + for stream in config.streams().values() { + let mut filter_managers = HashMap::new(); + for filter in stream.filters().values() { + 
let manager = + FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now)?; + filter_managers.insert(filter, manager); + } + state.insert(stream, filter_managers.clone()); + + stream_managers.push(StreamManager::new( + stream, + filter_managers, + shutdown.token(), + )?); + } + (state, stream_managers) + }; + + // Run database task + let mut db_status_rx = db.manager(shutdown.token()); + + // Run socket task + socket_manager(config, socket, state, shutdown.token()).await?; + + // reaction won't abort on startup anymore, we can run start commands + if !config.start() { + return Err("a start command failed, exiting.".into()); } - drop(exec_limit); // Start Stream managers let mut stream_task_handles = Vec::new(); @@ -76,23 +85,10 @@ pub async fn daemon( stream_task_handles.push(tokio::spawn(async move { stream_manager.start().await })); } - // Run database task - let mut db_status_rx = { - let token = shutdown.token(); - db.manager(token) - }; - // Close streams when we receive a quit signal let signal_received = Arc::new(AtomicBool::new(false)); handle_signals(shutdown.delegate(), signal_received.clone())?; - // Run socket task - { - let socket = socket.to_owned(); - let token = shutdown.token(); - tokio::spawn(async move { socket_manager(config, socket, state, token).await }); - } - // Wait for all streams to quit for task_handle in stream_task_handles { let _ = task_handle.await; diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 388d954..7c3d0c9 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -1,15 +1,13 @@ use std::{ collections::{BTreeMap, HashMap}, - fs, io, path::PathBuf, - process::exit, sync::Arc, }; use chrono::Local; use futures::{SinkExt, StreamExt}; use regex::Regex; -use tokio::net::UnixListener; +use tokio::{fs, net::UnixListener}; use tokio_util::{ bytes::Bytes, codec::{Framed, LengthDelimitedCodec}, @@ -29,24 +27,24 @@ macro_rules! 
err_str { }; } -fn open_socket(path: PathBuf) -> Result { +async fn open_socket(path: PathBuf) -> Result { // First create all directories to the file let dir = path .parent() .ok_or(format!("socket {path:?} has no parent directory"))?; - err_str!(fs::create_dir_all(dir))?; + err_str!(fs::create_dir_all(dir).await)?; // Test if file exists - match fs::metadata(&path) { + match fs::metadata(&path).await { Ok(meta) => { if meta.file_type().is_dir() { Err(format!("socket {path:?} is already a directory")) } else { warn!("socket {path:?} already exists: is the daemon already running? deleting."); - err_str!(fs::remove_file(&path)) + err_str!(fs::remove_file(&path).await) } } Err(err) => err_str!(match err.kind() { - io::ErrorKind::NotFound => Ok(()), + std::io::ErrorKind::NotFound => Ok(()), _ => Err(err), }), }?; @@ -234,52 +232,55 @@ pub async fn socket_manager( socket: PathBuf, shared_state: HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, shutdown: ShutdownToken, -) { - let listener = match open_socket(socket.clone()) { +) -> Result<(), String> { + let listener = match open_socket(socket.clone()).await { Ok(l) => l, Err(err) => { - error!("while creating communication socket: {err}"); - exit(1); + return Err(format!("while creating communication socket: {err}")); } }; - loop { - tokio::select! 
{ - _ = shutdown.wait() => break, - try_conn = listener.accept() => { - match try_conn { - Ok((conn, _)) => { - let mut transport = Framed::new(conn, LengthDelimitedCodec::new()); - // Decode - let received = transport.next().await; - let encoded_request = match received { - Some(r) => or_next!("while reading request", r), - None => { - error!("failed to answer client: client sent no request"); - continue; - } - }; - let request = or_next!( - "failed to decode request", - serde_json::from_slice(&encoded_request) - ); - // Process - let response = answer_order(config, &shared_state, request); - // Encode - let encoded_response = - or_next!("failed to serialize response", serde_json::to_string::(&response)); - or_next!( - "failed to send response:", - transport.send(Bytes::from(encoded_response)).await - ); + tokio::spawn(async move { + loop { + tokio::select! { + _ = shutdown.wait() => break, + try_conn = listener.accept() => { + match try_conn { + Ok((conn, _)) => { + let mut transport = Framed::new(conn, LengthDelimitedCodec::new()); + // Decode + let received = transport.next().await; + let encoded_request = match received { + Some(r) => or_next!("while reading request", r), + None => { + error!("failed to answer client: client sent no request"); + continue; + } + }; + let request = or_next!( + "failed to decode request", + serde_json::from_slice(&encoded_request) + ); + // Process + let response = answer_order(config, &shared_state, request); + // Encode + let encoded_response = + or_next!("failed to serialize response", serde_json::to_string::(&response)); + or_next!( + "failed to send response:", + transport.send(Bytes::from(encoded_response)).await + ); + } + Err(err) => error!("failed to open connection from cli: {err}"), } - Err(err) => error!("failed to open connection from cli: {err}"), } } } - } - if let Err(err) = fs::remove_file(socket) { - error!("failed to remove socket: {}", err); - } + if let Err(err) = fs::remove_file(socket).await { + 
error!("failed to remove socket: {}", err); + } + }); + + Ok(()) } From 44e5757ae3c922f1e87d88618d893f8b52e6c6c4 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 14 Jul 2025 12:00:00 +0200 Subject: [PATCH 055/241] WIP pattern ip --- TODO | 5 +- src/concepts/pattern.rs | 13 ++++ src/concepts/pattern/ip.rs | 134 +++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+), 3 deletions(-) create mode 100644 src/concepts/pattern/ip.rs diff --git a/TODO b/TODO index 80e7509..2cd7f5d 100644 --- a/TODO +++ b/TODO @@ -2,7 +2,6 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) move match logging from concepts/filter to daemon/filter -test new treedb::helpers -test different duplicates modes test migration -document new option +stream: test regex ending with $ +test Filter::regex conformity after setup diff --git a/src/concepts/pattern.rs b/src/concepts/pattern.rs index 0dbd04f..7fbdaa8 100644 --- a/src/concepts/pattern.rs +++ b/src/concepts/pattern.rs @@ -3,6 +3,10 @@ use std::cmp::Ordering; use regex::{Regex, RegexSet}; use serde::{Deserialize, Serialize}; +mod ip; + +use ip::PatternIP; + #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] #[serde(deny_unknown_fields)] @@ -17,6 +21,9 @@ pub struct Pattern { #[serde(skip)] compiled_ignore_regex: RegexSet, + #[serde(flatten)] + ip: PatternIP, + #[serde(skip)] name: String, #[serde(skip)] @@ -54,6 +61,10 @@ impl Pattern { return Err("character '.' is not allowed in pattern name".into()); } + if let Some(regex) = self.ip.setup()? 
{ + self.regex = regex; + } + if self.regex.is_empty() { return Err("regex is empty".into()); } @@ -119,6 +130,7 @@ impl Pattern { pub fn is_ignore(&self, match_: &str) -> bool { self.ignore.iter().any(|ignore| ignore == match_) || self.compiled_ignore_regex.is_match(match_) + || self.ip.is_ignore(match_) } } @@ -172,6 +184,7 @@ pub mod tests { ignore: Vec::new(), ignore_regex: Vec::new(), compiled_ignore_regex: RegexSet::default(), + ip: PatternIP::default(), name: "".into(), name_with_braces: "".into(), } diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs new file mode 100644 index 0000000..74a9e70 --- /dev/null +++ b/src/concepts/pattern/ip.rs @@ -0,0 +1,134 @@ +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + ops::BitOr, + str::FromStr, +}; + +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] +pub enum PatternType { + #[default] + Regex, + IP, + IPv4, + IPv6, +} + +impl PatternType { + pub fn is_default(&self) -> bool { + *self == PatternType::default() + } + + pub fn regex(&self) -> Option { + match self { + PatternType::IPv4 => Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}"#.into()), + PatternType::IPv6 => Some(r#"(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])"#.into()), + 
PatternType::IP => Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"#.into()), + PatternType::Regex => None, + } + } +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct PatternIP { + #[serde( + default, + rename = "type", + skip_serializing_if = "PatternType::is_default" + )] + pattern_type: PatternType, + #[serde(default, rename = "ignorecidr", skip_serializing_if = "Vec::is_empty")] + ignore_cidr: Vec, + #[serde(skip)] + ignore_cidr_normalized: Vec, +} + +impl PatternIP { + pub fn setup(&mut self) -> Result, String> { + match self.pattern_type { + PatternType::IP | PatternType::IPv4 | PatternType::IPv6 => { + for cidr in &self.ignore_cidr { + let cidr_normalized = Cidr::from_str(cidr)?; + self.ignore_cidr_normalized.push(cidr_normalized); + } + self.ignore_cidr = Vec::default(); + } + PatternType::Regex => { + if !self.ignore_cidr.is_empty() { + return Err("ignorecidr is only allowed for patterns of `type: 'ip'`".into()); + } + } + } + return Ok(self.pattern_type.regex()); + } + + pub fn is_ignore(&self, match_: &str) -> bool { + // TODO + todo!() + } +} + +#[derive(Clone, Debug)] +pub enum Cidr { + IPv4((Ipv4Addr, Ipv4Addr)), + IPv6((Ipv6Addr, Ipv6Addr)), +} + +fn make_mask(mut 
mask_u32: u32) -> T { + let mask = 0; + while mask_u32 > 0 { + mask |= 1 << mask_u32; + mask_u32 -= 1; + } +} + +impl FromStr for Cidr { + type Err = String; + + fn from_str(cidr: &str) -> Result { + let (ip, mask) = cidr.split_once('/').ok_or(format!( + "malformed IP/MASK. '{cidr}' doesn't contain any '/'" + ))?; + let ip = IpAddr::from_str(ip) + .map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?; + let mut mask_u32 = u32::from_str(mask) + .map_err(|err| format!("malformed mask '{mask}' in '{cidr}': {err}"))?; + + let (ip_type, ip_bits) = match ip { + IpAddr::V4(_) => ("IPv4", 32), + IpAddr::V6(_) => ("IPv6", 128), + }; + + if mask_u32 > ip_bits { + return Err(format!( + "{ip_type} mask must be between 0 and {} inclusive. {mask_u32} is too big.", + ip_bits + )); + } + + match ip { + IpAddr::V4(ipv4_addr) => { + let mask = match mask_u32 { + 0 => 0u32, + n => !0u32 << (32 - n), + }; + let mask = Ipv4Addr::from_bits(mask); + Ok(Cidr::IPv4((ipv4_addr, mask))) + } + IpAddr::V6(ipv6_addr) => { + let mask = match mask_u32 { + 0 => 0u128, + n => !0u128 << (128 - n), + }; + let mask = Ipv6Addr::from_bits(mask); + Ok(Cidr::IPv6((ipv6_addr, mask))) + } + } + + // TODO normalize IP + } +} + +#[cfg(test)] +mod tests {} From 04b5dfd95b48909384307842a5055fda78268c6f Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 22 Jul 2025 12:00:00 +0200 Subject: [PATCH 056/241] ip: Add includes, tests, more setup constraints --- src/concepts/pattern/ip.rs | 191 +++++++++++++++++++++++++++++++++---- 1 file changed, 172 insertions(+), 19 deletions(-) diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs index 74a9e70..3a6c4aa 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip.rs @@ -1,10 +1,10 @@ use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr}, - ops::BitOr, str::FromStr, }; use serde::{Deserialize, Serialize}; +use tracing::warn; #[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum PatternType { @@ -50,6 +50,22 @@ impl 
PatternIP { PatternType::IP | PatternType::IPv4 | PatternType::IPv6 => { for cidr in &self.ignore_cidr { let cidr_normalized = Cidr::from_str(cidr)?; + if let PatternType::IPv4 = self.pattern_type { + if let Cidr::IPv6(_) = cidr_normalized { + return Err(format!( + "An IPv4-only pattern can't have an IPv6 ({}) as an ignore", + cidr + )); + } + } + if let PatternType::IPv6 = self.pattern_type { + if let Cidr::IPv4(_) = cidr_normalized { + return Err(format!( + "An IPv6-only pattern can't have an IPv4 ({}) as an ignore", + cidr + )); + } + } self.ignore_cidr_normalized.push(cidr_normalized); } self.ignore_cidr = Vec::default(); @@ -64,25 +80,22 @@ impl PatternIP { } pub fn is_ignore(&self, match_: &str) -> bool { - // TODO - todo!() + let match_ip = match IpAddr::from_str(match_) { + Ok(ip) => ip, + Err(_) => return false, + }; + self.ignore_cidr_normalized + .iter() + .all(|cidr| !cidr.includes(&match_ip)) } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum Cidr { IPv4((Ipv4Addr, Ipv4Addr)), IPv6((Ipv6Addr, Ipv6Addr)), } -fn make_mask(mut mask_u32: u32) -> T { - let mask = 0; - while mask_u32 > 0 { - mask |= 1 << mask_u32; - mask_u32 -= 1; - } -} - impl FromStr for Cidr { type Err = String; @@ -92,43 +105,183 @@ impl FromStr for Cidr { ))?; let ip = IpAddr::from_str(ip) .map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?; - let mut mask_u32 = u32::from_str(mask) + let mask_count = u32::from_str(mask) .map_err(|err| format!("malformed mask '{mask}' in '{cidr}': {err}"))?; + if mask_count < 2 { + return Err(format!("Can't have a network mask of 0 or 1. You're either ignoring all Internet or half of it.")); + } else if mask_count + < (match ip { + IpAddr::V4(_) => 8, + IpAddr::V6(_) => 16, + }) + { + warn!("With a mask of {mask_count}, you're ignoring a big part of Internet. 
Are you sure you want to do this?"); + } + let (ip_type, ip_bits) = match ip { IpAddr::V4(_) => ("IPv4", 32), IpAddr::V6(_) => ("IPv6", 128), }; - if mask_u32 > ip_bits { + if mask_count > ip_bits { return Err(format!( - "{ip_type} mask must be between 0 and {} inclusive. {mask_u32} is too big.", + "{ip_type} mask must be between 0 and {} inclusive. {mask_count} is too big.", ip_bits )); } match ip { IpAddr::V4(ipv4_addr) => { - let mask = match mask_u32 { + // Create bitmask + let mask = match mask_count { 0 => 0u32, n => !0u32 << (32 - n), }; let mask = Ipv4Addr::from_bits(mask); + // Normalize IP from mask + let ipv4_addr = ipv4_addr & mask; + Ok(Cidr::IPv4((ipv4_addr, mask))) } IpAddr::V6(ipv6_addr) => { - let mask = match mask_u32 { + // Create bitmask + let mask = match mask_count { 0 => 0u128, n => !0u128 << (128 - n), }; let mask = Ipv6Addr::from_bits(mask); + // Normalize IP from mask + let ipv6_addr = ipv6_addr & mask; + Ok(Cidr::IPv6((ipv6_addr, mask))) } } + } +} - // TODO normalize IP +impl Cidr { + fn includes(&self, ip: &IpAddr) -> bool { + match self { + Cidr::IPv4((network_ipv4, mask)) => match ip { + IpAddr::V6(_) => false, + IpAddr::V4(ipv4_addr) => *network_ipv4 == ipv4_addr & mask, + }, + Cidr::IPv6((network_ipv6, mask)) => match ip { + IpAddr::V4(_) => false, + IpAddr::V6(ipv6_addr) => *network_ipv6 == ipv6_addr & mask, + }, + } } } #[cfg(test)] -mod tests {} +mod tests { + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + str::FromStr, + }; + + use super::Cidr; + + #[test] + fn cidrv4_from_str() { + assert_eq!( + Ok(Cidr::IPv4((Ipv4Addr::new(192, 168, 1, 4), u32::MAX.into()))), + Cidr::from_str("192.168.1.4/32") + ); + // Test IP normalization from mask + assert_eq!( + Ok(Cidr::IPv4(( + Ipv4Addr::new(192, 168, 1, 0), + Ipv4Addr::new(255, 255, 255, 0), + ))), + Cidr::from_str("192.168.1.4/24") + ); + // Another ok-test "pour la route" + assert_eq!( + Ok(Cidr::IPv4(( + Ipv4Addr::new(1, 1, 0, 0), + Ipv4Addr::new(255, 255, 0, 0), + ))), + 
Cidr::from_str("1.1.248.25/16") + ); + // Errors + assert!(Cidr::from_str("256.1.1.1/8").is_err()); + assert!(Cidr::from_str("1.1.1.1/0").is_err()); + assert!(Cidr::from_str("1.1.1.1/1").is_err()); + assert!(Cidr::from_str("1.1.1.1.1").is_err()); + assert!(Cidr::from_str("1.1.1.1/16/16").is_err()); + } + + #[test] + fn cidrv6_from_str() { + assert_eq!( + Ok(Cidr::IPv6(( + Ipv6Addr::new(0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68), + u128::MAX.into() + ))), + Cidr::from_str("fe80::df68:2ee:e4f9:e68/128") + ); + // Test IP normalization from mask + assert_eq!( + Ok(Cidr::IPv6(( + Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9de5, 0, 0, 0, 0), + Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0), + ))), + Cidr::from_str("2001:db8:85a3:9de5::8a2e:370:7334/64") + ); + // Another ok-test "pour la route" + assert_eq!( + Ok(Cidr::IPv6(( + Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9d00, 0, 0, 0, 0), + Ipv6Addr::new( + u16::MAX, + u16::MAX, + u16::MAX, + u16::MAX - u8::MAX as u16, + 0, + 0, + 0, + 0 + ), + ))), + Cidr::from_str("2001:db8:85a3:9d00::8a2e:370:7334/56") + ); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/56").is_ok()); + assert!(Cidr::from_str("2001:DB8:85A3:0:0:8A2E:370:7334/56").is_ok()); + // Errors + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:g334/56").is_err()); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/0").is_err()); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/1").is_err()); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334:11/56").is_err()); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/11/56").is_err()); + } + + #[test] + fn cidrv4_includes() { + let cidr = Cidr::from_str("192.168.1.0/24").unwrap(); + assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); + assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)))); + assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 234)))); + assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1)))); 
+ assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68 + ),))); + } + + #[test] + fn cidrv6_includes() { + let cidr = Cidr::from_str("2001:db8:85a3:9d00:0:8a2e:370:7334/56").unwrap(); + assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0x2001, 0x0db8, 0x85a3, 0x9d00, 0, 0, 0, 0 + )))); + assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0x2001, 0x0db8, 0x85a3, 0x9da4, 0x34fc, 0x0d8b, 0xffff, 0x1111 + )))); + assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0x2001, 0x0db8, 0x85a3, 0xad00, 0, 0, 0, 1 + )))); + assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); + } +} From a5f616e295367205435cae2b7ee86c3cc3748707 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 23 Jul 2025 12:00:00 +0200 Subject: [PATCH 057/241] WIP pattern ip add ipv{4,6}mask factorize redundant code in util functions normalize match most tests done --- src/concepts/filter.rs | 10 +- src/concepts/pattern.rs | 13 +- src/concepts/pattern/ip.rs | 558 +++++++++++++++++++++++++++++++------ 3 files changed, 494 insertions(+), 87 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 99d83c6..052dc1b 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -231,7 +231,9 @@ impl Filter { // there may be no captured group for it. if let Some(match_) = matches.name(pattern.name()) { if !pattern.is_ignore(match_.as_str()) { - result.push(match_.as_str().to_string()); + let mut match_ = match_.as_str().to_string(); + pattern.normalize(&mut match_); + result.push(match_); } } } @@ -252,7 +254,7 @@ impl Filter { /// Then returns a corresponding [`Match`]. 
pub fn get_match_from_patterns( &self, - patterns: BTreeMap, String>, + mut patterns: BTreeMap, String>, ) -> Result { // Check pattern length if patterns.len() != self.patterns().len() { @@ -270,7 +272,7 @@ impl Filter { )); } - for (pattern, match_) in &patterns { + for (pattern, match_) in &mut patterns { if self.patterns.get(pattern).is_none() { return Err(format!( "pattern {} is not present in the filter {}.{}", @@ -295,6 +297,8 @@ impl Filter { pattern.name(), )); } + + pattern.normalize(match_); } for pattern in self.patterns.iter() { diff --git a/src/concepts/pattern.rs b/src/concepts/pattern.rs index 7fbdaa8..dc3c34f 100644 --- a/src/concepts/pattern.rs +++ b/src/concepts/pattern.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; mod ip; -use ip::PatternIP; +use ip::PatternIp; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] @@ -22,7 +22,7 @@ pub struct Pattern { compiled_ignore_regex: RegexSet, #[serde(flatten)] - ip: PatternIP, + ip: PatternIp, #[serde(skip)] name: String, @@ -110,6 +110,13 @@ impl Pattern { Regex::new(&format!("^{}$", self.regex)).map_err(|err| err.to_string()) } + /// Normalize the pattern. + /// No-op when the pattern is not an IP. + /// Otherwise BitAnd the IP with its configured mask. + pub fn normalize(&self, match_: &mut String) { + self.ip.normalize(match_) + } + /// Whether the provided string is a match for this pattern or not. 
/// /// Doesn't take into account ignore and ignore_regex: @@ -184,7 +191,7 @@ pub mod tests { ignore: Vec::new(), ignore_regex: Vec::new(), compiled_ignore_regex: RegexSet::default(), - ip: PatternIP::default(), + ip: PatternIp::default(), name: "".into(), name_with_braces: "".into(), } diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs index 3a6c4aa..da8bec7 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip.rs @@ -1,18 +1,19 @@ use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr}, + fmt::Display, + net::{AddrParseError, IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, }; use serde::{Deserialize, Serialize}; use tracing::warn; -#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum PatternType { #[default] Regex, - IP, - IPv4, - IPv6, + Ip, + Ipv4, + Ipv6, } impl PatternType { @@ -22,63 +23,115 @@ impl PatternType { pub fn regex(&self) -> Option { match self { - PatternType::IPv4 => Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}"#.into()), - PatternType::IPv6 => Some(r#"(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])"#.into()), - PatternType::IP => 
Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"#.into()), + PatternType::Ipv4 => Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}"#.into()), + PatternType::Ipv6 => Some(r#"(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])"#.into()), + PatternType::Ip => 
Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"#.into()), PatternType::Regex => None, } } } -#[derive(Clone, Debug, Default, Deserialize, Serialize)] -pub struct PatternIP { +#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] +pub struct PatternIp { #[serde( default, rename = "type", skip_serializing_if = "PatternType::is_default" )] pattern_type: PatternType, + + #[serde(default, rename = "ipv4mask")] + ipv4_mask: Option, + #[serde(default, rename = "ipv6mask")] + ipv6_mask: Option, + #[serde(skip)] + ipv4_bitmask: Option, + #[serde(skip)] + ipv6_bitmask: Option, + #[serde(default, rename = "ignorecidr", skip_serializing_if = "Vec::is_empty")] ignore_cidr: Vec, #[serde(skip)] ignore_cidr_normalized: Vec, } -impl PatternIP { +impl PatternIp { + /// Setup the IP-specific part of a Pattern. 
+ /// Returns an optional regex string if of type IP, else None + /// Returns an error if one of: + /// - the type is not IP but there is IP-specific config + /// - the type is IP/IPv4/IPv6 and there is invalid IP-specific config + /// - the type is IPv4 and there is IPv6-specific config + /// - the type is IPv6 and there is IPv4-specific config pub fn setup(&mut self) -> Result, String> { match self.pattern_type { - PatternType::IP | PatternType::IPv4 | PatternType::IPv6 => { - for cidr in &self.ignore_cidr { - let cidr_normalized = Cidr::from_str(cidr)?; - if let PatternType::IPv4 = self.pattern_type { - if let Cidr::IPv6(_) = cidr_normalized { - return Err(format!( - "An IPv4-only pattern can't have an IPv6 ({}) as an ignore", - cidr - )); - } - } - if let PatternType::IPv6 = self.pattern_type { - if let Cidr::IPv4(_) = cidr_normalized { - return Err(format!( - "An IPv6-only pattern can't have an IPv4 ({}) as an ignore", - cidr - )); - } - } - self.ignore_cidr_normalized.push(cidr_normalized); - } - self.ignore_cidr = Vec::default(); - } PatternType::Regex => { + if self.ipv4_mask.is_some() { + return Err("ipv4mask is only allowed for patterns of `type: 'ip'`".into()); + } + if self.ipv6_mask.is_some() { + return Err("ipv6mask is only allowed for patterns of `type: 'ip'`".into()); + } if !self.ignore_cidr.is_empty() { return Err("ignorecidr is only allowed for patterns of `type: 'ip'`".into()); } } + + PatternType::Ip | PatternType::Ipv4 | PatternType::Ipv6 => { + if let Some(mask) = self.ipv4_mask { + self.ipv4_bitmask = Some(mask_to_ipv4(mask)?); + } + if let Some(mask) = self.ipv6_mask { + self.ipv6_bitmask = Some(mask_to_ipv6(mask)?); + } + + for cidr in &self.ignore_cidr { + let cidr_normalized = Cidr::from_str(cidr)?; + let cidr_normalized_string = cidr_normalized.to_string(); + if &cidr_normalized_string != cidr { + warn!("CIDR {cidr} should be rewritten in its normalized form: {cidr_normalized_string}"); + } + 
self.ignore_cidr_normalized.push(cidr_normalized); + } + self.ignore_cidr = Vec::default(); + + match self.pattern_type { + PatternType::Regex => (), + PatternType::Ip => (), + PatternType::Ipv4 => { + if self.ipv6_mask.is_some() { + return Err("An IPv4-only pattern can't have an ipv6mask".into()); + } + for cidr in &self.ignore_cidr_normalized { + if let Cidr::IPv6(_) = cidr { + return Err(format!( + "An IPv4-only pattern can't have an IPv6 ({}) as an ignore", + cidr + )); + } + } + } + + PatternType::Ipv6 => { + if self.ipv4_mask.is_some() { + return Err("An IPv6-only pattern can't have an ipv4mask".into()); + } + for cidr in &self.ignore_cidr_normalized { + if let Cidr::IPv4(_) = cidr { + return Err(format!( + "An IPv6-only pattern can't have an IPv4 ({}) as an ignore", + cidr + )); + } + } + } + } + } } - return Ok(self.pattern_type.regex()); + Ok(self.pattern_type.regex()) } + /// Whether the IP match is included in one of [`Self::ignore_cidr`] pub fn is_ignore(&self, match_: &str) -> bool { let match_ip = match IpAddr::from_str(match_) { Ok(ip) => ip, @@ -88,6 +141,314 @@ impl PatternIP { .iter() .all(|cidr| !cidr.includes(&match_ip)) } + + /// Normalize the pattern. + /// No-op when the pattern is not an IP. 
+ /// Otherwise BitAnd the IP with its configured mask, + /// and add the / + pub fn normalize(&self, match_: &mut String) { + let ip = match self.pattern_type { + PatternType::Regex => None, + // Attempt to normalize only if type is IP* + _ => normalize(match_) + .ok() + .and_then(|ip| match self.pattern_type { + PatternType::Ip => Some(ip), + PatternType::Ipv4 => match ip { + IpAddr::V4(_) => Some(ip), + _ => None, + }, + PatternType::Ipv6 => match ip { + IpAddr::V6(_) => Some(ip), + _ => None, + }, + _ => None, + }), + }; + if let Some(ip) = ip { + *match_ = match ip { + IpAddr::V4(addr) => match self.ipv4_bitmask { + Some(bitmask) => { + format!("{}/{}", addr & bitmask, self.ipv4_mask.unwrap_or(32)) + } + None => addr.to_string(), + }, + IpAddr::V6(addr) => match self.ipv6_bitmask { + Some(bitmask) => { + format!("{}/{}", addr & bitmask, self.ipv6_mask.unwrap_or(128)) + } + None => addr.to_string(), + }, + }; + } + } +} + +#[cfg(test)] +mod patternip_tests { + use std::net::{Ipv4Addr, Ipv6Addr}; + + use crate::concepts::pattern::ip::Cidr; + + use super::{PatternIp, PatternType}; + + #[test] + fn test_setup_type_regex() { + let mut regex_struct = PatternIp { + pattern_type: PatternType::Regex, + ..Default::default() + }; + let copy = regex_struct.clone(); + // All default patterns is ok for regex type + assert!(regex_struct.setup().is_ok()); + // Setup changes nothing + assert_eq!(regex_struct, copy); + + // Any non-default field is err + + let mut regex_struct = PatternIp { + pattern_type: PatternType::Regex, + ipv4_mask: Some(24), + ..Default::default() + }; + assert!(regex_struct.setup().is_err()); + + let mut regex_struct = PatternIp { + pattern_type: PatternType::Regex, + ipv6_mask: Some(64), + ..Default::default() + }; + assert!(regex_struct.setup().is_err()); + + let mut regex_struct = PatternIp { + pattern_type: PatternType::Regex, + ignore_cidr: vec!["192.168.1/24".into()], + ..Default::default() + }; + assert!(regex_struct.setup().is_err()); + } + + 
#[test] + fn test_setup_type_ip() { + for pattern_type in [PatternType::Ip, PatternType::Ipv4, PatternType::Ipv6] { + let mut ip_struct = PatternIp { + pattern_type, + ..Default::default() + }; + assert!(ip_struct.setup().is_ok()); + + let mut ip_struct = PatternIp { + pattern_type, + ipv4_mask: Some(24), + ..Default::default() + }; + match pattern_type { + PatternType::Ipv6 => assert!(ip_struct.setup().is_err()), + _ => { + assert!(ip_struct.setup().is_ok()); + assert_eq!( + ip_struct.ipv4_bitmask, + Some(Ipv4Addr::new(255, 255, 255, 0)) + ); + } + } + + let mut ip_struct = PatternIp { + pattern_type, + ipv6_mask: Some(64), + ..Default::default() + }; + match pattern_type { + PatternType::Ipv4 => assert!(ip_struct.setup().is_err()), + _ => { + assert!(ip_struct.setup().is_ok()); + assert_eq!( + ip_struct.ipv6_bitmask, + Some(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0)) + ); + } + } + + let mut ip_struct = PatternIp { + pattern_type, + ignore_cidr: vec!["192.168.1.0/24".into()], + ..Default::default() + }; + match pattern_type { + PatternType::Ipv6 => assert!(ip_struct.setup().is_err()), + _ => { + assert!(ip_struct.setup().is_ok()); + assert_eq!( + ip_struct.ignore_cidr_normalized, + vec![Cidr::IPv4(( + Ipv4Addr::new(192, 168, 1, 0), + Ipv4Addr::new(255, 255, 255, 0) + ))] + ); + } + } + + let mut ip_struct = PatternIp { + pattern_type, + ignore_cidr: vec!["::ffff:192.168.1.0/24".into()], + ..Default::default() + }; + match pattern_type { + PatternType::Ipv6 => assert!(ip_struct.setup().is_err()), + _ => { + assert!(ip_struct.setup().is_ok()); + assert_eq!( + ip_struct.ignore_cidr_normalized, + vec![Cidr::IPv4(( + Ipv4Addr::new(192, 168, 1, 0), + Ipv4Addr::new(255, 255, 255, 0) + ))] + ); + } + } + + let mut ip_struct = PatternIp { + pattern_type, + ignore_cidr: vec!["2001:db8:85a3:9de5::8a2e:370:7334/64".into()], + ..Default::default() + }; + match pattern_type { + PatternType::Ipv4 => assert!(ip_struct.setup().is_err()), + _ => { + 
assert!(ip_struct.setup().is_ok()); + assert_eq!( + ip_struct.ignore_cidr_normalized, + vec![Cidr::IPv6(( + Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9de5, 0, 0, 0, 0), + Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0), + ))] + ); + } + } + } + } + + #[test] + fn test_is_ignore() { + // TODO + } + + #[test] + fn test_normalize() { + // TODO + } +} + +/// Normalize a string as an IP address. +/// IPv6-mapped IPv4 addresses are casted to IPv4. +fn normalize(ip: &str) -> Result { + IpAddr::from_str(ip).map(normalize_ip) +} + +/// Normalize a string as an IP address. +/// IPv6-mapped IPv4 addresses are casted to IPv4. +fn normalize_ip(ip: IpAddr) -> IpAddr { + match ip { + IpAddr::V4(_) => ip, + IpAddr::V6(ipv6) => match ipv6.to_ipv4_mapped() { + Some(ipv4) => IpAddr::V4(ipv4), + None => ip, + }, + } +} + +/// Creates an [`Ipv4Addr`] from a mask +fn mask_to_ipv4(mask_count: u8) -> Result { + if mask_count > 32 { + Err(format!( + "an IPv4 mask must be 32 max. {mask_count} is too big." + )) + } else { + let mask = match mask_count { + 0 => 0u32, + n => u32::MAX << (32 - n), + }; + let mask = Ipv4Addr::from_bits(mask); + Ok(mask) + } +} + +/// Creates an [`Ipv6Addr`] from a mask +fn mask_to_ipv6(mask_count: u8) -> Result { + if mask_count > 128 { + Err(format!( + "an IPv6 mask must be 128 max. {mask_count} is too big." 
+ )) + } else { + let mask = match mask_count { + 0 => 0u128, + n => u128::MAX << (128 - n), + }; + let mask = Ipv6Addr::from_bits(mask); + Ok(mask) + } +} + +#[cfg(test)] +mod utils_tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + + use super::{mask_to_ipv4, mask_to_ipv6, normalize}; + + #[test] + fn test_normalize_ip() { + assert_eq!( + normalize("83.44.23.14"), + Ok(IpAddr::V4(Ipv4Addr::new(83, 44, 23, 14))) + ); + assert_eq!( + normalize("2001:db8:85a3::8a2e:370:7334"), + Ok(IpAddr::V6(Ipv6Addr::new( + 0x2001, 0xdb8, 0x85a3, 0x0, 0x0, 0x8a2e, 0x370, 0x7334 + ))) + ); + assert_eq!( + normalize("::ffff:192.168.1.34"), + Ok(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 34))) + ); + } + + #[test] + fn test_mask_to_ipv4() { + assert!(mask_to_ipv4(33).is_err()); + assert!(mask_to_ipv4(100).is_err()); + assert_eq!(mask_to_ipv4(16), Ok(Ipv4Addr::new(255, 255, 0, 0))); + assert_eq!(mask_to_ipv4(24), Ok(Ipv4Addr::new(255, 255, 255, 0))); + assert_eq!(mask_to_ipv4(25), Ok(Ipv4Addr::new(255, 255, 255, 128))); + assert_eq!(mask_to_ipv4(26), Ok(Ipv4Addr::new(255, 255, 255, 192))); + assert_eq!(mask_to_ipv4(32), Ok(Ipv4Addr::new(255, 255, 255, 255))); + } + + #[test] + fn test_mask_to_ipv6() { + assert!(mask_to_ipv6(129).is_err()); + assert!(mask_to_ipv6(254).is_err()); + assert_eq!( + mask_to_ipv6(56), + Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xff00, 0, 0, 0, 0)) + ); + assert_eq!( + mask_to_ipv6(64), + Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0)) + ); + assert_eq!( + mask_to_ipv6(112), + Ok(Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 + )) + ); + assert_eq!( + mask_to_ipv6(128), + Ok(Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff + )) + ); + } } #[derive(Clone, Debug, PartialEq, Eq)] @@ -103,65 +464,57 @@ impl FromStr for Cidr { let (ip, mask) = cidr.split_once('/').ok_or(format!( "malformed IP/MASK. 
'{cidr}' doesn't contain any '/'" ))?; - let ip = IpAddr::from_str(ip) - .map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?; - let mask_count = u32::from_str(mask) + let ip = normalize(ip).map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?; + let mask_count = u8::from_str(mask) .map_err(|err| format!("malformed mask '{mask}' in '{cidr}': {err}"))?; - if mask_count < 2 { - return Err(format!("Can't have a network mask of 0 or 1. You're either ignoring all Internet or half of it.")); - } else if mask_count - < (match ip { - IpAddr::V4(_) => 8, - IpAddr::V6(_) => 16, - }) - { - warn!("With a mask of {mask_count}, you're ignoring a big part of Internet. Are you sure you want to do this?"); - } + // Let's accept any mask size for now, as useless as it may seem + // if mask_count < 2 { + // return Err("Can't have a network mask of 0 or 1. You're either ignoring all Internet or half of it.".into()); + // } else if mask_count + // < (match ip { + // IpAddr::V4(_) => 8, + // IpAddr::V6(_) => 16, + // }) + // { + // warn!("With a mask of {mask_count}, you're ignoring a big part of Internet. Are you sure you want to do this?"); + // } - let (ip_type, ip_bits) = match ip { - IpAddr::V4(_) => ("IPv4", 32), - IpAddr::V6(_) => ("IPv6", 128), - }; + Self::from_ip_and_mask(ip, mask_count) + } +} - if mask_count > ip_bits { - return Err(format!( - "{ip_type} mask must be between 0 and {} inclusive. 
{mask_count} is too big.", - ip_bits - )); - } +impl Display for Cidr { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", self.network(), self.mask()) + } +} +impl Cidr { + fn from_ip_and_mask(ip: IpAddr, mask_count: u8) -> Result { match ip { - IpAddr::V4(ipv4_addr) => { + IpAddr::V4(mut ipv4_addr) => { // Create bitmask - let mask = match mask_count { - 0 => 0u32, - n => !0u32 << (32 - n), - }; - let mask = Ipv4Addr::from_bits(mask); + let mask = mask_to_ipv4(mask_count)?; // Normalize IP from mask - let ipv4_addr = ipv4_addr & mask; + ipv4_addr &= mask; Ok(Cidr::IPv4((ipv4_addr, mask))) } - IpAddr::V6(ipv6_addr) => { - // Create bitmask - let mask = match mask_count { - 0 => 0u128, - n => !0u128 << (128 - n), - }; - let mask = Ipv6Addr::from_bits(mask); + IpAddr::V6(mut ipv6_addr) => { + let mask = mask_to_ipv6(mask_count)?; // Normalize IP from mask - let ipv6_addr = ipv6_addr & mask; + ipv6_addr &= mask; Ok(Cidr::IPv6((ipv6_addr, mask))) } } } -} -impl Cidr { + /// Whether an IP is included in this IP CIDR. + /// If IP is not the same version as CIDR, returns always false. 
fn includes(&self, ip: &IpAddr) -> bool { + let ip = normalize_ip(*ip); match self { Cidr::IPv4((network_ipv4, mask)) => match ip { IpAddr::V6(_) => false, @@ -173,10 +526,32 @@ impl Cidr { }, } } + + fn network(&self) -> IpAddr { + match self { + Cidr::IPv4((network, _)) => IpAddr::from(*network), + Cidr::IPv6((network, _)) => IpAddr::from(*network), + } + } + + fn mask(&self) -> u8 { + let mut raw_mask = match self { + Cidr::IPv4((_, mask)) => mask.to_bits() as u128, + Cidr::IPv6((_, mask)) => mask.to_bits(), + }; + let mut ret = 0; + for _ in 0..128 { + if raw_mask % 2 == 1 { + ret += 1; + } + raw_mask >>= 1; + } + ret + } } #[cfg(test)] -mod tests { +mod cidr_tests { use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, @@ -208,9 +583,9 @@ mod tests { ); // Errors assert!(Cidr::from_str("256.1.1.1/8").is_err()); - assert!(Cidr::from_str("1.1.1.1/0").is_err()); - assert!(Cidr::from_str("1.1.1.1/1").is_err()); - assert!(Cidr::from_str("1.1.1.1.1").is_err()); + // assert!(Cidr::from_str("1.1.1.1/0").is_err()); + // assert!(Cidr::from_str("1.1.1.1/1").is_err()); + // assert!(Cidr::from_str("1.1.1.1.1").is_err()); assert!(Cidr::from_str("1.1.1.1/16/16").is_err()); } @@ -252,8 +627,8 @@ mod tests { assert!(Cidr::from_str("2001:DB8:85A3:0:0:8A2E:370:7334/56").is_ok()); // Errors assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:g334/56").is_err()); - assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/0").is_err()); - assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/1").is_err()); + // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/0").is_err()); + // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/1").is_err()); assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334:11/56").is_err()); assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/11/56").is_err()); } @@ -284,4 +659,25 @@ mod tests { )))); assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); } + + #[test] + fn cidr_display() { + let cidrs = [ + 
("192.168.1.4/32", "192.168.1.4/32"), + ("192.168.1.4/24", "192.168.1.0/24"), + ("1.1.248.25/16", "1.1.0.0/16"), + ("fe80::df68:2ee:e4f9:e68/128", "fe80::df68:2ee:e4f9:e68/128"), + ( + "2001:db8:85a3:9de5::8a2e:370:7334/64", + "2001:db8:85a3:9de5::/64", + ), + ( + "2001:db8:85a3:9d00::8a2e:370:7334/56", + "2001:db8:85a3:9d00::/56", + ), + ]; + for (from, to) in cidrs { + assert_eq!(Cidr::from_str(from).unwrap().to_string(), to); + } + } } From 94b40c4a0b73aecabed9a8ec650e12ab0f7911d9 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 25 Jul 2025 12:00:00 +0200 Subject: [PATCH 058/241] Add more tests Done: Tests on PatternIp. Todo: Tests on Pattern. Fixed a bug in is_ignore. Checked a new possible misconfiguration. --- src/concepts/pattern.rs | 7 ++- src/concepts/pattern/ip.rs | 105 +++++++++++++++++++++++++++++++++++-- 2 files changed, 108 insertions(+), 4 deletions(-) diff --git a/src/concepts/pattern.rs b/src/concepts/pattern.rs index dc3c34f..eb9a05a 100644 --- a/src/concepts/pattern.rs +++ b/src/concepts/pattern.rs @@ -62,6 +62,9 @@ impl Pattern { } if let Some(regex) = self.ip.setup()? { + if !self.regex.is_empty() { + return Err("patterns of type ip, ipv4, ipv6 have a built-in regex defined. you should not define it yourself".into()); + } self.regex = regex; } @@ -111,8 +114,10 @@ impl Pattern { } /// Normalize the pattern. + /// This should happen after checking on ignores. /// No-op when the pattern is not an IP. - /// Otherwise BitAnd the IP with its configured mask. 
+ /// Otherwise BitAnd the IP with its configured mask, + /// and add the / pub fn normalize(&self, match_: &mut String) { self.ip.normalize(match_) } diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs index da8bec7..1fde46f 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip.rs @@ -139,10 +139,11 @@ impl PatternIp { }; self.ignore_cidr_normalized .iter() - .all(|cidr| !cidr.includes(&match_ip)) + .any(|cidr| cidr.includes(&match_ip)) } /// Normalize the pattern. + /// This should happen after checking on ignores. /// No-op when the pattern is not an IP. /// Otherwise BitAnd the IP with its configured mask, /// and add the / @@ -330,12 +331,110 @@ mod patternip_tests { #[test] fn test_is_ignore() { - // TODO + let mut ip_struct = PatternIp { + pattern_type: PatternType::Ip, + ignore_cidr: vec!["10.0.0.0/8".into(), "2001:db8:85a3:9de5::/64".into()], + ..Default::default() + }; + ip_struct.setup().unwrap(); + assert!(!ip_struct.is_ignore("prout")); + assert!(!ip_struct.is_ignore("1.1.1.1")); + assert!(!ip_struct.is_ignore("11.1.1.1")); + assert!(!ip_struct.is_ignore("2001:db8:85a3:9de6::1")); + assert!(ip_struct.is_ignore("10.1.1.1")); + assert!(ip_struct.is_ignore("2001:db8:85a3:9de5::1")); } #[test] fn test_normalize() { - // TODO + let ipv4_32 = "1.1.1.1"; + let ipv4_32_norm = "1.1.1.1"; + let ipv4_24 = "1.1.1.0"; + let ipv4_24_norm = "1.1.1.0"; + let ipv4_24_mask = "1.1.1.0/24"; + let ipv6_128 = "2001:db8:85a3:9de5:0:0:01:02"; + let ipv6_128_norm = "2001:db8:85a3:9de5::1:2"; + let ipv6_64 = "2001:db8:85a3:9de5:0:0:0:0"; + let ipv6_64_norm = "2001:db8:85a3:9de5::"; + let ipv6_64_mask = "2001:db8:85a3:9de5::/64"; + + for (ipv4_mask, ipv6_mask) in [(Some(24), None), (None, Some(64)), (Some(24), Some(64))] { + let mut ip_struct = PatternIp { + pattern_type: PatternType::Ip, + ipv4_mask, + ipv6_mask, + ..Default::default() + }; + ip_struct.setup().unwrap(); + + let mut ipv4_32_modified = ipv4_32.to_string(); + let mut 
ipv4_24_modified = ipv4_24.to_string(); + let mut ipv6_128_modified = ipv6_128.to_string(); + let mut ipv6_64_modified = ipv6_64.to_string(); + + ip_struct.normalize(&mut ipv4_32_modified); + ip_struct.normalize(&mut ipv4_24_modified); + ip_struct.normalize(&mut ipv6_128_modified); + ip_struct.normalize(&mut ipv6_64_modified); + + match ipv4_mask { + Some(_) => { + // modified with mask + assert_eq!( + ipv4_32_modified, ipv4_24_mask, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + assert_eq!( + ipv4_24_modified, ipv4_24_mask, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + } + None => { + // only normaized + assert_eq!( + ipv4_32_modified, ipv4_32_norm, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + assert_eq!( + ipv4_24_modified, ipv4_24_norm, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + } + } + + match ipv6_mask { + Some(_) => { + // modified with mask + assert_eq!( + ipv6_128_modified, ipv6_64_mask, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + assert_eq!( + ipv6_64_modified, ipv6_64_mask, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + } + None => { + // only normaized + assert_eq!( + ipv6_128_modified, ipv6_128_norm, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + assert_eq!( + ipv6_64_modified, ipv6_64_norm, + "ipv4mask: {:?}, ipv6mask: {:?}", + ipv4_mask, ipv6_mask + ); + } + } + } } } From 43f8b6687033d4d3501bbc06d7698e459b1769a4 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 25 Jul 2025 12:00:00 +0200 Subject: [PATCH 059/241] Update config documentation --- config/example.jsonnet | 39 ++++++++++++++++++++++++++++++++------- config/example.yml | 33 +++++++++++++++++++++++++++------ 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/config/example.jsonnet b/config/example.jsonnet index ab06bd7..1a9fc7d 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -27,15 +27,40 @@ local banFor(time) = { // 
patterns are substitued in regexes. // when a filter performs an action, it replaces the found pattern patterns: { - ip: { + + name: { // reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax - // jsonnet's @'string' is for verbatim strings - // simple version: regex: @'(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})', - regex: @'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))', - ignore: ['127.0.0.1', '::1'], - // Patterns can be ignored based on regexes, it will try to match the whole string detected by the pattern - // ignoreregex: [@'10\.0\.[0-9]{1,3}\.[0-9]{1,3}'], + // common patterns have a 'regex' field + regex: '[a-z]+', + // patterns can ignore specific strings + ignore: ['cecilia'], + // patterns can also be ignored based on regexes, it will try to match the whole string detected by the pattern + ignoreregex: [ + // ignore names starting with 'jo' + 'jo.*', + ], }, + + ip: { + // patterns can have a special 'ip' type that matches both ipv4 and ipv6 + // or 'ipv4' or 'ipv6' to match only that ip version + type: 'ip', + ignore: ['127.0.0.1', '::1'], + // they can also ignore whole CIDR ranges of ip + ignorecidr: ['10.0.0.0/8'], + // last but not least, patterns of type ip, 
ipv4, ipv6 can also group their matched ips by mask + // ipv4mask: 30 + // this means that ipv6 matches will be converted to their network part. + ipv6mask: 64, + // for example,"2001:db8:85a3:9de5::8a2e:370:7334" will be converted to "2001:db8:85a3:9de5::/64". + }, + + // ipv4: { + // type: 'ipv4', + // ignore: ... + // ipv4mask: ... + // }, + }, // where the state (database) must be read diff --git a/config/example.yml b/config/example.yml index 0d5f9fd..4169ecc 100644 --- a/config/example.yml +++ b/config/example.yml @@ -28,16 +28,37 @@ concurrency: 0 # patterns are substitued in regexes. # when a filter performs an action, it replaces the found pattern patterns: - ip: + name: # reaction regex syntax is defined here: https://docs.rs/regex/latest/regex/#syntax - # simple version: regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})' - regex: '(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))' + # common patterns have a 'regex' field + regex: '[a-z]+' + # patterns can ignore specific strings + ignore: + - 'cecilia' + # patterns can also be ignored based on regexes, it will try to match the whole string detected by the pattern + ignoreregex: + # ignore names starting with 'jo' + - 'jo.*' + + ip: + # patterns 
can have a special 'ip' type that matches both ipv4 and ipv6 + # or 'ipv4' or 'ipv6' to match only that ip version + type: ip ignore: - 127.0.0.1 - ::1 - # Patterns can be ignored based on regexes, it will try to match the whole string detected by the pattern - # ignoreregex: - # - '10\.0\.[0-9]{1,3}\.[0-9]{1,3}' + # they can also ignore whole CIDR ranges of ip + ignorecidr: + - 10.0.0.0/8 + # last but not least, patterns of type ip, ipv4, ipv6 can also group their matched ips by mask + # ipv4mask: 30 + # this means that ipv6 matches will be converted to their network part. + ipv6mask: 64 + # for example,"2001:db8:85a3:9de5::8a2e:370:7334" will be converted to "2001:db8:85a3:9de5::/64". + + # ipv4: + # type: ipv4 + # ignore: ... # Those commands will be executed in order at start, before everything else start: From 6cde89cc4bf6b12ae5bb6ca74695fbe7fe4f1f36 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 25 Jul 2025 12:00:00 +0200 Subject: [PATCH 060/241] rename file --- src/concepts/{pattern.rs => pattern/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/concepts/{pattern.rs => pattern/mod.rs} (100%) diff --git a/src/concepts/pattern.rs b/src/concepts/pattern/mod.rs similarity index 100% rename from src/concepts/pattern.rs rename to src/concepts/pattern/mod.rs From 4f79b476aa44670f1d35ad899862cedfe60a1af4 Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 27 Jul 2025 12:00:00 +0200 Subject: [PATCH 061/241] Cut ip regexes in smaller blocks and add tests --- src/concepts/pattern/ip.rs | 85 +++++++++++++++++++++++++++++++++++--- 1 file changed, 80 insertions(+), 5 deletions(-) diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs index 1fde46f..13c65da 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip.rs @@ -22,10 +22,28 @@ impl PatternType { } pub fn regex(&self) -> Option { + let num4 = r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"#; + let num6 = r#"[0-9a-fA-F]{1,4}"#; + let ipv4 = 
format!(r#"{num4}(?:\.{num4}){{3}}"#); + let ipv6 = [ + format!(r#"(?:{num6}:){{7}}{num6}"#), + format!(r#"(?:{num6}:){{1,7}}:"#), + format!(r#"(?:{num6}:){{1,6}}:{num6}"#), + format!(r#"(?:{num6}:){{1,5}}(?::{num6}){{1,2}}"#), + format!(r#"(?:{num6}:){{1,4}}(?::{num6}){{1,3}}"#), + format!(r#"(?:{num6}:){{1,3}}(?::{num6}){{1,4}}"#), + format!(r#"(?:{num6}:){{1,2}}(?::{num6}){{1,5}}"#), + format!(r#"{num6}:(?:(?::{num6}){{1,6}})"#), + format!(r#":(?:(?::{num6}){{1,7}}|:)"#), + format!(r#"fe80:(?::[0-9a-fA-F]{{0,4}}){{0,4}}%[0-9a-zA-Z]+"#), + format!(r#"::(?:ffff(?::0{{1,4}})?:)?{ipv4}"#), + format!(r#"(?:{num6}:){{1,4}}:{ipv4}"#), + ] + .join("|"); match self { - PatternType::Ipv4 => Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}"#.into()), - PatternType::Ipv6 => Some(r#"(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])"#.into()), - PatternType::Ip => 
Some(r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"#.into()), + PatternType::Ipv4 => Some(ipv4), + PatternType::Ipv6 => Some(ipv6), + PatternType::Ip => Some(format!("{ipv4}|{ipv6}")), PatternType::Regex => None, } } @@ -189,7 +207,7 @@ impl PatternIp { mod patternip_tests { use std::net::{Ipv4Addr, Ipv6Addr}; - use crate::concepts::pattern::ip::Cidr; + use crate::concepts::{pattern::ip::Cidr, Pattern}; use super::{PatternIp, PatternType}; @@ -421,7 +439,7 @@ mod patternip_tests { ); } None => { - // only normaized + // only normalized assert_eq!( ipv6_128_modified, ipv6_128_norm, "ipv4mask: {:?}, ipv6mask: {:?}", @@ -436,6 +454,58 @@ mod patternip_tests { } } } + + #[test] + fn test_ip_regexes() { + for pattern_type in [PatternType::Ip, PatternType::Ipv4, PatternType::Ipv6] { + let mut pattern = Pattern { + ip: PatternIp { + pattern_type, + ..Default::default() + }, + ..Default::default() + }; + assert!(pattern.setup("zblorg").is_ok()); + let regex = pattern.compiled().unwrap(); + + let accepts_ipv4 = pattern_type == PatternType::Ip || pattern_type == PatternType::Ipv4; + let accepts_ipv6 = pattern_type == PatternType::Ip || pattern_type == PatternType::Ipv6; + + macro_rules! 
assert2 { + ($a:expr) => { + assert!($a, "PatternType: {pattern_type:?}"); + }; + } + + assert2!(accepts_ipv4 == regex.is_match("1.2.3.4")); + assert2!(accepts_ipv4 == regex.is_match("255.255.255.255")); + + assert2!(!regex.is_match(".1.2.3.4")); + assert2!(!regex.is_match(" 1.2.3.4")); + assert2!(!regex.is_match("1.2.3.4 ")); + assert2!(!regex.is_match("1.2. 3.4")); + assert2!(!regex.is_match("257.2.3.4")); + assert2!(!regex.is_match("1.2.3.4.5")); + assert2!(!regex.is_match("1.2..4")); + assert2!(!regex.is_match("1.2..3.4")); + + assert2!(accepts_ipv6 == regex.is_match("1:2:3:4:5:6:7:8")); + assert2!(accepts_ipv6 == regex.is_match("::")); + assert2!(accepts_ipv6 == regex.is_match("1::")); + assert2!(accepts_ipv6 == regex.is_match("::1")); + assert2!(accepts_ipv6 == regex.is_match("1:2::6:7:8")); + assert2!(accepts_ipv6 == regex.is_match("0123:4567:89:ab:cdef:AB:CD:EF")); + assert2!(accepts_ipv6 == regex.is_match("::ffff:1.2.3.4")); + assert2!(accepts_ipv6 == regex.is_match("ffff::1.2.3.4")); + + assert2!(!regex.is_match("1:")); + assert2!(!regex.is_match("1:::")); + assert2!(!regex.is_match("1:::2")); + assert2!(!regex.is_match("1:2:3:4:5:6:7:8:9")); + assert2!(!regex.is_match("1:23456:3:4:5:6:7:8")); + assert2!(!regex.is_match("1:2:3:4:5:6:7:8:")); + } + } } /// Normalize a string as an IP address. @@ -510,6 +580,10 @@ mod utils_tests { normalize("::ffff:192.168.1.34"), Ok(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 34))) ); + assert_eq!( + normalize("::ffff:1.2.3.4"), + Ok(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))) + ); } #[test] @@ -550,6 +624,7 @@ mod utils_tests { } } +/// Stores an IP and an associated mask. 
#[derive(Clone, Debug, PartialEq, Eq)] pub enum Cidr { IPv4((Ipv4Addr, Ipv4Addr)), From 421002442ee34f365824cf2d21bfd693ce4c761a Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 27 Jul 2025 12:00:00 +0200 Subject: [PATCH 062/241] Add ip tests on daemon::filter Fix PatternType deserialization Fix regex deserialization (now optional) Tests currently failing --- TODO | 3 ++ src/concepts/pattern/ip.rs | 4 ++ src/concepts/pattern/mod.rs | 3 +- src/daemon/filter/tests.rs | 89 +++++++++++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+), 1 deletion(-) diff --git a/TODO b/TODO index 2cd7f5d..9b3b9fa 100644 --- a/TODO +++ b/TODO @@ -5,3 +5,6 @@ move match logging from concepts/filter to daemon/filter test migration stream: test regex ending with $ test Filter::regex conformity after setup + +should an ipv6-mapped ipv4 match a pattern of type ipv6? +should it be normalized as ipv4 then? diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs index 13c65da..9f894df 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip.rs @@ -10,9 +10,13 @@ use tracing::warn; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum PatternType { #[default] + #[serde(rename = "regex")] Regex, + #[serde(rename = "ip")] Ip, + #[serde(rename = "ipv4")] Ipv4, + #[serde(rename = "ipv6")] Ipv6, } diff --git a/src/concepts/pattern/mod.rs b/src/concepts/pattern/mod.rs index eb9a05a..90e7cf1 100644 --- a/src/concepts/pattern/mod.rs +++ b/src/concepts/pattern/mod.rs @@ -11,6 +11,7 @@ use ip::PatternIp; #[cfg_attr(test, derive(Default))] #[serde(deny_unknown_fields)] pub struct Pattern { + #[serde(default)] pub regex: String, #[serde(default, skip_serializing_if = "Vec::is_empty")] @@ -103,7 +104,7 @@ impl Pattern { Ok(()) } - /// Returns the pattern's regex compiled standalone. 
+ /// Returns the pattern's regex compiled standalone, enclosed in ^ and $ /// It's not kept as a field of the [`Pattern`] struct /// because it's only used during setup and for the `trigger` manual command. /// diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index c0c8afa..7844e52 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -20,6 +20,7 @@ struct TestBed { pub _out_path: TempPath, pub out_file: String, pub az_patterns: Patterns, + pub ip_patterns: Patterns, } impl TestBed { @@ -28,10 +29,45 @@ impl TestBed { let out_file = _out_path.to_str().unwrap().to_string(); let az_patterns = Pattern::new_map("az", "[a-z]+").unwrap(); + let ip_patterns = [ + "type: ip", + " +type: ipv4 +ignorecidr: + - 192.168.1.0/24 +", + " +type: ipv4 +ipv4mask: 24 +ignorecidr: + - 192.168.1.0/24 +", + " +type: ipv6 +ignorecidr: + - fe80::/16 +", + " +type: ipv6 +ipv6mask: 64 +ignorecidr: + - 1::/120 +", + ] + .into_iter() + .map(serde_yaml::from_str::) + .map(Result::unwrap) + .zip(["ip", "ipv4", "ipv4_mask24", "ipv6", "ipv6_mask64"]) + .map(|(mut pat, name)| { + pat.setup(name).unwrap(); + (name.into(), pat.into()) + }) + .collect(); Self { _out_path, out_file, az_patterns, + ip_patterns, } } @@ -46,6 +82,7 @@ impl TestBed { _out_path: self._out_path, out_file: self.out_file, az_patterns: self.az_patterns, + ip_patterns: self.ip_patterns, now, manager: FilterManager::new( filter, @@ -67,6 +104,7 @@ struct TestBed2 { pub now: Time, pub manager: FilterManager, pub az_patterns: Patterns, + pub ip_patterns: Patterns, } impl TestBed2 { @@ -700,3 +738,54 @@ async fn multiple_triggers() { ); } } + +#[tokio::test] +async fn ip_pattern_matches() { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![Action::new( + vec!["sh", "-c", &format!("echo >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.ip_patterns, + )], + vec!["borned test", "unborned .*"], + None, + None, + "test", + "test", + 
Duplicate::Ignore, + &bed.ip_patterns, + ); + let bed = bed.part2(filter, Local::now(), None).await; + + let ips = [ + // IPv4 + ("83.4.92.35", "83.4.92.35"), + ("83.4.92.0", "83.4.92.0"), + // Normal IPv6 + ("1:2:3:4:5:6:7:08", "1:2:3:4:5:6:7:8"), + // IPv6 with :: + ("1:2:3:04:0:0:7:8", "1:2:3:4::7:8"), + ("1:2:3:4:0::", "1:2:3:4::"), + // IPv6-mapped IPv4 :: + ("ffff::1.2.3.4", "1.2.3.4"), + ]; + + for (ip, ip_normalized) in ips { + assert_eq!( + bed.manager + .handle_line(&format!("borned {ip} test"), Local::now()), + React::Trigger + ); + tokio::time::sleep(Duration::from_millis(50)).await; + assert_eq!( + read_to_string(&bed.out_file).unwrap().trim_end(), + ip_normalized + ); + tokio::fs::write(&bed.out_file, "").await.unwrap(); + } +} From 19e3b2bf983a268a45e9d1c3aab0432c2559102f Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 29 Jul 2025 12:00:00 +0200 Subject: [PATCH 063/241] Make IP regex much more robust and add tests IP will be correctly extracted in any regex line, even if it is surrounded by greedy catch-all: .*.* This what actually hard to do! --- src/concepts/pattern/ip.rs | 253 +++++++++++++++++++++++++++++++++---- src/daemon/filter/mod.rs | 2 +- src/daemon/filter/tests.rs | 66 ++-------- src/daemon/mod.rs | 4 + 4 files changed, 244 insertions(+), 81 deletions(-) diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip.rs index 9f894df..15a3319 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip.rs @@ -26,22 +26,90 @@ impl PatternType { } pub fn regex(&self) -> Option { - let num4 = r#"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"#; - let num6 = r#"[0-9a-fA-F]{1,4}"#; + // Those orders of preference are very important for + // patterns that have greedy catch-all regexes becore or after them, + // for example: "Failed password .*.*" + + let num4 = [ + // Order is important, first is preferred. 
+ + // first 25x + "(?:25[0-5]", + // then 2xx + "2[0-4][0-9]", + // then 1xx + "1[0-9][0-9]", + // then 0xx + "[0-9][0-9]", + // then 0x + "[0-9])", + ] + .join("|"); + + let numsix = "[0-9a-fA-F]{1,4}"; + let ipv4 = format!(r#"{num4}(?:\.{num4}){{3}}"#); + let ipv6 = [ - format!(r#"(?:{num6}:){{7}}{num6}"#), - format!(r#"(?:{num6}:){{1,7}}:"#), - format!(r#"(?:{num6}:){{1,6}}:{num6}"#), - format!(r#"(?:{num6}:){{1,5}}(?::{num6}){{1,2}}"#), - format!(r#"(?:{num6}:){{1,4}}(?::{num6}){{1,3}}"#), - format!(r#"(?:{num6}:){{1,3}}(?::{num6}){{1,4}}"#), - format!(r#"(?:{num6}:){{1,2}}(?::{num6}){{1,5}}"#), - format!(r#"{num6}:(?:(?::{num6}){{1,6}})"#), - format!(r#":(?:(?::{num6}){{1,7}}|:)"#), - format!(r#"fe80:(?::[0-9a-fA-F]{{0,4}}){{0,4}}%[0-9a-zA-Z]+"#), + // We're unrolling all possibilities, longer IPv6 first, + // to make it super-greedy, + // more than an eventual .* before or after , + // that would "eat" its first or last blocks. + + // Order is important, first is preferred. + + // We put IPv4-suffixed regexes first format!(r#"::(?:ffff(?::0{{1,4}})?:)?{ipv4}"#), - format!(r#"(?:{num6}:){{1,4}}:{ipv4}"#), + format!(r#"(?:{numsix}:){{1,4}}:{ipv4}"#), + // Then link-local addresses with interface name + format!(r#"fe80:(?::[0-9a-fA-F]{{0,4}}){{0,4}}%[0-9a-zA-Z]+"#), + // Full IPv6 + format!("(?:{numsix}:){{7}}{numsix}"), + // 1 block cut + format!("(?:{numsix}:){{7}}:"), + format!("(?:{numsix}:){{6}}:{numsix}"), + format!("(?:{numsix}:){{5}}(?::{numsix}){{2}}"), + format!("(?:{numsix}:){{4}}(?::{numsix}){{3}}"), + format!("(?:{numsix}:){{3}}(?::{numsix}){{4}}"), + format!("(?:{numsix}:){{2}}(?::{numsix}){{5}}"), + format!("{numsix}:(?:(?::{numsix}){{6}})"), + format!(":(?:(?::{numsix}){{7}})"), + // 2 blocks cut + format!("(?:{numsix}:){{6}}:"), + format!("(?:{numsix}:){{5}}:{numsix}"), + format!("(?:{numsix}:){{4}}(?::{numsix}){{2}}"), + format!("(?:{numsix}:){{3}}(?::{numsix}){{3}}"), + format!("(?:{numsix}:){{2}}(?::{numsix}){{4}}"), + 
format!("{numsix}:(?:(?::{numsix}){{5}})"), + format!(":(?:(?::{numsix}){{6}})"), + // 3 blocks cut + format!("(?:{numsix}:){{5}}:"), + format!("(?:{numsix}:){{4}}:{numsix}"), + format!("(?:{numsix}:){{3}}(?::{numsix}){{2}}"), + format!("(?:{numsix}:){{2}}(?::{numsix}){{3}}"), + format!("{numsix}:(?:(?::{numsix}){{4}})"), + format!(":(?:(?::{numsix}){{5}})"), + // 4 blocks cut + format!("(?:{numsix}:){{4}}:"), + format!("(?:{numsix}:){{3}}:{numsix}"), + format!("(?:{numsix}:){{2}}(?::{numsix}){{2}}"), + format!("{numsix}:(?:(?::{numsix}){{3}})"), + format!(":(?:(?::{numsix}){{4}})"), + // 5 blocks cut + format!("(?:{numsix}:){{3}}:"), + format!("(?:{numsix}:){{2}}:{numsix}"), + format!("{numsix}:(?:(?::{numsix}){{2}})"), + format!(":(?:(?::{numsix}){{3}})"), + // 6 blocks cut + format!("(?:{numsix}:){{2}}:"), + format!("{numsix}::{numsix}"), + format!(":(?:(?::{numsix}){{2}})"), + // 7 blocks cut + format!("{numsix}::"), + format!("::{numsix}"), + // special cuts + // 8 blocks cut + format!("::"), ] .join("|"); match self { @@ -211,7 +279,13 @@ impl PatternIp { mod patternip_tests { use std::net::{Ipv4Addr, Ipv6Addr}; - use crate::concepts::{pattern::ip::Cidr, Pattern}; + use chrono::Local; + use tokio::{fs::read_to_string, task::JoinSet}; + + use crate::{ + concepts::{pattern::ip::Cidr, Action, Duplicate, Filter, Pattern}, + daemon::{tests::TestBed, React}, + }; use super::{PatternIp, PatternType}; @@ -459,6 +533,73 @@ mod patternip_tests { } } + pub const VALID_IPV4: [&str; 8] = [ + "252.4.92.250", + "212.4.92.210", + "112.4.92.110", + "83.4.92.35", + "83.4.92.0", + "3.254.92.4", + "1.2.3.4", + "255.255.255.255", + ]; + + pub const VALID_IPV6: [&str; 42] = [ + // all accepted characters + "0123:4567:89:ab:cdef:AB:CD:EF", + // ipv6-mapped ipv4 + "::ffff:1.2.3.4", + "ffff::1.2.3.4", + // 8 blocks + "1111:2:3:4:5:6:7:8888", + // 7 blocks + "::2:3:4:5:6:7:8888", + "1111::3:4:5:6:7:8888", + "1111:2::4:5:6:7:8888", + "1111:2:3::5:6:7:8888", + "1111:2:3:4::6:7:8888", + 
"1111:2:3:4:5::7:8888", + "1111:2:3:4:5:6::8888", + "1111:2:3:4:5:6:7::", + // 6 blocks + "::3:4:5:6:7:8888", + "1111::4:5:6:7:8888", + "1111:2::5:6:7:8888", + "1111:2:3::6:7:8888", + "1111:2:3:4::7:8888", + "1111:2:3:4:5::8888", + "1111:2:3:4:5:6::", + // 5 blocks + "::4:5:6:7:8888", + "1111::5:6:7:8888", + "1111:2::6:7:8888", + "1111:2:3::7:8888", + "1111:2:3:4::8888", + "1111:2:3:4:5::", + // 4 blocks + "::5:6:7:8888", + "1111::6:7:8888", + "1111:2::7:8888", + "1111:2:3::8888", + "1111:2:3:4::", + // 3 blocks + "::6:7:8888", + "1111::7:8888", + "1111:2::8888", + "1111:2:3::", + // 2 blocks + "::7:8888", + "1111::8888", + "1111:2::", + // 1 block + "::8", + "::8888", + "1::", + "1111::", + // 0 block + "::", + ]; + #[test] fn test_ip_regexes() { for pattern_type in [PatternType::Ip, PatternType::Ipv4, PatternType::Ipv6] { @@ -481,8 +622,9 @@ mod patternip_tests { }; } - assert2!(accepts_ipv4 == regex.is_match("1.2.3.4")); - assert2!(accepts_ipv4 == regex.is_match("255.255.255.255")); + for ip in VALID_IPV4 { + assert2!(accepts_ipv4 == regex.is_match(ip)); + } assert2!(!regex.is_match(".1.2.3.4")); assert2!(!regex.is_match(" 1.2.3.4")); @@ -493,14 +635,9 @@ mod patternip_tests { assert2!(!regex.is_match("1.2..4")); assert2!(!regex.is_match("1.2..3.4")); - assert2!(accepts_ipv6 == regex.is_match("1:2:3:4:5:6:7:8")); - assert2!(accepts_ipv6 == regex.is_match("::")); - assert2!(accepts_ipv6 == regex.is_match("1::")); - assert2!(accepts_ipv6 == regex.is_match("::1")); - assert2!(accepts_ipv6 == regex.is_match("1:2::6:7:8")); - assert2!(accepts_ipv6 == regex.is_match("0123:4567:89:ab:cdef:AB:CD:EF")); - assert2!(accepts_ipv6 == regex.is_match("::ffff:1.2.3.4")); - assert2!(accepts_ipv6 == regex.is_match("ffff::1.2.3.4")); + for ip in VALID_IPV6 { + assert2!(accepts_ipv6 == regex.is_match(ip)); + } assert2!(!regex.is_match("1:")); assert2!(!regex.is_match("1:::")); @@ -510,6 +647,74 @@ mod patternip_tests { assert2!(!regex.is_match("1:2:3:4:5:6:7:8:")); } } + + 
#[tokio::test(flavor = "multi_thread")] + async fn ip_pattern_matches() { + let mut join_set = JoinSet::new(); + + for ip in VALID_IPV4.iter().chain(&VALID_IPV6) { + for line in [ + format!("borned {ip} test"), + // + format!("right-unborned {ip} text"), + format!("right-unborned {ip}text"), + format!("right-unborned {ip}:"), + // + format!("left-unborned text {ip}"), + format!("left-unborned text{ip}"), + format!("left-unborned :{ip}"), + // + format!("full-unborned text {ip} text"), + format!("full-unborned text{ip} text"), + format!("full-unborned text {ip}text"), + format!("full-unborned text{ip}text"), + format!("full-unborned :{ip}:"), + format!("full-unborned : {ip}:"), + ] { + join_set.spawn(tokio::spawn(async move { + let bed = TestBed::new(); + let filter = Filter::new_static( + vec![Action::new( + vec!["sh", "-c", &format!("echo >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.ip_patterns, + )], + vec![ + "^borned test", + "^right-unborned .*", + "^left-unborned .*", + "^full-unborned .*.*", + ], + None, + None, + "test", + "test", + Duplicate::Ignore, + &bed.ip_patterns, + ); + let bed = bed.part2(filter, Local::now(), None).await; + assert_eq!( + bed.manager.handle_line(&line, Local::now()), + React::Trigger, + "line: {line}" + ); + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + assert_eq!( + &read_to_string(&bed.out_file).await.unwrap().trim_end(), + ip, + "line: {line}" + ); + println!("line ok: {line}"); + })); + } + } + + join_set.join_all().await; + } } /// Normalize a string as an IP address. 
diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index e57d751..3db01ba 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -1,5 +1,5 @@ #[cfg(test)] -mod tests; +pub mod tests; mod state; diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 7844e52..08127b9 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -16,7 +16,7 @@ use crate::{ tests::TempDatabase, }; -struct TestBed { +pub struct TestBed { pub _out_path: TempPath, pub out_file: String, pub az_patterns: Patterns, @@ -24,7 +24,7 @@ struct TestBed { } impl TestBed { - fn new() -> Self { + pub fn new() -> Self { let _out_path = tempfile::NamedTempFile::new().unwrap().into_temp_path(); let out_file = _out_path.to_str().unwrap().to_string(); @@ -71,7 +71,12 @@ ignorecidr: } } - async fn part2(self, filter: &'static Filter, now: Time, db: Option) -> TestBed2 { + pub async fn part2( + self, + filter: &'static Filter, + now: Time, + db: Option, + ) -> TestBed2 { let mut db = match db { Some(db) => db, None => TempDatabase::default().await, @@ -97,7 +102,7 @@ ignorecidr: } } -struct TestBed2 { +pub struct TestBed2 { pub _out_path: TempPath, pub out_file: String, pub semaphore: Arc, @@ -108,7 +113,7 @@ struct TestBed2 { } impl TestBed2 { - fn assert_empty_trees(&self) { + pub fn assert_empty_trees(&self) { let state = self.manager.state.lock().unwrap(); assert!(state.matches.is_empty(), "matches must be empty"); assert!( @@ -738,54 +743,3 @@ async fn multiple_triggers() { ); } } - -#[tokio::test] -async fn ip_pattern_matches() { - let bed = TestBed::new(); - let filter = Filter::new_static( - vec![Action::new( - vec!["sh", "-c", &format!("echo >> {}", &bed.out_file)], - None, - false, - "test", - "test", - "a1", - &bed.ip_patterns, - )], - vec!["borned test", "unborned .*"], - None, - None, - "test", - "test", - Duplicate::Ignore, - &bed.ip_patterns, - ); - let bed = bed.part2(filter, Local::now(), None).await; - - let ips = [ - // 
IPv4 - ("83.4.92.35", "83.4.92.35"), - ("83.4.92.0", "83.4.92.0"), - // Normal IPv6 - ("1:2:3:4:5:6:7:08", "1:2:3:4:5:6:7:8"), - // IPv6 with :: - ("1:2:3:04:0:0:7:8", "1:2:3:4::7:8"), - ("1:2:3:4:0::", "1:2:3:4::"), - // IPv6-mapped IPv4 :: - ("ffff::1.2.3.4", "1.2.3.4"), - ]; - - for (ip, ip_normalized) in ips { - assert_eq!( - bed.manager - .handle_line(&format!("borned {ip} test"), Local::now()), - React::Trigger - ); - tokio::time::sleep(Duration::from_millis(50)).await; - assert_eq!( - read_to_string(&bed.out_file).unwrap().trim_end(), - ip_normalized - ); - tokio::fs::write(&bed.out_file, "").await.unwrap(); - } -} diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 78abb41..9cb8d3a 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -18,10 +18,14 @@ use tracing::{debug, info}; use crate::{concepts::Config, treedb::Database}; use filter::FilterManager; +pub use filter::React; pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; use socket::socket_manager; use stream::StreamManager; +#[cfg(test)] +pub use filter::tests; + mod filter; mod shutdown; mod socket; From 130607d28faab18819137af9b1273f5078e71a12 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 30 Jul 2025 12:00:00 +0200 Subject: [PATCH 064/241] Add test for pattern deserialization --- src/concepts/pattern/mod.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/concepts/pattern/mod.rs b/src/concepts/pattern/mod.rs index 90e7cf1..9fbb610 100644 --- a/src/concepts/pattern/mod.rs +++ b/src/concepts/pattern/mod.rs @@ -302,6 +302,26 @@ pub mod tests { assert!(pattern.setup("name").is_err()); } + #[test] + fn setup_yml() { + let mut pattern: Pattern = serde_yaml::from_str("{}").unwrap(); + assert!(pattern.setup("name").is_err()); + + let mut pattern: Pattern = serde_yaml::from_str(r#"regex: "[abc]""#).unwrap(); + assert!(pattern.setup("name").is_ok()); + + let mut pattern: Pattern = serde_yaml::from_str(r#"type: ip"#).unwrap(); + 
assert!(pattern.setup("name").is_ok()); + + let mut pattern: Pattern = serde_yaml::from_str(r#"type: ipv4"#).unwrap(); + assert!(pattern.setup("name").is_ok()); + + let mut pattern: Pattern = serde_yaml::from_str(r#"type: ipv6"#).unwrap(); + assert!(pattern.setup("name").is_ok()); + + assert!(serde_yaml::from_str::(r#"type: zblorg"#).is_err()); + } + #[test] fn is_ignore() { let mut pattern; From 0a9c7f97dfb39d3786e6e24abd40fea4ca31e524 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 30 Jul 2025 12:00:00 +0200 Subject: [PATCH 065/241] Split IP pattern code in 3 files --- TODO | 2 + src/concepts/pattern/ip/cidr.rs | 239 ++++++++++++++ src/concepts/pattern/{ip.rs => ip/mod.rs} | 361 +--------------------- src/concepts/pattern/ip/utils.rs | 120 +++++++ 4 files changed, 370 insertions(+), 352 deletions(-) create mode 100644 src/concepts/pattern/ip/cidr.rs rename src/concepts/pattern/{ip.rs => ip/mod.rs} (68%) create mode 100644 src/concepts/pattern/ip/utils.rs diff --git a/TODO b/TODO index 9b3b9fa..3737c19 100644 --- a/TODO +++ b/TODO @@ -8,3 +8,5 @@ test Filter::regex conformity after setup should an ipv6-mapped ipv4 match a pattern of type ipv6? should it be normalized as ipv4 then? + +duplicate: deduplicate when loading database diff --git a/src/concepts/pattern/ip/cidr.rs b/src/concepts/pattern/ip/cidr.rs new file mode 100644 index 0000000..9e77975 --- /dev/null +++ b/src/concepts/pattern/ip/cidr.rs @@ -0,0 +1,239 @@ +use std::{ + fmt::Display, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + str::FromStr, +}; + +use super::*; + +/// Stores an IP and an associated mask. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Cidr { + IPv4((Ipv4Addr, Ipv4Addr)), + IPv6((Ipv6Addr, Ipv6Addr)), +} + +impl FromStr for Cidr { + type Err = String; + + fn from_str(cidr: &str) -> Result { + let (ip, mask) = cidr.split_once('/').ok_or(format!( + "malformed IP/MASK. 
'{cidr}' doesn't contain any '/'" + ))?; + let ip = normalize(ip).map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?; + let mask_count = u8::from_str(mask) + .map_err(|err| format!("malformed mask '{mask}' in '{cidr}': {err}"))?; + + // Let's accept any mask size for now, as useless as it may seem + // if mask_count < 2 { + // return Err("Can't have a network mask of 0 or 1. You're either ignoring all Internet or half of it.".into()); + // } else if mask_count + // < (match ip { + // IpAddr::V4(_) => 8, + // IpAddr::V6(_) => 16, + // }) + // { + // warn!("With a mask of {mask_count}, you're ignoring a big part of Internet. Are you sure you want to do this?"); + // } + + Self::from_ip_and_mask(ip, mask_count) + } +} + +impl Display for Cidr { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", self.network(), self.mask()) + } +} + +impl Cidr { + fn from_ip_and_mask(ip: IpAddr, mask_count: u8) -> Result { + match ip { + IpAddr::V4(mut ipv4_addr) => { + // Create bitmask + let mask = mask_to_ipv4(mask_count)?; + // Normalize IP from mask + ipv4_addr &= mask; + + Ok(Cidr::IPv4((ipv4_addr, mask))) + } + IpAddr::V6(mut ipv6_addr) => { + let mask = mask_to_ipv6(mask_count)?; + // Normalize IP from mask + ipv6_addr &= mask; + + Ok(Cidr::IPv6((ipv6_addr, mask))) + } + } + } + + /// Whether an IP is included in this IP CIDR. + /// If IP is not the same version as CIDR, returns always false. 
+ pub fn includes(&self, ip: &IpAddr) -> bool { + let ip = normalize_ip(*ip); + match self { + Cidr::IPv4((network_ipv4, mask)) => match ip { + IpAddr::V6(_) => false, + IpAddr::V4(ipv4_addr) => *network_ipv4 == ipv4_addr & mask, + }, + Cidr::IPv6((network_ipv6, mask)) => match ip { + IpAddr::V4(_) => false, + IpAddr::V6(ipv6_addr) => *network_ipv6 == ipv6_addr & mask, + }, + } + } + + fn network(&self) -> IpAddr { + match self { + Cidr::IPv4((network, _)) => IpAddr::from(*network), + Cidr::IPv6((network, _)) => IpAddr::from(*network), + } + } + + fn mask(&self) -> u8 { + let mut raw_mask = match self { + Cidr::IPv4((_, mask)) => mask.to_bits() as u128, + Cidr::IPv6((_, mask)) => mask.to_bits(), + }; + let mut ret = 0; + for _ in 0..128 { + if raw_mask % 2 == 1 { + ret += 1; + } + raw_mask >>= 1; + } + ret + } +} + +#[cfg(test)] +mod cidr_tests { + use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + str::FromStr, + }; + + use super::Cidr; + + #[test] + fn cidrv4_from_str() { + assert_eq!( + Ok(Cidr::IPv4((Ipv4Addr::new(192, 168, 1, 4), u32::MAX.into()))), + Cidr::from_str("192.168.1.4/32") + ); + // Test IP normalization from mask + assert_eq!( + Ok(Cidr::IPv4(( + Ipv4Addr::new(192, 168, 1, 0), + Ipv4Addr::new(255, 255, 255, 0), + ))), + Cidr::from_str("192.168.1.4/24") + ); + // Another ok-test "pour la route" + assert_eq!( + Ok(Cidr::IPv4(( + Ipv4Addr::new(1, 1, 0, 0), + Ipv4Addr::new(255, 255, 0, 0), + ))), + Cidr::from_str("1.1.248.25/16") + ); + // Errors + assert!(Cidr::from_str("256.1.1.1/8").is_err()); + // assert!(Cidr::from_str("1.1.1.1/0").is_err()); + // assert!(Cidr::from_str("1.1.1.1/1").is_err()); + // assert!(Cidr::from_str("1.1.1.1.1").is_err()); + assert!(Cidr::from_str("1.1.1.1/16/16").is_err()); + } + + #[test] + fn cidrv6_from_str() { + assert_eq!( + Ok(Cidr::IPv6(( + Ipv6Addr::new(0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68), + u128::MAX.into() + ))), + Cidr::from_str("fe80::df68:2ee:e4f9:e68/128") + ); + // Test IP normalization from mask + 
assert_eq!( + Ok(Cidr::IPv6(( + Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9de5, 0, 0, 0, 0), + Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0), + ))), + Cidr::from_str("2001:db8:85a3:9de5::8a2e:370:7334/64") + ); + // Another ok-test "pour la route" + assert_eq!( + Ok(Cidr::IPv6(( + Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9d00, 0, 0, 0, 0), + Ipv6Addr::new( + u16::MAX, + u16::MAX, + u16::MAX, + u16::MAX - u8::MAX as u16, + 0, + 0, + 0, + 0 + ), + ))), + Cidr::from_str("2001:db8:85a3:9d00::8a2e:370:7334/56") + ); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/56").is_ok()); + assert!(Cidr::from_str("2001:DB8:85A3:0:0:8A2E:370:7334/56").is_ok()); + // Errors + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:g334/56").is_err()); + // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/0").is_err()); + // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/1").is_err()); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334:11/56").is_err()); + assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/11/56").is_err()); + } + + #[test] + fn cidrv4_includes() { + let cidr = Cidr::from_str("192.168.1.0/24").unwrap(); + assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); + assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)))); + assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 234)))); + assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1)))); + assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68 + ),))); + } + + #[test] + fn cidrv6_includes() { + let cidr = Cidr::from_str("2001:db8:85a3:9d00:0:8a2e:370:7334/56").unwrap(); + assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0x2001, 0x0db8, 0x85a3, 0x9d00, 0, 0, 0, 0 + )))); + assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0x2001, 0x0db8, 0x85a3, 0x9da4, 0x34fc, 0x0d8b, 0xffff, 0x1111 + )))); + assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new( + 0x2001, 0x0db8, 0x85a3, 0xad00, 0, 0, 
0, 1 + )))); + assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); + } + + #[test] + fn cidr_display() { + let cidrs = [ + ("192.168.1.4/32", "192.168.1.4/32"), + ("192.168.1.4/24", "192.168.1.0/24"), + ("1.1.248.25/16", "1.1.0.0/16"), + ("fe80::df68:2ee:e4f9:e68/128", "fe80::df68:2ee:e4f9:e68/128"), + ( + "2001:db8:85a3:9de5::8a2e:370:7334/64", + "2001:db8:85a3:9de5::/64", + ), + ( + "2001:db8:85a3:9d00::8a2e:370:7334/56", + "2001:db8:85a3:9d00::/56", + ), + ]; + for (from, to) in cidrs { + assert_eq!(Cidr::from_str(from).unwrap().to_string(), to); + } + } +} diff --git a/src/concepts/pattern/ip.rs b/src/concepts/pattern/ip/mod.rs similarity index 68% rename from src/concepts/pattern/ip.rs rename to src/concepts/pattern/ip/mod.rs index 15a3319..77f525d 100644 --- a/src/concepts/pattern/ip.rs +++ b/src/concepts/pattern/ip/mod.rs @@ -1,12 +1,17 @@ use std::{ - fmt::Display, - net::{AddrParseError, IpAddr, Ipv4Addr, Ipv6Addr}, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, str::FromStr, }; use serde::{Deserialize, Serialize}; use tracing::warn; +use cidr::Cidr; +use utils::*; + +mod cidr; +mod utils; + #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum PatternType { #[default] @@ -283,11 +288,11 @@ mod patternip_tests { use tokio::{fs::read_to_string, task::JoinSet}; use crate::{ - concepts::{pattern::ip::Cidr, Action, Duplicate, Filter, Pattern}, + concepts::{Action, Duplicate, Filter, Pattern}, daemon::{tests::TestBed, React}, }; - use super::{PatternIp, PatternType}; + use super::{Cidr, PatternIp, PatternType}; #[test] fn test_setup_type_regex() { @@ -716,351 +721,3 @@ mod patternip_tests { join_set.join_all().await; } } - -/// Normalize a string as an IP address. -/// IPv6-mapped IPv4 addresses are casted to IPv4. -fn normalize(ip: &str) -> Result { - IpAddr::from_str(ip).map(normalize_ip) -} - -/// Normalize a string as an IP address. -/// IPv6-mapped IPv4 addresses are casted to IPv4. 
-fn normalize_ip(ip: IpAddr) -> IpAddr { - match ip { - IpAddr::V4(_) => ip, - IpAddr::V6(ipv6) => match ipv6.to_ipv4_mapped() { - Some(ipv4) => IpAddr::V4(ipv4), - None => ip, - }, - } -} - -/// Creates an [`Ipv4Addr`] from a mask -fn mask_to_ipv4(mask_count: u8) -> Result { - if mask_count > 32 { - Err(format!( - "an IPv4 mask must be 32 max. {mask_count} is too big." - )) - } else { - let mask = match mask_count { - 0 => 0u32, - n => u32::MAX << (32 - n), - }; - let mask = Ipv4Addr::from_bits(mask); - Ok(mask) - } -} - -/// Creates an [`Ipv4Addr`] from a mask -fn mask_to_ipv6(mask_count: u8) -> Result { - if mask_count > 128 { - Err(format!( - "an IPv4 mask must be 128 max. {mask_count} is too big." - )) - } else { - let mask = match mask_count { - 0 => 0u128, - n => u128::MAX << (128 - n), - }; - let mask = Ipv6Addr::from_bits(mask); - Ok(mask) - } -} - -#[cfg(test)] -mod utils_tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - - use super::{mask_to_ipv4, mask_to_ipv6, normalize}; - - #[test] - fn test_normalize_ip() { - assert_eq!( - normalize("83.44.23.14"), - Ok(IpAddr::V4(Ipv4Addr::new(83, 44, 23, 14))) - ); - assert_eq!( - normalize("2001:db8:85a3::8a2e:370:7334"), - Ok(IpAddr::V6(Ipv6Addr::new( - 0x2001, 0xdb8, 0x85a3, 0x0, 0x0, 0x8a2e, 0x370, 0x7334 - ))) - ); - assert_eq!( - normalize("::ffff:192.168.1.34"), - Ok(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 34))) - ); - assert_eq!( - normalize("::ffff:1.2.3.4"), - Ok(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))) - ); - } - - #[test] - fn test_mask_to_ipv4() { - assert!(mask_to_ipv4(33).is_err()); - assert!(mask_to_ipv4(100).is_err()); - assert_eq!(mask_to_ipv4(16), Ok(Ipv4Addr::new(255, 255, 0, 0))); - assert_eq!(mask_to_ipv4(24), Ok(Ipv4Addr::new(255, 255, 255, 0))); - assert_eq!(mask_to_ipv4(25), Ok(Ipv4Addr::new(255, 255, 255, 128))); - assert_eq!(mask_to_ipv4(26), Ok(Ipv4Addr::new(255, 255, 255, 192))); - assert_eq!(mask_to_ipv4(32), Ok(Ipv4Addr::new(255, 255, 255, 255))); - } - - #[test] - fn 
test_mask_to_ipv6() { - assert!(mask_to_ipv6(129).is_err()); - assert!(mask_to_ipv6(254).is_err()); - assert_eq!( - mask_to_ipv6(56), - Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xff00, 0, 0, 0, 0)) - ); - assert_eq!( - mask_to_ipv6(64), - Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0)) - ); - assert_eq!( - mask_to_ipv6(112), - Ok(Ipv6Addr::new( - 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 - )) - ); - assert_eq!( - mask_to_ipv6(128), - Ok(Ipv6Addr::new( - 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff - )) - ); - } -} - -/// Stores an IP and an associated mask. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum Cidr { - IPv4((Ipv4Addr, Ipv4Addr)), - IPv6((Ipv6Addr, Ipv6Addr)), -} - -impl FromStr for Cidr { - type Err = String; - - fn from_str(cidr: &str) -> Result { - let (ip, mask) = cidr.split_once('/').ok_or(format!( - "malformed IP/MASK. '{cidr}' doesn't contain any '/'" - ))?; - let ip = normalize(ip).map_err(|err| format!("malformed IP '{ip}' in '{cidr}': {err}"))?; - let mask_count = u8::from_str(mask) - .map_err(|err| format!("malformed mask '{mask}' in '{cidr}': {err}"))?; - - // Let's accept any mask size for now, as useless as it may seem - // if mask_count < 2 { - // return Err("Can't have a network mask of 0 or 1. You're either ignoring all Internet or half of it.".into()); - // } else if mask_count - // < (match ip { - // IpAddr::V4(_) => 8, - // IpAddr::V6(_) => 16, - // }) - // { - // warn!("With a mask of {mask_count}, you're ignoring a big part of Internet. 
Are you sure you want to do this?"); - // } - - Self::from_ip_and_mask(ip, mask_count) - } -} - -impl Display for Cidr { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}/{}", self.network(), self.mask()) - } -} - -impl Cidr { - fn from_ip_and_mask(ip: IpAddr, mask_count: u8) -> Result { - match ip { - IpAddr::V4(mut ipv4_addr) => { - // Create bitmask - let mask = mask_to_ipv4(mask_count)?; - // Normalize IP from mask - ipv4_addr &= mask; - - Ok(Cidr::IPv4((ipv4_addr, mask))) - } - IpAddr::V6(mut ipv6_addr) => { - let mask = mask_to_ipv6(mask_count)?; - // Normalize IP from mask - ipv6_addr &= mask; - - Ok(Cidr::IPv6((ipv6_addr, mask))) - } - } - } - - /// Whether an IP is included in this IP CIDR. - /// If IP is not the same version as CIDR, returns always false. - fn includes(&self, ip: &IpAddr) -> bool { - let ip = normalize_ip(*ip); - match self { - Cidr::IPv4((network_ipv4, mask)) => match ip { - IpAddr::V6(_) => false, - IpAddr::V4(ipv4_addr) => *network_ipv4 == ipv4_addr & mask, - }, - Cidr::IPv6((network_ipv6, mask)) => match ip { - IpAddr::V4(_) => false, - IpAddr::V6(ipv6_addr) => *network_ipv6 == ipv6_addr & mask, - }, - } - } - - fn network(&self) -> IpAddr { - match self { - Cidr::IPv4((network, _)) => IpAddr::from(*network), - Cidr::IPv6((network, _)) => IpAddr::from(*network), - } - } - - fn mask(&self) -> u8 { - let mut raw_mask = match self { - Cidr::IPv4((_, mask)) => mask.to_bits() as u128, - Cidr::IPv6((_, mask)) => mask.to_bits(), - }; - let mut ret = 0; - for _ in 0..128 { - if raw_mask % 2 == 1 { - ret += 1; - } - raw_mask >>= 1; - } - ret - } -} - -#[cfg(test)] -mod cidr_tests { - use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr}, - str::FromStr, - }; - - use super::Cidr; - - #[test] - fn cidrv4_from_str() { - assert_eq!( - Ok(Cidr::IPv4((Ipv4Addr::new(192, 168, 1, 4), u32::MAX.into()))), - Cidr::from_str("192.168.1.4/32") - ); - // Test IP normalization from mask - assert_eq!( - Ok(Cidr::IPv4(( - 
Ipv4Addr::new(192, 168, 1, 0), - Ipv4Addr::new(255, 255, 255, 0), - ))), - Cidr::from_str("192.168.1.4/24") - ); - // Another ok-test "pour la route" - assert_eq!( - Ok(Cidr::IPv4(( - Ipv4Addr::new(1, 1, 0, 0), - Ipv4Addr::new(255, 255, 0, 0), - ))), - Cidr::from_str("1.1.248.25/16") - ); - // Errors - assert!(Cidr::from_str("256.1.1.1/8").is_err()); - // assert!(Cidr::from_str("1.1.1.1/0").is_err()); - // assert!(Cidr::from_str("1.1.1.1/1").is_err()); - // assert!(Cidr::from_str("1.1.1.1.1").is_err()); - assert!(Cidr::from_str("1.1.1.1/16/16").is_err()); - } - - #[test] - fn cidrv6_from_str() { - assert_eq!( - Ok(Cidr::IPv6(( - Ipv6Addr::new(0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68), - u128::MAX.into() - ))), - Cidr::from_str("fe80::df68:2ee:e4f9:e68/128") - ); - // Test IP normalization from mask - assert_eq!( - Ok(Cidr::IPv6(( - Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9de5, 0, 0, 0, 0), - Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, 0, 0, 0, 0), - ))), - Cidr::from_str("2001:db8:85a3:9de5::8a2e:370:7334/64") - ); - // Another ok-test "pour la route" - assert_eq!( - Ok(Cidr::IPv6(( - Ipv6Addr::new(0x2001, 0xdb8, 0x85a3, 0x9d00, 0, 0, 0, 0), - Ipv6Addr::new( - u16::MAX, - u16::MAX, - u16::MAX, - u16::MAX - u8::MAX as u16, - 0, - 0, - 0, - 0 - ), - ))), - Cidr::from_str("2001:db8:85a3:9d00::8a2e:370:7334/56") - ); - assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/56").is_ok()); - assert!(Cidr::from_str("2001:DB8:85A3:0:0:8A2E:370:7334/56").is_ok()); - // Errors - assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:g334/56").is_err()); - // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/0").is_err()); - // assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/1").is_err()); - assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334:11/56").is_err()); - assert!(Cidr::from_str("2001:db8:85a3:0:0:8a2e:370:7334/11/56").is_err()); - } - - #[test] - fn cidrv4_includes() { - let cidr = Cidr::from_str("192.168.1.0/24").unwrap(); - 
assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); - assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)))); - assert!(cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 234)))); - assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 0, 1)))); - assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new( - 0xfe80, 0, 0, 0, 0xdf68, 0x2ee, 0xe4f9, 0xe68 - ),))); - } - - #[test] - fn cidrv6_includes() { - let cidr = Cidr::from_str("2001:db8:85a3:9d00:0:8a2e:370:7334/56").unwrap(); - assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new( - 0x2001, 0x0db8, 0x85a3, 0x9d00, 0, 0, 0, 0 - )))); - assert!(cidr.includes(&IpAddr::V6(Ipv6Addr::new( - 0x2001, 0x0db8, 0x85a3, 0x9da4, 0x34fc, 0x0d8b, 0xffff, 0x1111 - )))); - assert!(!cidr.includes(&IpAddr::V6(Ipv6Addr::new( - 0x2001, 0x0db8, 0x85a3, 0xad00, 0, 0, 0, 1 - )))); - assert!(!cidr.includes(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)))); - } - - #[test] - fn cidr_display() { - let cidrs = [ - ("192.168.1.4/32", "192.168.1.4/32"), - ("192.168.1.4/24", "192.168.1.0/24"), - ("1.1.248.25/16", "1.1.0.0/16"), - ("fe80::df68:2ee:e4f9:e68/128", "fe80::df68:2ee:e4f9:e68/128"), - ( - "2001:db8:85a3:9de5::8a2e:370:7334/64", - "2001:db8:85a3:9de5::/64", - ), - ( - "2001:db8:85a3:9d00::8a2e:370:7334/56", - "2001:db8:85a3:9d00::/56", - ), - ]; - for (from, to) in cidrs { - assert_eq!(Cidr::from_str(from).unwrap().to_string(), to); - } - } -} diff --git a/src/concepts/pattern/ip/utils.rs b/src/concepts/pattern/ip/utils.rs new file mode 100644 index 0000000..a294c55 --- /dev/null +++ b/src/concepts/pattern/ip/utils.rs @@ -0,0 +1,120 @@ +use std::{ + net::{AddrParseError, IpAddr, Ipv4Addr, Ipv6Addr}, + str::FromStr, +}; + +/// Normalize a string as an IP address. +/// IPv6-mapped IPv4 addresses are casted to IPv4. +pub fn normalize(ip: &str) -> Result { + IpAddr::from_str(ip).map(normalize_ip) +} + +/// Normalize a string as an IP address. +/// IPv6-mapped IPv4 addresses are casted to IPv4. 
+pub fn normalize_ip(ip: IpAddr) -> IpAddr { + match ip { + IpAddr::V4(_) => ip, + IpAddr::V6(ipv6) => match ipv6.to_ipv4_mapped() { + Some(ipv4) => IpAddr::V4(ipv4), + None => ip, + }, + } +} + +/// Creates an [`Ipv4Addr`] from a mask +pub fn mask_to_ipv4(mask_count: u8) -> Result { + if mask_count > 32 { + Err(format!( + "an IPv4 mask must be 32 max. {mask_count} is too big." + )) + } else { + let mask = match mask_count { + 0 => 0u32, + n => u32::MAX << (32 - n), + }; + let mask = Ipv4Addr::from_bits(mask); + Ok(mask) + } +} + +/// Creates an [`Ipv4Addr`] from a mask +pub fn mask_to_ipv6(mask_count: u8) -> Result { + if mask_count > 128 { + Err(format!( + "an IPv4 mask must be 128 max. {mask_count} is too big." + )) + } else { + let mask = match mask_count { + 0 => 0u128, + n => u128::MAX << (128 - n), + }; + let mask = Ipv6Addr::from_bits(mask); + Ok(mask) + } +} + +#[cfg(test)] +mod utils_tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + + use super::{mask_to_ipv4, mask_to_ipv6, normalize}; + + #[test] + fn test_normalize_ip() { + assert_eq!( + normalize("83.44.23.14"), + Ok(IpAddr::V4(Ipv4Addr::new(83, 44, 23, 14))) + ); + assert_eq!( + normalize("2001:db8:85a3::8a2e:370:7334"), + Ok(IpAddr::V6(Ipv6Addr::new( + 0x2001, 0xdb8, 0x85a3, 0x0, 0x0, 0x8a2e, 0x370, 0x7334 + ))) + ); + assert_eq!( + normalize("::ffff:192.168.1.34"), + Ok(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 34))) + ); + assert_eq!( + normalize("::ffff:1.2.3.4"), + Ok(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))) + ); + } + + #[test] + fn test_mask_to_ipv4() { + assert!(mask_to_ipv4(33).is_err()); + assert!(mask_to_ipv4(100).is_err()); + assert_eq!(mask_to_ipv4(16), Ok(Ipv4Addr::new(255, 255, 0, 0))); + assert_eq!(mask_to_ipv4(24), Ok(Ipv4Addr::new(255, 255, 255, 0))); + assert_eq!(mask_to_ipv4(25), Ok(Ipv4Addr::new(255, 255, 255, 128))); + assert_eq!(mask_to_ipv4(26), Ok(Ipv4Addr::new(255, 255, 255, 192))); + assert_eq!(mask_to_ipv4(32), Ok(Ipv4Addr::new(255, 255, 255, 255))); + } + + #[test] + fn 
test_mask_to_ipv6() { + assert!(mask_to_ipv6(129).is_err()); + assert!(mask_to_ipv6(254).is_err()); + assert_eq!( + mask_to_ipv6(56), + Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xff00, 0, 0, 0, 0)) + ); + assert_eq!( + mask_to_ipv6(64), + Ok(Ipv6Addr::new(0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0)) + ); + assert_eq!( + mask_to_ipv6(112), + Ok(Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 + )) + ); + assert_eq!( + mask_to_ipv6(128), + Ok(Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff + )) + ); + } +} From e4e50dd03babae243c25b003022b3951afaa0934 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 30 Jul 2025 12:00:00 +0200 Subject: [PATCH 066/241] cargo clippy --- src/concepts/filter.rs | 1 + src/concepts/pattern/ip/mod.rs | 3 ++- src/daemon/filter/mod.rs | 2 +- src/daemon/filter/state.rs | 14 ++++++-------- src/daemon/filter/tests.rs | 22 ++++++++++++---------- src/treedb/helpers.rs | 2 +- 6 files changed, 23 insertions(+), 21 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 052dc1b..410a997 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -349,6 +349,7 @@ impl Hash for Filter { } #[cfg(test)] +#[allow(clippy::too_many_arguments)] impl Filter { /// Test-only constructor designed to be easy to call pub fn new( diff --git a/src/concepts/pattern/ip/mod.rs b/src/concepts/pattern/ip/mod.rs index 77f525d..00cf1c1 100644 --- a/src/concepts/pattern/ip/mod.rs +++ b/src/concepts/pattern/ip/mod.rs @@ -55,6 +55,7 @@ impl PatternType { let ipv4 = format!(r#"{num4}(?:\.{num4}){{3}}"#); + #[allow(clippy::useless_format)] let ipv6 = [ // We're unrolling all possibilities, longer IPv6 first, // to make it super-greedy, @@ -677,7 +678,7 @@ mod patternip_tests { format!("full-unborned : {ip}:"), ] { join_set.spawn(tokio::spawn(async move { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![Action::new( vec!["sh", "-c", 
&format!("echo >> {}", &bed.out_file)], diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 3db01ba..290f3a1 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -189,7 +189,7 @@ impl FilterManager { .triggers .keys() // match filtering - .filter(|match_| is_match(&match_)) + .filter(|match_| is_match(match_)) // clone necessary to drop all references to State .cloned() .collect::>(); diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index f5d9692..8e3d7d3 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -48,15 +48,13 @@ pub struct State { /// ```rust /// use chrono::{Local}; /// - /// fn main() { - /// let mut res = vec![]; - /// for _ in 0..10 { + /// let mut res = vec![]; + /// for _ in 0..10 { /// let now = Local::now(); /// res.push(format!("Now: {now}")); - /// } - /// for s in res { + /// } + /// for s in res { /// println!("{s}"); - /// } /// } /// ``` pub ordered_times: Tree, @@ -171,9 +169,9 @@ impl State { }); } else { self.triggers.fetch_update(mt.m, |map| { - map.and_then(|mut map| { + map.map(|mut map| { map.insert(mt.t, count - 1); - Some(map) + map }) }); } diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 08127b9..a926b2a 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -23,8 +23,8 @@ pub struct TestBed { pub ip_patterns: Patterns, } -impl TestBed { - pub fn new() -> Self { +impl Default for TestBed { + fn default() -> Self { let _out_path = tempfile::NamedTempFile::new().unwrap().into_temp_path(); let out_file = _out_path.to_str().unwrap().to_string(); @@ -70,7 +70,9 @@ ignorecidr: ip_patterns, } } +} +impl TestBed { pub async fn part2( self, filter: &'static Filter, @@ -127,7 +129,7 @@ impl TestBed2 { #[tokio::test] async fn three_matches_then_action_then_delayed_action() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter 
= Filter::new_static( vec![ Action::new( @@ -259,7 +261,7 @@ async fn three_matches_then_action_then_delayed_action() { #[tokio::test] async fn one_match_one_action() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![Action::new( vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], @@ -305,7 +307,7 @@ async fn one_match_one_action() { #[tokio::test] async fn one_match_one_delayed_action() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![Action::new( vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], @@ -370,7 +372,7 @@ async fn one_match_one_delayed_action() { #[tokio::test] async fn one_db_match_one_runtime_match_one_action() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![Action::new( vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], @@ -436,7 +438,7 @@ async fn one_db_match_one_runtime_match_one_action() { #[tokio::test] async fn one_outdated_db_match() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![Action::new( vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], @@ -482,7 +484,7 @@ async fn flush() { #[tokio::test] async fn trigger_unmatched_pattern() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![ Action::new( @@ -553,7 +555,7 @@ async fn trigger_unmatched_pattern() { #[tokio::test] async fn trigger_matched_pattern() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = 
TestBed::default(); let filter = Filter::new_static( vec![ Action::new( @@ -631,7 +633,7 @@ async fn trigger_matched_pattern() { #[tokio::test] async fn multiple_triggers() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { - let bed = TestBed::new(); + let bed = TestBed::default(); let filter = Filter::new_static( vec![ Action::new( diff --git a/src/treedb/helpers.rs b/src/treedb/helpers.rs index f7afbeb..73dccd7 100644 --- a/src/treedb/helpers.rs +++ b/src/treedb/helpers.rs @@ -23,7 +23,7 @@ fn string_to_time(val: &str) -> Result { /// Tries to convert a [`Value`] into a [`Time`] pub fn to_time(val: &Value) -> Result { - Ok(string_to_time(val.as_str().ok_or("not a datetime")?)?) + string_to_time(val.as_str().ok_or("not a datetime")?) } /// Tries to convert a [`Value`] into a [`Match`] From b927ba4fdfa354a6955a1c00a604cd636bff2633 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 31 Jul 2025 12:00:00 +0200 Subject: [PATCH 067/241] Add ipv4only/ipv6only logic to actions --- src/concepts/action.rs | 57 +++++++++++++-------- src/concepts/filter.rs | 10 ++++ src/concepts/mod.rs | 2 +- src/concepts/pattern/ip/mod.rs | 6 ++- src/concepts/pattern/mod.rs | 6 ++- src/daemon/filter/mod.rs | 29 ++++++++++- src/daemon/filter/state.rs | 25 +++++++-- src/daemon/filter/tests.rs | 93 ++++++++++++++++++++++++++++++++++ 8 files changed, 201 insertions(+), 27 deletions(-) diff --git a/src/concepts/action.rs b/src/concepts/action.rs index e4e0fc9..3e9972a 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -5,7 +5,7 @@ use chrono::TimeDelta; use serde::{Deserialize, Serialize}; use tokio::process::Command; -use super::parse_duration::*; +use super::{parse_duration::*, PatternType}; use super::{Match, Pattern}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] @@ -28,6 +28,11 @@ pub struct Action { #[serde(default = "set_false", skip_serializing_if = "is_false")] oneshot: bool, + #[serde(default = "set_false", skip_serializing_if = "is_false")] 
+ ipv4only: bool, + #[serde(default = "set_false", skip_serializing_if = "is_false")] + ipv6only: bool, + #[serde(skip)] patterns: Arc>>, #[serde(skip)] @@ -63,6 +68,13 @@ impl Action { self.oneshot } + pub fn ipv4only(&self) -> bool { + self.ipv4only + } + pub fn ipv6only(&self) -> bool { + self.ipv6only + } + pub fn setup( &mut self, stream_name: &str, @@ -110,6 +122,22 @@ impl Action { return Err("cannot have `onexit: true`, without an `after` directive".into()); } + if self.ipv4only && self.ipv6only { + return Err("cannot have `ipv4only: true` and `ipv6only: true` in one action".into()); + } + if self + .patterns + .iter() + .all(|pattern| pattern.pattern_type() != PatternType::Ip) + { + if self.ipv4only { + return Err("it makes no sense to have an action with `ipv4only: true` when no pattern of type ip is defined on the filter".into()); + } + if self.ipv6only { + return Err("it makes no sense to have an action with `ipv6only: true` when no pattern of type ip is defined on the filter".into()); + } + } + Ok(()) } @@ -175,11 +203,14 @@ impl Action { filter_name: &str, name: &str, config_patterns: &super::Patterns, + ip_only: u8, ) -> Self { let mut action = Self { cmd: cmd.into_iter().map(|s| s.into()).collect(), after: after.map(|s| s.into()), on_exit, + ipv4only: ip_only == 4, + ipv6only: ip_only == 6, ..Default::default() }; action @@ -203,28 +234,14 @@ pub mod tests { use super::*; - fn default_action() -> Action { - Action { - cmd: Vec::new(), - name: "".into(), - filter_name: "".into(), - stream_name: "".into(), - after: None, - after_duration: None, - on_exit: false, - oneshot: false, - patterns: Arc::new(BTreeSet::default()), - } - } - pub fn ok_action() -> Action { - let mut action = default_action(); + let mut action = Action::default(); action.cmd = vec!["command".into()]; action } pub fn ok_action_with_after(d: String, name: &str) -> Action { - let mut action = default_action(); + let mut action = Action::default(); action.cmd = 
vec!["command".into()]; action.after = Some(d); action @@ -240,16 +257,16 @@ pub mod tests { let patterns = Arc::new(BTreeSet::default()); // No command - action = default_action(); + action = Action::default(); assert!(action.setup(&name, &name, &name, patterns.clone()).is_err()); // No command - action = default_action(); + action = Action::default(); action.cmd = vec!["".into()]; assert!(action.setup(&name, &name, &name, patterns.clone()).is_err()); // No command - action = default_action(); + action = Action::default(); action.cmd = vec!["".into(), "arg1".into()]; assert!(action.setup(&name, &name, &name, patterns.clone()).is_err()); diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 410a997..98be40b 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -32,6 +32,8 @@ pub enum Duplicate { pub struct Filter { #[serde(skip)] longuest_action_duration: TimeDelta, + #[serde(skip)] + has_ip: bool, regex: Vec, #[serde(skip)] @@ -119,6 +121,10 @@ impl Filter { &self.patterns } + pub fn check_ip(&self) -> bool { + self.has_ip + } + pub fn setup( &mut self, stream_name: &str, @@ -211,6 +217,10 @@ impl Filter { for (key, action) in &mut self.actions { action.setup(stream_name, name, key, self.patterns.clone())?; } + self.has_ip = self + .actions + .values() + .any(|action| action.ipv4only() || action.ipv6only()); self.longuest_action_duration = self.actions.values().fold(TimeDelta::seconds(0), |acc, v| { diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index b4a785e..40a624c 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -9,7 +9,7 @@ pub use action::Action; pub use config::{Config, Patterns}; pub use filter::{Duplicate, Filter}; use parse_duration::parse_duration; -pub use pattern::Pattern; +pub use pattern::{Pattern, PatternType}; use serde::{Deserialize, Serialize}; pub use stream::Stream; diff --git a/src/concepts/pattern/ip/mod.rs b/src/concepts/pattern/ip/mod.rs index 00cf1c1..4165852 100644 --- 
a/src/concepts/pattern/ip/mod.rs +++ b/src/concepts/pattern/ip/mod.rs @@ -152,6 +152,10 @@ pub struct PatternIp { } impl PatternIp { + pub fn pattern_type(&self) -> PatternType { + self.pattern_type + } + /// Setup the IP-specific part of a Pattern. /// Returns an optional regex string if of type IP, else None /// Returns an error if one of: @@ -688,6 +692,7 @@ mod patternip_tests { "test", "a1", &bed.ip_patterns, + 0, )], vec![ "^borned test", @@ -714,7 +719,6 @@ mod patternip_tests { ip, "line: {line}" ); - println!("line ok: {line}"); })); } } diff --git a/src/concepts/pattern/mod.rs b/src/concepts/pattern/mod.rs index 9fbb610..21c8d31 100644 --- a/src/concepts/pattern/mod.rs +++ b/src/concepts/pattern/mod.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; mod ip; -use ip::PatternIp; +pub use ip::{PatternIp, PatternType}; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] @@ -47,6 +47,10 @@ impl Pattern { &self.name_with_braces } + pub fn pattern_type(&self) -> PatternType { + self.ip.pattern_type() + } + pub fn setup(&mut self, name: &str) -> Result<(), String> { self._setup(name) .map_err(|msg| format!("pattern {}: {}", name, msg)) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 290f3a1..484b1b5 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -14,7 +14,7 @@ use tokio::sync::Semaphore; use tracing::{error, info}; use crate::{ - concepts::{Action, Duplicate, Filter, Match, Pattern, Time}, + concepts::{Action, Duplicate, Filter, Match, Pattern, PatternType, Time}, protocol::{Order, PatternStatus}, treedb::Database, }; @@ -240,6 +240,30 @@ impl FilterManager { startup: bool, only_after: bool, ) { + // Testing if we have an IPv4 or IPv6 + let ip_type = if self.filter.check_ip() { + self.filter + .patterns() + .iter() + .zip(&m) + .find(|(p, _)| p.pattern_type() == PatternType::Ip) + .map(|(_, m)| -> _ { + // Using this dumb heuristic is ok, + // because we know we have a valid IP 
address. + if m.contains(':') { + PatternType::Ipv6 + } else if m.contains('.') { + PatternType::Ipv4 + } else { + PatternType::Regex + } + }) + .unwrap_or(PatternType::Regex) + } else { + PatternType::Regex + }; + + // Scheduling each action for action in self .filter .actions() @@ -248,6 +272,9 @@ impl FilterManager { .filter(|action| !startup || !action.oneshot()) // If only_after, keep only after actions .filter(|action| !only_after || action.after_duration().is_some()) + // If specific ip version, check it + .filter(|action| !action.ipv4only() || ip_type == PatternType::Ipv4) + .filter(|action| !action.ipv6only() || ip_type == PatternType::Ipv6) { let exec_time = t + action.after_duration().unwrap_or_default(); let m = m.clone(); diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 8e3d7d3..9c44d71 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -248,6 +248,7 @@ mod tests { "f1", "a1", &patterns, + 0, )], vec!["test "], Some(3), @@ -374,9 +375,27 @@ mod tests { let patterns = Pattern::new_map("az", "[a-z]+").unwrap(); let filter = Filter::new_static( vec![ - Action::new(vec!["true"], None, false, "s1", "f1", "a1", &patterns), - Action::new(vec!["true"], Some("1s"), false, "s1", "f1", "a2", &patterns), - Action::new(vec!["true"], Some("3s"), false, "s1", "f1", "a3", &patterns), + Action::new(vec!["true"], None, false, "s1", "f1", "a1", &patterns, 0), + Action::new( + vec!["true"], + Some("1s"), + false, + "s1", + "f1", + "a2", + &patterns, + 0, + ), + Action::new( + vec!["true"], + Some("3s"), + false, + "s1", + "f1", + "a3", + &patterns, + 0, + ), ], vec!["test "], Some(3), diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index a926b2a..0565db2 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -124,6 +124,10 @@ impl TestBed2 { ); assert!(state.triggers.is_empty(), "triggers must be empty"); } + + pub async fn reset_out_file(&self) { + 
tokio::fs::write(&self.out_file, "").await.unwrap(); + } } #[tokio::test] @@ -140,6 +144,7 @@ async fn three_matches_then_action_then_delayed_action() { "test", "a1", &bed.az_patterns, + 0, ), Action::new( vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], @@ -149,6 +154,7 @@ async fn three_matches_then_action_then_delayed_action() { "test", "a2", &bed.az_patterns, + 0, ), ], vec!["test "], @@ -271,6 +277,7 @@ async fn one_match_one_action() { "test", "a1", &bed.az_patterns, + 0, )], vec!["test "], None, @@ -317,6 +324,7 @@ async fn one_match_one_delayed_action() { "test", "a1", &bed.az_patterns, + 0, )], vec!["test "], None, @@ -382,6 +390,7 @@ async fn one_db_match_one_runtime_match_one_action() { "test", "a1", &bed.az_patterns, + 0, )], vec!["test "], Some(2), @@ -448,6 +457,7 @@ async fn one_outdated_db_match() { "test", "a1", &bed.az_patterns, + 0, )], vec!["test "], Some(2), @@ -495,6 +505,7 @@ async fn trigger_unmatched_pattern() { "test", "a1", &bed.az_patterns, + 0, ), Action::new( vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], @@ -504,6 +515,7 @@ async fn trigger_unmatched_pattern() { "test", "a2", &bed.az_patterns, + 0, ), ], vec!["test "], @@ -566,6 +578,7 @@ async fn trigger_matched_pattern() { "test", "a1", &bed.az_patterns, + 0, ), Action::new( vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], @@ -575,6 +588,7 @@ async fn trigger_matched_pattern() { "test", "a2", &bed.az_patterns, + 0, ), ], vec!["test "], @@ -644,6 +658,7 @@ async fn multiple_triggers() { "test", "a1", &bed.az_patterns, + 0, ), Action::new( vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], @@ -653,6 +668,7 @@ async fn multiple_triggers() { "test", "a2", &bed.az_patterns, + 0, ), ], vec!["test "], @@ -745,3 +761,80 @@ async fn multiple_triggers() { ); } } + +#[tokio::test] +async fn ip_specific() { + let bed = TestBed::default(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo ipv4 >> {}", 
&bed.out_file)], + None, + false, + "test", + "test", + "a4", + &bed.ip_patterns, + 4, + ), + Action::new( + vec!["sh", "-c", &format!("echo ipv6 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a6", + &bed.ip_patterns, + 6, + ), + Action::new( + vec!["sh", "-c", &format!("echo any >> {}", &bed.out_file)], + Some("20ms"), + false, + "test", + "test", + "ax", + &bed.ip_patterns, + 0, + ), + ], + vec!["test "], + None, + None, + "test", + "test", + Duplicate::Extend, + &bed.ip_patterns, + ); + + let bed = bed.part2(filter, Local::now(), None).await; + + assert_eq!( + bed.manager.handle_line("test 1.2.3.4", Local::now()), + React::Trigger, + ); + + // Wait for action to execute + tokio::time::sleep(Duration::from_millis(70)).await; + + assert_eq!( + "ipv4 1.2.3.4\nany 1.2.3.4\n", + &read_to_string(&bed.out_file).unwrap(), + ); + + bed.reset_out_file().await; + + assert_eq!( + bed.manager + .handle_line("test 1:2:3:4:5:6:7:8", Local::now()), + React::Trigger, + ); + + // Wait for action to execute + tokio::time::sleep(Duration::from_millis(70)).await; + + assert_eq!( + "ipv6 1:2:3:4:5:6:7:8\nany 1:2:3:4:5:6:7:8\n", + &read_to_string(&bed.out_file).unwrap(), + ); +} From 6f63f49acdec9afc70a5a54fd74f4fa81f3f680c Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 31 Jul 2025 12:00:00 +0200 Subject: [PATCH 068/241] Add failing test for flushing ipvXonly actions --- tests/ip.rs | 179 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) create mode 100644 tests/ip.rs diff --git a/tests/ip.rs b/tests/ip.rs new file mode 100644 index 0000000..fe5f44b --- /dev/null +++ b/tests/ip.rs @@ -0,0 +1,179 @@ +use std::{ + env, + fs::File, + io::{IsTerminal, Read, Write}, + time::Duration, +}; + +use tempfile::TempDir; +use tracing::Level; + +use reaction::{cli::Format, client::request, daemon::daemon, protocol::Order}; +use tokio::time::sleep; + +fn file_with_contents(path: &str, contents: &str) { + let mut file = 
File::create(path).unwrap(); + file.write_all(contents.as_bytes()).unwrap(); +} + +fn config_with_cmd(config_path: &str, cmd: &str) { + file_with_contents( + config_path, + &(" +{ + concurrency: 0, + patterns: { + ip: { + type: 'ip', + ipv6mask: 64, + }, + }, + streams: { + stream1: { + cmd: ['sh', '-c', '" + .to_owned() + + cmd + + "'], + filters: { + filter1: { + regex: ['ip '], + retry: 2, + retryperiod: '2s', + duplicate: 'rerun', + actions: { + // Don't mix code and data at home! + // You may permit arbitrary execution from vilains, + // if your regex is permissive enough. + // This is OK only for testing purposes. + ipv4_1: { + cmd: ['sh', '-c', 'echo >> ./ipv4.txt'], + ipv4only: true, + }, + ipv4_2: { + cmd: ['sh', '-c', 'echo del >> ./ipv4.txt'], + ipv4only: true, + after: '30s', + onexit: false, + }, + ipv6_1: { + cmd: ['sh', '-c', 'echo >> ./ipv6.txt'], + ipv6only: true, + }, + ipv6_2: { + cmd: ['sh', '-c', 'echo del >> ./ipv6.txt'], + ipv6only: true, + after: '30s', + onexit: false, + }, + all_1: { + cmd: ['sh', '-c', 'echo >> ./out.txt'], + }, + all_2: { + cmd: ['sh', '-c', 'echo del >> ./out.txt'], + after: '30s', + onexit: false, + }, + } + } + } + } + } +}"), + ); +} + +fn get_file_content(path: &str) -> String { + let mut out_txt = File::open(path).unwrap(); + let mut contents = String::new(); + out_txt.read_to_string(&mut contents).unwrap(); + contents +} + +#[tokio::test] +async fn ip() { + let dir = TempDir::new().unwrap(); + env::set_current_dir(&dir).unwrap(); + + let config_path = "config.jsonnet"; + let out_path = "./out.txt"; + let ipv4_path = "./ipv4.txt"; + let ipv6_path = "./ipv6.txt"; + let socket_path = "./reaction.sock"; + + config_with_cmd( + config_path, + "for i in 1.2.3.4 204:31::1 5.5.5.5 1.2.3.4 204:31::1 5.5.5.5; do echo ip $i; sleep 0.01; done; sleep 0.15", + ); + + file_with_contents(out_path, ""); + file_with_contents(ipv4_path, ""); + file_with_contents(ipv6_path, ""); + + // Set the logger before running any code from the 
crate + tracing_subscriber::fmt::fmt() + .without_time() + .with_target(false) + .with_ansi(std::io::stdout().is_terminal()) + .with_max_level(Level::DEBUG) + .try_init() + .unwrap(); + + // Run the daemon + let handle = tokio::spawn(async move { daemon(config_path.into(), socket_path.into()).await }); + + // Run the flushes + + // We sleep for the time the echoes are finished + a bit (100ms) + let handle2 = tokio::spawn(async move { + sleep(Duration::from_millis(160)).await; + request( + socket_path.into(), + Format::JSON, + None, + vec![("ip".into(), "1.2.3.4".into())], + Order::Flush, + ) + .await + }); + + let handle3 = tokio::spawn(async move { + sleep(Duration::from_millis(180)).await; + request( + socket_path.into(), + Format::JSON, + None, + vec![("ip".into(), "204:31::/64".into())], + Order::Flush, + ) + .await + }); + + let (daemon_exit, flush1, flush2) = tokio::join!(handle, handle2, handle3); + assert!(daemon_exit.is_ok()); + assert!(flush1.is_ok()); + assert!(flush2.is_ok()); + + // tokio::time::sleep(Duration::from_secs(100)).await; + + assert_eq!( + get_file_content(out_path).trim(), + [ + "1.2.3.4", + "204:31::/64", + "5.5.5.5", + "del 1.2.3.4", + "del 204:31::/64" + ] + .join("\n") + ); + + assert_eq!( + get_file_content(ipv4_path).trim(), + ["1.2.3.4", "5.5.5.5", "del 1.2.3.4"].join("\n") + ); + + assert_eq!( + get_file_content(ipv6_path).trim(), + ["204:31::/64", "del 204:31::/64"].join("\n") + ); +} From a0b804811b761f4eb885030dbd34e658bae23ecc Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 1 Aug 2025 12:00:00 +0200 Subject: [PATCH 069/241] Refacto: make all Config structures' fields public Config is 'static after setup anyways. I don't need to hide all this, it's just cumbersome for tests. 
--- src/client/test_regex.rs | 4 +- src/concepts/action.rs | 45 ++++------------- src/concepts/config.rs | 49 ++++-------------- src/concepts/filter.rs | 92 ++++++++++------------------------ src/concepts/pattern/ip/mod.rs | 14 +++--- src/concepts/pattern/mod.rs | 17 +++---- src/concepts/stream.rs | 18 ++----- src/daemon/filter/mod.rs | 38 +++++++------- src/daemon/filter/state.rs | 22 ++++---- src/daemon/mod.rs | 6 +-- src/daemon/socket.rs | 18 +++---- src/daemon/stream.rs | 18 +++---- src/treedb/mod.rs | 4 +- 13 files changed, 119 insertions(+), 226 deletions(-) diff --git a/src/client/test_regex.rs b/src/client/test_regex.rs index 8eb01bb..22ccc63 100644 --- a/src/client/test_regex.rs +++ b/src/client/test_regex.rs @@ -19,7 +19,7 @@ pub fn test_regex( // Code close to Filter::setup() let mut used_patterns: BTreeSet> = BTreeSet::new(); - for pattern in config.patterns().values() { + for pattern in config.patterns.values() { if let Some(index) = regex.find(pattern.name_with_braces()) { // we already `find` it, so we must be able to `rfind` it #[allow(clippy::unwrap_used)] @@ -43,7 +43,7 @@ pub fn test_regex( let mut result = Vec::new(); if !used_patterns.is_empty() { for pattern in used_patterns.iter() { - if let Some(match_) = matches.name(pattern.name()) { + if let Some(match_) = matches.name(&pattern.name) { result.push(match_.as_str().to_string()); if pattern.is_ignore(match_.as_str()) { ignored = true; diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 3e9972a..acd11d7 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -11,36 +11,36 @@ use super::{Match, Pattern}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct Action { - cmd: Vec, + pub cmd: Vec, // TODO one shot time deserialization #[serde(skip_serializing_if = "Option::is_none")] - after: Option, + pub after: Option, #[serde(skip)] - after_duration: Option, + pub after_duration: Option, #[serde( rename = "onexit", default = 
"set_false", skip_serializing_if = "is_false" )] - on_exit: bool, + pub on_exit: bool, #[serde(default = "set_false", skip_serializing_if = "is_false")] - oneshot: bool, + pub oneshot: bool, #[serde(default = "set_false", skip_serializing_if = "is_false")] - ipv4only: bool, + pub ipv4only: bool, #[serde(default = "set_false", skip_serializing_if = "is_false")] - ipv6only: bool, + pub ipv6only: bool, #[serde(skip)] - patterns: Arc>>, + pub patterns: Arc>>, #[serde(skip)] - name: String, + pub name: String, #[serde(skip)] - filter_name: String, + pub filter_name: String, #[serde(skip)] - stream_name: String, + pub stream_name: String, } fn set_false() -> bool { @@ -52,29 +52,6 @@ fn is_false(b: &bool) -> bool { } impl Action { - pub fn name(&self) -> &str { - &self.name - } - - pub fn after_duration(&self) -> Option { - self.after_duration - } - - pub fn on_exit(&self) -> bool { - self.on_exit - } - - pub fn oneshot(&self) -> bool { - self.oneshot - } - - pub fn ipv4only(&self) -> bool { - self.ipv4only - } - pub fn ipv6only(&self) -> bool { - self.ipv6only - } - pub fn setup( &mut self, stream_name: &str, diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 22780c9..7daaea9 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use tracing::{debug, error, info, warn}; -use super::{Filter, Pattern, Stream}; +use super::{Pattern, Stream}; pub type Patterns = BTreeMap>; @@ -20,20 +20,20 @@ pub type Patterns = BTreeMap>; #[serde(deny_unknown_fields)] pub struct Config { #[serde(default = "num_cpus::get")] - concurrency: usize, + pub concurrency: usize, #[serde(default = "dot", skip_serializing_if = "String::is_empty")] - state_directory: String, + pub state_directory: String, #[serde(default)] - patterns: Patterns, + pub patterns: Patterns, #[serde(default, skip_serializing_if = "Vec::is_empty")] - start: Vec>, + pub start: Vec>, #[serde(default, skip_serializing_if = 
"Vec::is_empty")] - stop: Vec>, + pub stop: Vec>, #[serde(default)] - streams: BTreeMap, + pub streams: BTreeMap, // This field only serve the purpose of having a top-level place for saving YAML variables #[serde(default, skip_serializing, rename = "definitions")] @@ -45,35 +45,6 @@ fn dot() -> String { } impl Config { - pub fn streams(&self) -> &BTreeMap { - &self.streams - } - - pub fn patterns(&self) -> &Patterns { - &self.patterns - } - - pub fn concurrency(&self) -> usize { - self.concurrency - } - - pub fn state_directory(&self) -> &str { - &self.state_directory - } - - pub fn filters(&self) -> Vec<&Filter> { - self.streams - .values() - .flat_map(|stream| stream.filters().values()) - .collect() - } - - pub fn get_filter(&self, name: &(String, String)) -> Option<&Filter> { - self.streams - .get(&name.0) - .and_then(|stream| stream.get_filter(&name.1)) - } - fn merge(&mut self, mut other: Config) -> Result<(), String> { for (key, pattern) in other.patterns.into_iter() { match self.patterns.entry(key) { @@ -644,7 +615,7 @@ mod tests { assert!(cfg_org.streams.contains_key("echo")); assert_eq!(cfg_org.streams.len(), 1); - let filters = cfg_org.streams.get("echo").unwrap().filters(); + let filters = &cfg_org.streams.get("echo").unwrap().filters; assert!(filters.contains_key("f1")); assert!(filters.contains_key("f2")); assert_eq!(filters.len(), 2); @@ -704,8 +675,8 @@ mod tests { assert!(cfg_org.streams.contains_key("echo")); assert_eq!(cfg_org.streams.len(), 1); let stream = cfg_org.streams.get("echo").unwrap(); - assert_eq!(stream.cmd().len(), 1); - assert_eq!(stream.filters().len(), 1); + assert_eq!(stream.cmd.len(), 1); + assert_eq!(stream.filters.len(), 1); } #[test] diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 98be40b..55760f7 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -31,34 +31,34 @@ pub enum Duplicate { #[serde(deny_unknown_fields)] pub struct Filter { #[serde(skip)] - longuest_action_duration: TimeDelta, + 
pub longuest_action_duration: TimeDelta, #[serde(skip)] - has_ip: bool, + pub has_ip: bool, - regex: Vec, + pub regex: Vec, #[serde(skip)] - compiled_regex: Vec, + pub compiled_regex: Vec, // We want patterns to be ordered // This is necessary when using matches which contain multiple patterns #[serde(skip)] - patterns: Arc>>, + pub patterns: Arc>>, #[serde(skip_serializing_if = "Option::is_none")] - retry: Option, + pub retry: Option, #[serde(rename = "retryperiod", skip_serializing_if = "Option::is_none")] - retry_period: Option, + pub retry_period: Option, #[serde(skip)] - retry_duration: Option, + pub retry_duration: Option, #[serde(default)] - duplicate: Duplicate, + pub duplicate: Duplicate, - actions: BTreeMap, + pub actions: BTreeMap, #[serde(skip)] - name: String, + pub name: String, #[serde(skip)] - stream_name: String, + pub stream_name: String, } impl Filter { @@ -85,46 +85,6 @@ impl Filter { } } - pub fn name(&self) -> &str { - &self.name - } - - pub fn stream_name(&self) -> &str { - &self.stream_name - } - - pub fn retry(&self) -> Option { - self.retry - } - - pub fn retry_duration(&self) -> Option { - self.retry_duration - } - - pub fn longuest_action_duration(&self) -> TimeDelta { - self.longuest_action_duration - } - - pub fn regex(&self) -> &Vec { - &self.regex - } - - pub fn duplicate(&self) -> Duplicate { - self.duplicate - } - - pub fn actions(&self) -> &BTreeMap { - &self.actions - } - - pub fn patterns(&self) -> &BTreeSet> { - &self.patterns - } - - pub fn check_ip(&self) -> bool { - self.has_ip - } - pub fn setup( &mut self, stream_name: &str, @@ -220,11 +180,11 @@ impl Filter { self.has_ip = self .actions .values() - .any(|action| action.ipv4only() || action.ipv6only()); + .any(|action| action.ipv4only || action.ipv6only); self.longuest_action_duration = self.actions.values().fold(TimeDelta::seconds(0), |acc, v| { - v.after_duration() + v.after_duration .map_or(acc, |v| if v > acc { v } else { acc }) }); @@ -239,7 +199,7 @@ impl Filter { 
for pattern in self.patterns.as_ref() { // if the pattern is in an optional part of the regex, // there may be no captured group for it. - if let Some(match_) = matches.name(pattern.name()) { + if let Some(match_) = matches.name(&pattern.name) { if !pattern.is_ignore(match_.as_str()) { let mut match_ = match_.as_str().to_string(); pattern.normalize(&mut match_); @@ -267,16 +227,16 @@ impl Filter { mut patterns: BTreeMap, String>, ) -> Result { // Check pattern length - if patterns.len() != self.patterns().len() { + if patterns.len() != self.patterns.len() { return Err(format!( "{} patterns specified, while the {}.{} filter has {} pattern: ({})", patterns.len(), - self.stream_name(), - self.name(), - self.patterns().len(), - self.patterns() + self.stream_name, + self.name, + self.patterns.len(), + self.patterns .iter() - .map(|pattern| pattern.name().clone()) + .map(|pattern| pattern.name.clone()) .reduce(|acc, pattern| acc + ", " + &pattern) .unwrap_or("".into()), )); @@ -286,7 +246,7 @@ impl Filter { if self.patterns.get(pattern).is_none() { return Err(format!( "pattern {} is not present in the filter {}.{}", - pattern.name(), + pattern.name, self.stream_name, self.name )); @@ -296,7 +256,7 @@ impl Filter { return Err(format!( "'{}' doesn't match pattern {}", match_, - pattern.name(), + pattern.name, )); } @@ -304,7 +264,7 @@ impl Filter { return Err(format!( "'{}' is explicitly ignored by pattern {}", match_, - pattern.name(), + pattern.name, )); } @@ -315,7 +275,7 @@ impl Filter { if !patterns.contains_key(pattern) { return Err(format!( "pattern {} is missing, because it's in the filter {}.{}", - pattern.name(), + pattern.name, self.stream_name, self.name )); @@ -373,7 +333,7 @@ impl Filter { config_patterns: &Patterns, ) -> Self { let mut filter = Self { - actions: actions.into_iter().map(|a| (a.name().into(), a)).collect(), + actions: actions.into_iter().map(|a| (a.name.clone(), a)).collect(), regex: regex.into_iter().map(|s| s.into()).collect(), retry, 
retry_period: retry_period.map(|s| s.into()), diff --git a/src/concepts/pattern/ip/mod.rs b/src/concepts/pattern/ip/mod.rs index 4165852..2fdfd80 100644 --- a/src/concepts/pattern/ip/mod.rs +++ b/src/concepts/pattern/ip/mod.rs @@ -134,21 +134,21 @@ pub struct PatternIp { rename = "type", skip_serializing_if = "PatternType::is_default" )] - pattern_type: PatternType, + pub pattern_type: PatternType, #[serde(default, rename = "ipv4mask")] - ipv4_mask: Option, + pub ipv4_mask: Option, #[serde(default, rename = "ipv6mask")] - ipv6_mask: Option, + pub ipv6_mask: Option, #[serde(skip)] - ipv4_bitmask: Option, + pub ipv4_bitmask: Option, #[serde(skip)] - ipv6_bitmask: Option, + pub ipv6_bitmask: Option, #[serde(default, rename = "ignorecidr", skip_serializing_if = "Vec::is_empty")] - ignore_cidr: Vec, + pub ignore_cidr: Vec, #[serde(skip)] - ignore_cidr_normalized: Vec, + pub ignore_cidr_normalized: Vec, } impl PatternIp { diff --git a/src/concepts/pattern/mod.rs b/src/concepts/pattern/mod.rs index 21c8d31..ac312c5 100644 --- a/src/concepts/pattern/mod.rs +++ b/src/concepts/pattern/mod.rs @@ -15,20 +15,20 @@ pub struct Pattern { pub regex: String, #[serde(default, skip_serializing_if = "Vec::is_empty")] - ignore: Vec, + pub ignore: Vec, #[serde(default, rename = "ignoreregex", skip_serializing_if = "Vec::is_empty")] - ignore_regex: Vec, + pub ignore_regex: Vec, #[serde(skip)] - compiled_ignore_regex: RegexSet, + pub compiled_ignore_regex: RegexSet, #[serde(flatten)] - ip: PatternIp, + pub ip: PatternIp, #[serde(skip)] - name: String, + pub name: String, #[serde(skip)] - name_with_braces: String, + pub name_with_braces: String, } impl Pattern { @@ -40,15 +40,12 @@ impl Pattern { } } - pub fn name(&self) -> &String { - &self.name - } pub fn name_with_braces(&self) -> &String { &self.name_with_braces } pub fn pattern_type(&self) -> PatternType { - self.ip.pattern_type() + self.ip.pattern_type } pub fn setup(&mut self, name: &str) -> Result<(), String> { diff --git 
a/src/concepts/stream.rs b/src/concepts/stream.rs index 780db2f..011652b 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -9,31 +9,19 @@ use super::{Filter, Patterns}; #[serde(deny_unknown_fields)] pub struct Stream { #[serde(default)] - cmd: Vec, + pub cmd: Vec, #[serde(default)] - filters: BTreeMap, + pub filters: BTreeMap, #[serde(skip)] - name: String, + pub name: String, } impl Stream { - pub fn filters(&self) -> &BTreeMap { - &self.filters - } - pub fn get_filter(&self, filter_name: &str) -> Option<&Filter> { self.filters.get(filter_name) } - pub fn name(&self) -> &str { - &self.name - } - - pub fn cmd(&self) -> &Vec { - &self.cmd - } - pub fn merge(&mut self, other: Stream) -> Result<(), String> { if !(self.cmd.is_empty() || other.cmd.is_empty() || self.cmd == other.cmd) { return Err("cmd has conflicting definitions".into()); diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 484b1b5..541b761 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -91,11 +91,11 @@ impl FilterManager { let already_triggered = state.triggers.contains_key(&m); // if duplicate: ignore and already triggered, skip - if already_triggered && Duplicate::Ignore == self.filter.duplicate() { + if already_triggered && Duplicate::Ignore == self.filter.duplicate { return false; } - let trigger = match self.filter.retry() { + let trigger = match self.filter.retry { None => true, Some(retry) => { state.add_match(m.clone(), now); @@ -106,7 +106,7 @@ impl FilterManager { if trigger { state.remove_match(&m); - let extend = already_triggered && Duplicate::Extend == self.filter.duplicate(); + let extend = already_triggered && Duplicate::Extend == self.filter.duplicate; if extend { state.remove_trigger(&m); } @@ -142,7 +142,7 @@ impl FilterManager { let is_match = |match_: &Match| { match_ .iter() - .zip(self.filter.patterns()) + .zip(self.filter.patterns.as_ref()) .filter_map(|(a_match, pattern)| { patterns.get(pattern.as_ref()).map(|regex| 
(a_match, regex)) }) @@ -205,13 +205,13 @@ impl FilterManager { if remaining > 0 { let pattern_status = cs.entry(m.clone()).or_default(); - for action in self.filter.actions().values() { - let action_time = t + action.after_duration().unwrap_or_default(); + for action in self.filter.actions.values() { + let action_time = t + action.after_duration.unwrap_or_default(); if action_time > now { // Insert action pattern_status .actions - .entry(action.name().into()) + .entry(action.name.clone()) .or_default() .push(action_time.to_rfc3339().chars().take(19).collect()); @@ -241,9 +241,9 @@ impl FilterManager { only_after: bool, ) { // Testing if we have an IPv4 or IPv6 - let ip_type = if self.filter.check_ip() { + let ip_type = if self.filter.has_ip { self.filter - .patterns() + .patterns .iter() .zip(&m) .find(|(p, _)| p.pattern_type() == PatternType::Ip) @@ -266,17 +266,17 @@ impl FilterManager { // Scheduling each action for action in self .filter - .actions() + .actions .values() // On startup, skip oneshot actions - .filter(|action| !startup || !action.oneshot()) + .filter(|action| !startup || !action.oneshot) // If only_after, keep only after actions - .filter(|action| !only_after || action.after_duration().is_some()) + .filter(|action| !only_after || action.after_duration.is_some()) // If specific ip version, check it - .filter(|action| !action.ipv4only() || ip_type == PatternType::Ipv4) - .filter(|action| !action.ipv6only() || ip_type == PatternType::Ipv6) + .filter(|action| !action.ipv4only || ip_type == PatternType::Ipv4) + .filter(|action| !action.ipv6only || ip_type == PatternType::Ipv6) { - let exec_time = t + action.after_duration().unwrap_or_default(); + let exec_time = t + action.after_duration.unwrap_or_default(); let m = m.clone(); if exec_time <= now { @@ -298,7 +298,7 @@ impl FilterManager { _ = this.shutdown.wait() => true, }; // Exec action if triggered hasn't been already flushed - if !exiting || action.on_exit() { + if !exiting || action.on_exit { 
#[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = this.state.lock().unwrap(); if state.decrement_trigger(&m, t) { @@ -311,13 +311,13 @@ impl FilterManager { } fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) { - let longuest_action_duration = self.filter.longuest_action_duration(); + let longuest_action_duration = self.filter.longuest_action_duration; let number_of_actions = self .filter - .actions() + .actions .values() // On startup, skip oneshot actions - .filter(|action| !action.oneshot()) + .filter(|action| !action.oneshot) .count() as u64; #[allow(clippy::unwrap_used)] // propagating panics is ok diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 9c44d71..4c088e4 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -11,20 +11,20 @@ use crate::{ pub fn filter_ordered_times_db_name(filter: &Filter) -> String { format!( "filter_ordered_times_{}.{}", - filter.stream_name(), - filter.name() + filter.stream_name, + filter.name ) } pub fn filter_triggers_old_db_name(filter: &Filter) -> String { - format!("filter_triggers_{}.{}", filter.stream_name(), filter.name()) + format!("filter_triggers_{}.{}", filter.stream_name, filter.name) } pub fn filter_triggers_db_name(filter: &Filter) -> String { format!( "filter_triggers2_{}.{}", - filter.stream_name(), - filter.name() + filter.stream_name, + filter.name ) } @@ -67,18 +67,18 @@ impl State { pub fn new(filter: &'static Filter, db: &mut Database, now: Time) -> Result { let ordered_times = db.open_tree( filter_ordered_times_db_name(filter), - filter.retry_duration().unwrap_or_default(), + filter.retry_duration.unwrap_or_default(), |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), )?; let mut triggers = db.open_tree( filter_triggers_db_name(filter), - filter.longuest_action_duration(), + filter.longuest_action_duration, |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)), )?; if triggers.is_empty() { let old_triggers = 
db.open_tree( filter_triggers_old_db_name(filter), - filter.longuest_action_duration(), + filter.longuest_action_duration, |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), )?; for (mt, n) in old_triggers.iter() { @@ -95,7 +95,7 @@ impl State { } let mut this = Self { filter, - has_after: !filter.longuest_action_duration().is_zero(), + has_after: !filter.longuest_action_duration.is_zero(), matches: BTreeMap::new(), ordered_times, triggers, @@ -115,7 +115,7 @@ impl State { // We record triggered filters only when there is an action with an `after` directive if self.has_after { // Add the (Match, Time) to the triggers map - let n = self.filter.actions().len() as u64; + let n = self.filter.actions.len() as u64; self.triggers.fetch_update(m, |map| { Some(match map { None => [(t, n)].into(), @@ -183,7 +183,7 @@ impl State { } pub fn clear_past_matches(&mut self, now: Time) { - let retry_duration = self.filter.retry_duration().unwrap_or_default(); + let retry_duration = self.filter.retry_duration.unwrap_or_default(); while self .ordered_times .first_key_value() diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 9cb8d3a..415394a 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -45,7 +45,7 @@ pub async fn daemon( let (state, stream_managers) = { // Semaphore limiting action execution concurrency - let exec_limit = match config.concurrency() { + let exec_limit = match config.concurrency { 0 => None, n => Some(Arc::new(Semaphore::new(n))), }; @@ -54,9 +54,9 @@ pub async fn daemon( let now = Local::now(); let mut state = HashMap::new(); let mut stream_managers = Vec::new(); - for stream in config.streams().values() { + for stream in config.streams.values() { let mut filter_managers = HashMap::new(); - for filter in stream.filters().values() { + for filter in stream.filters.values() { let manager = FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now)?; filter_managers.insert(filter, manager); diff --git a/src/daemon/socket.rs 
b/src/daemon/socket.rs index 7c3d0c9..2caa04a 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -79,7 +79,7 @@ fn handle_trigger_order( // Check stream existance let filters = match shared_state .iter() - .find(|(stream, _)| stream_name == stream.name()) + .find(|(stream, _)| stream_name == stream.name) { Some((_, filters)) => filters, None => { @@ -90,7 +90,7 @@ fn handle_trigger_order( // Check filter existance let filter_manager = match filters .iter() - .find(|(filter, _)| filter_name == filter.name()) + .find(|(filter, _)| filter_name == filter.name) { Some((_, filter)) => filter, None => { @@ -122,7 +122,7 @@ fn handle_show_or_flush_order( stream_name.is_none() || stream_name .clone() - .is_some_and(|name| name == stream.name()) + .is_some_and(|name| name == stream.name) }) .fold(BTreeMap::new(), |mut acc, (stream, filter_manager)| { let inner_map = filter_manager @@ -132,22 +132,22 @@ fn handle_show_or_flush_order( filter_name.is_none() || filter_name .clone() - .is_some_and(|name| name == filter.name()) + .is_some_and(|name| name == filter.name) }) // pattern filtering .filter(|(filter, _)| { patterns .iter() - .all(|(pattern, _)| filter.patterns().get(pattern).is_some()) + .all(|(pattern, _)| filter.patterns.get(pattern).is_some()) }) .map(|(filter, manager)| { ( - filter.name().to_owned(), + filter.name.to_owned(), manager.handle_order(&patterns, order, now), ) }) .collect(); - acc.insert(stream.name().to_owned(), inner_map); + acc.insert(stream.name.to_owned(), inner_map); acc }); DaemonResponse::Order(cs) @@ -174,7 +174,7 @@ fn answer_order( .map(|(name, reg)| { // lookup pattern in config.patterns config - .patterns() + .patterns .iter() // retrieve or Err .find(|(pattern_name, _)| &name == *pattern_name) @@ -196,7 +196,7 @@ fn answer_order( Ok(reg) => Ok((pattern, reg)), Err(err) => Err(format!( "pattern '{}' regex doesn't compile: {err}", - pattern.name() + pattern.name )), }) .collect::, Regex>, String>>() diff --git 
a/src/daemon/stream.rs b/src/daemon/stream.rs index 3737177..ab464be 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -65,7 +65,7 @@ impl StreamManager { .iter() .flat_map(|(filter, filter_manager)| { filter - .regex() + .regex .iter() .map(|regex| (regex, filter_manager.clone())) }) @@ -80,9 +80,9 @@ impl StreamManager { } pub async fn start(self) { - info!("{}: start {:?}", self.stream.name(), self.stream.cmd()); - let mut child = match Command::new(&self.stream.cmd()[0]) - .args(&self.stream.cmd()[1..]) + info!("{}: start {:?}", self.stream.name, self.stream.cmd); + let mut child = match Command::new(&self.stream.cmd[0]) + .args(&self.stream.cmd[1..]) .stdin(Stdio::null()) .stderr(Stdio::piped()) .stdout(Stdio::piped()) @@ -92,7 +92,7 @@ impl StreamManager { Err(err) => { error!( "could not execute stream {} cmd: {}", - self.stream.name(), + self.stream.name, err ); return; @@ -120,7 +120,7 @@ impl StreamManager { // wait either for the child process to exit on its own or for the shutdown signal futures::select! { _ = child.wait().fuse() => { - error!("stream {} exited: its command returned.", self.stream.name()); + error!("stream {} exited: its command returned.", self.stream.name); return; } _ = self.shutdown.wait().fuse() => {} @@ -143,7 +143,7 @@ impl StreamManager { } else { warn!( "could not get PID of child process for stream {}", - self.stream.name() + self.stream.name ); // still try to use tokio API to kill and reclaim the child process } @@ -160,7 +160,7 @@ impl StreamManager { futures::select! 
{ _ = child.wait().fuse() => {} _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => { - error!("child process of stream {} did not terminate", self.stream.name()); + error!("child process of stream {} did not terminate", self.stream.name); } } } @@ -183,7 +183,7 @@ impl StreamManager { Some(Err(err)) => { error!( "impossible to read output from stream {}: {}", - self.stream.name(), + self.stream.name, err ); return; diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index a69f84e..8549559 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -55,10 +55,10 @@ const DB_NEW_NAME: &str = "reaction.new.db"; impl Config { fn path_of(&self, name: &str) -> PathBuf { - if self.state_directory().is_empty() { + if self.state_directory.is_empty() { name.into() } else { - PathBuf::from(self.state_directory()).join(name) + PathBuf::from(&self.state_directory).join(name) } } } From 0b2bfe533b1f84d45dcc540af66064aa88bb828e Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 2 Aug 2025 12:00:00 +0200 Subject: [PATCH 070/241] Update example configs to get rid of ip46tables --- config/example.jsonnet | 78 +++++++++++++++++++++++++++++------------- config/example.yml | 48 +++++++++++++++++++------- 2 files changed, 89 insertions(+), 37 deletions(-) diff --git a/config/example.jsonnet b/config/example.jsonnet index 1a9fc7d..c9e9abb 100644 --- a/config/example.jsonnet +++ b/config/example.jsonnet @@ -7,22 +7,38 @@ // strongly encouraged to take a look at the full documentation: https://reaction.ppom.me // JSONnet functions -local iptables(args) = ['ip46tables', '-w'] + args; -// ip46tables is a minimal C program (only POSIX dependencies) present in a -// subdirectory of this repo. 
-// it permits to handle both ipv4/iptables and ipv6/ip6tables commands +local ipBan(cmd) = [cmd, '-w', '-A', 'reaction', '-s', '', '-j', 'DROP']; +local ipUnban(cmd) = [cmd, '-w', '-D', 'reaction', '-s', '', '-j', 'DROP']; -// See meaning and usage of this function around L106 +// See meaning and usage of this function around L180 local banFor(time) = { - ban: { - cmd: iptables(['-A', 'reaction', '-s', '', '-j', 'DROP']), + ban4: { + cmd: ipBan('iptables'), + ipv4only: true, }, - unban: { + ban6: { + cmd: ipBan('ip6tables'), + ipv6only: true, + }, + unban4: { + cmd: ipUnban('iptables'), after: time, - cmd: iptables(['-D', 'reaction', '-s', '', '-j', 'DROP']), + ipv4only: true, + }, + unban6: { + cmd: ipUnban('ip6tables'), + after: time, + ipv6only: true, }, }; +// See usage of this function around L90 +// Generates a command for iptables and ip46tables +local ip46tables(arguments) = [ + ['iptables', '-w'] + arguments, + ['ip6tables', '-w'] + arguments, +]; + { // patterns are substitued in regexes. 
// when a filter performs an action, it replaces the found pattern @@ -74,24 +90,23 @@ local banFor(time) = { concurrency: 0, // Those commands will be executed in order at start, before everything else - start: [ + start: // Create an iptables chain for reaction - iptables(['-N', 'reaction']), + ip46tables(['-N', 'reaction']) + // Insert this chain as the first item of the INPUT & FORWARD chains (for incoming connections) - iptables(['-I', 'INPUT', '-p', 'all', '-j', 'reaction']), - iptables(['-I', 'FORWARD', '-p', 'all', '-j', 'reaction']), - ], + ip46tables(['-I', 'INPUT', '-p', 'all', '-j', 'reaction']) + + ip46tables(['-I', 'FORWARD', '-p', 'all', '-j', 'reaction']), // Those commands will be executed in order at stop, after everything else - stop: [ + stop: // Remove the chain from the INPUT & FORWARD chains - iptables(['-D', 'INPUT', '-p', 'all', '-j', 'reaction']), - iptables(['-D', 'FORWARD', '-p', 'all', '-j', 'reaction']), + ip46tables(['-D', 'INPUT', '-p', 'all', '-j', 'reaction']) + + ip46tables(['-D', 'FORWARD', '-p', 'all', '-j', 'reaction']) + // Empty the chain - iptables(['-F', 'reaction']), + ip46tables(['-F', 'reaction']) + // Delete the chain - iptables(['-X', 'reaction']), - ], + ip46tables(['-X', 'reaction']), + // streams are commands // they are run and their ouptut is captured @@ -145,12 +160,20 @@ local banFor(time) = { // actions are run by the filter when regexes are matched actions: { // actions have a user-defined name - ban: { - cmd: iptables(['-A', 'reaction', '-s', '', '-j', 'DROP']), + ban4: { + cmd: ['iptables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP'], + // this optional field permits to run an action only when a pattern of type ip contains an ipv4 + ipv4only: true, }, - unban: { - cmd: iptables(['-D', 'reaction', '-s', '', '-j', 'DROP']), + ban6: { + cmd: ['ip6tables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP'], + // this optional field permits to run an action only when a pattern of type ip contains an ipv6 + 
ipv6only: true, + }, + + unban4: { + cmd: ['iptables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP'], // if after is defined, the action will not take place immediately, but after a specified duration // same format as retryperiod after: '2 days', @@ -160,6 +183,13 @@ local banFor(time) = { // (defaults to false) // here it is not useful because we will flush and delete the chain containing the bans anyway // (with the stop commands) + ipv4only: true, + }, + + unban6: { + cmd: ['ip6tables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP'], + after: '2 days', + ipv6only: true, }, mail: { diff --git a/config/example.yml b/config/example.yml index 4169ecc..40f2764 100644 --- a/config/example.yml +++ b/config/example.yml @@ -10,8 +10,10 @@ # using YAML anchors `&name` and pointers `*name` # definitions are not readed by reaction definitions: - - &iptablesban [ 'ip46tables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP' ] - - &iptablesunban [ 'ip46tables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP' ] + - &ip4tablesban [ 'iptables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP' ] + - &ip6tablesban [ 'ip6tables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP' ] + - &ip4tablesunban [ 'iptables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP' ] + - &ip6tablesunban [ 'ip6tables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP' ] # ip46tables is a minimal C program (only POSIX dependencies) present as a subdirectory. 
# it permits to handle both ipv4/iptables and ipv6/ip6tables commands @@ -62,16 +64,23 @@ patterns: # Those commands will be executed in order at start, before everything else start: - - [ 'ip46tables', '-w', '-N', 'reaction' ] - - [ 'ip46tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-N', 'reaction' ] + - [ 'ip6tables', '-w', '-N', 'reaction' ] + - [ 'iptables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'ip6tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'ip6tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ] # Those commands will be executed in order at stop, after everything else stop: - - [ 'ip46tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-F', 'reaction' ] - - [ 'ip46tables', '-w', '-X', 'reaction' ] + - [ 'iptables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'ip6tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'ip6tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-F', 'reaction' ] + - [ 'ip6tables', '-w', '-F', 'reaction' ] + - [ 'iptables', '-w', '-X', 'reaction' ] + - [ 'ip6tables', '-w', '-X', 'reaction' ] # streams are commands # they are run and their ouptut is captured @@ -124,12 +133,19 @@ streams: # actions are run by the filter when regexes are matched actions: # actions have a user-defined name - ban: + ban4: # YAML substitutes *reference by the value anchored at &reference - cmd: *iptablesban + cmd: *ip4tablesban + # this optional field permits to run an action only when a pattern of type ip contains an ipv4 + ipv4only: true - unban: - cmd: 
*iptablesunban + ban6: + cmd: *ip6tablesban + # this optional field permits to run an action only when a pattern of type ip contains an ipv6 + ipv6only: true + + unban4: + cmd: *ip4tablesunban # if after is defined, the action will not take place immediately, but after a specified duration # same format as retryperiod after: '2 days' @@ -139,6 +155,12 @@ streams: # (defaults to false) # here it is not useful because we will flush and delete the chain containing the bans anyway # (with the stop commands) + ipv4only: true + + unban6: + cmd: *ip6tablesunban + after: '2 days' + ipv6only: true mail: cmd: ['sendmail', '...', ''] From cebdbc7ad057a4f4e0ca67ad63b88eb558f82e56 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 2 Aug 2025 12:00:00 +0200 Subject: [PATCH 071/241] ipv4 regex: do no accept numbers 0[0-9] The Rust std won't accept it anyway, as it interprets numbers starting with 0 as octal numbers and forbid that. --- src/concepts/pattern/ip/mod.rs | 3 ++- src/concepts/pattern/ip/utils.rs | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/concepts/pattern/ip/mod.rs b/src/concepts/pattern/ip/mod.rs index 2fdfd80..3bf727c 100644 --- a/src/concepts/pattern/ip/mod.rs +++ b/src/concepts/pattern/ip/mod.rs @@ -45,7 +45,7 @@ impl PatternType { // then 1xx "1[0-9][0-9]", // then 0xx - "[0-9][0-9]", + "[1-9][0-9]", // then 0x "[0-9])", ] @@ -641,6 +641,7 @@ mod patternip_tests { assert2!(!regex.is_match("1.2.3.4 ")); assert2!(!regex.is_match("1.2. 
3.4")); assert2!(!regex.is_match("257.2.3.4")); + assert2!(!regex.is_match("074.2.3.4")); assert2!(!regex.is_match("1.2.3.4.5")); assert2!(!regex.is_match("1.2..4")); assert2!(!regex.is_match("1.2..3.4")); diff --git a/src/concepts/pattern/ip/utils.rs b/src/concepts/pattern/ip/utils.rs index a294c55..2e1ad66 100644 --- a/src/concepts/pattern/ip/utils.rs +++ b/src/concepts/pattern/ip/utils.rs @@ -79,6 +79,8 @@ mod utils_tests { normalize("::ffff:1.2.3.4"), Ok(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4))) ); + // octal numbers are forbidden + assert!(normalize("083.44.23.14").is_err()); } #[test] From 59c7bfdd1d182db5b48f963e9264ef79bc5d8003 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 2 Aug 2025 12:00:00 +0200 Subject: [PATCH 072/241] Move action filtering logic from daemon to concepts and use at 3 places Used in Filter::schedule_exec, Filter::handle_order, State::add_trigger Add proper testing. This also fix previously failing test. --- src/concepts/filter.rs | 195 +++++++++++++++++++++++++++++++++++- src/concepts/pattern/mod.rs | 2 +- src/daemon/filter/mod.rs | 33 +----- src/daemon/filter/state.rs | 2 +- 4 files changed, 199 insertions(+), 33 deletions(-) diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 55760f7..b439944 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -11,7 +11,7 @@ use regex::Regex; use serde::{Deserialize, Serialize}; use tracing::info; -use super::parse_duration; +use super::{parse_duration, PatternType}; use super::{Action, Match, Pattern, Patterns}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] @@ -284,6 +284,39 @@ impl Filter { Ok(patterns.into_values().collect()) } + + /// Filters [`Filter`]'s [`Action`]s according to its [`Pattern`]s [`PatternType`] + /// and those of the given [`Match`] + pub fn filtered_actions_from_match(&self, m: &Match) -> impl Iterator { + let ip_type = if self.has_ip { + self.patterns + .iter() + .zip(m) + .find(|(p, _)| p.pattern_type() == 
PatternType::Ip) + .map(|(_, m)| -> _ { + // Using this dumb heuristic is ok, + // because we know we have a valid IP address. + if m.contains(':') { + PatternType::Ipv6 + } else if m.contains('.') { + PatternType::Ipv4 + } else { + // This else should not happen, but better falling back on something than + // panicking, right? Maybe we should add a warning there? + PatternType::Regex + } + }) + .unwrap_or(PatternType::Regex) + } else { + PatternType::Regex + }; + + self.actions + .values() + // If specific ip version, check it + .filter(move |action| !action.ipv4only || ip_type == PatternType::Ipv4) + .filter(move |action| !action.ipv6only || ip_type == PatternType::Ipv6) + } } impl Display for Filter { @@ -373,6 +406,7 @@ pub mod tests { use crate::concepts::pattern::tests::{ boubou_pattern_with_ignore, default_pattern, number_pattern, ok_pattern_with_ignore, }; + use crate::concepts::pattern::PatternIp; use super::*; @@ -764,4 +798,163 @@ pub mod tests { assert_eq!(filter.get_match("insert b here and boubou there"), None); assert_eq!(filter.get_match("also add boubou here and b there"), None); } + + #[test] + fn get_match_from_patterns() { + // TODO + } + + #[test] + fn filtered_actions_from_match_one_regex_pattern() { + let az_patterns = Pattern::new_map("az", "[a-z]+").unwrap(); + let action = Action::new( + vec!["zblorg "], + None, + false, + "test", + "test", + "a1", + &az_patterns, + 0, + ); + let filter = Filter::new( + vec![action.clone()], + vec![""], + None, + None, + "test", + "test", + Duplicate::default(), + &az_patterns, + ); + assert_eq!( + vec![&action], + filter + .filtered_actions_from_match(&vec!["zboum".into()]) + .collect::>() + ); + } + + #[test] + fn filtered_actions_from_match_two_regex_patterns() { + let patterns = BTreeMap::from([ + ( + "az".to_string(), + Arc::new(Pattern::new("az", "[a-z]+").unwrap()), + ), + ( + "num".to_string(), + Arc::new(Pattern::new("num", "[0-9]{1,3}").unwrap()), + ), + ]); + let action1 = Action::new( + 
vec!["zblorg "], + None, + false, + "test", + "test", + "a1", + &patterns, + 0, + ); + let action2 = Action::new( + vec!["zbleurg "], + None, + false, + "test", + "test", + "a2", + &patterns, + 0, + ); + let filter = Filter::new( + vec![action1.clone(), action2.clone()], + vec![""], + None, + None, + "test", + "test", + Duplicate::default(), + &patterns, + ); + assert_eq!( + vec![&action1, &action2], + filter + .filtered_actions_from_match(&vec!["zboum".into()]) + .collect::>() + ); + } + + #[test] + fn filtered_actions_from_match_one_regex_one_ip() { + let patterns = BTreeMap::from([ + ( + "az".to_string(), + Arc::new(Pattern::new("az", "[a-z]+").unwrap()), + ), + ("ip".to_string(), { + let mut pattern = Pattern { + ip: PatternIp { + pattern_type: PatternType::Ip, + ..Default::default() + }, + ..Default::default() + }; + pattern.setup("ip").unwrap(); + Arc::new(pattern) + }), + ]); + let action4 = Action::new( + vec!["zblorg4 "], + None, + false, + "test", + "test", + "action4", + &patterns, + 4, + ); + let action6 = Action::new( + vec!["zblorg6 "], + None, + false, + "test", + "test", + "action6", + &patterns, + 6, + ); + let action = Action::new( + vec!["zblorg "], + None, + false, + "test", + "test", + "action", + &patterns, + 0, + ); + let filter = Filter::new( + vec![action4.clone(), action6.clone(), action.clone()], + vec![": "], + None, + None, + "test", + "test", + Duplicate::default(), + &patterns, + ); + assert_eq!( + filter + .filtered_actions_from_match(&vec!["zboum".into(), "1.2.3.4".into()]) + .collect::>(), + vec![&action, &action4], + ); + assert_eq!( + filter + .filtered_actions_from_match(&vec!["zboum".into(), "ab4:35f::1".into()]) + .collect::>(), + vec![&action, &action6], + ); + } } diff --git a/src/concepts/pattern/mod.rs b/src/concepts/pattern/mod.rs index ac312c5..365f379 100644 --- a/src/concepts/pattern/mod.rs +++ b/src/concepts/pattern/mod.rs @@ -181,7 +181,7 @@ impl Pattern { } /// Test-only constructor designed to be easy to call. 
- /// Constructs a full super::Paterns collection with one given pattern + /// Constructs a full [`super::Patterns`] collection with one given pattern pub fn new_map(name: &str, regex: &str) -> Result { Ok(std::iter::once((name.into(), Self::new(name, regex)?.into())).collect()) } diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 541b761..7a6517e 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -14,7 +14,7 @@ use tokio::sync::Semaphore; use tracing::{error, info}; use crate::{ - concepts::{Action, Duplicate, Filter, Match, Pattern, PatternType, Time}, + concepts::{Action, Duplicate, Filter, Match, Pattern, Time}, protocol::{Order, PatternStatus}, treedb::Database, }; @@ -205,7 +205,7 @@ impl FilterManager { if remaining > 0 { let pattern_status = cs.entry(m.clone()).or_default(); - for action in self.filter.actions.values() { + for action in self.filter.filtered_actions_from_match(&m) { let action_time = t + action.after_duration.unwrap_or_default(); if action_time > now { // Insert action @@ -240,41 +240,14 @@ impl FilterManager { startup: bool, only_after: bool, ) { - // Testing if we have an IPv4 or IPv6 - let ip_type = if self.filter.has_ip { - self.filter - .patterns - .iter() - .zip(&m) - .find(|(p, _)| p.pattern_type() == PatternType::Ip) - .map(|(_, m)| -> _ { - // Using this dumb heuristic is ok, - // because we know we have a valid IP address. 
- if m.contains(':') { - PatternType::Ipv6 - } else if m.contains('.') { - PatternType::Ipv4 - } else { - PatternType::Regex - } - }) - .unwrap_or(PatternType::Regex) - } else { - PatternType::Regex - }; - // Scheduling each action for action in self .filter - .actions - .values() + .filtered_actions_from_match(&m) // On startup, skip oneshot actions .filter(|action| !startup || !action.oneshot) // If only_after, keep only after actions .filter(|action| !only_after || action.after_duration.is_some()) - // If specific ip version, check it - .filter(|action| !action.ipv4only || ip_type == PatternType::Ipv4) - .filter(|action| !action.ipv6only || ip_type == PatternType::Ipv6) { let exec_time = t + action.after_duration.unwrap_or_default(); let m = m.clone(); diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 4c088e4..a1bbca5 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -115,7 +115,7 @@ impl State { // We record triggered filters only when there is an action with an `after` directive if self.has_after { // Add the (Match, Time) to the triggers map - let n = self.filter.actions.len() as u64; + let n = self.filter.filtered_actions_from_match(&m).count() as u64; self.triggers.fetch_update(m, |map| { Some(match map { None => [(t, n)].into(), From 773eb76f928f100ceac14c951ba6365341b2df66 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 2 Aug 2025 12:00:00 +0200 Subject: [PATCH 073/241] Update README to advertise ip-specific features --- README.md | 65 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index 1c4fbae..68ec110 100644 --- a/README.md +++ b/README.md @@ -33,11 +33,11 @@ See https://blog.ppom.me/en-reaction-v2. YAML and [JSONnet](https://jsonnet.org/) (more powerful) are supported. both are extensions of JSON, so JSON is transitively supported. 
-- See [reaction.yml](./config/example.yml) or [reaction.jsonnet](./config/example.jsonnet) for a fully explained reference +- See [reaction.yml](./config/example.yml) or [reaction.jsonnet](./config/example.jsonnet) for a fully explained reference (ipv4 + ipv6) - See the [wiki](https://reaction.ppom.me) for multiple examples, security recommendations and FAQ. - See [server.jsonnet](https://reaction.ppom.me/configurations/ppom/server.jsonnet.html) for a real-world configuration - See [reaction.service](./config/reaction.service) for a systemd service file -- This minimal example shows what's needed to prevent brute force attacks on an ssh server (please read at least the [Security](https://reaction.ppom.me/security.html) part of the wiki before starting 🆙): +- This minimal example (ipv4 only) shows what's needed to prevent brute force attacks on an ssh server (please read at least the [Security](https://reaction.ppom.me/security.html) part of the wiki before starting 🆙):
@@ -46,21 +46,18 @@ both are extensions of JSON, so JSON is transitively supported. ```yaml patterns: ip: - regex: '(([0-9]{1,3}\.){3}[0-9]{1,3})|([0-9a-fA-F:]{2,90})' - ignore: - - '127.0.0.1' - - '::1' + type: ipv4 start: - - [ 'ip46tables', '-w', '-N', 'reaction' ] - - [ 'ip46tables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-N', 'reaction' ] + - [ 'iptables', '-w', '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ] stop: - - [ 'ip46tables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] - - [ 'ip46tables', '-w', '-F', 'reaction' ] - - [ 'ip46tables', '-w', '-X', 'reaction' ] + - [ 'iptables', '-w', '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ] + - [ 'iptables', '-w', '-F', 'reaction' ] + - [ 'iptables', '-w', '-X', 'reaction' ] streams: ssh: @@ -76,9 +73,9 @@ streams: retryperiod: '6h' actions: ban: - cmd: [ 'ip46tables', '-w', '-I', 'reaction', '1', '-s', '', '-j', 'DROP' ] + cmd: [ 'iptables', '-w', '-I', 'reaction', '1', '-s', '', '-j', 'DROP' ] unban: - cmd: [ 'ip46tables', '-w', '-D', 'reaction', '1', '-s', '', '-j', 'DROP' ] + cmd: [ 'iptables', '-w', '-D', 'reaction', '1', '-s', '', '-j', 'DROP' ] after: '48h' ``` @@ -89,41 +86,40 @@ streams: /etc/reaction.jsonnet ```jsonnet -local iptables(args) = [ 'ip46tables', '-w' ] + args; local banFor(time) = { ban: { - cmd: iptables(['-A', 'reaction', '-s', '', '-j', 'DROP']), + cmd: ['iptables', '-w', '-A', 'reaction', '-s', '', '-j', 'DROP'], }, unban: { + cmd: ['iptables', '-w', '-D', 'reaction', '-s', '', '-j', 'DROP'], after: time, - cmd: iptables(['-D', 'reaction', '-s', '', '-j', 'DROP']), }, }; { patterns: { ip: { - regex: @'(?:(?:[ 0-9 
]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})', + type: 'ipv4', }, }, start: [ - iptables([ '-N', 'reaction' ]), - iptables([ '-I', 'INPUT', '-p', 'all', '-j', 'reaction' ]), - iptables([ '-I', 'FORWARD', '-p', 'all', '-j', 'reaction' ]), + ['iptables', '-N', 'reaction'], + ['iptables', '-I', 'INPUT', '-p', 'all', '-j', 'reaction'], + ['iptables', '-I', 'FORWARD', '-p', 'all', '-j', 'reaction'], ], stop: [ - iptables([ '-D', 'INPUT', '-p', 'all', '-j', 'reaction' ]), - iptables([ '-D', 'FORWARD', '-p', 'all', '-j', 'reaction' ]), - iptables([ '-F', 'reaction' ]), - iptables([ '-X', 'reaction' ]), + ['iptables', '-D', 'INPUT', '-p', 'all', '-j', 'reaction'], + ['iptables', '-D', 'FORWARD', '-p', 'all', '-j', 'reaction'], + ['iptables', '-F', 'reaction'], + ['iptables', '-X', 'reaction'], ], streams: { ssh: { - cmd: [ 'journalctl', '-fu', 'sshd.service' ], + cmd: ['journalctl', '-fu', 'sshd.service'], filters: { failedlogin: { regex: [ - @'authentication failure;.*rhost=' + @'authentication failure;.*rhost=', @'Failed password for .* from ', @'banner exchange: Connection from port [0-9]*: invalid format', @'Invalid user .* from ', @@ -156,13 +152,20 @@ If you don't know where to start reaction, `/var/lib/reaction` should be a sane - `reaction test-config` shows loaded configuration - `reaction help` for full usage. -### `ip46tables` +### `ip46tables` and `nft46` -`ip46tables` is a minimal c program present in its own subdirectory with only standard posix dependencies. +> ⚠️Deprecated since v2.2.0: +> reaction now provides builtin support for executing different actions on ipv4 and ipv6. +> They will be removed in a future version. -It permits to configure `iptables` and `ip6tables` at the same time. +`ip46tables` and `nft46` are two minimal c programs present in the `helpers_c` directory with only standard posix dependencies. + +`ip46tables` permits to configure `iptables` and `ip6tables` at the same time. 
It will execute `iptables` when detecting ipv4, `ip6tables` when detecting ipv6 and both if no ip address is present on the command line. +`nft46` works slightly differently: it will replace the `X` in its argument by 4 or 6 depending on the ip address on the command line. +This permits to have 2 IP sets, one of type `ipv4_addr` and one of type `ipv6_addr`. + ## Wiki You'll find more ressources, service configurations, etc. on [the wiki](https://reaction.ppom.me)! From f477310a29129303c775ece04deb547ebbbc1fef Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 5 Aug 2025 12:00:00 +0200 Subject: [PATCH 074/241] duplicates: Add failing tests for Deduplication on start --- src/daemon/filter/tests.rs | 92 +++++++++++++++++++++++++++++++++++++- 1 file changed, 91 insertions(+), 1 deletion(-) diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 0565db2..653be29 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -6,10 +6,14 @@ use std::{ }; use chrono::{Local, TimeDelta}; +use serde_json::json; use tempfile::TempPath; use tokio::sync::Semaphore; -use super::{state::filter_ordered_times_db_name, FilterManager, React}; +use super::{ + state::{filter_ordered_times_db_name, filter_triggers_db_name}, + FilterManager, React, +}; use crate::{ concepts::{Action, Duplicate, Filter, Pattern, Patterns, Time}, daemon::shutdown::ShutdownController, @@ -644,6 +648,92 @@ async fn trigger_matched_pattern() { } } +#[tokio::test] +async fn trigger_deduplication_on_start() { + for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { + let bed = TestBed::default(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + 0, + ), + Action::new( + vec!["sh", "-c", &format!("echo a2 >> {}", &bed.out_file)], + Some("2s"), + false, + "test", + "test", + "a2", + &bed.az_patterns, + 0, + ), + ], + vec!["test "], + 
None, + None, + "test", + "test", + dup, + &bed.az_patterns, + ); + + let now = Local::now(); + let now1s = now - TimeDelta::milliseconds(1000); + let now2s = now - TimeDelta::milliseconds(1030); + let one = vec!["one".to_string()]; + + let mut db = TempDatabase::default().await; + db.set_loaded_db(HashMap::from([( + filter_triggers_db_name(filter), + HashMap::from([( + one.clone().into(), + json!({ + now1s.to_rfc3339(): 1, + now2s.to_rfc3339(): 1, + }), + )]), + )])); + let bed = bed.part2(filter, now, Some(db)).await; + + // the action executes + tokio::time::sleep(Duration::from_millis(50)).await; + + // No matches, one or two action·s registered + { + let state = bed.manager.state.lock().unwrap(); + assert!(state.matches.is_empty()); + assert!(state.ordered_times.is_empty()); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([( + one.clone(), + match dup { + Duplicate::Extend => BTreeMap::from([(now1s, 1)]), + Duplicate::Ignore => BTreeMap::from([(now2s, 1)]), + Duplicate::Rerun => BTreeMap::from([(now1s, 1), (now2s, 1)]), + } + )]), + "the state contains one or two triggers. Duplicate: {dup:?}" + ); + } + assert_eq!( + match dup { + Duplicate::Ignore | Duplicate::Extend => "a1 one\n", + Duplicate::Rerun => "a1 one\na1 one\n", + }, + &read_to_string(&bed.out_file).unwrap(), + "the output file contains the result of the action. Duplicate: {dup:?}" + ); + } +} + #[tokio::test] async fn multiple_triggers() { for dup in [Duplicate::Rerun, Duplicate::Extend, Duplicate::Ignore] { From f4d002c615276caa5fc4eca5e7e034f9fd1883fc Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 5 Aug 2025 12:00:00 +0200 Subject: [PATCH 075/241] Fix trigger count on start schedule_exec was called before inserting the data in triggers, resulting in action count being set again after decrement in schedule exec. This could lead to: - trigger not disappearing after done - second action with no "after" not being run - ... 
--- src/daemon/filter/mod.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 7a6517e..c69a8b8 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -303,20 +303,24 @@ impl FilterManager { .collect::>(); for (m, map) in cloned_triggers.into_iter() { - let mut new_map = BTreeMap::default(); - for (t, remaining) in map.into_iter() { - if remaining > 0 && t + longuest_action_duration > now { - // Insert back the upcoming times - new_map.insert(t, number_of_actions); + let map: BTreeMap<_, _> = map + .into_iter() + // Keep only up-to-date triggers + .filter(|(t, remaining)| *remaining > 0 && *t + longuest_action_duration > now) + // Reset action count + .map(|(t, _)| (t, number_of_actions)) + .collect(); + + if map.is_empty() { + state.triggers.remove(&m); + } else { + let times = map.clone(); + state.triggers.insert(m.clone(), map); + for (t, _) in times { // Schedule the upcoming times self.schedule_exec(m.clone(), t, now, &mut state, true, false); } } - if new_map.is_empty() { - state.triggers.remove(&m); - } else { - state.triggers.insert(m, new_map); - } } } } From 56e4d778546c17f298fe171c8f9ab37f2210907d Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 5 Aug 2025 12:00:00 +0200 Subject: [PATCH 076/241] Deduplication of triggers on start --- src/daemon/filter/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index c69a8b8..0b86562 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -314,6 +314,16 @@ impl FilterManager { if map.is_empty() { state.triggers.remove(&m); } else { + // Filter duplicates + // unwrap is fine because map is not empty (see if) + let map = match self.filter.duplicate { + // Keep only last item + Duplicate::Extend => BTreeMap::from([map.into_iter().last().unwrap()]), + // Keep only first item + Duplicate::Ignore => 
BTreeMap::from([map.into_iter().next().unwrap()]), + // No filtering + Duplicate::Rerun => map, + }; let times = map.clone(); state.triggers.insert(m.clone(), map); for (t, _) in times { From a1df62077c2468c01e1ab2394d94b25238ffd10d Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 5 Aug 2025 12:00:00 +0200 Subject: [PATCH 077/241] cargo clippy --- src/concepts/action.rs | 16 ++++++++++------ src/daemon/filter/mod.rs | 2 +- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/concepts/action.rs b/src/concepts/action.rs index acd11d7..c63724e 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -172,6 +172,7 @@ impl Display for Action { #[cfg(test)] impl Action { /// Test-only constructor designed to be easy to call + #[allow(clippy::too_many_arguments)] pub fn new( cmd: Vec<&str>, after: Option<&str>, @@ -212,15 +213,18 @@ pub mod tests { use super::*; pub fn ok_action() -> Action { - let mut action = Action::default(); - action.cmd = vec!["command".into()]; - action + Action { + cmd: vec!["command".into()], + ..Default::default() + } } pub fn ok_action_with_after(d: String, name: &str) -> Action { - let mut action = Action::default(); - action.cmd = vec!["command".into()]; - action.after = Some(d); + let mut action = Action { + cmd: vec!["command".into()], + after: Some(d), + ..Default::default() + }; action .setup("", "", name, Arc::new(BTreeSet::default())) .unwrap(); diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 0b86562..b6a251b 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -318,7 +318,7 @@ impl FilterManager { // unwrap is fine because map is not empty (see if) let map = match self.filter.duplicate { // Keep only last item - Duplicate::Extend => BTreeMap::from([map.into_iter().last().unwrap()]), + Duplicate::Extend => BTreeMap::from([map.into_iter().next_back().unwrap()]), // Keep only first item Duplicate::Ignore => BTreeMap::from([map.into_iter().next().unwrap()]), // No filtering 
From f36464299abe04fba6fd5e99fb795f1243d44b0e Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 078/241] Duplicate::Extend: reschedule correctly actions not already triggered Before, it rescheduled all actions with an `after` directive, which is wrong when some after actions have already been executed (in case of different actions with different after durations) --- TODO | 2 - src/concepts/filter.rs | 44 +++++++------- src/daemon/filter/mod.rs | 51 +++++++++------- src/daemon/filter/state.rs | 30 ++++------ src/daemon/filter/tests.rs | 119 +++++++++++++++++++++++++++++++++++++ 5 files changed, 181 insertions(+), 65 deletions(-) diff --git a/TODO b/TODO index 3737c19..9b3b9fa 100644 --- a/TODO +++ b/TODO @@ -8,5 +8,3 @@ test Filter::regex conformity after setup should an ipv6-mapped ipv4 match a pattern of type ipv6? should it be normalized as ipv4 then? - -duplicate: deduplicate when loading database diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index b439944..ba7a205 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -246,25 +246,21 @@ impl Filter { if self.patterns.get(pattern).is_none() { return Err(format!( "pattern {} is not present in the filter {}.{}", - pattern.name, - self.stream_name, - self.name + pattern.name, self.stream_name, self.name )); } if !pattern.is_match(match_) { return Err(format!( "'{}' doesn't match pattern {}", - match_, - pattern.name, + match_, pattern.name, )); } if pattern.is_ignore(match_) { return Err(format!( "'{}' is explicitly ignored by pattern {}", - match_, - pattern.name, + match_, pattern.name, )); } @@ -275,9 +271,7 @@ impl Filter { if !patterns.contains_key(pattern) { return Err(format!( "pattern {} is missing, because it's in the filter {}.{}", - pattern.name, - self.stream_name, - self.name + pattern.name, self.stream_name, self.name )); } } @@ -287,7 +281,7 @@ impl Filter { /// Filters [`Filter`]'s [`Action`]s according to its [`Pattern`]s 
[`PatternType`] /// and those of the given [`Match`] - pub fn filtered_actions_from_match(&self, m: &Match) -> impl Iterator { + pub fn filtered_actions_from_match(&self, m: &Match) -> Vec<&Action> { let ip_type = if self.has_ip { self.patterns .iter() @@ -311,11 +305,21 @@ impl Filter { PatternType::Regex }; - self.actions + let mut actions: Vec<_> = self + .actions .values() // If specific ip version, check it .filter(move |action| !action.ipv4only || ip_type == PatternType::Ipv4) .filter(move |action| !action.ipv6only || ip_type == PatternType::Ipv6) + .collect(); + + // Sort by after + actions.sort_by(|a, b| { + a.after_duration + .unwrap_or_default() + .cmp(&b.after_duration.unwrap_or_default()) + }); + actions } } @@ -829,9 +833,7 @@ pub mod tests { ); assert_eq!( vec![&action], - filter - .filtered_actions_from_match(&vec!["zboum".into()]) - .collect::>() + filter.filtered_actions_from_match(&vec!["zboum".into()]) ); } @@ -879,9 +881,7 @@ pub mod tests { ); assert_eq!( vec![&action1, &action2], - filter - .filtered_actions_from_match(&vec!["zboum".into()]) - .collect::>() + filter.filtered_actions_from_match(&vec!["zboum".into()]) ); } @@ -945,15 +945,11 @@ pub mod tests { &patterns, ); assert_eq!( - filter - .filtered_actions_from_match(&vec!["zboum".into(), "1.2.3.4".into()]) - .collect::>(), + filter.filtered_actions_from_match(&vec!["zboum".into(), "1.2.3.4".into()]), vec![&action, &action4], ); assert_eq!( - filter - .filtered_actions_from_match(&vec!["zboum".into(), "ab4:35f::1".into()]) - .collect::>(), + filter.filtered_actions_from_match(&vec!["zboum".into(), "ab4:35f::1".into()]), vec![&action, &action6], ); } diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index b6a251b..136e1d8 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -88,10 +88,8 @@ impl FilterManager { let mut state = self.state.lock().unwrap(); state.clear_past_matches(now); - let already_triggered = state.triggers.contains_key(&m); - - // if 
duplicate: ignore and already triggered, skip - if already_triggered && Duplicate::Ignore == self.filter.duplicate { + // if Duplicate::Ignore and already triggered, skip + if state.triggers.contains_key(&m) && Duplicate::Ignore == self.filter.duplicate { return false; } @@ -106,12 +104,17 @@ impl FilterManager { if trigger { state.remove_match(&m); - let extend = already_triggered && Duplicate::Extend == self.filter.duplicate; - if extend { - state.remove_trigger(&m); - } - state.add_trigger(m.clone(), now); - self.schedule_exec(m, now, now, &mut state, false, extend); + let actions_left = if Duplicate::Extend == self.filter.duplicate { + // Get number of actions left from last trigger + state + .remove_trigger(&m) + // Only one entry in the map because Duplicate::Extend + .and_then(|map| map.first_key_value().map(|(_, n)| n.clone())) + } else { + None + }; + state.add_trigger(m.clone(), now, actions_left); + self.schedule_exec(m, now, now, &mut state, false, actions_left); } trigger @@ -127,8 +130,8 @@ impl FilterManager { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = self.state.lock().unwrap(); state.remove_match(&match_); - state.add_trigger(match_.clone(), now); - self.schedule_exec(match_, now, now, &mut state, false, false); + state.add_trigger(match_.clone(), now, None); + self.schedule_exec(match_, now, now, &mut state, false, None); Ok(()) } @@ -238,17 +241,22 @@ impl FilterManager { now: Time, state: &mut MutexGuard, startup: bool, - only_after: bool, + actions_left: Option, ) { - // Scheduling each action - for action in self + let actions = self .filter .filtered_actions_from_match(&m) + .into_iter() // On startup, skip oneshot actions .filter(|action| !startup || !action.oneshot) - // If only_after, keep only after actions - .filter(|action| !only_after || action.after_duration.is_some()) - { + // skip any actions + .skip(match actions_left { + Some(actions_left) => self.filter.actions.len() - actions_left as usize, + None 
=> 0, + }); + + // Scheduling each action + for action in actions { let exec_time = t + action.after_duration.unwrap_or_default(); let m = m.clone(); @@ -324,11 +332,10 @@ impl FilterManager { // No filtering Duplicate::Rerun => map, }; - let times = map.clone(); - state.triggers.insert(m.clone(), map); - for (t, _) in times { + state.triggers.insert(m.clone(), map.clone()); + for (t, _) in map { // Schedule the upcoming times - self.schedule_exec(m.clone(), t, now, &mut state, true, false); + self.schedule_exec(m.clone(), t, now, &mut state, true, None); } } } diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index a1bbca5..52f480c 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -11,8 +11,7 @@ use crate::{ pub fn filter_ordered_times_db_name(filter: &Filter) -> String { format!( "filter_ordered_times_{}.{}", - filter.stream_name, - filter.name + filter.stream_name, filter.name ) } @@ -21,11 +20,7 @@ pub fn filter_triggers_old_db_name(filter: &Filter) -> String { } pub fn filter_triggers_db_name(filter: &Filter) -> String { - format!( - "filter_triggers2_{}.{}", - filter.stream_name, - filter.name - ) + format!("filter_triggers2_{}.{}", filter.stream_name, filter.name) } /// Internal state of a [`FilterManager`]. 
@@ -111,11 +106,12 @@ impl State { self.ordered_times.insert(t, m); } - pub fn add_trigger(&mut self, m: Match, t: Time) { + pub fn add_trigger(&mut self, m: Match, t: Time, action_count: Option) { // We record triggered filters only when there is an action with an `after` directive if self.has_after { // Add the (Match, Time) to the triggers map - let n = self.filter.filtered_actions_from_match(&m).count() as u64; + let n = action_count + .unwrap_or_else(|| self.filter.filtered_actions_from_match(&m).len() as u64); self.triggers.fetch_update(m, |map| { Some(match map { None => [(t, n)].into(), @@ -139,8 +135,8 @@ impl State { } /// Completely remove a Match from the triggers - pub fn remove_trigger(&mut self, m: &Match) { - self.triggers.remove(m); + pub fn remove_trigger(&mut self, m: &Match) -> Option> { + self.triggers.remove(m) } /// Returns whether we should still execute an action for this (Match, Time) trigger @@ -362,7 +358,7 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Add unique trigger - state.add_trigger(one.clone(), now); + state.add_trigger(one.clone(), now, None); // Nothing is really added assert!(state.triggers.tree().is_empty()); @@ -416,7 +412,7 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Add unique trigger - state.add_trigger(one.clone(), now); + state.add_trigger(one.clone(), now, None); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 3)].into())]) @@ -440,8 +436,8 @@ mod tests { assert!(!state.decrement_trigger(&one, now)); // Add trigger with neighbour - state.add_trigger(one.clone(), now); - state.add_trigger(one.clone(), now_plus_1s); + state.add_trigger(one.clone(), now, None); + state.add_trigger(one.clone(), now_plus_1s, None); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) @@ -471,8 +467,8 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Add two neighbour triggers - state.add_trigger(one.clone(), now); - 
state.add_trigger(one.clone(), now_plus_1s); + state.add_trigger(one.clone(), now, None); + state.add_trigger(one.clone(), now_plus_1s, None); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 653be29..936d728 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -851,6 +851,125 @@ async fn multiple_triggers() { ); } } +#[tokio::test] +async fn extend_trigger_multiple_after_actions() { + let last_match_duration = Duration::from_millis(350); + + for (second_match_duration, first_action_list, last_action_list) in [ + ( + Duration::from_millis(50), + vec!["a0", "a1"], + vec!["d2", "c3", "a4"], + ), + ( + Duration::from_millis(150), + vec!["a0", "a1", "d2"], + vec!["c3", "a4"], + ), + ( + Duration::from_millis(250), + vec!["a0", "a1", "d2", "c3"], + vec!["a4"], + ), + ] { + let bed = TestBed::default(); + let filter = Filter::new_static( + vec![ + Action::new( + vec!["sh", "-c", &format!("echo a0 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a0", + &bed.az_patterns, + 0, + ), + Action::new( + vec!["sh", "-c", &format!("echo a1 >> {}", &bed.out_file)], + None, + false, + "test", + "test", + "a1", + &bed.az_patterns, + 0, + ), + Action::new( + vec!["sh", "-c", &format!("echo d2 >> {}", &bed.out_file)], + Some("100ms"), + false, + "test", + "test", + "d2", + &bed.az_patterns, + 0, + ), + Action::new( + vec!["sh", "-c", &format!("echo c3 >> {}", &bed.out_file)], + Some("200ms"), + false, + "test", + "test", + "c3", + &bed.az_patterns, + 0, + ), + Action::new( + vec!["sh", "-c", &format!("echo a4 >> {}", &bed.out_file)], + Some("300ms"), + false, + "test", + "test", + "a4", + &bed.az_patterns, + 0, + ), + ], + vec!["test "], + None, + None, + "test", + "test", + Duplicate::Extend, + &bed.az_patterns, + ); + + let bed = bed.part2(filter, Local::now(), None).await; + + assert_eq!( + 
bed.manager.handle_line("test one", Local::now()), + React::Trigger, + ); + + // Wait for first trigger to execute (partially) + tokio::time::sleep(second_match_duration).await; + + assert_eq!( + first_action_list.join("\n"), + read_to_string(&bed.out_file).unwrap().trim(), + "Sleep: {second_match_duration:?}" + ); + + assert_eq!( + bed.manager.handle_line("test one", Local::now()), + React::Trigger, + ); + + // Wait for second trigger to execute (fully) + tokio::time::sleep(last_match_duration).await; + + assert_eq!( + first_action_list + .into_iter() + .chain(last_action_list.clone()) + .fold("".to_string(), |acc, elt| format!("{acc}\n{elt}")) + .trim(), + read_to_string(&bed.out_file).unwrap().trim(), + "Sleep: {last_match_duration:?}" + ); + } +} #[tokio::test] async fn ip_specific() { From 441d981a20e08c4833a170805354786968a59e1c Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 079/241] Duplicate::Ignore: do not show ignored matches move match logging from concepts/filter to daemon/filter --- TODO | 2 -- src/concepts/filter.rs | 5 +---- src/daemon/filter/mod.rs | 2 ++ 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/TODO b/TODO index 9b3b9fa..4e068e1 100644 --- a/TODO +++ b/TODO @@ -1,7 +1,5 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) -move match logging from concepts/filter to daemon/filter - test migration stream: test regex ending with $ test Filter::regex conformity after setup diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index ba7a205..e8e7a0b 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -9,7 +9,6 @@ use std::{ use chrono::TimeDelta; use regex::Regex; use serde::{Deserialize, Serialize}; -use tracing::info; use super::{parse_duration, PatternType}; use super::{Action, Match, Pattern, Patterns}; @@ -208,12 +207,10 @@ impl Filter { } } if result.len() == self.patterns.len() { - info!("{}: match {:?}", self, result); return 
Some(result); } } else { - info!("{}: match []", self); - return Some(vec![".".to_string()]); + return Some(vec![]); } } } diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 136e1d8..64614a1 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -93,6 +93,8 @@ impl FilterManager { return false; } + info!("{}: match {:?}", self.filter, &m); + let trigger = match self.filter.retry { None => true, Some(retry) => { From eaf40cb57929a39728200709a26569b5154911cd Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 080/241] test Filter::regex conformity after setup --- TODO | 1 - src/concepts/filter.rs | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/TODO b/TODO index 4e068e1..baef0f9 100644 --- a/TODO +++ b/TODO @@ -2,7 +2,6 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) test migration stream: test regex ending with $ -test Filter::regex conformity after setup should an ipv6-mapped ipv4 match a pattern of type ipv6? should it be normalized as ipv4 then? 
diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index e8e7a0b..fef8391 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -539,6 +539,7 @@ pub mod tests { .unwrap() .to_string() ); + assert_eq!(&filter.regex[0].to_string(), "insert (?P[abc]) here$"); assert_eq!(filter.patterns.len(), 1); let stored_pattern = filter.patterns.first().unwrap(); assert_eq!(stored_pattern.regex, pattern.regex); @@ -564,6 +565,10 @@ pub mod tests { .unwrap() .to_string() ); + assert_eq!( + &filter.compiled_regex[0].to_string(), + "insert (?P[abc]) here and (?P(?:bou){1,3}) there" + ); assert_eq!(filter.patterns.len(), 2); let stored_pattern = filter.patterns.first().unwrap(); assert_eq!(stored_pattern.regex, boubou.regex); @@ -582,12 +587,20 @@ pub mod tests { .unwrap() .to_string() ); + assert_eq!( + &filter.compiled_regex[0].to_string(), + "insert (?P[abc]) here" + ); assert_eq!( filter.compiled_regex[1].to_string(), Regex::new("also add (?P[abc]) there") .unwrap() .to_string() ); + assert_eq!( + &filter.compiled_regex[1].to_string(), + "also add (?P[abc]) there" + ); assert_eq!(filter.patterns.len(), 1); let stored_pattern = filter.patterns.first().unwrap(); assert_eq!(stored_pattern.regex, pattern.regex); @@ -614,6 +627,10 @@ pub mod tests { .unwrap() .to_string() ); + assert_eq!( + &filter.compiled_regex[1].to_string(), + "also add (?P(?:bou){1,3}) here and (?P[abc]) there" + ); assert_eq!(filter.patterns.len(), 2); let stored_pattern = filter.patterns.first().unwrap(); assert_eq!(stored_pattern.regex, boubou.regex); From 90ec56902aad7921a98826c1f7e64b67db219f41 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 081/241] Add tests for triggers tree migration --- TODO | 1 - src/daemon/filter/state.rs | 146 +++++++++++++++++++++++++++---------- 2 files changed, 106 insertions(+), 41 deletions(-) diff --git a/TODO b/TODO index baef0f9..586b4e6 100644 --- a/TODO +++ b/TODO @@ -1,6 +1,5 @@ Test what happens when a 
Filter's pattern Set changes (I think it's shitty) -test migration stream: test regex ending with $ should an ipv6-mapped ipv4 match a pattern of type ipv6? diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index 52f480c..d18bb0a 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -224,6 +224,7 @@ mod tests { use std::collections::{BTreeMap, HashMap}; use chrono::{DateTime, Local, TimeDelta}; + use serde_json::json; use crate::{ concepts::{filter_tests::ok_filter, Action, Duplicate, Filter, Pattern}, @@ -236,16 +237,19 @@ mod tests { async fn state_new() { let patterns = Pattern::new_map("az", "[a-z]+").unwrap(); let filter = Filter::new_static( - vec![Action::new( - vec!["true"], - None, - false, - "s1", - "f1", - "a1", - &patterns, - 0, - )], + vec![ + Action::new(vec!["true"], None, false, "s1", "f1", "a1", &patterns, 0), + Action::new( + vec!["true"], + Some("3s"), + false, + "s1", + "f1", + "a2", + &patterns, + 0, + ), + ], vec!["test "], Some(3), Some("2s"), @@ -265,38 +269,100 @@ mod tests { let now_less_4s = now - TimeDelta::seconds(4); let now_less_5s = now - TimeDelta::seconds(5); - let mut db = TempDatabase::default().await; - db.set_loaded_db(HashMap::from([( - "filter_ordered_times_s1.f1".into(), - HashMap::from([ - // Will stay - (now_plus_1m.to_rfc3339().into(), ["one"].into()), - (now_plus_1m01.to_rfc3339().into(), ["one"].into()), - (now_less_1s.to_rfc3339().into(), ["two"].into()), // stays because retry: 2s - // Will get cleaned - (now_less_4s.to_rfc3339().into(), ["two"].into()), - (now_less_5s.to_rfc3339().into(), ["three"].into()), - (now_less_1m.to_rfc3339().into(), ["two"].into()), - ]), - )])); + let triggers = [ + // format v1 + ( + "filter_triggers_s1.f1".into(), + HashMap::from([ + // Will stay + ( + json!({ + "t": now_plus_1m, + "m": ["one"], + }), + json!(1), + ), + ( + json!({ + "t": now_less_1s, + "m": ["one"], + }), + json!(1), + ), + // Will not get cleaned because it's FilterManager's 
task + ( + json!({ + "t": now_less_5s, + "m": ["one"], + }), + json!(1), + ), + ]), + ), + // format v2 (since v2.2.0) + ( + "filter_triggers2_s1.f1".into(), + HashMap::from([( + json!(["one"]), + json!({ + // Will stay + now_plus_1m.to_rfc3339(): 1, + now_less_1s.to_rfc3339(): 1, + // Will not get cleaned because it's FilterManager's task + now_less_5s.to_rfc3339(): 1, + }), + )]), + ), + ]; - let state = State::new(filter, &mut db, now).unwrap(); + for trigger_db in triggers { + let mut db = TempDatabase::default().await; + db.set_loaded_db(HashMap::from([ + ( + "filter_ordered_times_s1.f1".into(), + HashMap::from([ + // Will stay + (now_plus_1m.to_rfc3339().into(), ["one"].into()), + (now_plus_1m01.to_rfc3339().into(), ["one"].into()), + (now_less_1s.to_rfc3339().into(), ["two"].into()), // stays because retry: 2s + // Will get cleaned + (now_less_4s.to_rfc3339().into(), ["two"].into()), + (now_less_5s.to_rfc3339().into(), ["three"].into()), + (now_less_1m.to_rfc3339().into(), ["two"].into()), + ]), + ), + trigger_db, + ])); - assert_eq!( - state.ordered_times.tree(), - &BTreeMap::from([ - (now_less_1s, vec!["two".into()]), - (now_plus_1m, vec!["one".into()]), - (now_plus_1m01, vec!["one".into()]), - ]) - ); - assert_eq!( - state.matches, - BTreeMap::from([ - (vec!["one".into()], [now_plus_1m, now_plus_1m01].into()), - (vec!["two".into()], [now_less_1s].into()), - ]) - ); + let state = State::new(filter, &mut db, now).unwrap(); + + assert_eq!( + state.ordered_times.tree(), + &BTreeMap::from([ + (now_less_1s, vec!["two".into()]), + (now_plus_1m, vec!["one".into()]), + (now_plus_1m01, vec!["one".into()]), + ]) + ); + assert_eq!( + state.matches, + BTreeMap::from([ + (vec!["one".into()], [now_plus_1m, now_plus_1m01].into()), + (vec!["two".into()], [now_less_1s].into()), + ]) + ); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([( + vec!["one".into()], + BTreeMap::from([ + (now_less_5s, 1u64), + (now_less_1s, 1u64), + (now_plus_1m, 1u64), + ]), + )]) + ); + 
} } #[tokio::test] From 0337fcab1f81e5f0e361c2f23deaf64973131e63 Mon Sep 17 00:00:00 2001 From: Baptiste Careil Date: Tue, 8 Jul 2025 17:50:18 +0200 Subject: [PATCH 082/241] Automate some tests --- Cargo.lock | 210 ++++++++++++++++ Cargo.toml | 3 + tests/conf_load.rs | 64 +++++ tests/end_to_end.rs | 237 ++++++++++++++++++ tests/simple.rs | 65 ----- .../conf-00.d}/.should_be_ignored.json | 0 .../conf-00.d}/_should_be_ignored.json | 0 .../conf-00.d}/maybe_swap_file2.~1~ | 0 .../conf-00.d}/maybe_swap_file~ | 0 .../conf-00.d}/part.json | 0 .../conf-00.d}/part.jsonnet | 0 .../conf-00.d}/part.yaml | 0 .../conf-00.d}/part.yml | 0 tests/{ => test-conf}/test-after.jsonnet | 7 +- .../{ => test-conf}/test-binary-input.jsonnet | 4 +- tests/test-conf/test-eol-match.jsonnet | 20 ++ tests/test-conf/test-shutdown.jsonnet | 44 ++++ tests/test-conf/test-stream-stderr.jsonnet | 23 ++ tests/test-conf/test-trigger.jsonnet | 22 ++ tests/test-shutdown.jsonnet | 26 -- 20 files changed, 629 insertions(+), 96 deletions(-) create mode 100644 tests/conf_load.rs create mode 100644 tests/end_to_end.rs rename tests/{test-conf-00.d => test-conf/conf-00.d}/.should_be_ignored.json (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/_should_be_ignored.json (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/maybe_swap_file2.~1~ (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/maybe_swap_file~ (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/part.json (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/part.jsonnet (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/part.yaml (100%) rename tests/{test-conf-00.d => test-conf/conf-00.d}/part.yml (100%) rename tests/{ => test-conf}/test-after.jsonnet (71%) rename tests/{ => test-conf}/test-binary-input.jsonnet (69%) create mode 100644 tests/test-conf/test-eol-match.jsonnet create mode 100644 tests/test-conf/test-shutdown.jsonnet create mode 100644 tests/test-conf/test-stream-stderr.jsonnet create 
mode 100644 tests/test-conf/test-trigger.jsonnet delete mode 100644 tests/test-shutdown.jsonnet diff --git a/Cargo.lock b/Cargo.lock index cb1fd54..5616a5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -101,6 +101,37 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "assert_cmd" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd389a4b2970a01282ee455294913c0a43724daedcd1a24c3eb0ec1c1320b66" +dependencies = [ + "anstyle", + "bstr", + "doc-comment", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + +[[package]] +name = "assert_fs" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652f6cb1f516886fcfee5e7a5c078b9ade62cfcb889524efe5a64d682dd27a9" +dependencies = [ + "anstyle", + "doc-comment", + "globwalk", + "predicates", + "predicates-core", + "predicates-tree", + "tempfile", +] + [[package]] name = "autocfg" version = "1.4.0" @@ -143,6 +174,17 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +[[package]] +name = "bstr" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +dependencies = [ + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.17.0" @@ -262,6 +304,43 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "equivalent" version = "1.0.2" @@ -284,6 +363,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "float-cmp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09cf3155332e944990140d967ff5eceb70df778b34f77d8075db46e4704e6d8" +dependencies = [ + "num-traits", +] + [[package]] name = "futures" version = "0.3.31" @@ -402,6 +490,30 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +[[package]] +name = "globset" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a1028dfc5f5df5da8a56a73e6c153c9a9708ec57232470703592a3f18e49f5" +dependencies = [ + "aho-corasick", + "bstr", + "log", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "globwalk" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757" +dependencies = [ + 
"bitflags", + "ignore", + "walkdir", +] + [[package]] name = "hashbrown" version = "0.15.3" @@ -444,6 +556,22 @@ dependencies = [ "cc", ] +[[package]] +name = "ignore" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b" +dependencies = [ + "crossbeam-deque", + "globset", + "log", + "memchr", + "regex-automata", + "same-file", + "walkdir", + "winapi-util", +] + [[package]] name = "indexmap" version = "2.9.0" @@ -637,6 +765,12 @@ dependencies = [ "libc", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -770,6 +904,36 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "difflib", + "float-cmp", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "proc-macro2" version = "1.0.95" @@ -828,6 +992,8 @@ dependencies = [ name = "reaction" version = "2.1.2" dependencies = [ + "assert_cmd", + "assert_fs", "chrono", "clap", "clap_complete", @@ -836,6 +1002,7 @@ dependencies = [ "jrsonnet-evaluator", "nix", "num_cpus", + "predicates", "rand", "regex", 
"serde", @@ -931,6 +1098,15 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -1084,6 +1260,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "thiserror" version = "1.0.69" @@ -1265,6 +1447,25 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -1354,6 +1555,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = 
"winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index a97181a..e85a045 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,3 +55,6 @@ tracing = "0.1.40" [dev-dependencies] rand = "0.8.5" tempfile = "3.12.0" +assert_fs = "1.1.3" +assert_cmd = "2.0.17" +predicates = "3.1.3" diff --git a/tests/conf_load.rs b/tests/conf_load.rs new file mode 100644 index 0000000..56c7601 --- /dev/null +++ b/tests/conf_load.rs @@ -0,0 +1,64 @@ +use predicates::prelude::*; +use std::error::Error; + +use assert_cmd::Command; + +#[test] +fn load_conf_directory() -> Result<(), Box> { + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args([ + "test-config", + "--verbose", + "--config", + "./tests/test-conf/conf-00.d", + ]); + cmd.assert().success().stdout(predicate::eq( + r#"Loaded the configuration from the following files in the directory ./tests/test-conf/conf-00.d in this order: +part.json +part.jsonnet +part.yaml +part.yml + +concurrency: 16 +state_directory: . +patterns: + mypat: + regex: FLAG +start: +- - echo + - start +stop: +- - echo + - stop +streams: + common: + cmd: + - cat + - access.log + filters: + from_jsonnet: + regex: + - ^ + actions: + ban: + cmd: + - ban + - + unban: + cmd: + - unban + - + after: 42s + from_yaml: + regex: + - ^'' + actions: + print: + cmd: + - echo + - + after: 1s + +"#)); + Ok(()) +} diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs new file mode 100644 index 0000000..d16b8e5 --- /dev/null +++ b/tests/end_to_end.rs @@ -0,0 +1,237 @@ +use std::{error::Error, path::Path, process::Stdio, thread::sleep, time::Duration}; + +use assert_cmd::Command; +use assert_fs::prelude::*; +use nix::sys::signal; +use predicates::prelude::predicate; + +#[test] +fn actions_delayed_and_on_exit() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-after.jsonnet"))?; + + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args(["start", 
"--socket", "./s", "--config", "./config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(Duration::from_secs(5)); + // Expected exit 1: all stream exited + cmd.assert().code(predicate::eq(1)); + + // Expect 9 lines of im, then de (appended after 1s), then la (appended on reaction exit). + const EXPECTED_MATCH: usize = 9; + const CATEGORIES: [&str; 3] = ["im", "de", "la"]; + let mut expected = String::new(); + for cat in &CATEGORIES { + for _ in 0..EXPECTED_MATCH { + expected += cat; + expected += "\n"; + } + } + + tmp_dir.child("log").assert(&expected); + + Ok(()) +} + +#[test] +#[ignore = "long test (~15s)"] +fn kill_stream_on_exit() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-shutdown.jsonnet"))?; + + let cmd = Command::cargo_bin("reaction")?; + let mut cmd = std::process::Command::new(cmd.get_program()); + cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.stdin(std::process::Stdio::null()); + cmd.stdout(std::process::Stdio::null()); + cmd.stderr(std::process::Stdio::null()); + let mut child = cmd.spawn()?; + let start = std::time::Instant::now(); + + // wait for reaction to start all its streams + sleep(std::time::Duration::from_millis(500)); + + let pid = nix::unistd::Pid::from_raw(child.id() as i32); + + // stop reaction, ignore kill error (should only happen if the process already exited) + let _ = signal::kill(pid, signal::SIGINT); + + // wait for reaction exit (it waits for all streams to exit, ~15s) + loop { + match child.try_wait()? 
{ + None => {} + Some(status) => { + assert_eq!( + status.code(), + Some(0), + "Expect reaction to terminate with code 0" + ); + break; + } + } + + let elapsed = std::time::Instant::now() - start; + if elapsed > std::time::Duration::from_secs(20) { + // try to terminate reaction before ending the test + let _ = signal::kill(pid, signal::SIGKILL); + let _ = child.wait(); + + assert!(false, "Test timed out"); + } + } + + // make sure the streams were correctly signaled + tmp_dir.child("log_term").assert("sigterm\n"); + tmp_dir.child("log_kill").assert("sigterm\n"); + + Ok(()) +} + +#[test] +fn non_utf8_is_stripped() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-binary-input.jsonnet"))?; + + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(std::time::Duration::from_secs(1)); + // Expect exit code 1: all streams exited + cmd.assert().code(predicate::eq(1)); + + let expected = "received \"\x1babc \x05\"\n".repeat(3); + tmp_dir.child("log").assert(&expected); + + Ok(()) +} + +#[test] +fn capture_streams_stderr() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-stream-stderr.jsonnet"))?; + + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(std::time::Duration::from_secs(1)); + // Expect exit code 1: all streams exited + cmd.assert().code(predicate::eq(1)); + + let mut expected = String::new(); + for n in 1..=5 { + expected += &format!("{n}\n"); + } + tmp_dir.child("log").assert(&expected); + + Ok(()) +} + +#[test] +fn manualy_trigger_filter() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + 
.child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-trigger.jsonnet"))?; + + // start daemon + let cmd = Command::cargo_bin("reaction")?; + let program = cmd.get_program(); + let mut cmd = std::process::Command::new(program); + cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.stdin(Stdio::null()); + cmd.stdout(Stdio::null()); + cmd.stderr(Stdio::null()); + let mut daemon = cmd.spawn()?; + let start = std::time::Instant::now(); + + // wait for socket to be created + loop { + std::thread::sleep(Duration::from_millis(10)); + + let c = tmp_dir.child("s"); + if c.exists() { + break; + } + + let elapsed = std::time::Instant::now() - start; + if elapsed > Duration::from_secs(1) { + let _ = daemon.kill(); + let _ = daemon.wait(); + assert!(false, "Daemon did not create socket"); + } + } + + let socket = tmp_dir.child("s"); + let socket_path = socket.path().to_str().unwrap(); + + // trigger event manually + let mut cmd_trigger = Command::cargo_bin("reaction")?; + cmd_trigger.current_dir(tmp_dir.path()); + cmd_trigger.args(["trigger", "--socket", socket_path, "s1.f1", "num=95"]); + cmd_trigger.timeout(Duration::from_secs(1)); + cmd_trigger.assert().success(); + + // wait for daemon exit + loop { + std::thread::sleep(Duration::from_millis(100)); + + if let Some(res) = daemon.try_wait()? 
{ + assert_eq!( + res.code(), + Some(1), + "Expect exit code 1: All streams exited" + ); + break; + } + + let elapsed = std::time::Instant::now() - start; + if elapsed > Duration::from_secs(2) { + let _ = daemon.kill(); + let _ = daemon.wait(); + assert!(false, "Daemon did not exit"); + } + } + + tmp_dir.child("log").assert("95\n"); + + Ok(()) +} + +#[test] +fn filter_regex_match_eol() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-eol-match.jsonnet"))?; + + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(std::time::Duration::from_secs(1)); + // Expect exit code 1: all streams exited + cmd.assert().code(predicate::eq(1)); + + let mut expected = String::new(); + for i in 1..=5 { + expected += &format!("{i}\n"); + } + tmp_dir.child("log").assert(&expected); + + Ok(()) +} diff --git a/tests/simple.rs b/tests/simple.rs index 1e7905e..91830e9 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -189,69 +189,4 @@ async fn simple() { get_file_content(oneshot_path).trim(), "oneshot 12\noneshot 67".to_owned().trim() ); - - // Third part of the test - // Check we can capture both stdout and stderr from spawned processes - - // New directory to avoid to load the database from previous tests - let dir = TempDir::new().unwrap(); - env::set_current_dir(&dir).unwrap(); - - // echo numbers twice, once on stdout, once on stderr - config_with_cmd( - config_path, - "for i in 1 2 3 4 5 6 7 8 9; do echo here is $i; echo here is $i 1>&2; sleep 0.01; done", - ); - - file_with_contents(out_path, ""); - - let daemon_exit = daemon(config_path.into(), socket_path.into()).await; - assert!(daemon_exit.is_err()); - assert_eq!( - daemon_exit.unwrap_err().to_string(), - "quitting because all streams finished" - ); - - // make sure all numbers appear in the output - assert_eq!( 
- get_file_content(out_path).trim(), - "1\n2\n3\n4\n5\n6\n7\n8\n9".to_owned() - ); - - // Fourth part of the test - // Check the trigger function - - // New directory to avoid to load the database from previous tests - let dir = TempDir::new().unwrap(); - env::set_current_dir(&dir).unwrap(); - - // No thing from stream - config_with_cmd(config_path, "sleep 0.1"); - - file_with_contents(out_path, ""); - - // Run the daemon - let handle = tokio::spawn(async move { daemon(config_path.into(), socket_path.into()).await }); - - // Run the trigger - - // We sleep a bit to wait for reaction to start - let handle2 = tokio::spawn(async move { - sleep(Duration::from_millis(20)).await; - request( - socket_path.into(), - Format::JSON, - Some("stream1.filter1".into()), - vec![("num".into(), "95".into())], - Order::Trigger, - ) - .await - }); - - let (daemon_exit, trigger) = tokio::join!(handle, handle2); - assert!(daemon_exit.is_ok()); - assert!(trigger.is_ok()); - - // make sure the trigger number is in the output - assert_eq!(get_file_content(out_path).trim(), "95".to_owned()); } diff --git a/tests/test-conf-00.d/.should_be_ignored.json b/tests/test-conf/conf-00.d/.should_be_ignored.json similarity index 100% rename from tests/test-conf-00.d/.should_be_ignored.json rename to tests/test-conf/conf-00.d/.should_be_ignored.json diff --git a/tests/test-conf-00.d/_should_be_ignored.json b/tests/test-conf/conf-00.d/_should_be_ignored.json similarity index 100% rename from tests/test-conf-00.d/_should_be_ignored.json rename to tests/test-conf/conf-00.d/_should_be_ignored.json diff --git a/tests/test-conf-00.d/maybe_swap_file2.~1~ b/tests/test-conf/conf-00.d/maybe_swap_file2.~1~ similarity index 100% rename from tests/test-conf-00.d/maybe_swap_file2.~1~ rename to tests/test-conf/conf-00.d/maybe_swap_file2.~1~ diff --git a/tests/test-conf-00.d/maybe_swap_file~ b/tests/test-conf/conf-00.d/maybe_swap_file~ similarity index 100% rename from tests/test-conf-00.d/maybe_swap_file~ rename to 
tests/test-conf/conf-00.d/maybe_swap_file~ diff --git a/tests/test-conf-00.d/part.json b/tests/test-conf/conf-00.d/part.json similarity index 100% rename from tests/test-conf-00.d/part.json rename to tests/test-conf/conf-00.d/part.json diff --git a/tests/test-conf-00.d/part.jsonnet b/tests/test-conf/conf-00.d/part.jsonnet similarity index 100% rename from tests/test-conf-00.d/part.jsonnet rename to tests/test-conf/conf-00.d/part.jsonnet diff --git a/tests/test-conf-00.d/part.yaml b/tests/test-conf/conf-00.d/part.yaml similarity index 100% rename from tests/test-conf-00.d/part.yaml rename to tests/test-conf/conf-00.d/part.yaml diff --git a/tests/test-conf-00.d/part.yml b/tests/test-conf/conf-00.d/part.yml similarity index 100% rename from tests/test-conf-00.d/part.yml rename to tests/test-conf/conf-00.d/part.yml diff --git a/tests/test-after.jsonnet b/tests/test-conf/test-after.jsonnet similarity index 71% rename from tests/test-after.jsonnet rename to tests/test-conf/test-after.jsonnet index 02da40e..01ba0de 100644 --- a/tests/test-after.jsonnet +++ b/tests/test-conf/test-after.jsonnet @@ -1,5 +1,7 @@ local log(cat) = [ - 'sh', '-c', 'echo "' + cat + ' " >>log', + // NOTE: do not log the ID as it would be out of order since all cat commands + // are executed at once + 'sh', '-c', 'echo "' + cat + '" >>log', ]; { patterns: { @@ -9,7 +11,7 @@ local log(cat) = [ }, streams: { idle: { - cmd: ['sh', '-c', 'for n in 1 1 3 2 3 1 2 2 3; do echo $n; done; sleep 2'], + cmd: ['sh', '-c', 'for n in $(seq 9); do echo $n; done; sleep 2'], filters: { filt1: { regex: [ @@ -34,4 +36,3 @@ local log(cat) = [ }, }, } - diff --git a/tests/test-binary-input.jsonnet b/tests/test-conf/test-binary-input.jsonnet similarity index 69% rename from tests/test-binary-input.jsonnet rename to tests/test-conf/test-binary-input.jsonnet index adbb883..19bab5f 100644 --- a/tests/test-binary-input.jsonnet +++ b/tests/test-conf/test-binary-input.jsonnet @@ -10,7 +10,7 @@ }, streams: { binary: { - cmd: 
['sh', '-c', 'for n in 123 456 987; do printf "\\n\\x1b$n\\xe2 \\x05"; sleep 0.5; done; printf "\\n"; sleep 0.2'], + cmd: ['sh', '-c', 'for n in $(seq 3); do printf "\\n\\x1babc\\xe2 \\x05"; done; printf "\\n"; sleep 0.2'], filters: { filt1: { regex: [ @@ -18,7 +18,7 @@ ], actions: { act: { - cmd: ['echo', 'received ""'], + cmd: ['sh', '-c', 'echo \'received ""\' >>log'], }, }, }, diff --git a/tests/test-conf/test-eol-match.jsonnet b/tests/test-conf/test-eol-match.jsonnet new file mode 100644 index 0000000..5513812 --- /dev/null +++ b/tests/test-conf/test-eol-match.jsonnet @@ -0,0 +1,20 @@ +{ + patterns: { + num: { regex: '[0-9]+' }, + }, + streams: { + s1: { + cmd: ['sh', '-c', 'for i in $(seq 5); do echo here is $i; sleep 0.01; done'], + filters: { + f1: { + regex: ['here is $'], + actions: { + log: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} diff --git a/tests/test-conf/test-shutdown.jsonnet b/tests/test-conf/test-shutdown.jsonnet new file mode 100644 index 0000000..fd4b6a6 --- /dev/null +++ b/tests/test-conf/test-shutdown.jsonnet @@ -0,0 +1,44 @@ +// build a small shell program that intercepts SIGTERM and either exit or ignores it +local build_cmd(log, exit=true) = [ + 'sh', + '-c', + ||| + t() { + echo sigterm >>%s + %s + } + trap t SIGTERM + while true; do sleep 1; done + ||| + % [log, if exit then 'exit 0' else 'return'], +]; +local dummy_filter = { + filt1: { + regex: [ + @'abc', + ], + actions: { + act: { + cmd: ['echo', '1'], + }, + }, + }, +}; +{ + patterns: { + zero: { + regex: @'0', + }, + }, + + streams: { + exit_on_sigint: { + cmd: build_cmd('log_term', exit=true), + filters: dummy_filter, + }, + exit_on_sigkill: { + cmd: build_cmd('log_kill', exit=false), + filters: dummy_filter, + }, + }, +} diff --git a/tests/test-conf/test-stream-stderr.jsonnet b/tests/test-conf/test-stream-stderr.jsonnet new file mode 100644 index 0000000..de78c74 --- /dev/null +++ b/tests/test-conf/test-stream-stderr.jsonnet @@ -0,0 +1,23 @@ +{ + 
concurrency: 1, + patterns: { + num: { regex: '[0-9]+' }, + }, + streams: { + s1: { + cmd: ['sh', '-c', 'for i in $(seq 5); do echo here is $i; echo here is $i 1>&2; sleep 0.01; done'], + filters: { + f1: { + regex: ['here is '], + retry: 2, + retryperiod: '2s', + actions: { + log: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} diff --git a/tests/test-conf/test-trigger.jsonnet b/tests/test-conf/test-trigger.jsonnet new file mode 100644 index 0000000..35024b5 --- /dev/null +++ b/tests/test-conf/test-trigger.jsonnet @@ -0,0 +1,22 @@ +{ + patterns: { + num: { regex: '[0-9]+' }, + }, + streams: { + s1: { + cmd: ['sh', '-c', 'sleep 1'], + filters: { + f1: { + regex: ['here is '], + retry: 2, + retryperiod: '2s', + actions: { + log: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} diff --git a/tests/test-shutdown.jsonnet b/tests/test-shutdown.jsonnet deleted file mode 100644 index 2f68c7a..0000000 --- a/tests/test-shutdown.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -{ - patterns: { - zero: { - regex: @'0', - }, - }, - - streams: { - idle: { - cmd: ['sh', '-c', 'while true; do sleep 1; done'], - filters: { - filt1: { - regex: [ - @'abc', - ], - actions: { - act: { - cmd: ['echo', '1'], - }, - }, - }, - }, - }, - }, -} - From eea708883b0d7a670b470a9e111bf88789bca825 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 2 Aug 2025 12:00:00 +0200 Subject: [PATCH 083/241] Add example config equality test --- tests/conf_load.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/conf_load.rs b/tests/conf_load.rs index 56c7601..b2c5c08 100644 --- a/tests/conf_load.rs +++ b/tests/conf_load.rs @@ -62,3 +62,16 @@ streams: "#)); Ok(()) } + +#[test] +fn example_configs_are_equal() { + let outputs = ["config/example.yml", "config/example.jsonnet"] + .map(|config_path| { + let mut cmd = Command::cargo_bin("reaction").unwrap(); + cmd.args(["test-config", "--config", config_path]); + 
cmd.assert().success().get_output().stdout.clone() + }) + .map(String::from_utf8) + .map(Result::unwrap); + assert_eq!(outputs[0], outputs[1]); +} From 91885e49bdced05be54f5a50939ec9e0b818fcad Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 084/241] Ignore new tests that fail for now FIXME check this later --- tests/conf_load.rs | 1 + tests/end_to_end.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/conf_load.rs b/tests/conf_load.rs index b2c5c08..c403b9b 100644 --- a/tests/conf_load.rs +++ b/tests/conf_load.rs @@ -4,6 +4,7 @@ use std::error::Error; use assert_cmd::Command; #[test] +#[ignore = "currently failing"] // FIXME fn load_conf_directory() -> Result<(), Box> { let mut cmd = Command::cargo_bin("reaction")?; cmd.args([ diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs index d16b8e5..7d68890 100644 --- a/tests/end_to_end.rs +++ b/tests/end_to_end.rs @@ -6,6 +6,7 @@ use nix::sys::signal; use predicates::prelude::predicate; #[test] +#[ignore = "currently failing"] // FIXME fn actions_delayed_and_on_exit() -> Result<(), Box> { let tmp_dir = assert_fs::TempDir::new()?; From c8245836130f9e1c7f2d44c58823efec0a856495 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 085/241] Add new failing tests on start / stop sequences. 
They fail because reaction don't correctly order stop commands after --- tests/start_stop.jsonnet | 41 ++++++++++++++++++++++++++ tests/start_stop.rs | 63 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 tests/start_stop.jsonnet create mode 100644 tests/start_stop.rs diff --git a/tests/start_stop.jsonnet b/tests/start_stop.jsonnet new file mode 100644 index 0000000..a840aaa --- /dev/null +++ b/tests/start_stop.jsonnet @@ -0,0 +1,41 @@ +local echo(message) = ['sh', '-c', 'echo %s >> ./log' % message]; +{ + patterns: { + num: { + regex: '[0-9]+', + }, + }, + + start: [ + echo('start 1'), + echo('start 2'), + ], + + stop: [ + echo('stop 1'), + echo('stop 2'), + ], + + streams: { + s1: { + cmd: ['sh', '-c', 'seq 2 | while read i; do echo runtime $i; sleep 0.1; done'], + filters: { + f1: { + regex: [ + '^runtime $', + ], + actions: { + one: { + cmd: echo('runtime '), + }, + two: { + cmd: echo('after '), + after: '1s', + onexit: true, + }, + }, + }, + }, + }, + }, +} diff --git a/tests/start_stop.rs b/tests/start_stop.rs new file mode 100644 index 0000000..1904cb6 --- /dev/null +++ b/tests/start_stop.rs @@ -0,0 +1,63 @@ +use std::{path::Path, time::Duration}; + +use assert_cmd::Command; +use assert_fs::{prelude::*, TempDir}; +use predicates::prelude::predicate; + +#[test] +#[ignore = "currently failing"] // FIXME +fn start_stop() { + let tmp_dir = assert_fs::TempDir::new().unwrap(); + + run_reaction(&tmp_dir); + + // Expected output + let output = [ + "start 1", + "start 2", + "runtime 1", + "runtime 2", + "after 1", + "after 2", + "stop 1", + "stop 2", + "", + ]; + tmp_dir.child("log").assert(&output.join("\n")); + tmp_dir.child("log").write_str("").unwrap(); + + // Second run + run_reaction(&tmp_dir); + + // Expected output + let output = [ + "start 1", + "start 2", + "runtime 1", + "runtime 2", + "runtime 1", + "runtime 2", + "after 1", + "after 2", + "after 1", + "after 2", + "stop 1", + "stop 2", + "", + ]; + 
tmp_dir.child("log").assert(&output.join("\n")); +} + +fn run_reaction(tmp_dir: &TempDir) { + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/start_stop.jsonnet")) + .unwrap(); + + let mut cmd = Command::cargo_bin("reaction").unwrap(); + cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(Duration::from_secs(5)); + // Expected exit 1: all stream exited + cmd.assert().code(predicate::eq(1)); +} From 607141f22f338790de6eb3adf5e96d790dc645ff Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 6 Aug 2025 12:00:00 +0200 Subject: [PATCH 086/241] Fix after action commands not being correctly awaited We were scheduling the action with exec_now, but it spawns a new task itself, which did not have the ShutdownToken. Persistance part of the start_stop test doesn't work because when the after actions are executed, they decrement the trigger, which is then removed from DB. So they should not decrement it anymore, just check that it's still there. Next commit! 
--- src/daemon/filter/mod.rs | 20 +++++++++++--- tests/start_stop.jsonnet | 10 +++++-- tests/start_stop.rs | 60 +++++++++++++++++++++------------------- 3 files changed, 55 insertions(+), 35 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 64614a1..790c637 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -222,7 +222,12 @@ impl FilterManager { // Execute the action early if let Order::Flush = order { - exec_now(&self.exec_limit, action, m.clone()); + exec_now( + &self.exec_limit, + self.shutdown.clone(), + action, + m.clone(), + ); } } } @@ -264,7 +269,7 @@ impl FilterManager { if exec_time <= now { if state.decrement_trigger(&m, t) { - exec_now(&self.exec_limit, action, m); + exec_now(&self.exec_limit, self.shutdown.clone(), action, m); } } else { let this = self.clone(); @@ -285,7 +290,7 @@ impl FilterManager { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = this.state.lock().unwrap(); if state.decrement_trigger(&m, t) { - exec_now(&this.exec_limit, action, m); + exec_now(&this.exec_limit, this.shutdown, action, m); } } }); @@ -344,9 +349,16 @@ impl FilterManager { } } -fn exec_now(exec_limit: &Option>, action: &'static Action, m: Match) { +fn exec_now( + exec_limit: &Option>, + shutdown: ShutdownToken, + action: &'static Action, + m: Match, +) { let exec_limit = exec_limit.clone(); tokio::spawn(async move { + // Move ShutdownToken in task + let _shutdown = shutdown; // Wait for semaphore's permission, if it is Some let _permit = match exec_limit { #[allow(clippy::unwrap_used)] // We know the semaphore is not closed diff --git a/tests/start_stop.jsonnet b/tests/start_stop.jsonnet index a840aaa..a85636c 100644 --- a/tests/start_stop.jsonnet +++ b/tests/start_stop.jsonnet @@ -1,4 +1,8 @@ -local echo(message) = ['sh', '-c', 'echo %s >> ./log' % message]; +local echo(message, before='true') = [ + 'sh', + '-c', + before + '; echo ' + message + ' >> ./log', +]; { patterns: { num: { @@ 
-29,8 +33,8 @@ local echo(message) = ['sh', '-c', 'echo %s >> ./log' % message]; cmd: echo('runtime '), }, two: { - cmd: echo('after '), - after: '1s', + cmd: echo('after', before='sleep 1'), + after: '5s', onexit: true, }, }, diff --git a/tests/start_stop.rs b/tests/start_stop.rs index 1904cb6..15d1ee3 100644 --- a/tests/start_stop.rs +++ b/tests/start_stop.rs @@ -5,7 +5,7 @@ use assert_fs::{prelude::*, TempDir}; use predicates::prelude::predicate; #[test] -#[ignore = "currently failing"] // FIXME +// #[ignore = "currently failing"] // FIXME fn start_stop() { let tmp_dir = assert_fs::TempDir::new().unwrap(); @@ -17,8 +17,9 @@ fn start_stop() { "start 2", "runtime 1", "runtime 2", - "after 1", - "after 2", + // no order required because they'll be awaken all together on exit + "after", + "after", "stop 1", "stop 2", "", @@ -26,26 +27,27 @@ fn start_stop() { tmp_dir.child("log").assert(&output.join("\n")); tmp_dir.child("log").write_str("").unwrap(); - // Second run - run_reaction(&tmp_dir); + // // Second run + // run_reaction(&tmp_dir); - // Expected output - let output = [ - "start 1", - "start 2", - "runtime 1", - "runtime 2", - "runtime 1", - "runtime 2", - "after 1", - "after 2", - "after 1", - "after 2", - "stop 1", - "stop 2", - "", - ]; - tmp_dir.child("log").assert(&output.join("\n")); + // // Expected output + // let output = [ + // "start 1", + // "start 2", + // "runtime 1", + // "runtime 2", + // "runtime 1", + // "runtime 2", + // // no order required because they'll be awaken all together on exit + // "after", + // "after", + // "after", + // "after", + // "stop 1", + // "stop 2", + // "", + // ]; + // tmp_dir.child("log").assert(&output.join("\n")); } fn run_reaction(tmp_dir: &TempDir) { @@ -54,10 +56,12 @@ fn run_reaction(tmp_dir: &TempDir) { .write_file(Path::new("tests/start_stop.jsonnet")) .unwrap(); - let mut cmd = Command::cargo_bin("reaction").unwrap(); - cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); - 
cmd.current_dir(tmp_dir.path()); - cmd.timeout(Duration::from_secs(5)); - // Expected exit 1: all stream exited - cmd.assert().code(predicate::eq(1)); + Command::cargo_bin("reaction") + .unwrap() + .args(["start", "--socket", "./s", "--config", "./config.jsonnet"]) + .current_dir(tmp_dir.path()) + .timeout(Duration::from_secs(5)) + // Expected exit 1: all stream exited + .assert() + .code(predicate::eq(1)); } From 58f47933084ed494227b8a9199fd1743ca54645f Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 087/241] Fix triggers being forgotten on after actions with on_exit: true decrement_trigger do not delete triggers anymore when exiting test still failing because filters start before start commands --- TODO | 2 + src/daemon/filter/mod.rs | 4 +- src/daemon/filter/state.rs | 78 ++++++++++++++++++++++++++++---------- tests/start_stop.jsonnet | 5 ++- tests/start_stop.rs | 46 ++++++++++++---------- 5 files changed, 90 insertions(+), 45 deletions(-) diff --git a/TODO b/TODO index 586b4e6..dfa721f 100644 --- a/TODO +++ b/TODO @@ -4,3 +4,5 @@ stream: test regex ending with $ should an ipv6-mapped ipv4 match a pattern of type ipv6? should it be normalized as ipv4 then? 
+ +fix filter commands executing before start commands diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 790c637..9d2c322 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -268,7 +268,7 @@ impl FilterManager { let m = m.clone(); if exec_time <= now { - if state.decrement_trigger(&m, t) { + if state.decrement_trigger(&m, t, false) { exec_now(&self.exec_limit, self.shutdown.clone(), action, m); } } else { @@ -289,7 +289,7 @@ impl FilterManager { if !exiting || action.on_exit { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = this.state.lock().unwrap(); - if state.decrement_trigger(&m, t) { + if state.decrement_trigger(&m, t, exiting) { exec_now(&this.exec_limit, this.shutdown, action, m); } } diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index d18bb0a..ec7e9ab 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -140,7 +140,7 @@ impl State { } /// Returns whether we should still execute an action for this (Match, Time) trigger - pub fn decrement_trigger(&mut self, m: &Match, t: Time) -> bool { + pub fn decrement_trigger(&mut self, m: &Match, t: Time, exiting: bool) -> bool { // We record triggered filters only when there is an action with an `after` directive if self.has_after { let mut exec_needed = false; @@ -153,16 +153,24 @@ impl State { if let Some(count) = count { exec_needed = true; if count <= 1 { - self.triggers.fetch_update(mt.m, |map| { - map.and_then(|mut map| { - map.remove(&mt.t); - if map.is_empty() { - None - } else { - Some(map) - } - }) - }); + if !exiting { + self.triggers.fetch_update(mt.m, |map| { + map.and_then(|mut map| { + map.remove(&mt.t); + if map.is_empty() { + None + } else { + Some(map) + } + }) + }); + } + // else don't do anything + // Because that will remove the entry in the DB, and make + // it forget this trigger. 
+ // Maybe we should have 2 maps for triggers: + // - The current for action counting, not persisted + // - Another like ordered_times, Tree, persisted } else { self.triggers.fetch_update(mt.m, |map| { map.map(|mut map| { @@ -429,7 +437,7 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Will be called immediately after, it returns true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); } #[tokio::test] @@ -484,22 +492,52 @@ mod tests { &BTreeMap::from([(one.clone(), [(now, 3)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 2)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 1)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); assert!(state.triggers.tree().is_empty()); // Decrement → false - assert!(!state.decrement_trigger(&one, now)); + assert!(!state.decrement_trigger(&one, now, false)); + + // Add unique trigger (but decrement exiting-like) + state.add_trigger(one.clone(), now, None); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 3)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now, true)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 2)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now, true)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 1)].into())]) + ); + // Decrement but exiting → true, does nothing + assert!(state.decrement_trigger(&one, now, true)); + assert_eq!( + state.triggers.tree(), + &BTreeMap::from([(one.clone(), [(now, 
1)].into())]) + ); + // Decrement → true + assert!(state.decrement_trigger(&one, now, false)); + assert!(state.triggers.tree().is_empty()); + // Decrement → false + assert!(!state.decrement_trigger(&one, now, false)); // Add trigger with neighbour state.add_trigger(one.clone(), now, None); @@ -509,25 +547,25 @@ mod tests { &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 2)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 1)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now)); + assert!(state.decrement_trigger(&one, now, false)); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3)].into())]) ); // Decrement → false - assert!(!state.decrement_trigger(&one, now)); + assert!(!state.decrement_trigger(&one, now, false)); // Remove neighbour state.remove_trigger(&one); assert!(state.triggers.tree().is_empty()); diff --git a/tests/start_stop.jsonnet b/tests/start_stop.jsonnet index a85636c..1e483da 100644 --- a/tests/start_stop.jsonnet +++ b/tests/start_stop.jsonnet @@ -25,6 +25,7 @@ local echo(message, before='true') = [ cmd: ['sh', '-c', 'seq 2 | while read i; do echo runtime $i; sleep 0.1; done'], filters: { f1: { + duplicate: 'rerun', regex: [ '^runtime $', ], @@ -33,8 +34,8 @@ local echo(message, before='true') = [ cmd: echo('runtime '), }, two: { - cmd: echo('after', before='sleep 1'), - after: '5s', + cmd: echo('after', before='sleep 0.2'), + after: '5m', onexit: true, }, }, diff --git a/tests/start_stop.rs b/tests/start_stop.rs index 15d1ee3..64112f5 100644 --- a/tests/start_stop.rs +++ 
b/tests/start_stop.rs @@ -5,7 +5,6 @@ use assert_fs::{prelude::*, TempDir}; use predicates::prelude::predicate; #[test] -// #[ignore = "currently failing"] // FIXME fn start_stop() { let tmp_dir = assert_fs::TempDir::new().unwrap(); @@ -27,27 +26,32 @@ fn start_stop() { tmp_dir.child("log").assert(&output.join("\n")); tmp_dir.child("log").write_str("").unwrap(); - // // Second run - // run_reaction(&tmp_dir); + println!( + "DATABASE:\n{}", + std::fs::read_to_string(tmp_dir.child("reaction.db")).unwrap() + ); - // // Expected output - // let output = [ - // "start 1", - // "start 2", - // "runtime 1", - // "runtime 2", - // "runtime 1", - // "runtime 2", - // // no order required because they'll be awaken all together on exit - // "after", - // "after", - // "after", - // "after", - // "stop 1", - // "stop 2", - // "", - // ]; - // tmp_dir.child("log").assert(&output.join("\n")); + // Second run + run_reaction(&tmp_dir); + + // Expected output + let output = [ + "start 1", + "start 2", + "runtime 1", + "runtime 2", + "runtime 1", + "runtime 2", + // no order required because they'll be awaken all together on exit + "after", + "after", + "after", + "after", + "stop 1", + "stop 2", + "", + ]; + tmp_dir.child("log").assert(&output.join("\n")); } fn run_reaction(tmp_dir: &TempDir) { From f4b5ed20ab4187216a5627edc5217c24bae34fac Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 088/241] Add debug on start/stop commands --- src/concepts/config.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 7daaea9..4bee310 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -352,6 +352,7 @@ mod jsonnet { } fn run_commands(commands: &Vec>, moment: &str) -> bool { + debug!("Running {moment} commands..."); let mut ok = true; for command in commands { info!("{} command: run {:?}\n", moment, command); From 10bd0a18592b39a65025750b23cbb5b74ffa5f6d Mon Sep 17 00:00:00 2001 From: 
ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 089/241] Tree::fetch_update: Do not remove and re-add entries. Better cloning the value than writing another entry! --- TODO | 1 + src/treedb/mod.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/TODO b/TODO index dfa721f..5f13e77 100644 --- a/TODO +++ b/TODO @@ -6,3 +6,4 @@ should an ipv6-mapped ipv4 match a pattern of type ipv6? should it be normalized as ipv4 then? fix filter commands executing before start commands +fix order of db write subject to race condition (make writes async?) diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index 8549559..02086f9 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -364,7 +364,7 @@ impl Tree { key: K, mut f: F, ) -> Option { - let old_value = self.remove(&key); + let old_value = self.get(&key).map(|v| v.to_owned()); let new_value = f(old_value); self.log(&key, new_value.as_ref()); if let Some(new_value) = new_value { From a7b63b69a8a6577c9ac28c81e287c7e0b30dc2a6 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 090/241] Database: finish writing entries when quitting --- src/treedb/mod.rs | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index 02086f9..fc0c71d 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -82,7 +82,7 @@ pub struct Database { entry_rx: mpsc::Receiver, /// The sender on [`Tree`] write operations. /// Only used to clone new senders for new Trees. 
- entry_tx: mpsc::Sender, + entry_tx: Option>, /// The interval at which the database must be flushed to kernel flush_every: Duration, /// The maximum bytes that must be written until the database is rotated @@ -106,7 +106,7 @@ impl Database { path, new_path, entry_rx, - entry_tx, + entry_tx: Some(entry_tx), flush_every: Duration::from_secs(2), max_bytes: 20 * 1024 * 1024, // 20 MiB bytes_written: 0, @@ -121,7 +121,7 @@ impl Database { // flush_every for the next tick, resulting in a relaxed interval. // Hoping this will smooth IO pressure when under heavy load. interval.set_missed_tick_behavior(MissedTickBehavior::Delay); - let status = loop { + let mut status = loop { tokio::select! { entry = self.entry_rx.recv() => { if let Err(err) = self.handle_entry(entry).await { @@ -139,6 +139,18 @@ impl Database { }; }; + // Finish consuming received entries when shutdown asked + if status.is_none() { + self.entry_tx = None; + loop { + let entry = self.entry_rx.recv().await; + if let Err(err) = self.handle_entry(entry).await { + status = err; + break; + } + } + } + // Shutdown let close_status = self .close() @@ -277,6 +289,11 @@ impl Database { where F: Fn((Value, Value)) -> Result<(K, V), String>, { + // Get a clone of the channel sender + let tx = self + .entry_tx + .clone() + .ok_or("Database is closing".to_string())?; // Load the tree from its JSON let tree = if let Some(json_tree) = self.loaded_db.remove(&name) { json_tree @@ -290,7 +307,7 @@ impl Database { id: name, entry_timeout, tree, - tx: self.entry_tx.clone(), + tx, }) } From e8f13dc9ffdcf757a9be5e8fd494a3d90718b649 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 091/241] cargo fmt --- src/daemon/socket.rs | 9 ++------- src/daemon/stream.rs | 9 ++------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 2caa04a..6fd8a26 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -119,10 +119,7 @@ fn 
handle_show_or_flush_order( .iter() // stream filtering .filter(|(stream, _)| { - stream_name.is_none() - || stream_name - .clone() - .is_some_and(|name| name == stream.name) + stream_name.is_none() || stream_name.clone().is_some_and(|name| name == stream.name) }) .fold(BTreeMap::new(), |mut acc, (stream, filter_manager)| { let inner_map = filter_manager @@ -130,9 +127,7 @@ fn handle_show_or_flush_order( // filter filtering .filter(|(filter, _)| { filter_name.is_none() - || filter_name - .clone() - .is_some_and(|name| name == filter.name) + || filter_name.clone().is_some_and(|name| name == filter.name) }) // pattern filtering .filter(|(filter, _)| { diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index ab464be..ac2d46a 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -90,11 +90,7 @@ impl StreamManager { { Ok(child) => child, Err(err) => { - error!( - "could not execute stream {} cmd: {}", - self.stream.name, - err - ); + error!("could not execute stream {} cmd: {}", self.stream.name, err); return; } }; @@ -183,8 +179,7 @@ impl StreamManager { Some(Err(err)) => { error!( "impossible to read output from stream {}: {}", - self.stream.name, - err + self.stream.name, err ); return; } From ca89c7f72a8a81d018f84c25ff07d8426163d39d Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 092/241] Fix filter commands executing before start commands Now creating the socket file before starting its manager. So I can launch start commands after its creation, and before creating the filter managers. --- TODO | 4 +- src/daemon/mod.rs | 17 ++++--- src/daemon/socket.rs | 118 +++++++++++++++++++++++-------------------- tests/start_stop.rs | 52 +++++++++++++------ 4 files changed, 110 insertions(+), 81 deletions(-) diff --git a/TODO b/TODO index 5f13e77..663070f 100644 --- a/TODO +++ b/TODO @@ -5,5 +5,5 @@ stream: test regex ending with $ should an ipv6-mapped ipv4 match a pattern of type ipv6? 
should it be normalized as ipv4 then? -fix filter commands executing before start commands -fix order of db write subject to race condition (make writes async?) +fix order of db writes subject to race condition (make writes async?) +DB: add tests on stress testing (lines should always be in order) diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 415394a..c0b848f 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -20,7 +20,7 @@ use crate::{concepts::Config, treedb::Database}; use filter::FilterManager; pub use filter::React; pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; -use socket::socket_manager; +use socket::Socket; use stream::StreamManager; #[cfg(test)] @@ -43,6 +43,14 @@ pub async fn daemon( // Open Database let mut db = Database::open(config).await?; + // Open Socket + let socket = Socket::open(socket).await?; + + // reaction won't abort on startup anymore, we can run start commands + if !config.start() { + return Err("a start command failed, exiting.".into()); + } + let (state, stream_managers) = { // Semaphore limiting action execution concurrency let exec_limit = match config.concurrency { @@ -76,12 +84,7 @@ pub async fn daemon( let mut db_status_rx = db.manager(shutdown.token()); // Run socket task - socket_manager(config, socket, state, shutdown.token()).await?; - - // reaction won't abort on startup anymore, we can run start commands - if !config.start() { - return Err("a start command failed, exiting.".into()); - } + socket.manager(config, state, shutdown.token()); // Start Stream managers let mut stream_task_handles = Vec::new(); diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 6fd8a26..74d0a7c 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -21,13 +21,12 @@ use crate::{ use super::{filter::FilterManager, shutdown::ShutdownToken}; -macro_rules! 
err_str { - ($expression:expr) => { - $expression.map_err(|err| err.to_string()) - }; -} - async fn open_socket(path: PathBuf) -> Result { + macro_rules! err_str { + ($expression:expr) => { + $expression.map_err(|err| err.to_string()) + }; + } // First create all directories to the file let dir = path .parent() @@ -222,60 +221,67 @@ macro_rules! or_next { }; } -pub async fn socket_manager( - config: &'static Config, - socket: PathBuf, - shared_state: HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, - shutdown: ShutdownToken, -) -> Result<(), String> { - let listener = match open_socket(socket.clone()).await { - Ok(l) => l, - Err(err) => { - return Err(format!("while creating communication socket: {err}")); - } - }; +pub struct Socket { + path: PathBuf, + socket: UnixListener, +} - tokio::spawn(async move { - loop { - tokio::select! { - _ = shutdown.wait() => break, - try_conn = listener.accept() => { - match try_conn { - Ok((conn, _)) => { - let mut transport = Framed::new(conn, LengthDelimitedCodec::new()); - // Decode - let received = transport.next().await; - let encoded_request = match received { - Some(r) => or_next!("while reading request", r), - None => { - error!("failed to answer client: client sent no request"); - continue; - } - }; - let request = or_next!( - "failed to decode request", - serde_json::from_slice(&encoded_request) - ); - // Process - let response = answer_order(config, &shared_state, request); - // Encode - let encoded_response = - or_next!("failed to serialize response", serde_json::to_string::(&response)); - or_next!( - "failed to send response:", - transport.send(Bytes::from(encoded_response)).await - ); +impl Socket { + pub async fn open(socket: PathBuf) -> Result { + Ok(Socket { + socket: open_socket(socket.clone()) + .await + .map_err(|err| format!("while creating communication socket: {err}"))?, + path: socket, + }) + } + + pub fn manager( + self, + config: &'static Config, + shared_state: HashMap<&'static Stream, 
HashMap<&'static Filter, FilterManager>>, + shutdown: ShutdownToken, + ) { + tokio::spawn(async move { + loop { + tokio::select! { + _ = shutdown.wait() => break, + try_conn = self.socket.accept() => { + match try_conn { + Ok((conn, _)) => { + let mut transport = Framed::new(conn, LengthDelimitedCodec::new()); + // Decode + let received = transport.next().await; + let encoded_request = match received { + Some(r) => or_next!("while reading request", r), + None => { + error!("failed to answer client: client sent no request"); + continue; + } + }; + let request = or_next!( + "failed to decode request", + serde_json::from_slice(&encoded_request) + ); + // Process + let response = answer_order(config, &shared_state, request); + // Encode + let encoded_response = + or_next!("failed to serialize response", serde_json::to_string::(&response)); + or_next!( + "failed to send response:", + transport.send(Bytes::from(encoded_response)).await + ); + } + Err(err) => error!("failed to open connection from cli: {err}"), } - Err(err) => error!("failed to open connection from cli: {err}"), } } } - } - if let Err(err) = fs::remove_file(socket).await { - error!("failed to remove socket: {}", err); - } - }); - - Ok(()) + if let Err(err) = fs::remove_file(self.path).await { + error!("failed to remove socket: {}", err); + } + }); + } } diff --git a/tests/start_stop.rs b/tests/start_stop.rs index 64112f5..09a459e 100644 --- a/tests/start_stop.rs +++ b/tests/start_stop.rs @@ -35,23 +35,43 @@ fn start_stop() { run_reaction(&tmp_dir); // Expected output - let output = [ - "start 1", - "start 2", - "runtime 1", - "runtime 2", - "runtime 1", - "runtime 2", - // no order required because they'll be awaken all together on exit - "after", - "after", - "after", - "after", - "stop 1", - "stop 2", - "", + // (one of them) + let outputs = [ + [ + "start 1", + "start 2", + "runtime 1", + "runtime 2", + "runtime 1", + "runtime 2", + // no order required because they'll be awaken all together on exit + 
"after", + "after", + "after", + "after", + "stop 1", + "stop 2", + "", + ], + [ + "start 1", + "start 2", + "runtime 2", + "runtime 1", + "runtime 1", + "runtime 2", + // no order required because they'll be awaken all together on exit + "after", + "after", + "after", + "after", + "stop 1", + "stop 2", + "", + ], ]; - tmp_dir.child("log").assert(&output.join("\n")); + let contents = std::fs::read_to_string(tmp_dir.child("log")).unwrap(); + assert!(contents == outputs[0].join("\n") || contents == outputs[1].join("\n")); } fn run_reaction(tmp_dir: &TempDir) { From dcc2e1ec4c5d3c8c695dad62989d862353ef3a8d Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 8 Aug 2025 12:00:00 +0200 Subject: [PATCH 093/241] v2.2.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5616a5e..a066f3a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -990,7 +990,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.1.2" +version = "2.2.0" dependencies = [ "assert_cmd", "assert_fs", diff --git a/Cargo.toml b/Cargo.toml index e85a045..114816a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.1.2" +version = "2.2.0" edition = "2021" authors = ["ppom "] license = "AGPL-3.0" From fc6a3855747151302be34e8f24115379496baee5 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 11 Aug 2025 12:00:00 +0200 Subject: [PATCH 094/241] Add armhf-gnu build for Raspberry Pis --- release.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/release.py b/release.py index 219c568..42c109c 100644 --- a/release.py +++ b/release.py @@ -88,6 +88,7 @@ def main(): architectures = { "x86_64-unknown-linux-musl": "amd64", "aarch64-unknown-linux-musl": "arm64", + "arm-unknown-linux-gnueabihf": "armhf", } all_files = [] @@ -117,7 +118,7 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service """.strip(), ] - for (architecture_rs, architecture_pretty) in 
architectures.items(): + for architecture_rs, architecture_pretty in architectures.items(): # Cargo clean run_command(["cargo", "clean"]) From 0e75514db37bede4b547455260fa8163ba0e2aed Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 11 Aug 2025 12:00:00 +0200 Subject: [PATCH 095/241] Debian: Add section information Fix #134 --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 114816a..e6a8d66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"] build = "build.rs" [package.metadata.deb] +section = "net" maintainer-scripts = "packaging/" systemd-units = { enable = false } assets = [ From e45963dd4c1fbe89f1e051f9bcf6d4f8a671ee4e Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 11 Aug 2025 12:00:00 +0200 Subject: [PATCH 096/241] Debian: Add extended-description Fix #134 --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index e6a8d66..6027af7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,9 @@ build = "build.rs" [package.metadata.deb] section = "net" +extended-description = """A daemon that scans program outputs for repeated patterns, and takes action. +A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors. 
+reaction aims at being a successor to fail2ban.""" maintainer-scripts = "packaging/" systemd-units = { enable = false } assets = [ From 1f734a516d7583486337148709ce785f5c2e0f0c Mon Sep 17 00:00:00 2001 From: Baptiste Careil Date: Sun, 17 Aug 2025 18:33:09 +0200 Subject: [PATCH 097/241] Fix test load_conf_directory - Fixed concurrency to 1 not to be platform dependent - Added fields introduced by recent changes - Used builtin str comparator that produces a diff instead of the eq predicate --- tests/conf_load.rs | 12 +++++++----- tests/test-conf/conf-00.d/part.yml | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/conf_load.rs b/tests/conf_load.rs index c403b9b..048f6ca 100644 --- a/tests/conf_load.rs +++ b/tests/conf_load.rs @@ -1,10 +1,8 @@ -use predicates::prelude::*; use std::error::Error; use assert_cmd::Command; #[test] -#[ignore = "currently failing"] // FIXME fn load_conf_directory() -> Result<(), Box> { let mut cmd = Command::cargo_bin("reaction")?; cmd.args([ @@ -13,18 +11,20 @@ fn load_conf_directory() -> Result<(), Box> { "--config", "./tests/test-conf/conf-00.d", ]); - cmd.assert().success().stdout(predicate::eq( + cmd.assert().success().stdout( r#"Loaded the configuration from the following files in the directory ./tests/test-conf/conf-00.d in this order: part.json part.jsonnet part.yaml part.yml -concurrency: 16 +concurrency: 1 state_directory: . 
patterns: mypat: regex: FLAG + ipv4mask: null + ipv6mask: null start: - - echo - start @@ -40,6 +40,7 @@ streams: from_jsonnet: regex: - ^ + duplicate: extend actions: ban: cmd: @@ -53,6 +54,7 @@ streams: from_yaml: regex: - ^'' + duplicate: extend actions: print: cmd: @@ -60,7 +62,7 @@ streams: - after: 1s -"#)); +"#); Ok(()) } diff --git a/tests/test-conf/conf-00.d/part.yml b/tests/test-conf/conf-00.d/part.yml index eb82e61..9b7ca55 100644 --- a/tests/test-conf/conf-00.d/part.yml +++ b/tests/test-conf/conf-00.d/part.yml @@ -1,3 +1,4 @@ +concurrency: 1 start: - - echo - start From e37bd6ebbeae7bd9eac065e7781878b585b12031 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 6 Sep 2025 12:00:00 +0200 Subject: [PATCH 098/241] Add a mention on Azlux's third-party repository Related to #134 --- release.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/release.py b/release.py index 42c109c..8567551 100644 --- a/release.py +++ b/release.py @@ -242,6 +242,8 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ && rm {deb_name}.minisig \\ && sudo apt install ./{deb_name} ``` + +*You can also use [this third-party package repository](https://packages.azlux.fr).* """.strip() ) else: From 582889f71e8914a6c0952bddc09853ef9c8bfb44 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 3 Sep 2025 12:00:00 +0200 Subject: [PATCH 099/241] WIP async db Fixes inherent problem on sync db, which spawns a new task for persistance. This makes the log unordered, which can cause inconsistence issues. 
--- src/daemon/filter/mod.rs | 30 +++++++++--------- src/daemon/filter/state.rs | 60 +++++++++++++++++++----------------- src/daemon/mod.rs | 2 +- src/daemon/socket.rs | 62 +++++++++++++++++++------------------- src/daemon/stream.rs | 2 +- src/treedb/mod.rs | 44 +++++++++++++++------------ 6 files changed, 104 insertions(+), 96 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 9d2c322..ab98579 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -54,7 +54,7 @@ pub enum React { #[allow(clippy::unwrap_used)] impl FilterManager { - pub fn new( + pub async fn new( filter: &'static Filter, exec_limit: Option>, shutdown: ShutdownToken, @@ -65,15 +65,15 @@ impl FilterManager { filter, exec_limit, shutdown, - state: Arc::new(Mutex::new(State::new(filter, db, now)?)), + state: Arc::new(Mutex::new(State::new(filter, db, now).await?)), }; this.clear_past_triggers_and_schedule_future_actions(now); Ok(this) } - pub fn handle_line(&self, line: &str, now: Time) -> React { + pub async fn handle_line(&self, line: &str, now: Time) -> React { if let Some(match_) = self.filter.get_match(line) { - if self.handle_match(match_, now) { + if self.handle_match(match_, now).await { React::Trigger } else { React::Match @@ -83,7 +83,7 @@ impl FilterManager { } } - fn handle_match(&self, m: Match, now: Time) -> bool { + async fn handle_match(&self, m: Match, now: Time) -> bool { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = self.state.lock().unwrap(); state.clear_past_matches(now); @@ -100,7 +100,7 @@ impl FilterManager { Some(retry) => { state.add_match(m.clone(), now); // Number of stored times for this match >= configured retry for this filter - state.get_times(&m) >= retry as usize + state.get_times(&m).await >= retry as usize } }; @@ -109,7 +109,7 @@ impl FilterManager { let actions_left = if Duplicate::Extend == self.filter.duplicate { // Get number of actions left from last trigger state - 
.remove_trigger(&m) + .remove_trigger(&m).await // Only one entry in the map because Duplicate::Extend .and_then(|map| map.first_key_value().map(|(_, n)| n.clone())) } else { @@ -122,7 +122,7 @@ impl FilterManager { trigger } - pub fn handle_trigger( + pub async fn handle_trigger( &self, patterns: BTreeMap, String>, now: Time, @@ -138,7 +138,7 @@ impl FilterManager { Ok(()) } - pub fn handle_order( + pub async fn handle_order( &self, patterns: &BTreeMap, Regex>, order: Order, @@ -241,12 +241,12 @@ impl FilterManager { /// Schedule execution for a given Match. /// We check first if the trigger is still here /// because pending actions can be flushed. - fn schedule_exec( + async fn schedule_exec<'a>( &self, m: Match, t: Time, now: Time, - state: &mut MutexGuard, + state: &'a mut MutexGuard<'_, State>, startup: bool, actions_left: Option, ) { @@ -268,7 +268,7 @@ impl FilterManager { let m = m.clone(); if exec_time <= now { - if state.decrement_trigger(&m, t, false) { + if state.decrement_trigger(&m, t, false).await { exec_now(&self.exec_limit, self.shutdown.clone(), action, m); } } else { @@ -289,7 +289,7 @@ impl FilterManager { if !exiting || action.on_exit { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = this.state.lock().unwrap(); - if state.decrement_trigger(&m, t, exiting) { + if state.decrement_trigger(&m, t, exiting).await { exec_now(&this.exec_limit, this.shutdown, action, m); } } @@ -298,7 +298,7 @@ impl FilterManager { } } - fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) { + async fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) { let longuest_action_duration = self.filter.longuest_action_duration; let number_of_actions = self .filter @@ -349,7 +349,7 @@ impl FilterManager { } } -fn exec_now( +async fn exec_now( exec_limit: &Option>, shutdown: ShutdownToken, action: &'static Action, diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index ec7e9ab..a04ba90 100644 --- 
a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -59,7 +59,11 @@ pub struct State { } impl State { - pub fn new(filter: &'static Filter, db: &mut Database, now: Time) -> Result { + pub async fn new( + filter: &'static Filter, + db: &mut Database, + now: Time, + ) -> Result { let ordered_times = db.open_tree( filter_ordered_times_db_name(filter), filter.retry_duration.unwrap_or_default(), @@ -100,13 +104,13 @@ impl State { Ok(this) } - pub fn add_match(&mut self, m: Match, t: Time) { + pub async fn add_match(&mut self, m: Match, t: Time) { let set = self.matches.entry(m.clone()).or_default(); set.insert(t); self.ordered_times.insert(t, m); } - pub fn add_trigger(&mut self, m: Match, t: Time, action_count: Option) { + pub async fn add_trigger(&mut self, m: Match, t: Time, action_count: Option) { // We record triggered filters only when there is an action with an `after` directive if self.has_after { // Add the (Match, Time) to the triggers map @@ -125,7 +129,7 @@ impl State { } // Completely remove a Match from the matches - pub fn remove_match(&mut self, m: &Match) { + pub async fn remove_match(&mut self, m: &Match) { if let Some(set) = self.matches.get(m) { for t in set { self.ordered_times.remove(t); @@ -135,12 +139,12 @@ impl State { } /// Completely remove a Match from the triggers - pub fn remove_trigger(&mut self, m: &Match) -> Option> { - self.triggers.remove(m) + pub async fn remove_trigger(&mut self, m: &Match) -> Option> { + self.triggers.remove(m).await } /// Returns whether we should still execute an action for this (Match, Time) trigger - pub fn decrement_trigger(&mut self, m: &Match, t: Time, exiting: bool) -> bool { + pub async fn decrement_trigger(&mut self, m: &Match, t: Time, exiting: bool) -> bool { // We record triggered filters only when there is an action with an `after` directive if self.has_after { let mut exec_needed = false; @@ -186,7 +190,7 @@ impl State { } } - pub fn clear_past_matches(&mut self, now: Time) { + pub async 
fn clear_past_matches(&mut self, now: Time) { let retry_duration = self.filter.retry_duration.unwrap_or_default(); while self .ordered_times @@ -212,14 +216,14 @@ impl State { } } - pub fn get_times(&self, m: &Match) -> usize { + pub async fn get_times(&self, m: &Match) -> usize { match self.matches.get(m) { Some(vec) => vec.len(), None => 0, } } - fn load_matches_from_ordered_times(&mut self) { + async fn load_matches_from_ordered_times(&mut self) { for (t, m) in self.ordered_times.iter() { let set = self.matches.entry(m.clone()).or_default(); set.insert(*t); @@ -342,7 +346,7 @@ mod tests { trigger_db, ])); - let state = State::new(filter, &mut db, now).unwrap(); + let state = State::new(filter, &mut db, now).await.unwrap(); assert_eq!( state.ordered_times.tree(), @@ -386,7 +390,7 @@ mod tests { let now_less_4s = now - TimeDelta::seconds(4); let mut db = TempDatabase::default().await; - let mut state = State::new(filter, &mut db, now).unwrap(); + let mut state = State::new(filter, &mut db, now).await.unwrap(); assert!(state.ordered_times.tree().is_empty()); assert!(state.matches.is_empty()); @@ -427,7 +431,7 @@ mod tests { let now = Local::now(); let mut db = TempDatabase::default().await; - let mut state = State::new(filter, &mut db, now).unwrap(); + let mut state = State::new(filter, &mut db, now).await.unwrap(); assert!(state.triggers.tree().is_empty()); @@ -437,7 +441,7 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Will be called immediately after, it returns true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); } #[tokio::test] @@ -481,7 +485,7 @@ mod tests { let now_plus_1s = now + TimeDelta::seconds(1); let mut db = TempDatabase::default().await; - let mut state = State::new(filter, &mut db, now).unwrap(); + let mut state = State::new(filter, &mut db, now).await.unwrap(); assert!(state.triggers.tree().is_empty()); @@ -492,22 +496,22 @@ mod tests { &BTreeMap::from([(one.clone(), 
[(now, 3)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 2)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 1)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert!(state.triggers.tree().is_empty()); // Decrement → false - assert!(!state.decrement_trigger(&one, now, false)); + assert!(!state.decrement_trigger(&one, now, false).await); // Add unique trigger (but decrement exiting-like) state.add_trigger(one.clone(), now, None); @@ -516,28 +520,28 @@ mod tests { &BTreeMap::from([(one.clone(), [(now, 3)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, true)); + assert!(state.decrement_trigger(&one, now, true).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 2)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, true)); + assert!(state.decrement_trigger(&one, now, true).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 1)].into())]) ); // Decrement but exiting → true, does nothing - assert!(state.decrement_trigger(&one, now, true)); + assert!(state.decrement_trigger(&one, now, true).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 1)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert!(state.triggers.tree().is_empty()); // Decrement → false - assert!(!state.decrement_trigger(&one, now, false)); + assert!(!state.decrement_trigger(&one, now, false).await); // Add trigger with 
neighbour state.add_trigger(one.clone(), now, None); @@ -547,25 +551,25 @@ mod tests { &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 2)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 1)].into())]) ); // Decrement → true - assert!(state.decrement_trigger(&one, now, false)); + assert!(state.decrement_trigger(&one, now, false).await); assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3)].into())]) ); // Decrement → false - assert!(!state.decrement_trigger(&one, now, false)); + assert!(!state.decrement_trigger(&one, now, false).await); // Remove neighbour state.remove_trigger(&one); assert!(state.triggers.tree().is_empty()); diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index c0b848f..c62d84f 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -66,7 +66,7 @@ pub async fn daemon( let mut filter_managers = HashMap::new(); for filter in stream.filters.values() { let manager = - FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now)?; + FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now).await?; filter_managers.insert(filter, manager); } state.insert(stream, filter_managers.clone()); diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 74d0a7c..124b6c5 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -51,7 +51,7 @@ async fn open_socket(path: PathBuf) -> Result { err_str!(UnixListener::bind(path)) } -fn handle_trigger_order( +async fn handle_trigger_order( stream_name: Option, filter_name: Option, patterns: BTreeMap, 
String>, @@ -100,13 +100,13 @@ fn handle_trigger_order( }; let now = Local::now(); - match filter_manager.handle_trigger(patterns, now) { + match filter_manager.handle_trigger(patterns, now).await { Ok(()) => DaemonResponse::Ok(()), Err(err) => DaemonResponse::Err(err), } } -fn handle_show_or_flush_order( +async fn handle_show_or_flush_order( stream_name: Option, filter_name: Option, patterns: BTreeMap, Regex>, @@ -114,40 +114,39 @@ fn handle_show_or_flush_order( shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, ) -> DaemonResponse { let now = Local::now(); - let cs: ClientStatus = shared_state + let iter = shared_state .iter() // stream filtering .filter(|(stream, _)| { stream_name.is_none() || stream_name.clone().is_some_and(|name| name == stream.name) - }) - .fold(BTreeMap::new(), |mut acc, (stream, filter_manager)| { - let inner_map = filter_manager - .iter() - // filter filtering - .filter(|(filter, _)| { - filter_name.is_none() - || filter_name.clone().is_some_and(|name| name == filter.name) - }) - // pattern filtering - .filter(|(filter, _)| { - patterns - .iter() - .all(|(pattern, _)| filter.patterns.get(pattern).is_some()) - }) - .map(|(filter, manager)| { - ( - filter.name.to_owned(), - manager.handle_order(&patterns, order, now), - ) - }) - .collect(); - acc.insert(stream.name.to_owned(), inner_map); - acc }); + let mut cs = ClientStatus::new(); + for (stream, filter_manager) in iter { + let iter = filter_manager + .iter() + // filter filtering + .filter(|(filter, _)| { + filter_name.is_none() || filter_name.clone().is_some_and(|name| name == filter.name) + }) + // pattern filtering + .filter(|(filter, _)| { + patterns + .iter() + .all(|(pattern, _)| filter.patterns.get(pattern).is_some()) + }); + let mut inner_map = BTreeMap::new(); + for (filter, manager) in iter { + inner_map.insert( + filter.name.to_owned(), + manager.handle_order(&patterns, order, now).await, + ); + } + cs.insert(stream.name.to_owned(), inner_map); + 
} DaemonResponse::Order(cs) } -fn answer_order( +async fn answer_order( config: &'static Config, shared_state: &HashMap<&'static Stream, HashMap<&'static Filter, FilterManager>>, options: ClientRequest, @@ -182,7 +181,7 @@ fn answer_order( }; if let Order::Trigger = options.order { - handle_trigger_order(stream_name, filter_name, patterns, shared_state) + handle_trigger_order(stream_name, filter_name, patterns, shared_state).await } else { let patterns = match patterns .into_iter() @@ -206,6 +205,7 @@ fn answer_order( options.order, shared_state, ) + .await } } @@ -264,7 +264,7 @@ impl Socket { serde_json::from_slice(&encoded_request) ); // Process - let response = answer_order(config, &shared_state, request); + let response = answer_order(config, &shared_state, request).await; // Encode let encoded_response = or_next!("failed to serialize response", serde_json::to_string::(&response)); diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index ac2d46a..a55d449 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -173,7 +173,7 @@ impl StreamManager { Some(Ok(line)) => { let now = Local::now(); for manager in self.matching_filters(&line) { - manager.handle_line(&line, now); + manager.handle_line(&line, now).await; } } Some(Err(err)) => { diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index fc0c71d..e14db68 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -330,7 +330,7 @@ impl Deref for Tree { // Reimplement write functions impl Tree { /// Log an [`Entry`] to the [`Database`] - fn log(&mut self, k: &K, v: Option<&V>) { + async fn log(&mut self, k: &K, v: Option<&V>) { let e = Entry { tree: self.id.clone(), key: serde_json::to_value(k).expect("could not serialize key"), @@ -339,36 +339,40 @@ impl Tree { }; let tx = self.tx.clone(); // FIXME what if send fails? 
- tokio::spawn(async move { - let _ = tx.send(e).await; - }); + let _ = tx.send(e).await; } /// Asynchronously persisted version of [`BTreeMap::insert`] - pub fn insert(&mut self, key: K, value: V) -> Option { - self.log(&key, Some(&value)); + pub async fn insert(&mut self, key: K, value: V) -> Option { + self.log(&key, Some(&value)).await; self.tree.insert(key, value) } /// Asynchronously persisted version of [`BTreeMap::pop_first`] - pub fn pop_first(&mut self) -> Option<(K, V)> { - self.tree.pop_first().map(|(key, value)| { - self.log(&key, None); - (key, value) - }) + pub async fn pop_first(&mut self) -> Option<(K, V)> { + match self.tree.pop_first() { + Some((key, value)) => { + self.log(&key, None).await; + Some((key, value)) + } + None => None, + } } /// Asynchronously persisted version of [`BTreeMap::pop_last`] - pub fn pop_last(&mut self) -> Option<(K, V)> { - self.tree.pop_last().map(|(key, value)| { - self.log(&key, None); - (key, value) - }) + pub async fn pop_last(&mut self) -> Option<(K, V)> { + match self.tree.pop_last() { + Some((key, value)) => { + self.log(&key, None).await; + Some((key, value)) + } + None => None, + } } /// Asynchronously persisted version of [`BTreeMap::remove`] - pub fn remove(&mut self, key: &K) -> Option { - self.log(key, None); + pub async fn remove(&mut self, key: &K) -> Option { + self.log(key, None).await; self.tree.remove(key) } @@ -376,14 +380,14 @@ impl Tree { /// Returning None removes the item if it existed before. /// Asynchronously persisted. 
/// *API design borrowed from [`fjall::WriteTransaction::fetch_update`].* - pub fn fetch_update) -> Option>( + pub async fn fetch_update) -> Option>( &mut self, key: K, mut f: F, ) -> Option { let old_value = self.get(&key).map(|v| v.to_owned()); let new_value = f(old_value); - self.log(&key, new_value.as_ref()); + self.log(&key, new_value.as_ref()).await; if let Some(new_value) = new_value { self.tree.insert(key, new_value) } else { From aec3bb54ed98eae5df4caf3a414b8f32ec100795 Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 7 Sep 2025 12:00:00 +0200 Subject: [PATCH 100/241] async db --- src/concepts/pattern/ip/mod.rs | 2 +- src/daemon/filter/mod.rs | 98 +++++++++++++++--------------- src/daemon/filter/state.rs | 42 ++++++------- src/daemon/filter/tests.rs | 106 +++++++++++++++++++++------------ 4 files changed, 138 insertions(+), 110 deletions(-) diff --git a/src/concepts/pattern/ip/mod.rs b/src/concepts/pattern/ip/mod.rs index 3bf727c..ba62dd9 100644 --- a/src/concepts/pattern/ip/mod.rs +++ b/src/concepts/pattern/ip/mod.rs @@ -710,7 +710,7 @@ mod patternip_tests { ); let bed = bed.part2(filter, Local::now(), None).await; assert_eq!( - bed.manager.handle_line(&line, Local::now()), + bed.manager.handle_line(&line, Local::now()).await, React::Trigger, "line: {line}" ); diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index ab98579..3f3cfdc 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -3,14 +3,10 @@ pub mod tests; mod state; -use std::{ - collections::BTreeMap, - process::Stdio, - sync::{Arc, Mutex, MutexGuard}, -}; +use std::{collections::BTreeMap, process::Stdio, sync::Arc}; use regex::Regex; -use tokio::sync::Semaphore; +use tokio::sync::{Mutex, MutexGuard, Semaphore}; use tracing::{error, info}; use crate::{ @@ -67,7 +63,8 @@ impl FilterManager { shutdown, state: Arc::new(Mutex::new(State::new(filter, db, now).await?)), }; - this.clear_past_triggers_and_schedule_future_actions(now); + 
this.clear_past_triggers_and_schedule_future_actions(now) + .await; Ok(this) } @@ -85,8 +82,8 @@ impl FilterManager { async fn handle_match(&self, m: Match, now: Time) -> bool { #[allow(clippy::unwrap_used)] // propagating panics is ok - let mut state = self.state.lock().unwrap(); - state.clear_past_matches(now); + let mut state = self.state.lock().await; + state.clear_past_matches(now).await; // if Duplicate::Ignore and already triggered, skip if state.triggers.contains_key(&m) && Duplicate::Ignore == self.filter.duplicate { @@ -98,25 +95,27 @@ impl FilterManager { let trigger = match self.filter.retry { None => true, Some(retry) => { - state.add_match(m.clone(), now); + state.add_match(m.clone(), now).await; // Number of stored times for this match >= configured retry for this filter state.get_times(&m).await >= retry as usize } }; if trigger { - state.remove_match(&m); + state.remove_match(&m).await; let actions_left = if Duplicate::Extend == self.filter.duplicate { // Get number of actions left from last trigger state - .remove_trigger(&m).await + .remove_trigger(&m) + .await // Only one entry in the map because Duplicate::Extend .and_then(|map| map.first_key_value().map(|(_, n)| n.clone())) } else { None }; - state.add_trigger(m.clone(), now, actions_left); - self.schedule_exec(m, now, now, &mut state, false, actions_left); + state.add_trigger(m.clone(), now, actions_left).await; + self.schedule_exec(m, now, now, &mut state, false, actions_left) + .await; } trigger @@ -130,10 +129,11 @@ impl FilterManager { let match_ = self.filter.get_match_from_patterns(patterns)?; #[allow(clippy::unwrap_used)] // propagating panics is ok - let mut state = self.state.lock().unwrap(); - state.remove_match(&match_); - state.add_trigger(match_.clone(), now, None); - self.schedule_exec(match_, now, now, &mut state, false, None); + let mut state = self.state.lock().await; + state.remove_match(&match_).await; + state.add_trigger(match_.clone(), now, None).await; + 
self.schedule_exec(match_, now, now, &mut state, false, None) + .await; Ok(()) } @@ -155,7 +155,7 @@ impl FilterManager { }; #[allow(clippy::unwrap_used)] // propagating panics is ok - let mut state = self.state.lock().unwrap(); + let mut state = self.state.lock().await; let mut cs: BTreeMap<_, _> = { let cloned_matches = state @@ -167,27 +167,26 @@ impl FilterManager { .cloned() .collect::>(); - cloned_matches - .into_iter() - .map(|match_| { - // mutable State required here - if let Order::Flush = order { - state.remove_match(&match_); - } - let matches = state - .matches - .get(&match_) - .map(|times| times.len()) - .unwrap_or(0); - ( - match_, - PatternStatus { - matches, - ..Default::default() - }, - ) - }) - .collect() + let mut cs = BTreeMap::new(); + for match_ in cloned_matches { + // mutable State required here + if let Order::Flush = order { + state.remove_match(&match_).await; + } + let matches = state + .matches + .get(&match_) + .map(|times| times.len()) + .unwrap_or(0); + cs.insert( + match_, + PatternStatus { + matches, + ..Default::default() + }, + ); + } + cs }; let cloned_triggers = state @@ -203,7 +202,7 @@ impl FilterManager { let map = state.triggers.get(&m).unwrap().clone(); if let Order::Flush = order { - state.remove_trigger(&m); + state.remove_trigger(&m).await; } for (t, remaining) in map { @@ -227,7 +226,7 @@ impl FilterManager { self.shutdown.clone(), action, m.clone(), - ); + ).await; } } } @@ -269,7 +268,7 @@ impl FilterManager { if exec_time <= now { if state.decrement_trigger(&m, t, false).await { - exec_now(&self.exec_limit, self.shutdown.clone(), action, m); + exec_now(&self.exec_limit, self.shutdown.clone(), action, m).await; } } else { let this = self.clone(); @@ -288,9 +287,9 @@ impl FilterManager { // Exec action if triggered hasn't been already flushed if !exiting || action.on_exit { #[allow(clippy::unwrap_used)] // propagating panics is ok - let mut state = this.state.lock().unwrap(); + let mut state = 
this.state.lock().await; if state.decrement_trigger(&m, t, exiting).await { - exec_now(&this.exec_limit, this.shutdown, action, m); + exec_now(&this.exec_limit, this.shutdown, action, m).await; } } }); @@ -309,7 +308,7 @@ impl FilterManager { .count() as u64; #[allow(clippy::unwrap_used)] // propagating panics is ok - let mut state = self.state.lock().unwrap(); + let mut state = self.state.lock().await; let cloned_triggers = state .triggers @@ -327,7 +326,7 @@ impl FilterManager { .collect(); if map.is_empty() { - state.triggers.remove(&m); + state.triggers.remove(&m).await; } else { // Filter duplicates // unwrap is fine because map is not empty (see if) @@ -339,10 +338,11 @@ impl FilterManager { // No filtering Duplicate::Rerun => map, }; - state.triggers.insert(m.clone(), map.clone()); + state.triggers.insert(m.clone(), map.clone()).await; for (t, _) in map { // Schedule the upcoming times - self.schedule_exec(m.clone(), t, now, &mut state, true, None); + self.schedule_exec(m.clone(), t, now, &mut state, true, None) + .await; } } } diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index a04ba90..e74ba79 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -89,7 +89,7 @@ impl State { map } }) - }); + }).await; } } let mut this = Self { @@ -99,15 +99,15 @@ impl State { ordered_times, triggers, }; - this.clear_past_matches(now); - this.load_matches_from_ordered_times(); + this.clear_past_matches(now).await; + this.load_matches_from_ordered_times().await; Ok(this) } pub async fn add_match(&mut self, m: Match, t: Time) { let set = self.matches.entry(m.clone()).or_default(); set.insert(t); - self.ordered_times.insert(t, m); + self.ordered_times.insert(t, m).await; } pub async fn add_trigger(&mut self, m: Match, t: Time, action_count: Option) { @@ -124,7 +124,7 @@ impl State { value } }) - }); + }).await; } } @@ -132,7 +132,7 @@ impl State { pub async fn remove_match(&mut self, m: &Match) { if let Some(set) = 
self.matches.get(m) { for t in set { - self.ordered_times.remove(t); + self.ordered_times.remove(t).await; } self.matches.remove(m); } @@ -167,7 +167,7 @@ impl State { Some(map) } }) - }); + }).await; } // else don't do anything // Because that will remove the entry in the DB, and make @@ -181,7 +181,7 @@ impl State { map.insert(mt.t, count - 1); map }) - }); + }).await; } } exec_needed @@ -203,7 +203,7 @@ impl State { let (t, m) = self.ordered_times.first_key_value().unwrap(); (*t, m.clone()) }; - self.ordered_times.remove(&t); + self.ordered_times.remove(&t).await; if let Some(set) = self.matches.get(&m) { let mut set = set.clone(); set.remove(&t); @@ -396,7 +396,7 @@ mod tests { assert!(state.matches.is_empty()); // Add non-previously added match - state.add_match(one.clone(), now_less_1s); + state.add_match(one.clone(), now_less_1s).await; assert_eq!( state.ordered_times.tree(), &BTreeMap::from([(now_less_1s, one.clone()),]) @@ -407,7 +407,7 @@ mod tests { ); // Add previously added match - state.add_match(one.clone(), now_less_4s); + state.add_match(one.clone(), now_less_4s).await; assert_eq!( state.ordered_times.tree(), &BTreeMap::from([(now_less_1s, one.clone()), (now_less_4s, one.clone())]) @@ -418,7 +418,7 @@ mod tests { ); // Remove added match - state.remove_match(&one); + state.remove_match(&one).await; assert!(state.ordered_times.tree().is_empty()); assert!(state.matches.is_empty()); } @@ -436,7 +436,7 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Add unique trigger - state.add_trigger(one.clone(), now, None); + state.add_trigger(one.clone(), now, None).await; // Nothing is really added assert!(state.triggers.tree().is_empty()); @@ -490,7 +490,7 @@ mod tests { assert!(state.triggers.tree().is_empty()); // Add unique trigger - state.add_trigger(one.clone(), now, None); + state.add_trigger(one.clone(), now, None).await; assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 3)].into())]) @@ -514,7 +514,7 @@ mod tests { 
assert!(!state.decrement_trigger(&one, now, false).await); // Add unique trigger (but decrement exiting-like) - state.add_trigger(one.clone(), now, None); + state.add_trigger(one.clone(), now, None).await; assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now, 3)].into())]) @@ -544,8 +544,8 @@ mod tests { assert!(!state.decrement_trigger(&one, now, false).await); // Add trigger with neighbour - state.add_trigger(one.clone(), now, None); - state.add_trigger(one.clone(), now_plus_1s, None); + state.add_trigger(one.clone(), now, None).await; + state.add_trigger(one.clone(), now_plus_1s, None).await; assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) @@ -571,18 +571,18 @@ mod tests { // Decrement → false assert!(!state.decrement_trigger(&one, now, false).await); // Remove neighbour - state.remove_trigger(&one); + state.remove_trigger(&one).await; assert!(state.triggers.tree().is_empty()); // Add two neighbour triggers - state.add_trigger(one.clone(), now, None); - state.add_trigger(one.clone(), now_plus_1s, None); + state.add_trigger(one.clone(), now, None).await; + state.add_trigger(one.clone(), now_plus_1s, None).await; assert_eq!( state.triggers.tree(), &BTreeMap::from([(one.clone(), [(now_plus_1s, 3), (now, 3)].into())]) ); // Remove them - state.remove_trigger(&one); + state.remove_trigger(&one).await; assert!(state.triggers.tree().is_empty()); } } diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 936d728..1b7a72c 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -102,6 +102,7 @@ impl TestBed { &mut db, now, ) + .await .unwrap(), semaphore, } @@ -119,8 +120,8 @@ pub struct TestBed2 { } impl TestBed2 { - pub fn assert_empty_trees(&self) { - let state = self.manager.state.lock().unwrap(); + pub async fn assert_empty_trees(&self) { + let state = self.manager.state.lock().await; assert!(state.matches.is_empty(), "matches must be empty"); assert!( 
state.ordered_times.is_empty(), @@ -177,14 +178,17 @@ async fn three_matches_then_action_then_delayed_action() { let now2s = bed.now + TimeDelta::seconds(2); // No match - assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); - bed.assert_empty_trees(); + assert_eq!( + bed.manager.handle_line("test 131", now).await, + React::NoMatch + ); + bed.assert_empty_trees().await; // First match let one = vec!["one".to_string()]; - assert_eq!(bed.manager.handle_line("test one", now), React::Match); + assert_eq!(bed.manager.handle_line("test one", now).await, React::Match); { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert_eq!( state.matches, BTreeMap::from([(one.clone(), BTreeSet::from([now]))]), @@ -199,9 +203,12 @@ async fn three_matches_then_action_then_delayed_action() { } // Second match - assert_eq!(bed.manager.handle_line("test one", now1s), React::Match); + assert_eq!( + bed.manager.handle_line("test one", now1s).await, + React::Match + ); { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert_eq!( state.matches, BTreeMap::from([(one.clone(), BTreeSet::from([now, now1s]))]), @@ -217,9 +224,12 @@ async fn three_matches_then_action_then_delayed_action() { // Third match, exec let _block = bed.semaphore.acquire().await.unwrap(); - assert_eq!(bed.manager.handle_line("test one", now2s), React::Trigger); + assert_eq!( + bed.manager.handle_line("test one", now2s).await, + React::Trigger + ); { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert!( state.matches.is_empty(), "matches are emptied after trigger" @@ -241,7 +251,7 @@ async fn three_matches_then_action_then_delayed_action() { tokio::time::sleep(Duration::from_millis(40)).await; // Check first action assert_eq!( - bed.manager.state.lock().unwrap().triggers.tree(), + bed.manager.state.lock().await.triggers.tree(), &BTreeMap::from([(one.clone(), 
BTreeMap::from([(now2s, 1)]))]), "triggers still contain the triggered match with 1 action left" ); @@ -255,7 +265,7 @@ async fn three_matches_then_action_then_delayed_action() { tokio::time::sleep(Duration::from_millis(100)).await; // Check second action assert!( - bed.manager.state.lock().unwrap().triggers.is_empty(), + bed.manager.state.lock().await.triggers.is_empty(), "triggers are empty again" ); assert_eq!( @@ -264,7 +274,7 @@ async fn three_matches_then_action_then_delayed_action() { "the output file contains the result of the 2 actions" ); - bed.assert_empty_trees(); + bed.assert_empty_trees().await; } } @@ -296,12 +306,18 @@ async fn one_match_one_action() { let now = bed.now; // No match - assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); - bed.assert_empty_trees(); + assert_eq!( + bed.manager.handle_line("test 131", now).await, + React::NoMatch + ); + bed.assert_empty_trees().await; // match - assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); - bed.assert_empty_trees(); + assert_eq!( + bed.manager.handle_line("test one", now).await, + React::Trigger + ); + bed.assert_empty_trees().await; // the action executes tokio::time::sleep(Duration::from_millis(40)).await; @@ -311,7 +327,7 @@ async fn one_match_one_action() { "the output file contains the result of the first action" ); - bed.assert_empty_trees(); + bed.assert_empty_trees().await; } } @@ -343,14 +359,20 @@ async fn one_match_one_delayed_action() { let now = bed.now; // No match - assert_eq!(bed.manager.handle_line("test 131", now), React::NoMatch); - bed.assert_empty_trees(); + assert_eq!( + bed.manager.handle_line("test 131", now).await, + React::NoMatch + ); + bed.assert_empty_trees().await; // Match let one = vec!["one".to_string()]; - assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); + assert_eq!( + bed.manager.handle_line("test one", now).await, + React::Trigger + ); { - let state = bed.manager.state.lock().unwrap(); + let state = 
bed.manager.state.lock().await; assert!(state.matches.is_empty(), "matches stay empty"); assert!(state.ordered_times.is_empty(), "ordered_times stay empty"); assert_eq!( @@ -368,7 +390,7 @@ async fn one_match_one_delayed_action() { // The action executes tokio::time::sleep(Duration::from_millis(140)).await; assert!( - bed.manager.state.lock().unwrap().triggers.is_empty(), + bed.manager.state.lock().await.triggers.is_empty(), "triggers are empty again" ); assert_eq!( @@ -377,7 +399,7 @@ async fn one_match_one_delayed_action() { "the output file contains the result of the action" ); - bed.assert_empty_trees(); + bed.assert_empty_trees().await; } } @@ -421,7 +443,7 @@ async fn one_db_match_one_runtime_match_one_action() { let bed = bed.part2(filter, now, Some(db)).await; { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert_eq!( state.matches, BTreeMap::from([(one.clone(), BTreeSet::from([now1s]))]), @@ -436,8 +458,11 @@ async fn one_db_match_one_runtime_match_one_action() { } // match - assert_eq!(bed.manager.handle_line("test one", now), React::Trigger); - bed.assert_empty_trees(); + assert_eq!( + bed.manager.handle_line("test one", now).await, + React::Trigger + ); + bed.assert_empty_trees().await; // the action executes tokio::time::sleep(Duration::from_millis(40)).await; assert_eq!( @@ -486,7 +511,7 @@ async fn one_outdated_db_match() { // Finish setup let bed = bed.part2(filter, now, Some(db)).await; - bed.assert_empty_trees(); + bed.assert_empty_trees().await; } } @@ -545,6 +570,7 @@ async fn trigger_unmatched_pattern() { .collect(), now, ) + .await .unwrap(); // the action executes @@ -552,7 +578,7 @@ async fn trigger_unmatched_pattern() { // No matches, one action registered { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert!(state.matches.is_empty()); assert!(state.ordered_times.is_empty()); assert_eq!( @@ -625,6 +651,7 @@ async fn trigger_matched_pattern() 
{ .collect(), now, ) + .await .unwrap(); // the action executes @@ -632,7 +659,7 @@ async fn trigger_matched_pattern() { // No matches, one action registered { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert!(state.matches.is_empty()); assert!(state.ordered_times.is_empty()); assert_eq!( @@ -707,7 +734,7 @@ async fn trigger_deduplication_on_start() { // No matches, one or two action·s registered { - let state = bed.manager.state.lock().unwrap(); + let state = bed.manager.state.lock().await; assert!(state.matches.is_empty()); assert!(state.ordered_times.is_empty()); assert_eq!( @@ -773,12 +800,12 @@ async fn multiple_triggers() { let bed = bed.part2(filter, Local::now(), None).await; assert_eq!( - bed.manager.handle_line("test one", Local::now()), + bed.manager.handle_line("test one", Local::now()).await, React::Match, "Duplicate: {dup:?}" ); assert_eq!( - bed.manager.handle_line("test one", Local::now()), + bed.manager.handle_line("test one", Local::now()).await, React::Trigger, "Duplicate: {dup:?}" ); @@ -795,7 +822,7 @@ async fn multiple_triggers() { tokio::time::sleep(Duration::from_millis(50)).await; assert_eq!( - bed.manager.handle_line("test one", Local::now()), + bed.manager.handle_line("test one", Local::now()).await, match dup { Duplicate::Ignore => React::Match, _ => React::Match, @@ -804,7 +831,7 @@ async fn multiple_triggers() { ); assert_eq!( - bed.manager.handle_line("test one", Local::now()), + bed.manager.handle_line("test one", Local::now()).await, match dup { Duplicate::Ignore => React::Match, _ => React::Trigger, @@ -938,7 +965,7 @@ async fn extend_trigger_multiple_after_actions() { let bed = bed.part2(filter, Local::now(), None).await; assert_eq!( - bed.manager.handle_line("test one", Local::now()), + bed.manager.handle_line("test one", Local::now()).await, React::Trigger, ); @@ -952,7 +979,7 @@ async fn extend_trigger_multiple_after_actions() { ); assert_eq!( - bed.manager.handle_line("test 
one", Local::now()), + bed.manager.handle_line("test one", Local::now()).await, React::Trigger, ); @@ -1019,7 +1046,7 @@ async fn ip_specific() { let bed = bed.part2(filter, Local::now(), None).await; assert_eq!( - bed.manager.handle_line("test 1.2.3.4", Local::now()), + bed.manager.handle_line("test 1.2.3.4", Local::now()).await, React::Trigger, ); @@ -1035,7 +1062,8 @@ async fn ip_specific() { assert_eq!( bed.manager - .handle_line("test 1:2:3:4:5:6:7:8", Local::now()), + .handle_line("test 1:2:3:4:5:6:7:8", Local::now()) + .await, React::Trigger, ); From 974139610f07ec4a1f8ffa08276c1deaa5f27912 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 19 Sep 2025 12:00:00 +0200 Subject: [PATCH 101/241] async db Fixing deadlock on start. FilterManager send a lot of write operations on start. Each of them spawned a new Task to send the log in a channel. All those writes were unlocked when the Database started, shortly after. Now that the channel sending is awaited, it made a deadlock. Database's API and startup has been rewritten, so that open_tree is made accross the same channel used to log write operations. Database is started as soon as it is opened. The Database struct is now just a Sender to the real Database, now DatabaseManager. This removes the constraint for Tree opening happening before any write operation! 
--- src/daemon/filter/mod.rs | 6 +- src/daemon/filter/state.rs | 110 ++++++++++-------- src/daemon/filter/tests.rs | 22 ++-- src/daemon/mod.rs | 12 +- src/tests.rs | 12 +- src/treedb/mod.rs | 225 +++++++++++++++++++++++++------------ 6 files changed, 243 insertions(+), 144 deletions(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 3f3cfdc..864009c 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -109,7 +109,7 @@ impl FilterManager { .remove_trigger(&m) .await // Only one entry in the map because Duplicate::Extend - .and_then(|map| map.first_key_value().map(|(_, n)| n.clone())) + .and_then(|map| map.first_key_value().map(|(_, n)| *n)) } else { None }; @@ -240,12 +240,12 @@ impl FilterManager { /// Schedule execution for a given Match. /// We check first if the trigger is still here /// because pending actions can be flushed. - async fn schedule_exec<'a>( + async fn schedule_exec( &self, m: Match, t: Time, now: Time, - state: &'a mut MutexGuard<'_, State>, + state: &mut MutexGuard<'_, State>, startup: bool, actions_left: Option, ) { diff --git a/src/daemon/filter/state.rs b/src/daemon/filter/state.rs index e74ba79..b539780 100644 --- a/src/daemon/filter/state.rs +++ b/src/daemon/filter/state.rs @@ -64,32 +64,40 @@ impl State { db: &mut Database, now: Time, ) -> Result { - let ordered_times = db.open_tree( - filter_ordered_times_db_name(filter), - filter.retry_duration.unwrap_or_default(), - |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), - )?; - let mut triggers = db.open_tree( - filter_triggers_db_name(filter), - filter.longuest_action_duration, - |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)), - )?; - if triggers.is_empty() { - let old_triggers = db.open_tree( - filter_triggers_old_db_name(filter), + let ordered_times = db + .open_tree( + filter_ordered_times_db_name(filter), + filter.retry_duration.unwrap_or_default(), + |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), + ) + .await?; + let 
mut triggers = db + .open_tree( + filter_triggers_db_name(filter), filter.longuest_action_duration, - |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), - )?; + |(key, value)| Ok((to_match(&key)?, to_timemap(&value)?)), + ) + .await?; + if triggers.is_empty() { + let old_triggers = db + .open_tree( + filter_triggers_old_db_name(filter), + filter.longuest_action_duration, + |(key, value)| Ok((to_matchtime(&key)?, to_u64(&value)?)), + ) + .await?; for (mt, n) in old_triggers.iter() { - triggers.fetch_update(mt.m.clone(), |map| { - Some(match map { - None => [(mt.t, *n)].into(), - Some(mut map) => { - map.insert(mt.t, *n); - map - } + triggers + .fetch_update(mt.m.clone(), |map| { + Some(match map { + None => [(mt.t, *n)].into(), + Some(mut map) => { + map.insert(mt.t, *n); + map + } + }) }) - }).await; + .await; } } let mut this = Self { @@ -116,15 +124,17 @@ impl State { // Add the (Match, Time) to the triggers map let n = action_count .unwrap_or_else(|| self.filter.filtered_actions_from_match(&m).len() as u64); - self.triggers.fetch_update(m, |map| { - Some(match map { - None => [(t, n)].into(), - Some(mut value) => { - value.insert(t, n); - value - } + self.triggers + .fetch_update(m, |map| { + Some(match map { + None => [(t, n)].into(), + Some(mut value) => { + value.insert(t, n); + value + } + }) }) - }).await; + .await; } } @@ -158,16 +168,18 @@ impl State { exec_needed = true; if count <= 1 { if !exiting { - self.triggers.fetch_update(mt.m, |map| { - map.and_then(|mut map| { - map.remove(&mt.t); - if map.is_empty() { - None - } else { - Some(map) - } + self.triggers + .fetch_update(mt.m, |map| { + map.and_then(|mut map| { + map.remove(&mt.t); + if map.is_empty() { + None + } else { + Some(map) + } + }) }) - }).await; + .await; } // else don't do anything // Because that will remove the entry in the DB, and make @@ -176,12 +188,14 @@ impl State { // - The current for action counting, not persisted // - Another like ordered_times, Tree, persisted } else 
{ - self.triggers.fetch_update(mt.m, |map| { - map.map(|mut map| { - map.insert(mt.t, count - 1); - map + self.triggers + .fetch_update(mt.m, |map| { + map.map(|mut map| { + map.insert(mt.t, count - 1); + map + }) }) - }).await; + .await; } } exec_needed @@ -328,8 +342,7 @@ mod tests { ]; for trigger_db in triggers { - let mut db = TempDatabase::default().await; - db.set_loaded_db(HashMap::from([ + let mut db = TempDatabase::from_loaded_db(HashMap::from([ ( "filter_ordered_times_s1.f1".into(), HashMap::from([ @@ -344,7 +357,8 @@ mod tests { ]), ), trigger_db, - ])); + ])) + .await; let state = State::new(filter, &mut db, now).await.unwrap(); diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 1b7a72c..baccfd0 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -427,17 +427,15 @@ async fn one_db_match_one_runtime_match_one_action() { &bed.az_patterns, ); - let mut db = TempDatabase::default().await; - // Pre-add match let now = Local::now(); let one = vec!["one".to_string()]; let now1s = now - TimeDelta::seconds(1); - db.set_loaded_db(HashMap::from([( + let db = TempDatabase::from_loaded_db(HashMap::from([( filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])); + )])).await; // Finish setup let bed = bed.part2(filter, now, Some(db)).await; @@ -497,17 +495,15 @@ async fn one_outdated_db_match() { &bed.az_patterns, ); - let mut db = TempDatabase::default().await; - // Pre-add match let now = Local::now(); let one = vec!["one".to_string()]; let now1s = now - TimeDelta::milliseconds(1001); - db.set_loaded_db(HashMap::from([( + let db = TempDatabase::from_loaded_db(HashMap::from([( filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])); + )])).await; // Finish setup let bed = bed.part2(filter, now, Some(db)).await; @@ -634,11 +630,10 @@ async fn trigger_matched_pattern() { let now1s = now - 
TimeDelta::milliseconds(10); let one = vec!["one".to_string()]; - let mut db = TempDatabase::default().await; - db.set_loaded_db(HashMap::from([( + let db = TempDatabase::from_loaded_db(HashMap::from([( filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])); + )])).await; let bed = bed.part2(filter, now, Some(db)).await; bed.manager @@ -716,8 +711,7 @@ async fn trigger_deduplication_on_start() { let now2s = now - TimeDelta::milliseconds(1030); let one = vec!["one".to_string()]; - let mut db = TempDatabase::default().await; - db.set_loaded_db(HashMap::from([( + let db = TempDatabase::from_loaded_db(HashMap::from([( filter_triggers_db_name(filter), HashMap::from([( one.clone().into(), @@ -726,7 +720,7 @@ async fn trigger_deduplication_on_start() { now2s.to_rfc3339(): 1, }), )]), - )])); + )])).await; let bed = bed.part2(filter, now, Some(db)).await; // the action executes diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index c62d84f..c68f8e2 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -41,7 +41,7 @@ pub async fn daemon( let shutdown = ShutdownController::new(); // Open Database - let mut db = Database::open(config).await?; + let mut db = Database::open(config, shutdown.token()).await?; // Open Socket let socket = Socket::open(socket).await?; @@ -80,9 +80,6 @@ pub async fn daemon( (state, stream_managers) }; - // Run database task - let mut db_status_rx = db.manager(shutdown.token()); - // Run socket task socket.manager(config, state, shutdown.token()); @@ -101,17 +98,18 @@ pub async fn daemon( let _ = task_handle.await; } + // Release last db's sender + let mut db_status = db.quit(); + debug!("Asking for all tasks to quit..."); shutdown.ask_shutdown(); debug!("Waiting for all tasks to quit..."); shutdown.wait_shutdown().await; - let db_status = db_status_rx.try_recv(); - let stop_ok = config.stop(); - if let Ok(Err(err)) = db_status { + if let Ok(Err(err)) = db_status.try_recv() { 
Err(format!("database error: {}", err).into()) } else if !signal_received.load(Ordering::SeqCst) { Err("quitting because all streams finished".into()) diff --git a/src/tests.rs b/src/tests.rs index 2b61254..f017f25 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -9,7 +9,7 @@ use std::{ use tempfile::TempDir; -use crate::treedb::Database; +use crate::treedb::{Database, LoadedDB}; pub struct Fixture { path: PathBuf, @@ -65,7 +65,15 @@ pub struct TempDatabase { impl TempDatabase { pub async fn default() -> Self { let _tempdir = TempDir::new().unwrap(); - let db = Database::from_dir(_tempdir.path()).await.unwrap(); + let db = Database::from_dir(_tempdir.path(), None).await.unwrap(); + TempDatabase { _tempdir, db } + } + + pub async fn from_loaded_db(loaded_db: LoadedDB) -> Self { + let _tempdir = TempDir::new().unwrap(); + let db = Database::from_dir(_tempdir.path(), Some(loaded_db)) + .await + .unwrap(); TempDatabase { _tempdir, db } } } diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index e14db68..ed54a7a 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -39,16 +39,29 @@ use raw::{ReadDB, WriteDB}; mod raw; +/// Any order the Database can receive +enum Order { + Log(Entry), + OpenTree(OpenTree), +} + /// Entry sent from [`Tree`] to [`Database`] #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct Entry { +struct Entry { pub tree: String, pub key: Value, pub value: Option, pub expiry: Time, } -pub type LoadedDB = HashMap>; +/// Order to receive a tree from previous Database +pub struct OpenTree { + name: String, + resp: oneshot::Sender>, +} + +type LoadedTree = HashMap; +pub type LoadedDB = HashMap; const DB_NAME: &str = "reaction.db"; const DB_NEW_NAME: &str = "reaction.new.db"; @@ -63,12 +76,43 @@ impl Config { } } +/// Public-facing API for a treedb Database +pub struct Database { + entry_tx: Option>, + error_rx: oneshot::Receiver>, +} + +impl Database { + /// Open a new Database, whom task will start in the background. 
+ /// You'll have to: + /// - drop all [`Tree`]s, + /// - call [`Self::quit`], + /// to have the Database properly quit. + /// + /// You can wait for [`Self::quit`] returned channel to know how it went. + pub async fn open(config: &Config, shutdown: ShutdownToken) -> Result { + let (manager, entry_tx) = DatabaseManager::open(config).await?; + let error_rx = manager.manager(shutdown); + Ok(Self { + entry_tx: Some(entry_tx), + error_rx, + }) + } + + /// Permit to close DB's channel. + /// Without this function manually called, the DB can't close. + pub fn quit(self) -> oneshot::Receiver> { + self.error_rx + } +} + // TODO rotate_db at a regular interval instead of every N bytes? // This would make more sense, as actual garbage collection is time-based /// A [`Database`] logs all write operations on [`Tree`]s in a single file. -/// Logs are written asynchronously, so the write operations in RAM will never block. -pub struct Database { +/// Logs are written asynchronously, so the write operations in RAM will block only when the +/// underlying channel is full. +struct DatabaseManager { /// Inner database write_db: WriteDB, /// [`Tree`]s loaded from disk @@ -79,10 +123,7 @@ pub struct Database { /// New database atomically replaces the old one when its writing is done. new_path: PathBuf, /// The receiver on [`Tree`] write operations - entry_rx: mpsc::Receiver, - /// The sender on [`Tree`] write operations. - /// Only used to clone new senders for new Trees. 
- entry_tx: Option>, + entry_rx: mpsc::Receiver, /// The interval at which the database must be flushed to kernel flush_every: Duration, /// The maximum bytes that must be written until the database is rotated @@ -91,8 +132,8 @@ pub struct Database { bytes_written: usize, } -impl Database { - pub async fn open(config: &Config) -> Result { +impl DatabaseManager { + pub async fn open(config: &Config) -> Result<(DatabaseManager, mpsc::Sender), IoError> { let path = config.path_of(DB_NAME); let new_path = config.path_of(DB_NEW_NAME); @@ -100,17 +141,19 @@ impl Database { let (entry_tx, entry_rx) = mpsc::channel(1000); - Ok(Database { - write_db, - loaded_db, - path, - new_path, - entry_rx, - entry_tx: Some(entry_tx), - flush_every: Duration::from_secs(2), - max_bytes: 20 * 1024 * 1024, // 20 MiB - bytes_written: 0, - }) + Ok(( + DatabaseManager { + write_db, + loaded_db, + path, + new_path, + entry_rx, + flush_every: Duration::from_secs(2), + max_bytes: 20 * 1024 * 1024, // 20 MiB + bytes_written: 0, + }, + entry_tx, + )) } pub fn manager(mut self, shutdown: ShutdownToken) -> oneshot::Receiver> { @@ -123,8 +166,8 @@ impl Database { interval.set_missed_tick_behavior(MissedTickBehavior::Delay); let mut status = loop { tokio::select! { - entry = self.entry_rx.recv() => { - if let Err(err) = self.handle_entry(entry).await { + order = self.entry_rx.recv() => { + if let Err(err) = self.handle_order(order).await { shutdown.ask_shutdown(); break err; } @@ -141,10 +184,9 @@ impl Database { // Finish consuming received entries when shutdown asked if status.is_none() { - self.entry_tx = None; loop { - let entry = self.entry_rx.recv().await; - if let Err(err) = self.handle_entry(entry).await { + let order = self.entry_rx.recv().await; + if let Err(err) = self.handle_order(order).await { status = err; break; } @@ -168,33 +210,42 @@ impl Database { error_rx } - /// Write a received entry. Return: + /// Executes an order. 
Returns: /// - Err(Some) if there was an error, /// - Err(None) is channel is closed, /// - Ok(()) in general case. - async fn handle_entry(&mut self, entry: Option) -> Result<(), Option> { - match entry { - Some(entry) => match self.write_db.write_entry(&entry).await { - Ok(bytes_written) => { - self.bytes_written += bytes_written; - if self.bytes_written > self.max_bytes { - match self.rotate_db().await { - Ok(_) => { - self.bytes_written = 0; - Ok(()) - } - Err(err) => Err(Some(format!("while rotating database: {err}"))), - } - } else { - Ok(()) - } - } - Err(err) => Err(Some(format!("while writing entry to database: {err}"))), - }, + async fn handle_order(&mut self, order: Option) -> Result<(), Option> { + match order { + Some(Order::Log(entry)) => self.handle_entry(entry).await.map_err(Option::Some), + Some(Order::OpenTree(open_tree)) => { + self.handle_open_tree(open_tree); + Ok(()) + } None => Err(None), } } + /// Write a received entry. + async fn handle_entry(&mut self, entry: Entry) -> Result<(), String> { + match self.write_db.write_entry(&entry).await { + Ok(bytes_written) => { + self.bytes_written += bytes_written; + if self.bytes_written > self.max_bytes { + match self.rotate_db().await { + Ok(_) => { + self.bytes_written = 0; + Ok(()) + } + Err(err) => Err(format!("while rotating database: {err}")), + } + } else { + Ok(()) + } + } + Err(err) => Err(format!("while writing entry to database: {err}")), + } + } + /// Flush inner database. async fn flush(&mut self) -> Result<(), String> { self.write_db @@ -272,7 +323,7 @@ pub struct Tree { /// The inner BTreeMap tree: BTreeMap, /// The sender that permits to asynchronously send write operations to database - tx: mpsc::Sender, + tx: mpsc::Sender, } impl Database { @@ -280,7 +331,7 @@ impl Database { /// Takes a closure (or regular function) that converts (Value, Value) JSON entries /// into (K, V) typed entries. /// Helpers for this closure can be found in the [`helpers`] module. 
- pub fn open_tree( + pub async fn open_tree( &mut self, name: String, entry_timeout: TimeDelta, @@ -289,13 +340,24 @@ impl Database { where F: Fn((Value, Value)) -> Result<(K, V), String>, { - // Get a clone of the channel sender - let tx = self - .entry_tx - .clone() - .ok_or("Database is closing".to_string())?; + // Request the tree + let (tx, rx) = oneshot::channel(); + let entry_tx = match self.entry_tx.clone() { + None => return Err("Database is closing".to_string()), + Some(entry_tx) => { + entry_tx + .send(Order::OpenTree(OpenTree { + name: name.clone(), + resp: tx, + })) + .await + .map_err(|_| "Database did not answer")?; + // Get a clone of the channel sender + entry_tx.clone() + } + }; // Load the tree from its JSON - let tree = if let Some(json_tree) = self.loaded_db.remove(&name) { + let tree = if let Some(json_tree) = rx.await.map_err(|_| "Database did not respond")? { json_tree .into_iter() .map(map_f) @@ -307,15 +369,25 @@ impl Database { id: name, entry_timeout, tree, - tx, + tx: entry_tx, }) } +} + +impl DatabaseManager { + /// Creates a new Tree with the given name and entry timeout. + /// Takes a closure (or regular function) that converts (Value, Value) JSON entries + /// into (K, V) typed entries. + /// Helpers for this closure can be found in the [`helpers`] module. + pub fn handle_open_tree(&mut self, open_tree: OpenTree) { + let _ = open_tree.resp.send(self.loaded_db.remove(&open_tree.name)); + } // TODO keep only tree names, and use it for next db rotation to remove associated entries - /// Drops Trees that have not been loaded already - pub fn drop_trees(&mut self) { - self.loaded_db = HashMap::default(); - } + // Drops Trees that have not been loaded already + // pub fn drop_trees(&mut self) { + // self.loaded_db = HashMap::default(); + // } } // Gives access to all read-only functions @@ -339,7 +411,7 @@ impl Tree { }; let tx = self.tx.clone(); // FIXME what if send fails? 
- let _ = tx.send(e).await; + let _ = tx.send(Order::Log(e)).await; } /// Asynchronously persisted version of [`BTreeMap::insert`] @@ -410,15 +482,24 @@ mod tests { use tempfile::{NamedTempFile, TempDir}; use tokio::fs::{write, File}; - use crate::concepts::Config; + use crate::{concepts::Config, daemon::ShutdownController}; use super::{ - helpers::*, raw::WriteDB, rotate_db, Database, Entry, KeyType, LoadedDB, Tree, ValueType, - DB_NAME, + helpers::*, raw::WriteDB, rotate_db, Database, DatabaseManager, Entry, KeyType, LoadedDB, + Tree, ValueType, DB_NAME, }; + impl DatabaseManager { + pub fn set_loaded_db(&mut self, loaded_db: LoadedDB) { + self.loaded_db = loaded_db; + } + } + impl Database { - pub async fn from_dir(dir_path: &Path) -> Result { + pub async fn from_dir( + dir_path: &Path, + loaded_db: Option, + ) -> Result { let config_path = dir_path.join("reaction.jsonnet"); write( &config_path, @@ -443,11 +524,12 @@ mod tests { .await?; let config = Config::from_path(&config_path).unwrap(); - Database::open(&config).await - } - - pub fn set_loaded_db(&mut self, loaded_db: LoadedDB) { - self.loaded_db = loaded_db; + let (mut manager, entry_tx) = DatabaseManager::open(&config).await?; + if let Some(loaded_db) = loaded_db { + manager.set_loaded_db(loaded_db) + } + let error_rx = manager.manager(ShutdownController::new().token()); + Ok(Self { entry_tx: Some(entry_tx), error_rx }) } } @@ -629,7 +711,7 @@ mod tests { write_db.close().await.unwrap(); drop(write_db); - let mut database = Database::from_dir(dir_path).await.unwrap(); + let mut database = Database::from_dir(dir_path, None).await.unwrap(); let time_match = database .open_tree( @@ -637,6 +719,7 @@ mod tests { TimeDelta::seconds(2), |(key, value)| Ok((to_time(&key)?, to_match(&value)?)), ) + .await .unwrap(); assert_eq!( time_match.tree, @@ -653,6 +736,7 @@ mod tests { TimeDelta::hours(2), |(key, value)| Ok((to_match(&key)?, to_timeset(&value)?)), ) + .await .unwrap(); assert_eq!( match_timeset.tree, @@ 
-668,6 +752,7 @@ mod tests { TimeDelta::hours(2), |(key, value)| Ok((to_match(&key)?, to_timeset(&value)?)), ) + .await .unwrap(); assert_eq!(unknown_tree.tree, BTreeMap::default()); } From 278baaa3e6f0e43af596226097fbacb24e7a9c02 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 19 Sep 2025 12:00:00 +0200 Subject: [PATCH 102/241] Shorter variant of the heavy load benchmark for quicker results --- bench/small-heavy-load.yml | 74 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 bench/small-heavy-load.yml diff --git a/bench/small-heavy-load.yml b/bench/small-heavy-load.yml new file mode 100644 index 0000000..931a456 --- /dev/null +++ b/bench/small-heavy-load.yml @@ -0,0 +1,74 @@ +--- +# This configuration permits to test reaction's performance +# under a very high load +# +# It keeps regexes super simple, to avoid benchmarking the `regex` crate, +# and benchmark reaction's internals instead. +concurrency: 32 + +patterns: + num: + regex: '[0-9]{3}' + ip: + regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})' + ignore: + - 1.0.0.1 + +streams: + tailDown1: + cmd: [ 'sh', '-c', 'sleep 2; seq 1001 | while read i; do echo found $i; done' ] + filters: + find1: + regex: + - '^found ' + retry: 9 + retryperiod: 6m + actions: + damn: + cmd: [ 'sleep', '0.0' ] + undamn: + cmd: [ 'sleep', '0.0' ] + after: 1m + onexit: false + tailDown2: + cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ] + filters: + find2: + regex: + - '^found ' + retry: 480 + retryperiod: 6m + actions: + damn: + cmd: [ 'sleep', '0.0' ] + undamn: + cmd: [ 'sleep', '0.0' ] + after: 1m + onexit: false + tailDown3: + cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ] + filters: + find3: + regex: + - '^found ' + retry: 480 + retryperiod: 6m + actions: + damn: + cmd: [ 'sleep', '0.0' ] + undamn: + cmd: [ 'sleep', '0.0' ] + after: 1m + onexit: false + find4: + regex: + - 
'^trouvé ' + retry: 480 + retryperiod: 6m + actions: + damn: + cmd: [ 'sleep', '0.0' ] + undamn: + cmd: [ 'sleep', '0.0' ] + after: 1m + onexit: false From c6e4af96cd9cb0f0afe97ccb5feb2399244e8531 Mon Sep 17 00:00:00 2001 From: Baptiste Careil Date: Wed, 17 Sep 2025 08:08:16 +0200 Subject: [PATCH 103/241] Fix some triggers no longer triggering after being loaded from db --- src/daemon/filter/mod.rs | 4 +- tests/persistence.rs | 51 ++++++++++++++++++++++ tests/test-conf/test-resume-action.jsonnet | 46 +++++++++++++++++++ 3 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 tests/persistence.rs create mode 100644 tests/test-conf/test-resume-action.jsonnet diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 864009c..b847586 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -257,7 +257,9 @@ impl FilterManager { .filter(|action| !startup || !action.oneshot) // skip any actions .skip(match actions_left { - Some(actions_left) => self.filter.actions.len() - actions_left as usize, + Some(actions_left) => { + self.filter.filtered_actions_from_match(&m).len() - actions_left as usize + } None => 0, }); diff --git a/tests/persistence.rs b/tests/persistence.rs new file mode 100644 index 0000000..3cbfd74 --- /dev/null +++ b/tests/persistence.rs @@ -0,0 +1,51 @@ +use std::{error::Error, path::Path, time::Duration}; + +use assert_cmd::Command; +use assert_fs::prelude::*; +use predicates::prelude::predicate; + +#[test] +fn resume_action() -> Result<(), Box> { + let tmp_dir = assert_fs::TempDir::new()?; + + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-resume-action.jsonnet"))?; + + // first run + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(Duration::from_secs(5)); + // Expected exit 1: all stream exited + cmd.assert().code(predicate::eq(1)); + + // expect a single 
match from the stream command + let expected = ["starting", "start4 10.1.0.1", "stopping"].join("\n") + "\n"; + tmp_dir.child("log").assert(&expected); + + // second run, expect to resume action + let mut cmd = Command::cargo_bin("reaction")?; + cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); + cmd.current_dir(tmp_dir.path()); + cmd.timeout(Duration::from_secs(5)); + // Expected exit 1: all stream exited + cmd.assert().code(predicate::eq(1)); + + let expected = [ + "starting", + "start4 10.1.0.1", // from the stream command + "stopping", + "starting", + "start4 10.1.0.1", // previous action loaded from db + "stop4 10.1.0.1", // previous action lapses + "start4 10.1.0.1", // from the stream command + "stopping", + ] + .join("\n") + + "\n"; + + tmp_dir.child("log").assert(&expected); + + Ok(()) +} diff --git a/tests/test-conf/test-resume-action.jsonnet b/tests/test-conf/test-resume-action.jsonnet new file mode 100644 index 0000000..3e92ef8 --- /dev/null +++ b/tests/test-conf/test-resume-action.jsonnet @@ -0,0 +1,46 @@ +{ + patterns: { + ip: { + type: 'ip', + ipv6mask: 64, + }, + }, + start: [ + ['sh', '-c', 'echo starting >>./log'], + ], + stop: [ + ['sh', '-c', 'echo stopping >>./log'], + ], + streams: { + s1: { + cmd: ['sh', '-c', 'sleep 2; echo T 10.1.0.1; sleep 0.2; echo T 10.1.0.1; sleep 0.8'], + filters: { + f1: { + regex: ['T '], + actions: { + log_start4: { + ipv4only: true, + cmd: ['sh', '-c', 'echo start4 >>./log'], + }, + log_start6: { + ipv6only: true, + cmd: ['sh', '-c', 'echo start6 >>./log'], + }, + log_stop4: { + ipv4only: true, + onexit: false, + after: '2s', + cmd: ['sh', '-c', 'echo stop4 >> ./log'], + }, + log_stop6: { + ipv6only: true, + onexit: false, + after: '2s', + cmd: ['sh', '-c', 'echo stop6 >> ./log'], + }, + }, + }, + }, + }, + }, +} From 7f0cf326663f7e3ea934ecf1601ff78d4364afea Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 20 Sep 2025 12:00:00 +0200 Subject: [PATCH 104/241] v2.2.1 --- Cargo.lock | 2 +- 
Cargo.toml | 2 +- TODO | 6 ------ 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a066f3a..d899028 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -990,7 +990,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.2.0" +version = "2.2.1" dependencies = [ "assert_cmd", "assert_fs", diff --git a/Cargo.toml b/Cargo.toml index 6027af7..3596016 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.2.0" +version = "2.2.1" edition = "2021" authors = ["ppom "] license = "AGPL-3.0" diff --git a/TODO b/TODO index 663070f..9d5462a 100644 --- a/TODO +++ b/TODO @@ -1,9 +1,3 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) - stream: test regex ending with $ - -should an ipv6-mapped ipv4 match a pattern of type ipv6? -should it be normalized as ipv4 then? - -fix order of db writes subject to race condition (make writes async?) DB: add tests on stress testing (lines should always be in order) From fa63a9feb8842fb32cabf62df0e7c6f480eb8db5 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 7 Oct 2025 12:00:00 +0200 Subject: [PATCH 105/241] README: fix example YAML --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 68ec110..338a889 100644 --- a/README.md +++ b/README.md @@ -67,8 +67,8 @@ streams: regex: - 'authentication failure;.*rhost=' - 'Failed password for .* from ' - - 'Invalid user .* from ', - - 'banner exchange: Connection from port [0-9]*: invalid format', + - 'Invalid user .* from ' + - 'banner exchange: Connection from port [0-9]*: invalid format' retry: 3 retryperiod: '6h' actions: From d36e54c55bf1f676c7206a1fe6a6912ddc172020 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 31 Oct 2025 12:00:00 +0100 Subject: [PATCH 106/241] README: add mail --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 338a889..1461db7 100644 --- a/README.md +++ b/README.md @@ 
-245,6 +245,7 @@ French version: [#reaction-dev-fr:club1.fr](https://matrix.to/#/#reaction-dev-fr You can ask for help in the issues or in this Matrix room: [#reaction-users-en:club1.fr](https://matrix.to/#/#reaction-users-en:club1.fr). French version: [#reaction-users-fr:club1.fr](https://matrix.to/#/#reaction-users-fr:club1.fr). +You can alternatively send a mail: `reaction` on domain `ppom.me`. ## Funding From 22125dfd53b8b4a0fd4770c046202cd8639d2b24 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 25 Sep 2025 12:00:00 +0200 Subject: [PATCH 107/241] First plugin shot --- .gitignore | 1 + reaction-plugin/Cargo.lock | 319 +++++++++++++++++++++++++++++++++++ reaction-plugin/Cargo.toml | 7 + reaction-plugin/src/lib.rs | 84 +++++++++ reaction-plugin/src/value.rs | 14 ++ 5 files changed, 425 insertions(+) create mode 100644 reaction-plugin/Cargo.lock create mode 100644 reaction-plugin/Cargo.toml create mode 100644 reaction-plugin/src/lib.rs create mode 100644 reaction-plugin/src/value.rs diff --git a/.gitignore b/.gitignore index 7672b7d..a9717f0 100644 --- a/.gitignore +++ b/.gitignore @@ -18,5 +18,6 @@ debian-packaging/* export-go-db/export-go-db import-rust-db/target /target +reaction-plugin/target /local .ccls-cache diff --git a/reaction-plugin/Cargo.lock b/reaction-plugin/Cargo.lock new file mode 100644 index 0000000..a4701f3 --- /dev/null +++ b/reaction-plugin/Cargo.lock @@ -0,0 +1,319 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "cfg-if" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "indexmap" +version = "2.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "libc" +version = "0.2.176" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" + +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + 
+[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "reaction-plugin" +version = "0.1.0" +dependencies = [ + "stabby", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + 
+[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "sha2-const-stable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" + +[[package]] +name = "stabby" +version = "72.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976399a0c48ea769ef7f5dc303bb88240ab8d84008647a6b2303eced3dab3945" +dependencies = [ + "libloading", + "rustversion", + "stabby-abi", +] + +[[package]] +name = "stabby-abi" +version = "72.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7b54832a9a1f92a0e55e74a5c0332744426edc515bb3fbad82f10b874a87f0d" +dependencies = [ + "rustc_version", + "rustversion", + "sha2-const-stable", + "stabby-macros", +] + +[[package]] +name = "stabby-macros" +version = "72.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a768b1e51e4dbfa4fa52ae5c01241c0a41e2938fdffbb84add0c8238092f9091" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "rand", + "syn 1.0.109", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + 
"proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "toml_datetime" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" +dependencies = [ + "indexmap", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" +dependencies = [ + "winnow", +] + +[[package]] +name = "unicode-ident" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "windows-link" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + +[[package]] +name = "zerocopy" +version = "0.8.27" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] diff --git a/reaction-plugin/Cargo.toml b/reaction-plugin/Cargo.toml new file mode 100644 index 0000000..c015504 --- /dev/null +++ b/reaction-plugin/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "reaction-plugin" +version = "0.1.0" +edition = "2024" + +[dependencies] +stabby = { version = "72.1.1", features = ["libloading"] } diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs new file mode 100644 index 0000000..93ec74d --- /dev/null +++ b/reaction-plugin/src/lib.rs @@ -0,0 +1,84 @@ +/// This crate permits to define a stable ABI between reaction's core and plugins. +/// +/// It uses [`stabby`], that rewrites essential std types as stable repr("C") types, +/// and permits to check a lof of safeness related issues at compile time and at runtime, when +/// loading a library. +/// +/// To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides +/// the entrypoint for a plugin. +/// It permits to define 0 to n (stream, filter, action) custom types. +use stabby::{ + boxed::Box, future::DynFuture, option::Option, result::Result, string::String, vec::Vec, +}; + +mod value; +pub use value::Value; + +/// This is the only trait that **must** be implemented by a plugin. +/// It provides lists of stream, filter and action types implemented by a dynamic plugin. +#[stabby::stabby(checked)] +pub trait PluginInfo { + /// Return all stream types that should be made available to reaction users + extern "C" fn stream_impls(&self) -> Vec; + /// Return one instance of a given type. 
+ extern "C" fn stream_impl( + &mut self, + stream_name: String, + stream_type: String, + config: Value, + ) -> Result; + + /// Return all filter types that should be made available to reaction users + extern "C" fn filter_impls(&self) -> Vec; + /// Return one instance of a given type. + extern "C" fn filter_impl( + &mut self, + stream_name: String, + filter_name: String, + filter_type: String, + config: Value, + ) -> Result; + + /// Return all action types that should be made available to reaction users + extern "C" fn action_impls(&self) -> Vec; + /// Return one instance of a given type. + extern "C" fn action_impl( + &mut self, + stream_name: String, + filter_name: String, + action_name: String, + action_type: String, + config: Value, + ) -> Result; + + /// Notify the plugin that setup is finished, permitting a last occasion to report an error + /// (For example if a stream wants a companion action but it hasn't been initialized) + extern "C" fn finish_setup(&mut self) -> Result<(), String>; +} + +pub enum PluginError { + InexistantType, + InitialisationError(String), +} + +type BoxedStreamImpl = stabby::dynptr!(Box); +type BoxedFilterImpl = stabby::dynptr!(Box); +type BoxedActionImpl = stabby::dynptr!(Box); + +#[stabby::stabby(checked)] +pub trait StreamImpl { + extern "C" fn next<'a>(&'a mut self) -> DynFuture<'a, Result>, String>>; + extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; +} + +#[stabby::stabby(checked)] +pub trait FilterImpl { + extern "C" fn matches(&mut self, line: String) -> bool; + extern "C" fn close(&mut self) -> Result<(), String>; +} + +#[stabby::stabby(checked)] +pub trait ActionImpl { + extern "C" fn next<'a>(&'a mut self) -> DynFuture<'a, Result>, String>>; + extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; +} diff --git a/reaction-plugin/src/value.rs b/reaction-plugin/src/value.rs new file mode 100644 index 0000000..34322b0 --- /dev/null +++ b/reaction-plugin/src/value.rs @@ -0,0 
+1,14 @@ +use stabby::{string::String, tuple::Tuple2, vec::Vec}; + +/// Represents a configuration value. +/// This is not meant as an efficient type, but as a very flexible one. +#[stabby::stabby] +pub enum Value { + Null, + Bool(bool), + Integer(i64), + Float(f64), + String(String), + Array(Vec), + Object(Vec>), +} From 8229f01182411d100db0cca3008be9077245e004 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 26 Sep 2025 12:00:00 +0200 Subject: [PATCH 108/241] Dependency cleanup --- Cargo.lock | 10 ---------- Cargo.toml | 11 +++++++++-- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d899028..e4515ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1010,7 +1010,6 @@ dependencies = [ "serde_yaml", "tempfile", "thiserror", - "timer", "tokio", "tokio-util", "tracing", @@ -1296,15 +1295,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "timer" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31d42176308937165701f50638db1c31586f183f1aab416268216577aec7306b" -dependencies = [ - "chrono", -] - [[package]] name = "tokio" version = "1.45.1" diff --git a/Cargo.toml b/Cargo.toml index 3596016..bf5f8e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,20 +32,27 @@ assets = [ ] [dependencies] +# Time types chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } +# CLI parsing clap = { version = "4.5.4", features = ["derive"] } -jrsonnet-evaluator = "0.4.2" +# Unix interfaces nix = { version = "0.29.0", features = ["signal"] } num_cpus = "1.16.0" +# Regex matching regex = "1.10.4" +# Configuration languages, ser/deserialisation serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" serde_yaml = "0.9.34" +jrsonnet-evaluator = "0.4.2" +# Error macro thiserror = "1.0.63" -timer = "0.2.0" +# Async runtime & helpers futures = "0.3.30" tokio = { version = "1.40.0", features = ["full", "tracing"] } tokio-util = { version = "0.7.12", features = ["codec"] } 
+# Async logging tracing = "0.1.40" tracing-subscriber = "0.3.18" From ae46932219db3164f4b7d754a4bd7d1400945287 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 26 Sep 2025 12:00:00 +0200 Subject: [PATCH 109/241] Fix workspace dependency --- Cargo.lock | 163 ++++++++++++++++++++++++++++++++++--- Cargo.toml | 9 ++ reaction-plugin/Cargo.toml | 2 +- 3 files changed, 162 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e4515ee..3cbb50a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -230,7 +230,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link", + "windows-link 0.1.1", ] [[package]] @@ -574,9 +574,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", "hashbrown", @@ -699,6 +699,16 @@ version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.0", +] + [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -934,6 +944,15 @@ dependencies = [ "termtree", ] +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + [[package]] name = "proc-macro2" version = "1.0.95" @@ -1000,14 +1019,17 @@ dependencies = [ "clap_mangen", "futures", "jrsonnet-evaluator", + "libloading", "nix", "num_cpus", "predicates", "rand", + 
"reaction-plugin", "regex", "serde", "serde_json", "serde_yaml", + "stabby", "tempfile", "thiserror", "tokio", @@ -1016,6 +1038,13 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "reaction-plugin" +version = "0.1.0" +dependencies = [ + "stabby", +] + [[package]] name = "redox_syscall" version = "0.5.12" @@ -1072,6 +1101,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "1.0.7" @@ -1113,19 +1151,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "serde" -version = "1.0.219" +name = "semver" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "serde" +version = "1.0.227" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80ece43fc6fbed4eb5392ab50c07334d3e577cbf40997ee896fe7af40bba4245" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.227" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a576275b607a2c86ea29e410193df32bc680303c82f31e275bbfcafe8b33be5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.227" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = 
"51e694923b8824cf0e9b382adf0f60d4e05f348f357b38833a3fa5ed7c2ede04" dependencies = [ "proc-macro2", "quote", @@ -1157,6 +1211,12 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "sha2-const-stable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" + [[package]] name = "sharded-slab" version = "0.1.7" @@ -1206,6 +1266,42 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "stabby" +version = "72.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976399a0c48ea769ef7f5dc303bb88240ab8d84008647a6b2303eced3dab3945" +dependencies = [ + "libloading", + "rustversion", + "stabby-abi", +] + +[[package]] +name = "stabby-abi" +version = "72.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7b54832a9a1f92a0e55e74a5c0332744426edc515bb3fbad82f10b874a87f0d" +dependencies = [ + "rustc_version", + "rustversion", + "sha2-const-stable", + "stabby-macros", +] + +[[package]] +name = "stabby-macros" +version = "72.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a768b1e51e4dbfa4fa52ae5c01241c0a41e2938fdffbb84add0c8238092f9091" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "rand", + "syn 1.0.109", +] + [[package]] name = "strsim" version = "0.11.1" @@ -1338,6 +1434,36 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml_datetime" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" +dependencies = [ + "indexmap", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name 
= "toml_parser" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" +dependencies = [ + "winnow", +] + [[package]] name = "tracing" version = "0.1.41" @@ -1568,7 +1694,7 @@ checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", - "windows-link", + "windows-link 0.1.1", "windows-result", "windows-strings", ] @@ -1601,13 +1727,19 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +[[package]] +name = "windows-link" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" + [[package]] name = "windows-result" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-link", + "windows-link 0.1.1", ] [[package]] @@ -1616,7 +1748,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-link", + "windows-link 0.1.1", ] [[package]] @@ -1701,6 +1833,15 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + [[package]] name = "wit-bindgen-rt" version = "0.39.0" diff --git a/Cargo.toml b/Cargo.toml index bf5f8e2..3a5640e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 
+55,10 @@ tokio-util = { version = "0.7.12", features = ["codec"] } # Async logging tracing = "0.1.40" tracing-subscriber = "0.3.18" +# Reaction plugin system +libloading = "0.8.9" +stabby = { workspace = true, features = ["libloading"] } +reaction-plugin = { path = "reaction-plugin" } [build-dependencies] clap = { version = "4.5.4", features = ["derive"] } @@ -69,3 +73,8 @@ tempfile = "3.12.0" assert_fs = "1.1.3" assert_cmd = "2.0.17" predicates = "3.1.3" + +[workspace] + +[workspace.dependencies] +stabby = { version = "72.1.1" } diff --git a/reaction-plugin/Cargo.toml b/reaction-plugin/Cargo.toml index c015504..a909eb1 100644 --- a/reaction-plugin/Cargo.toml +++ b/reaction-plugin/Cargo.toml @@ -4,4 +4,4 @@ version = "0.1.0" edition = "2024" [dependencies] -stabby = { version = "72.1.1", features = ["libloading"] } +stabby.workspace = true From 338aa8a8a256cdeed7b3d904fb190c269d6e04da Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 26 Sep 2025 12:00:00 +0200 Subject: [PATCH 110/241] Fix compilation error The lifetime compilation error disappears when the filter methods are async, so let's just do that for now --- reaction-plugin/src/lib.rs | 30 +++++++++++++++++------------- reaction-plugin/src/value.rs | 1 + 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs index 93ec74d..a1a51f7 100644 --- a/reaction-plugin/src/lib.rs +++ b/reaction-plugin/src/lib.rs @@ -7,6 +7,14 @@ /// To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides /// the entrypoint for a plugin. /// It permits to define 0 to n (stream, filter, action) custom types. +/// +/// It must also export a function that returns an impl of the trait: +/// ```rust +/// #[stabby::export(checked)] +/// extern "C" fn reaction_plugin() -> BoxedPluginInfo { +/// ... 
+/// } +/// ``` use stabby::{ boxed::Box, future::DynFuture, option::Option, result::Result, string::String, vec::Vec, }; @@ -26,7 +34,7 @@ pub trait PluginInfo { stream_name: String, stream_type: String, config: Value, - ) -> Result; + ) -> Result; /// Return all filter types that should be made available to reaction users extern "C" fn filter_impls(&self) -> Vec; @@ -37,7 +45,7 @@ pub trait PluginInfo { filter_name: String, filter_type: String, config: Value, - ) -> Result; + ) -> Result; /// Return all action types that should be made available to reaction users extern "C" fn action_impls(&self) -> Vec; @@ -49,21 +57,17 @@ pub trait PluginInfo { action_name: String, action_type: String, config: Value, - ) -> Result; + ) -> Result; /// Notify the plugin that setup is finished, permitting a last occasion to report an error /// (For example if a stream wants a companion action but it hasn't been initialized) extern "C" fn finish_setup(&mut self) -> Result<(), String>; } -pub enum PluginError { - InexistantType, - InitialisationError(String), -} - -type BoxedStreamImpl = stabby::dynptr!(Box); -type BoxedFilterImpl = stabby::dynptr!(Box); -type BoxedActionImpl = stabby::dynptr!(Box); +pub type BoxedPluginInfo = stabby::dynptr!(Box); +pub type BoxedStreamImpl = stabby::dynptr!(Box); +pub type BoxedFilterImpl = stabby::dynptr!(Box); +pub type BoxedActionImpl = stabby::dynptr!(Box); #[stabby::stabby(checked)] pub trait StreamImpl { @@ -73,8 +77,8 @@ pub trait StreamImpl { #[stabby::stabby(checked)] pub trait FilterImpl { - extern "C" fn matches(&mut self, line: String) -> bool; - extern "C" fn close(&mut self) -> Result<(), String>; + extern "C" fn matches<'a>(&'a mut self, line: String) -> DynFuture<'a, bool>; + extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; } #[stabby::stabby(checked)] diff --git a/reaction-plugin/src/value.rs b/reaction-plugin/src/value.rs index 34322b0..18c5a38 100644 --- a/reaction-plugin/src/value.rs +++ 
b/reaction-plugin/src/value.rs @@ -3,6 +3,7 @@ use stabby::{string::String, tuple::Tuple2, vec::Vec}; /// Represents a configuration value. /// This is not meant as an efficient type, but as a very flexible one. #[stabby::stabby] +#[repr(C, u8)] pub enum Value { Null, Bool(bool), From 05f30c3c572bbfbd588e1a0134cd4c4ed1210d50 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 27 Sep 2025 12:00:00 +0200 Subject: [PATCH 111/241] First WIP iteration on the plugin system, reaction side. Delaying the implementation of plugin Filters. I'm not sure it's useful, (apart from JSON, what can be done?) and it's likely to be more painful than the rest. I'll probably just implement one custom JSON Filter like I did with Pattern's IP support. --- reaction-plugin/src/lib.rs | 32 +++---- src/concepts/action.rs | 37 ++++++-- src/concepts/config.rs | 51 ++++++----- src/concepts/filter.rs | 17 +++- src/concepts/mod.rs | 68 +++++++++++++++ src/concepts/stream.rs | 67 ++++++++++----- src/daemon/mod.rs | 10 ++- src/lib.rs | 1 + src/plugin/mod.rs | 168 +++++++++++++++++++++++++++++++++++++ src/plugin/value.rs | 34 ++++++++ 10 files changed, 418 insertions(+), 67 deletions(-) create mode 100644 src/plugin/mod.rs create mode 100644 src/plugin/value.rs diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs index a1a51f7..36095d3 100644 --- a/reaction-plugin/src/lib.rs +++ b/reaction-plugin/src/lib.rs @@ -36,16 +36,16 @@ pub trait PluginInfo { config: Value, ) -> Result; - /// Return all filter types that should be made available to reaction users - extern "C" fn filter_impls(&self) -> Vec; - /// Return one instance of a given type. - extern "C" fn filter_impl( - &mut self, - stream_name: String, - filter_name: String, - filter_type: String, - config: Value, - ) -> Result; + // /// Return all filter types that should be made available to reaction users + // extern "C" fn filter_impls(&self) -> Vec; + // /// Return one instance of a given type. 
+ // extern "C" fn filter_impl( + // &mut self, + // stream_name: String, + // filter_name: String, + // filter_type: String, + // config: Value, + // ) -> Result; /// Return all action types that should be made available to reaction users extern "C" fn action_impls(&self) -> Vec; @@ -66,7 +66,7 @@ pub trait PluginInfo { pub type BoxedPluginInfo = stabby::dynptr!(Box); pub type BoxedStreamImpl = stabby::dynptr!(Box); -pub type BoxedFilterImpl = stabby::dynptr!(Box); +// pub type BoxedFilterImpl = stabby::dynptr!(Box); pub type BoxedActionImpl = stabby::dynptr!(Box); #[stabby::stabby(checked)] @@ -75,11 +75,11 @@ pub trait StreamImpl { extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; } -#[stabby::stabby(checked)] -pub trait FilterImpl { - extern "C" fn matches<'a>(&'a mut self, line: String) -> DynFuture<'a, bool>; - extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; -} +// #[stabby::stabby(checked)] +// pub trait FilterImpl { +// extern "C" fn matches<'a>(&'a mut self, line: String) -> DynFuture<'a, bool>; +// extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; +// } #[stabby::stabby(checked)] pub trait ActionImpl { diff --git a/src/concepts/action.rs b/src/concepts/action.rs index c63724e..772fb48 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -3,10 +3,12 @@ use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc}; use chrono::TimeDelta; use serde::{Deserialize, Serialize}; +use serde_json::Value; use tokio::process::Command; -use super::{parse_duration::*, PatternType}; -use super::{Match, Pattern}; +use crate::plugin::Plugins; + +use super::{null_value, parse_duration::*, Match, Pattern, PatternType}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] @@ -41,6 +43,12 @@ pub struct Action { pub filter_name: String, #[serde(skip)] pub stream_name: String, + + // Plugin-specific + #[serde(default, rename = "type")] + 
pub action_type: Option, + #[serde(default = "null_value")] + pub options: Value, } fn set_false() -> bool { @@ -82,11 +90,21 @@ impl Action { return Err("character '.' is not allowed in filter name".into()); } - if self.cmd.is_empty() { - return Err("cmd is empty".into()); - } - if self.cmd[0].is_empty() { - return Err("cmd's first item is empty".into()); + if self + .action_type + .as_ref() + .is_none_or(|stream_type| stream_type == "cmd") + { + if self.cmd.is_empty() { + return Err("cmd is empty".into()); + } + if self.cmd[0].is_empty() { + return Err("cmd's first item is empty".into()); + } + } else { + if !self.cmd.is_empty() { + return Err("can't define cmd and a plugin type".into()); + } } if let Some(after) = &self.after { @@ -118,6 +136,11 @@ impl Action { Ok(()) } + pub fn plugin_setup(&mut self, plugins: &mut Plugins) -> Result<(), String> { + // TODO self setup + Ok(()) + } + // TODO test pub fn exec(&self, match_: &Match) -> Command { let computed_command = if self.patterns.is_empty() { diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 4bee310..2bf1296 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use tracing::{debug, error, info, warn}; -use super::{Pattern, Stream}; +use super::{merge_attrs, Pattern, Stream}; pub type Patterns = BTreeMap>; @@ -24,6 +24,9 @@ pub struct Config { #[serde(default = "dot", skip_serializing_if = "String::is_empty")] pub state_directory: String, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub plugin_directories: Vec, + #[serde(default)] pub patterns: Patterns, @@ -73,25 +76,26 @@ impl Config { self.start.append(&mut other.start); self.stop.append(&mut other.stop); - if !(self.state_directory == dot() - || other.state_directory == dot() - || self.state_directory == other.state_directory) - { - return Err("state_directory have conflicting definitions".into()); - } - if self.state_directory == dot() 
{ - self.state_directory = other.state_directory; - } + self.state_directory = merge_attrs( + self.state_directory.clone(), + other.state_directory, + ".".into(), + "state_directory", + )?; - if !(self.concurrency == num_cpus::get() - || other.concurrency == num_cpus::get() - || self.concurrency == other.concurrency) - { - return Err("concurrency have conflicting definitions".into()); - } - if self.concurrency == num_cpus::get() { - self.concurrency = other.concurrency; - } + self.plugin_directories = merge_attrs( + self.plugin_directories.clone(), + other.plugin_directories, + Vec::default(), + "plugin_directories", + )?; + + self.concurrency = merge_attrs( + self.concurrency, + other.concurrency, + num_cpus::get(), + "concurrency", + )?; Ok(()) } @@ -104,6 +108,15 @@ impl Config { // Nullify this useless field self._definitions = serde_json::Value::Null; + for dir in &self.plugin_directories { + if dir.is_empty() { + return Err("can't specify empty plugin directory".into()); + } + if !dir.starts_with("/") { + return Err(format!("plugin directory paths must be absolute: {dir}")); + } + } + if self.patterns.is_empty() { return Err("no patterns configured".into()); } diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index fef8391..fdb656d 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -10,8 +10,9 @@ use chrono::TimeDelta; use regex::Regex; use serde::{Deserialize, Serialize}; -use super::{parse_duration, PatternType}; -use super::{Action, Match, Pattern, Patterns}; +use crate::plugin::Plugins; + +use super::{parse_duration, Action, Match, Pattern, PatternType, Patterns}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum Duplicate { @@ -58,6 +59,11 @@ pub struct Filter { pub name: String, #[serde(skip)] pub stream_name: String, + // // Plugin-specific + // #[serde(default, rename = "type")] + // pub filter_type: Option, + // #[serde(default = "null_value")] + // pub options: Value, } impl Filter { 
@@ -190,6 +196,13 @@ impl Filter { Ok(()) } + pub fn plugin_setup(&mut self, plugins: &mut Plugins) -> Result<(), String> { + for (_, action) in &mut self.actions { + action.plugin_setup(plugins)?; + } + Ok(()) + } + pub fn get_match(&self, line: &str) -> Option { for regex in &self.compiled_regex { if let Some(matches) = regex.captures(line) { diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index 40a624c..b059b62 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -5,12 +5,15 @@ mod parse_duration; mod pattern; mod stream; +use std::fmt::Debug; + pub use action::Action; pub use config::{Config, Patterns}; pub use filter::{Duplicate, Filter}; use parse_duration::parse_duration; pub use pattern::{Pattern, PatternType}; use serde::{Deserialize, Serialize}; +use serde_json::Value; pub use stream::Stream; use chrono::{DateTime, Local}; @@ -24,5 +27,70 @@ pub struct MatchTime { pub t: Time, } +fn merge_attrs( + this: A, + other: A, + default: A, + name: &str, +) -> Result { + if !(this == default || other == default || this == other) { + return Err(format!( + "'{name}' has conflicting definitions: '{this:?}', '{other:?}'" + )); + } + if this == default { + return Ok(other); + } + return Ok(this); +} + +fn null_value() -> Value { + Value::Null +} + #[cfg(test)] pub use filter::tests as filter_tests; + +#[cfg(test)] +mod tests { + use crate::concepts::merge_attrs; + + #[test] + fn test_merge_attrs() { + assert_eq!(merge_attrs(None::, None, None, "t"), Ok(None)); + assert_eq!( + merge_attrs(Some("coucou"), None, None, "t"), + Ok(Some("coucou")) + ); + assert_eq!( + merge_attrs(None, Some("coucou"), None, "t"), + Ok(Some("coucou")) + ); + assert_eq!( + merge_attrs(Some("coucou"), Some("coucou"), None, "t"), + Ok(Some("coucou")) + ); + assert_eq!( + merge_attrs(Some("coucou"), Some("hello"), None, "t"), + Err("'t' has conflicting definitions: 'Some(\"coucou\")', 'Some(\"hello\")'".into()) + ); + + assert_eq!(merge_attrs("", "", "", "t"), Ok("")); + 
assert_eq!(merge_attrs("coucou", "", "", "t"), Ok("coucou")); + assert_eq!(merge_attrs("", "coucou", "", "t"), Ok("coucou")); + assert_eq!(merge_attrs("coucou", "coucou", "", "t"), Ok("coucou")); + assert_eq!( + merge_attrs("coucou", "hello", "", "t"), + Err("'t' has conflicting definitions: '\"coucou\"', '\"hello\"'".into()) + ); + + assert_eq!(merge_attrs(0, 0, 0, "t"), Ok(0)); + assert_eq!(merge_attrs(5, 0, 0, "t"), Ok(5)); + assert_eq!(merge_attrs(0, 5, 0, "t"), Ok(5)); + assert_eq!(merge_attrs(5, 5, 0, "t"), Ok(5)); + assert_eq!( + merge_attrs(5, 6, 0, "t"), + Err("'t' has conflicting definitions: '5', '6'".into()) + ); + } +} diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 011652b..56e070e 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -1,8 +1,12 @@ use std::{cmp::Ordering, collections::BTreeMap, hash::Hash}; +use reaction_plugin::BoxedStreamImpl; use serde::{Deserialize, Serialize}; +use serde_json::Value; -use super::{Filter, Patterns}; +use crate::plugin::Plugins; + +use super::{merge_attrs, null_value, Filter, Patterns}; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] @@ -10,11 +14,18 @@ use super::{Filter, Patterns}; pub struct Stream { #[serde(default)] pub cmd: Vec, + #[serde(default)] pub filters: BTreeMap, #[serde(skip)] pub name: String, + + // Plugin-specific + #[serde(default, rename = "type")] + pub stream_type: Option, + #[serde(default = "null_value")] + pub options: Value, } impl Stream { @@ -23,13 +34,8 @@ impl Stream { } pub fn merge(&mut self, other: Stream) -> Result<(), String> { - if !(self.cmd.is_empty() || other.cmd.is_empty() || self.cmd == other.cmd) { - return Err("cmd has conflicting definitions".into()); - } - - if self.cmd.is_empty() { - self.cmd = other.cmd; - } + self.cmd = merge_attrs(self.cmd.clone(), other.cmd, Vec::default(), "cmd")?; + self.stream_type = merge_attrs(self.stream_type.clone(), other.stream_type, None, "type")?; for (key, filter) in 
other.filters.into_iter() { if self.filters.insert(key.clone(), filter).is_some() { @@ -40,6 +46,12 @@ impl Stream { Ok(()) } + fn is_plugin(&self) -> bool { + self.stream_type + .as_ref() + .is_some_and(|stream_type| stream_type != "cmd") + } + pub fn setup(&mut self, name: &str, patterns: &Patterns) -> Result<(), String> { self._setup(name, patterns) .map_err(|msg| format!("stream {}: {}", name, msg)) @@ -55,11 +67,17 @@ impl Stream { return Err("character '.' is not allowed in stream name".into()); } - if self.cmd.is_empty() { - return Err("cmd is empty".into()); - } - if self.cmd[0].is_empty() { - return Err("cmd's first item is empty".into()); + if !self.is_plugin() { + if self.cmd.is_empty() { + return Err("cmd is empty".into()); + } + if self.cmd[0].is_empty() { + return Err("cmd's first item is empty".into()); + } + } else { + if !self.cmd.is_empty() { + return Err("can't define cmd and a plugin type".into()); + } } if self.filters.is_empty() { @@ -72,6 +90,19 @@ impl Stream { Ok(()) } + + // FIXME Nan faut pas que je fasse ça là en fait, ça doit se passer côté StreamManager en fait + // j'pense + pub fn plugin_setup(&mut self, plugins: &mut Plugins) -> Result<(), String> { + if self.is_plugin() { + plugins.init_stream_impl(self.name, self.stream_type, self.options); + } + + for (_, filter) in &mut self.filters { + filter.plugin_setup(plugins)?; + } + Ok(()) + } } impl PartialEq for Stream { @@ -102,16 +133,8 @@ mod tests { use super::*; use crate::concepts::filter::tests::ok_filter; - fn default_stream() -> Stream { - Stream { - cmd: Vec::new(), - name: "".into(), - filters: BTreeMap::new(), - } - } - fn ok_stream() -> Stream { - let mut stream = default_stream(); + let mut stream = Stream::default(); stream.cmd = vec!["command".into()]; stream.filters.insert("name".into(), ok_filter()); stream diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index c68f8e2..547c9b8 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -35,6 +35,13 @@ pub async fn 
daemon( config_path: PathBuf, socket: PathBuf, ) -> Result<(), Box> { + // Je dois + // 1. Fusionner toute la config + // 2. Charger tous les plugins + // 3. Setup la config, avec les plugins + // 4. Supprimer la struct des plugins + // → En fait nan, les plugins c'est pas du static, c'est live, faut que ça vivent dans le + // daemon! Au même endroit que les Command sont lancées en fait ! let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?)); // Cancellation Token @@ -66,7 +73,8 @@ pub async fn daemon( let mut filter_managers = HashMap::new(); for filter in stream.filters.values() { let manager = - FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now).await?; + FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now) + .await?; filter_managers.insert(filter, manager); } state.insert(stream, filter_managers.clone()); diff --git a/src/lib.rs b/src/lib.rs index 3619f1a..fb3cb94 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,6 +13,7 @@ pub mod cli; pub mod client; pub mod concepts; pub mod daemon; +pub mod plugin; pub mod protocol; pub mod tests; pub mod treedb; diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs new file mode 100644 index 0000000..e1e3b8c --- /dev/null +++ b/src/plugin/mod.rs @@ -0,0 +1,168 @@ +use std::{collections::BTreeMap, path::PathBuf}; + +use reaction_plugin::{ + BoxedActionImpl, BoxedPluginInfo, BoxedStreamImpl, PluginInfoDyn, PluginInfoDynMut, +}; +use serde_json::Value; +use stabby::libloading::StabbyLibrary; +use tokio::{fs::read_dir, runtime::Handle}; +use value::to_stable_value; + +mod value; + +#[derive(Default)] +pub struct Plugins { + plugins: BTreeMap, + streams: BTreeMap, + // filters: BTreeMap, + actions: BTreeMap, +} + +impl Plugins { + pub async fn import(&mut self, plugin_directories: Vec) -> Result<(), String> { + for plugin_directory in plugin_directories { + let mut dir_entries = read_dir(&plugin_directory).await.map_err(|err| { + format!("Error 
reading plugin directory {plugin_directory}: {err}") + })?; + loop { + match dir_entries.next_entry().await { + Err(err) => { + return Err(format!( + "Error reading plugin directory {plugin_directory}: {err}" + )) + } + + Ok(None) => break, + + Ok(Some(entry)) => { + let filename = PathBuf::from(&plugin_directory).join(&entry.file_name()); + self.load_plugin(filename.clone()) + .await + .map_err(|err| format!("Error loading plugin {filename:?}: {err}"))?; + } + } + } + } + Ok(()) + } + + async fn load_plugin(&mut self, filename: PathBuf) -> Result<(), String> { + // TODO check ownership of file? + + let name = filename.to_string_lossy().to_string(); + // SAFETY This function is exposed by libloading as unsafe + // But we're (hopefully) gonna be safe with stabby <3 + #[allow(unsafe_code)] + let plugin = Handle::current() + .spawn_blocking(|| unsafe { libloading::Library::new(filename) }) + .await + // Join Error + .map_err(|err| err.to_string())? + // Libloading Error + .map_err(|err| err.to_string())?; + + // SAFETY This function is exposed by stabby as unsafe + // But we're (hopefully) gonna be safe <3 + #[allow(unsafe_code)] + let plugin_init = unsafe { + plugin.get_stabbied:: BoxedPluginInfo>(b"reaction_plugin") + }.map_err(|err| format!("expected entrypoint `fn reaction_plugin() -> BoxedPluginInfo` is either not present or malformed: {err}"))?; + + let plugin_info = plugin_init(); + + for stream in plugin_info.stream_impls() { + if let Some(name) = self.streams.insert(stream.clone().into(), name.clone()) { + return Err(format!( + "plugin {name} already exposed a stream with type name '{stream}'" + )); + } + } + + // for filter in plugin_info.filter_impls() { + // if let Some(name) = self.filters.insert(filter.clone().into(), name.clone()) { + // return Err(format!( + // "plugin {name} already exposed a filter with type name '{filter}'" + // )); + // } + // } + + for action in plugin_info.action_impls() { + if let Some(name) = 
self.actions.insert(action.clone().into(), name.clone()) { + return Err(format!( + "plugin {name} already exposed a action with type name '{action}'" + )); + } + } + + self.plugins.insert(name, plugin_info); + + Ok(()) + } + + pub fn finish_plugin_setup(self) -> Result<(), String> { + for mut plugin in self.plugins.into_values() { + // Didn't find a more elegant way to manipulate [`stabby::result::Result`] + let result = plugin.finish_setup(); + if result.is_err() { + return Err(result.unwrap_err().into()); + } + } + Ok(()) + } + + pub fn init_stream_impl( + &mut self, + stream_name: String, + stream_type: String, + config: Value, + ) -> Result { + let plugin_name = self + .streams + .get(&stream_type) + .ok_or(format!("No plugin provided a stream type '{stream_type}'"))?; + + let plugin = self.plugins.get_mut(plugin_name).unwrap(); + + let result = plugin.stream_impl( + stream_name.into(), + stream_type.into(), + to_stable_value(config), + ); + + if result.is_ok() { + Ok(result.unwrap()) + } else { + Err(result.err().unwrap().into()) + } + } + + pub fn init_action_impl( + &mut self, + stream_name: String, + filter_name: String, + action_name: String, + action_type: String, + config: Value, + ) -> Result { + let plugin_name = self + .actions + .get(&action_type) + .ok_or(format!("No plugin provided a action type '{action_type}'"))?; + + let plugin = self.plugins.get_mut(plugin_name).unwrap(); + + let result = plugin.action_impl( + stream_name.into(), + filter_name.into(), + action_name.into(), + action_type.into(), + to_stable_value(config), + ); + + if result.is_ok() { + Ok(result.unwrap()) + } else { + Err(result.err().unwrap().into()) + } + } +} diff --git a/src/plugin/value.rs b/src/plugin/value.rs new file mode 100644 index 0000000..e8b7d31 --- /dev/null +++ b/src/plugin/value.rs @@ -0,0 +1,34 @@ +use reaction_plugin::Value as RValue; +use serde_json::Value as JValue; +use stabby::{tuple::Tuple2, vec::Vec}; + +pub fn to_stable_value(val: JValue) -> RValue { + 
match val { + JValue::Null => RValue::Null, + JValue::Bool(b) => RValue::Bool(b), + JValue::Number(number) => { + if let Some(number) = number.as_i64() { + RValue::Integer(number) + } else if let Some(number) = number.as_f64() { + RValue::Float(number) + } else { + RValue::Null + } + } + JValue::String(s) => RValue::String(s.into()), + JValue::Array(v) => RValue::Array({ + let mut vec = Vec::with_capacity(v.len()); + for val in v { + vec.push(to_stable_value(val)); + } + vec + }), + JValue::Object(m) => RValue::Object({ + let mut map = Vec::with_capacity(m.len()); + for (key, val) in m { + map.push(Tuple2(key.into(), to_stable_value(val))); + } + map + }), + } +} From fc11234f121cbea8fd9df9c00db993f5f3abc8ca Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 28 Sep 2025 12:00:00 +0200 Subject: [PATCH 112/241] Loading plugin not on config side, but stream/action manager side Trying to implement this on the StreamManager first. I get lifetime errors that make no sense to me, like futures should hold any argument with 'static. I wonder if I should try to convert everything stabby to abi_stable & async_ffi. I'll try this and see if it solves anything. 
--- reaction-plugin/src/lib.rs | 12 ++++-- src/concepts/action.rs | 21 ++++------- src/concepts/filter.rs | 9 ----- src/concepts/stream.rs | 18 +-------- src/daemon/mod.rs | 13 +++---- src/daemon/stream.rs | 75 +++++++++++++++++++++++++++++++++++--- src/plugin/mod.rs | 2 +- 7 files changed, 92 insertions(+), 58 deletions(-) diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs index 36095d3..17abf77 100644 --- a/reaction-plugin/src/lib.rs +++ b/reaction-plugin/src/lib.rs @@ -65,13 +65,14 @@ pub trait PluginInfo { } pub type BoxedPluginInfo = stabby::dynptr!(Box); -pub type BoxedStreamImpl = stabby::dynptr!(Box); +pub type BoxedStreamImpl = stabby::dynptr!(Box); // pub type BoxedFilterImpl = stabby::dynptr!(Box); -pub type BoxedActionImpl = stabby::dynptr!(Box); +pub type BoxedActionImpl = stabby::dynptr!(Box); #[stabby::stabby(checked)] pub trait StreamImpl { - extern "C" fn next<'a>(&'a mut self) -> DynFuture<'a, Result>, String>>; + extern "C" fn start<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; + extern "C" fn next<'a>(&'a mut self) -> DynFuture<'a, Result, String>>; extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; } @@ -83,6 +84,9 @@ pub trait StreamImpl { #[stabby::stabby(checked)] pub trait ActionImpl { - extern "C" fn next<'a>(&'a mut self) -> DynFuture<'a, Result>, String>>; + extern "C" fn exec<'a>( + &'a mut self, + match_: Vec, + ) -> DynFuture<'a, Result>, String>>; extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; } diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 772fb48..98dc5f8 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -6,8 +6,6 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::process::Command; -use crate::plugin::Plugins; - use super::{null_value, parse_duration::*, Match, Pattern, PatternType}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] @@ -60,6 +58,12 @@ fn is_false(b: &bool) -> bool { 
} impl Action { + fn is_plugin(&self) -> bool { + self.action_type + .as_ref() + .is_some_and(|action_type| action_type != "cmd") + } + pub fn setup( &mut self, stream_name: &str, @@ -90,11 +94,7 @@ impl Action { return Err("character '.' is not allowed in filter name".into()); } - if self - .action_type - .as_ref() - .is_none_or(|stream_type| stream_type == "cmd") - { + if !self.is_plugin() { if self.cmd.is_empty() { return Err("cmd is empty".into()); } @@ -103,7 +103,7 @@ impl Action { } } else { if !self.cmd.is_empty() { - return Err("can't define cmd and a plugin type".into()); + return Err("can't define a cmd and a plugin type".into()); } } @@ -136,11 +136,6 @@ impl Action { Ok(()) } - pub fn plugin_setup(&mut self, plugins: &mut Plugins) -> Result<(), String> { - // TODO self setup - Ok(()) - } - // TODO test pub fn exec(&self, match_: &Match) -> Command { let computed_command = if self.patterns.is_empty() { diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index fdb656d..cf5e0d4 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -10,8 +10,6 @@ use chrono::TimeDelta; use regex::Regex; use serde::{Deserialize, Serialize}; -use crate::plugin::Plugins; - use super::{parse_duration, Action, Match, Pattern, PatternType, Patterns}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] @@ -196,13 +194,6 @@ impl Filter { Ok(()) } - pub fn plugin_setup(&mut self, plugins: &mut Plugins) -> Result<(), String> { - for (_, action) in &mut self.actions { - action.plugin_setup(plugins)?; - } - Ok(()) - } - pub fn get_match(&self, line: &str) -> Option { for regex in &self.compiled_regex { if let Some(matches) = regex.captures(line) { diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 56e070e..40c78d0 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -1,11 +1,8 @@ use std::{cmp::Ordering, collections::BTreeMap, hash::Hash}; -use reaction_plugin::BoxedStreamImpl; use serde::{Deserialize, 
Serialize}; use serde_json::Value; -use crate::plugin::Plugins; - use super::{merge_attrs, null_value, Filter, Patterns}; #[derive(Clone, Debug, Deserialize, Serialize)] @@ -46,7 +43,7 @@ impl Stream { Ok(()) } - fn is_plugin(&self) -> bool { + pub fn is_plugin(&self) -> bool { self.stream_type .as_ref() .is_some_and(|stream_type| stream_type != "cmd") @@ -90,19 +87,6 @@ impl Stream { Ok(()) } - - // FIXME Nan faut pas que je fasse ça là en fait, ça doit se passer côté StreamManager en fait - // j'pense - pub fn plugin_setup(&mut self, plugins: &mut Plugins) -> Result<(), String> { - if self.is_plugin() { - plugins.init_stream_impl(self.name, self.stream_type, self.options); - } - - for (_, filter) in &mut self.filters { - filter.plugin_setup(plugins)?; - } - Ok(()) - } } impl PartialEq for Stream { diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 547c9b8..c396b30 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -16,7 +16,7 @@ use tokio::{ }; use tracing::{debug, info}; -use crate::{concepts::Config, treedb::Database}; +use crate::{concepts::Config, plugin::Plugins, treedb::Database}; use filter::FilterManager; pub use filter::React; pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; @@ -35,15 +35,11 @@ pub async fn daemon( config_path: PathBuf, socket: PathBuf, ) -> Result<(), Box> { - // Je dois - // 1. Fusionner toute la config - // 2. Charger tous les plugins - // 3. Setup la config, avec les plugins - // 4. Supprimer la struct des plugins - // → En fait nan, les plugins c'est pas du static, c'est live, faut que ça vivent dans le - // daemon! Au même endroit que les Command sont lancées en fait ! 
let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?)); + let mut plugins = Plugins::default(); + plugins.import(&config.plugin_directories).await?; + // Cancellation Token let shutdown = ShutdownController::new(); @@ -83,6 +79,7 @@ pub async fn daemon( stream, filter_managers, shutdown.token(), + &mut plugins, )?); } (state, stream_managers) diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index a55d449..37f0328 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -6,6 +6,7 @@ use std::{ use chrono::Local; use futures::{FutureExt, Stream as AsyncStream, StreamExt}; +use reaction_plugin::{BoxedStreamImpl, StreamImplDynMut}; use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, @@ -17,6 +18,7 @@ use tracing::{error, info, warn}; use crate::{ concepts::{Filter, Stream}, daemon::filter::FilterManager, + plugin::Plugins, }; use super::shutdown::ShutdownToken; @@ -52,6 +54,7 @@ pub struct StreamManager { compiled_regex_set: RegexSet, regex_index_to_filter_manager: Vec, stream: &'static Stream, + stream_plugin: Option<&'static mut BoxedStreamImpl>, shutdown: ShutdownToken, } @@ -60,7 +63,8 @@ impl StreamManager { stream: &'static Stream, filter_managers: HashMap<&'static Filter, FilterManager>, shutdown: ShutdownToken, - ) -> Result { + plugins: &mut Plugins, + ) -> Result { let all_regexes: BTreeMap<_, _> = filter_managers .iter() .flat_map(|(filter, filter_manager)| { @@ -71,16 +75,71 @@ impl StreamManager { }) .collect(); + let stream_plugin = if stream.is_plugin() { + Some(Box::leak(Box::new(plugins.init_stream_impl( + stream.name.clone(), + stream.stream_type.clone().unwrap(), + stream.options.clone(), + )?))) + } else { + None + }; + Ok(StreamManager { - compiled_regex_set: RegexSet::new(all_regexes.keys())?, + compiled_regex_set: RegexSet::new(all_regexes.keys()).map_err(|err| err.to_string())?, regex_index_to_filter_manager: all_regexes.into_values().collect(), stream, + stream_plugin, shutdown, }) } 
pub async fn start(self) { info!("{}: start {:?}", self.stream.name, self.stream.cmd); + + if self.stream_plugin.is_some() { + self.start_plugin().await + } else { + self.start_cmd().await + } + } + + async fn start_plugin(self) { + let plugin = self.stream_plugin.unwrap(); + + { + let result = plugin.start().await; + if result.is_err() { + error!( + "could not execute stream {}: {}", + self.stream.name, + result.unwrap_err() + ); + return; + } + } + + loop { + let result = plugin.next().await; + let result = if result.is_ok() { + let option = result.unwrap(); + if option.is_some() { + self.handle_line(option.unwrap().to_string()).await; + } else { + return; + } + } else { + error!( + "impossible to read output from stream {}: {}", + self.stream.name, + result.unwrap_err() + ); + return; + }; + } + } + + async fn start_cmd(self) { let mut child = match Command::new(&self.stream.cmd[0]) .args(&self.stream.cmd[1..]) .stdin(Stdio::null()) @@ -171,10 +230,7 @@ impl StreamManager { loop { match lines.next().await { Some(Ok(line)) => { - let now = Local::now(); - for manager in self.matching_filters(&line) { - manager.handle_line(&line, now).await; - } + self.handle_line(line).await; } Some(Err(err)) => { error!( @@ -190,6 +246,13 @@ impl StreamManager { } } + async fn handle_line(&self, line: String) { + let now = Local::now(); + for manager in self.matching_filters(&line) { + manager.handle_line(&line, now).await; + } + } + fn matching_filters(&self, line: &str) -> BTreeSet<&FilterManager> { let matches = self.compiled_regex_set.matches(line); matches diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs index e1e3b8c..958a2cc 100644 --- a/src/plugin/mod.rs +++ b/src/plugin/mod.rs @@ -19,7 +19,7 @@ pub struct Plugins { } impl Plugins { - pub async fn import(&mut self, plugin_directories: Vec) -> Result<(), String> { + pub async fn import(&mut self, plugin_directories: &Vec) -> Result<(), String> { for plugin_directory in plugin_directories { let mut dir_entries = 
read_dir(&plugin_directory).await.map_err(|err| { format!("Error reading plugin directory {plugin_directory}: {err}") From a99dea4421cc0c76f96c8aab751870e584f557cb Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 1 Oct 2025 12:00:00 +0200 Subject: [PATCH 113/241] Adapt reaction-plugin to remoc --- Cargo.lock | 230 +++++++++++++++-------------------- Cargo.toml | 7 +- reaction-plugin/Cargo.toml | 3 +- reaction-plugin/src/lib.rs | 106 ++++++++-------- reaction-plugin/src/value.rs | 15 --- 5 files changed, 156 insertions(+), 205 deletions(-) delete mode 100644 reaction-plugin/src/value.rs diff --git a/Cargo.lock b/Cargo.lock index 3cbb50a..a71929d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,6 +191,12 @@ version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.10.1" @@ -230,7 +236,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link 0.1.1", + "windows-link", ] [[package]] @@ -699,16 +705,6 @@ version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" -[[package]] -name = "libloading" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" -dependencies = [ - "cfg-if", - "windows-link 0.2.0", -] - [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -905,6 +901,15 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "postbag" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "02aa900208f326b4fa5d7943ede192c1265a1519e7132aa6760e3440a1f4ceb0" +dependencies = [ + "serde", +] + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -944,15 +949,6 @@ dependencies = [ "termtree", ] -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit", -] - [[package]] name = "proc-macro2" version = "1.0.95" @@ -984,8 +980,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -995,7 +1001,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -1007,6 +1023,15 @@ dependencies = [ "getrandom 0.2.16", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + [[package]] name = "reaction" version = "2.2.1" @@ -1019,17 +1044,15 @@ dependencies = 
[ "clap_mangen", "futures", "jrsonnet-evaluator", - "libloading", "nix", "num_cpus", "predicates", - "rand", + "rand 0.8.5", "reaction-plugin", "regex", - "serde", + "remoc", "serde_json", "serde_yaml", - "stabby", "tempfile", "thiserror", "tokio", @@ -1042,7 +1065,8 @@ dependencies = [ name = "reaction-plugin" version = "0.1.0" dependencies = [ - "stabby", + "remoc", + "serde", ] [[package]] @@ -1083,6 +1107,36 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +[[package]] +name = "remoc" +version = "0.18.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0491961ac4bc1ac4191743aa58a2ce778f4725693d29743fae957b2cf45f77f0" +dependencies = [ + "byteorder", + "bytes", + "futures", + "postbag", + "rand 0.9.2", + "remoc_macro", + "serde", + "tokio", + "tokio-util", + "tracing", + "uuid", +] + +[[package]] +name = "remoc_macro" +version = "0.18.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89479d9d87f65ef573faf0167dd0a9f40d3a63fd95e7a2935d662fa57dbc30d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "roff" version = "0.2.2" @@ -1101,15 +1155,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc_version" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" -dependencies = [ - "semver", -] - [[package]] name = "rustix" version = "1.0.7" @@ -1150,12 +1195,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "semver" -version = "1.0.27" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" - [[package]] name = "serde" version = "1.0.227" @@ -1211,12 +1250,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "sha2-const-stable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" - [[package]] name = "sharded-slab" version = "0.1.7" @@ -1266,42 +1299,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "stabby" -version = "72.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976399a0c48ea769ef7f5dc303bb88240ab8d84008647a6b2303eced3dab3945" -dependencies = [ - "libloading", - "rustversion", - "stabby-abi", -] - -[[package]] -name = "stabby-abi" -version = "72.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b54832a9a1f92a0e55e74a5c0332744426edc515bb3fbad82f10b874a87f0d" -dependencies = [ - "rustc_version", - "rustversion", - "sha2-const-stable", - "stabby-macros", -] - -[[package]] -name = "stabby-macros" -version = "72.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a768b1e51e4dbfa4fa52ae5c01241c0a41e2938fdffbb84add0c8238092f9091" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "rand", - "syn 1.0.109", -] - [[package]] name = "strsim" version = "0.11.1" @@ -1434,36 +1431,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml_datetime" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_edit" -version = "0.23.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" -dependencies = [ - 
"indexmap", - "toml_datetime", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" -dependencies = [ - "winnow", -] - [[package]] name = "tracing" version = "0.1.41" @@ -1557,6 +1524,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +dependencies = [ + "getrandom 0.3.3", + "js-sys", + "serde", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -1694,7 +1673,7 @@ checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.1.1", + "windows-link", "windows-result", "windows-strings", ] @@ -1727,19 +1706,13 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" -[[package]] -name = "windows-link" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" - [[package]] name = "windows-result" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-link 0.1.1", + "windows-link", ] [[package]] @@ -1748,7 +1721,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-link 0.1.1", + "windows-link", ] 
[[package]] @@ -1833,15 +1806,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] - [[package]] name = "wit-bindgen-rt" version = "0.39.0" diff --git a/Cargo.toml b/Cargo.toml index 3a5640e..07bd33c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,6 @@ num_cpus = "1.16.0" # Regex matching regex = "1.10.4" # Configuration languages, ser/deserialisation -serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" serde_yaml = "0.9.34" jrsonnet-evaluator = "0.4.2" @@ -56,8 +55,7 @@ tokio-util = { version = "0.7.12", features = ["codec"] } tracing = "0.1.40" tracing-subscriber = "0.3.18" # Reaction plugin system -libloading = "0.8.9" -stabby = { workspace = true, features = ["libloading"] } +remoc = { workspace = true } reaction-plugin = { path = "reaction-plugin" } [build-dependencies] @@ -77,4 +75,5 @@ predicates = "3.1.3" [workspace] [workspace.dependencies] -stabby = { version = "72.1.1" } +remoc = { version = "0.18.3" } +serde = { version = "1.0.203", features = ["derive"] } diff --git a/reaction-plugin/Cargo.toml b/reaction-plugin/Cargo.toml index a909eb1..96e50e7 100644 --- a/reaction-plugin/Cargo.toml +++ b/reaction-plugin/Cargo.toml @@ -4,4 +4,5 @@ version = "0.1.0" edition = "2024" [dependencies] -stabby.workspace = true +remoc.workspace = true +serde.workspace = true diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs index 17abf77..bd9696e 100644 --- a/reaction-plugin/src/lib.rs +++ b/reaction-plugin/src/lib.rs @@ -1,92 +1,94 @@ -/// This crate permits to define a stable ABI between reaction's core and plugins. +/// This crate defines the API between reaction's core and plugins. 
/// -/// It uses [`stabby`], that rewrites essential std types as stable repr("C") types, -/// and permits to check a lof of safeness related issues at compile time and at runtime, when -/// loading a library. +/// It's based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait +/// calls over a single transport channel. /// /// To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides /// the entrypoint for a plugin. /// It permits to define 0 to n (stream, filter, action) custom types. -/// -/// It must also export a function that returns an impl of the trait: -/// ```rust -/// #[stabby::export(checked)] -/// extern "C" fn reaction_plugin() -> BoxedPluginInfo { -/// ... -/// } -/// ``` -use stabby::{ - boxed::Box, future::DynFuture, option::Option, result::Result, string::String, vec::Vec, -}; - -mod value; -pub use value::Value; +use remoc::{rch, rtc}; +use serde::{Deserialize, Serialize}; /// This is the only trait that **must** be implemented by a plugin. /// It provides lists of stream, filter and action types implemented by a dynamic plugin. -#[stabby::stabby(checked)] +#[rtc::remote] pub trait PluginInfo { /// Return all stream types that should be made available to reaction users - extern "C" fn stream_impls(&self) -> Vec; - /// Return one instance of a given type. - extern "C" fn stream_impl( + async fn stream_impls(&self) -> RemoteResult>; + /// Return one stream of a given type if it exists + async fn stream_impl( &mut self, stream_name: String, stream_type: String, config: Value, - ) -> Result; + ) -> RemoteResult>; // /// Return all filter types that should be made available to reaction users - // extern "C" fn filter_impls(&self) -> Vec; - // /// Return one instance of a given type. 
- // extern "C" fn filter_impl( + // async fn filter_impls(&self) -> RemoteResult>; + // /// Return one stream of a given type if it exists + // async fn filter_impl( // &mut self, // stream_name: String, // filter_name: String, - // filter_type: String, + // stream_type: String, // config: Value, - // ) -> Result; + // ) -> RemoteResult>; /// Return all action types that should be made available to reaction users - extern "C" fn action_impls(&self) -> Vec; + async fn action_impls(&self) -> RemoteResult>; /// Return one instance of a given type. - extern "C" fn action_impl( + async fn action_impl( &mut self, stream_name: String, filter_name: String, action_name: String, action_type: String, config: Value, - ) -> Result; + ) -> RemoteResult>; /// Notify the plugin that setup is finished, permitting a last occasion to report an error /// (For example if a stream wants a companion action but it hasn't been initialized) - extern "C" fn finish_setup(&mut self) -> Result<(), String>; + async fn finish_setup(&mut self) -> RemoteResult>; } -pub type BoxedPluginInfo = stabby::dynptr!(Box); -pub type BoxedStreamImpl = stabby::dynptr!(Box); -// pub type BoxedFilterImpl = stabby::dynptr!(Box); -pub type BoxedActionImpl = stabby::dynptr!(Box); +pub type RemoteResult = Result; -#[stabby::stabby(checked)] -pub trait StreamImpl { - extern "C" fn start<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; - extern "C" fn next<'a>(&'a mut self) -> DynFuture<'a, Result, String>>; - extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; +/// Represents a configuration value. +/// This is not meant as an efficient type, but as a very flexible one. 
+#[derive(Serialize, Deserialize)] +pub enum Value { + Null, + Bool(bool), + Integer(i64), + Float(f64), + String(String), + Array(Vec), + Object(Vec<(String, Value)>), } -// #[stabby::stabby(checked)] -// pub trait FilterImpl { -// extern "C" fn matches<'a>(&'a mut self, line: String) -> DynFuture<'a, bool>; -// extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; +#[derive(Serialize, Deserialize)] +pub struct StreamImpl { + pub stream: rch::lr::Receiver>, +} + +// #[derive(Serialize, Deserialize)] +// pub struct FilterImpl { +// pub stream: rch::lr::Sender, // } -#[stabby::stabby(checked)] -pub trait ActionImpl { - extern "C" fn exec<'a>( - &'a mut self, - match_: Vec, - ) -> DynFuture<'a, Result>, String>>; - extern "C" fn close<'a>(&'a mut self) -> DynFuture<'a, Result<(), String>>; +// #[derive(Serialize, Deserialize)] +// pub struct Match { +// pub match_: String, +// pub result: rch::oneshot::Sender, +// } + +#[derive(Serialize, Deserialize)] +pub struct ActionImpl { + pub stream: rch::lr::Sender, +} + +#[derive(Serialize, Deserialize)] +pub struct Exec { + pub match_: Vec, + pub result: rch::oneshot::Sender, } diff --git a/reaction-plugin/src/value.rs b/reaction-plugin/src/value.rs deleted file mode 100644 index 18c5a38..0000000 --- a/reaction-plugin/src/value.rs +++ /dev/null @@ -1,15 +0,0 @@ -use stabby::{string::String, tuple::Tuple2, vec::Vec}; - -/// Represents a configuration value. -/// This is not meant as an efficient type, but as a very flexible one. 
-#[stabby::stabby] -#[repr(C, u8)] -pub enum Value { - Null, - Bool(bool), - Integer(i64), - Float(f64), - String(String), - Array(Vec), - Object(Vec>), -} From d887acf27e72930fd95c525f18d201aea861f4ce Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 3 Oct 2025 12:00:00 +0200 Subject: [PATCH 114/241] Adapt Config and plugin loading daemon::Stream integration TBD --- Cargo.lock | 1 + Cargo.toml | 1 + reaction-plugin/src/lib.rs | 6 +- src/concepts/config.rs | 20 +--- src/concepts/mod.rs | 2 + src/concepts/plugin.rs | 45 ++++++++ src/daemon/mod.rs | 7 +- src/daemon/plugin/mod.rs | 182 +++++++++++++++++++++++++++++++ src/{ => daemon}/plugin/value.rs | 7 +- src/daemon/stream.rs | 11 +- src/lib.rs | 1 - src/plugin/mod.rs | 168 ---------------------------- 12 files changed, 253 insertions(+), 198 deletions(-) create mode 100644 src/concepts/plugin.rs create mode 100644 src/daemon/plugin/mod.rs rename src/{ => daemon}/plugin/value.rs (85%) delete mode 100644 src/plugin/mod.rs diff --git a/Cargo.lock b/Cargo.lock index a71929d..e1cfd21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1051,6 +1051,7 @@ dependencies = [ "reaction-plugin", "regex", "remoc", + "serde", "serde_json", "serde_yaml", "tempfile", diff --git a/Cargo.toml b/Cargo.toml index 07bd33c..6ce6c63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ num_cpus = "1.16.0" # Regex matching regex = "1.10.4" # Configuration languages, ser/deserialisation +serde.workspace = true serde_json = "1.0.117" serde_yaml = "0.9.34" jrsonnet-evaluator = "0.4.2" diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs index bd9696e..10b7f49 100644 --- a/reaction-plugin/src/lib.rs +++ b/reaction-plugin/src/lib.rs @@ -1,3 +1,5 @@ +use std::collections::BTreeMap; + /// This crate defines the API between reaction's core and plugins. 
/// /// It's based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait @@ -63,7 +65,7 @@ pub enum Value { Float(f64), String(String), Array(Vec), - Object(Vec<(String, Value)>), + Object(BTreeMap), } #[derive(Serialize, Deserialize)] @@ -92,3 +94,5 @@ pub struct Exec { pub match_: Vec, pub result: rch::oneshot::Sender, } + +// TODO write main function here? diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 2bf1296..94e0df7 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use tracing::{debug, error, info, warn}; -use super::{merge_attrs, Pattern, Stream}; +use super::{merge_attrs, Pattern, Plugin, Stream}; pub type Patterns = BTreeMap>; @@ -25,7 +25,7 @@ pub struct Config { pub state_directory: String, #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub plugin_directories: Vec, + pub plugins: Vec, #[serde(default)] pub patterns: Patterns, @@ -83,13 +83,6 @@ impl Config { "state_directory", )?; - self.plugin_directories = merge_attrs( - self.plugin_directories.clone(), - other.plugin_directories, - Vec::default(), - "plugin_directories", - )?; - self.concurrency = merge_attrs( self.concurrency, other.concurrency, @@ -108,13 +101,8 @@ impl Config { // Nullify this useless field self._definitions = serde_json::Value::Null; - for dir in &self.plugin_directories { - if dir.is_empty() { - return Err("can't specify empty plugin directory".into()); - } - if !dir.starts_with("/") { - return Err(format!("plugin directory paths must be absolute: {dir}")); - } + for plugin in &mut self.plugins { + plugin.setup()?; } if self.patterns.is_empty() { diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index b059b62..44b999d 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -3,6 +3,7 @@ mod config; mod filter; mod parse_duration; mod pattern; +mod plugin; mod stream; use std::fmt::Debug; @@ -12,6 +13,7 @@ pub use 
config::{Config, Patterns}; pub use filter::{Duplicate, Filter}; use parse_duration::parse_duration; pub use pattern::{Pattern, PatternType}; +pub use plugin::{Plugin}; use serde::{Deserialize, Serialize}; use serde_json::Value; pub use stream::Stream; diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs new file mode 100644 index 0000000..f4da976 --- /dev/null +++ b/src/concepts/plugin.rs @@ -0,0 +1,45 @@ +use std::{collections::BTreeMap, process::Stdio}; + +use serde::{Deserialize, Serialize}; +use tokio::process::{Child, Command}; + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[cfg_attr(test, derive(Default))] +#[serde(deny_unknown_fields)] +pub struct Plugin { + pub path: String, + /// If empty, defaults to root + pub file_owner: Option, + /// If empty, defaults to root + pub exec_user: Option, + /// If empty, hash is not performed + pub sha256: Option, + /// Option for `run0`. Do not provide User. + pub systemd_options: Option>>, +} + +// NOTE +// `run0` can be used for security customisation. +// with the --pipe option, raw stdio fd are transmitted to the underlying command, so there is no overhead. 
+ +impl Plugin { + pub fn setup(&mut self) -> Result<(), String> { + if self.path.is_empty() { + return Err("can't specify empty plugin path".into()); + } + if !self.path.starts_with("/") { + return Err(format!("plugin paths must be absolute: {}", self.path)); + } + Ok(()) + } + + pub fn launch(&self) -> Result { + // TODO owner check + // TODO hash check + // TODO run0 options + Command::new(&self.path) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn() + } +} diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index c396b30..80307ce 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -16,9 +16,10 @@ use tokio::{ }; use tracing::{debug, info}; -use crate::{concepts::Config, plugin::Plugins, treedb::Database}; +use crate::{concepts::Config, treedb::Database}; use filter::FilterManager; pub use filter::React; +use plugin::Plugins; pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; use socket::Socket; use stream::StreamManager; @@ -27,6 +28,7 @@ use stream::StreamManager; pub use filter::tests; mod filter; +mod plugin; mod shutdown; mod socket; mod stream; @@ -37,8 +39,7 @@ pub async fn daemon( ) -> Result<(), Box> { let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?)); - let mut plugins = Plugins::default(); - plugins.import(&config.plugin_directories).await?; + let mut plugins = Plugins::new(&config.plugins).await?; // Cancellation Token let shutdown = ShutdownController::new(); diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs new file mode 100644 index 0000000..a9c4e9b --- /dev/null +++ b/src/daemon/plugin/mod.rs @@ -0,0 +1,182 @@ +use std::{ + collections::BTreeMap, + ops::{Deref, DerefMut}, +}; + +use reaction_plugin::{ActionImpl, PluginInfo, PluginInfoClient, StreamImpl}; +use remoc::Connect; +use serde_json::Value; +use tokio::process::Child; + +use crate::concepts::Plugin; + +mod value; + +use value::to_stable_value; + +pub struct PluginManager { + child: Child, + plugin: 
&'static Plugin, + plugin_info: PluginInfoClient, +} + +impl Deref for PluginManager { + type Target = PluginInfoClient; + fn deref(&self) -> &Self::Target { + &self.plugin_info + } +} + +impl DerefMut for PluginManager { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.plugin_info + } +} + +impl PluginManager { + async fn new(plugin: &'static Plugin) -> Result { + let mut child = plugin + .launch() + .map_err(|err| format!("could not launch plugin: {err}"))?; + + let stdin = child.stdin.take().unwrap(); + let stdout = child.stdout.take().unwrap(); + + let (conn, _tx, mut rx): ( + _, + remoc::rch::base::Sender<()>, + remoc::rch::base::Receiver, + ) = Connect::io_buffered(remoc::Cfg::default(), stdout, stdin, 2048) + .await + .map_err(|err| format!("could not init communication with plugin: {err}"))?; + + tokio::spawn(conn); + + let plugin_info = rx + .recv() + .await + .map_err(|err| format!("could not retrieve initial information from plugin: {err}"))? + .ok_or("could not retrieve initial information from plugin: no data")?; + + Ok(Self { + child, + plugin, + plugin_info, + }) + } +} + +#[derive(Default)] +pub struct Plugins { + plugins: BTreeMap, + streams: BTreeMap, + actions: BTreeMap, +} + +impl Plugins { + pub async fn new(plugins: &'static Vec) -> Result { + let mut this = Self::default(); + + for plugin in plugins { + let path = plugin.path.clone(); + this.load_plugin(&plugin) + .await + .map_err(|err| format!("plugin {path}: {err}]"))?; + } + + Ok(this) + } + + async fn load_plugin(&mut self, plugin: &'static Plugin) -> Result<(), String> { + let path = plugin.path.clone(); + let manager = PluginManager::new(plugin).await?; + + for stream in manager + .stream_impls() + .await + .map_err(|err| format!("plugin error: {err}"))? 
+ { + if let Some(path) = self.streams.insert(stream.clone().into(), path.clone()) { + return Err(format!( + "plugin {path} already exposed a stream with type name '{stream}'" + )); + } + } + + for action in manager + .action_impls() + .await + .map_err(|err| format!("plugin error: {err}"))? + { + if let Some(path) = self.actions.insert(action.clone().into(), path.clone()) { + return Err(format!( + "plugin {path} already exposed a action with type name '{action}'" + )); + } + } + + self.plugins.insert(path, manager); + Ok(()) + } + + pub async fn finish_plugin_setup(self) -> Result<(), String> { + for mut plugin in self.plugins.into_values() { + plugin + .finish_setup() + .await + .map_err(|err| format!("plugin error: {err}"))? + .map_err(|err| format!("invalid config for plugin: {err}"))?; + } + Ok(()) + } + + pub async fn init_stream_impl( + &mut self, + stream_name: String, + stream_type: String, + config: Value, + ) -> Result { + let plugin_name = self + .streams + .get(&stream_type) + .ok_or(format!("No plugin provided a stream type '{stream_type}'"))?; + + let plugin = self.plugins.get_mut(plugin_name).unwrap(); + + plugin + .stream_impl( + stream_name.into(), + stream_type.into(), + to_stable_value(config), + ) + .await + .map_err(|err| format!("plugin error: {err}"))? + } + + pub async fn init_action_impl( + &mut self, + stream_name: String, + filter_name: String, + action_name: String, + action_type: String, + config: Value, + ) -> Result { + let plugin_name = self + .actions + .get(&action_type) + .ok_or(format!("No plugin provided a action type '{action_type}'"))?; + + let plugin = self.plugins.get_mut(plugin_name).unwrap(); + + plugin + .action_impl( + stream_name.into(), + filter_name.into(), + action_name.into(), + action_type.into(), + to_stable_value(config), + ) + .await + .map_err(|err| format!("plugin error: {err}"))? 
+ } +} diff --git a/src/plugin/value.rs b/src/daemon/plugin/value.rs similarity index 85% rename from src/plugin/value.rs rename to src/daemon/plugin/value.rs index e8b7d31..e62c498 100644 --- a/src/plugin/value.rs +++ b/src/daemon/plugin/value.rs @@ -1,6 +1,7 @@ +use std::collections::BTreeMap; + use reaction_plugin::Value as RValue; use serde_json::Value as JValue; -use stabby::{tuple::Tuple2, vec::Vec}; pub fn to_stable_value(val: JValue) -> RValue { match val { @@ -24,9 +25,9 @@ pub fn to_stable_value(val: JValue) -> RValue { vec }), JValue::Object(m) => RValue::Object({ - let mut map = Vec::with_capacity(m.len()); + let mut map = BTreeMap::new(); for (key, val) in m { - map.push(Tuple2(key.into(), to_stable_value(val))); + map.insert(key.into(), to_stable_value(val)); } map }), diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index 37f0328..f5b2630 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -6,7 +6,7 @@ use std::{ use chrono::Local; use futures::{FutureExt, Stream as AsyncStream, StreamExt}; -use reaction_plugin::{BoxedStreamImpl, StreamImplDynMut}; +use reaction_plugin::StreamImpl; use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, @@ -17,8 +17,7 @@ use tracing::{error, info, warn}; use crate::{ concepts::{Filter, Stream}, - daemon::filter::FilterManager, - plugin::Plugins, + daemon::{filter::FilterManager, plugin::Plugins}, }; use super::shutdown::ShutdownToken; @@ -54,7 +53,7 @@ pub struct StreamManager { compiled_regex_set: RegexSet, regex_index_to_filter_manager: Vec, stream: &'static Stream, - stream_plugin: Option<&'static mut BoxedStreamImpl>, + stream_plugin: Option, shutdown: ShutdownToken, } @@ -76,11 +75,11 @@ impl StreamManager { .collect(); let stream_plugin = if stream.is_plugin() { - Some(Box::leak(Box::new(plugins.init_stream_impl( + Some(plugins.init_stream_impl( stream.name.clone(), stream.stream_type.clone().unwrap(), stream.options.clone(), - )?))) + )?) 
} else { None }; diff --git a/src/lib.rs b/src/lib.rs index fb3cb94..3619f1a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,7 +13,6 @@ pub mod cli; pub mod client; pub mod concepts; pub mod daemon; -pub mod plugin; pub mod protocol; pub mod tests; pub mod treedb; diff --git a/src/plugin/mod.rs b/src/plugin/mod.rs deleted file mode 100644 index 958a2cc..0000000 --- a/src/plugin/mod.rs +++ /dev/null @@ -1,168 +0,0 @@ -use std::{collections::BTreeMap, path::PathBuf}; - -use reaction_plugin::{ - BoxedActionImpl, BoxedPluginInfo, BoxedStreamImpl, PluginInfoDyn, PluginInfoDynMut, -}; -use serde_json::Value; -use stabby::libloading::StabbyLibrary; -use tokio::{fs::read_dir, runtime::Handle}; -use value::to_stable_value; - -mod value; - -#[derive(Default)] -pub struct Plugins { - plugins: BTreeMap, - streams: BTreeMap, - // filters: BTreeMap, - actions: BTreeMap, -} - -impl Plugins { - pub async fn import(&mut self, plugin_directories: &Vec) -> Result<(), String> { - for plugin_directory in plugin_directories { - let mut dir_entries = read_dir(&plugin_directory).await.map_err(|err| { - format!("Error reading plugin directory {plugin_directory}: {err}") - })?; - loop { - match dir_entries.next_entry().await { - Err(err) => { - return Err(format!( - "Error reading plugin directory {plugin_directory}: {err}" - )) - } - - Ok(None) => break, - - Ok(Some(entry)) => { - let filename = PathBuf::from(&plugin_directory).join(&entry.file_name()); - self.load_plugin(filename.clone()) - .await - .map_err(|err| format!("Error loading plugin {filename:?}: {err}"))?; - } - } - } - } - Ok(()) - } - - async fn load_plugin(&mut self, filename: PathBuf) -> Result<(), String> { - // TODO check ownership of file? 
- - let name = filename.to_string_lossy().to_string(); - // SAFETY This function is exposed by libloading as unsafe - // But we're (hopefully) gonna be safe with stabby <3 - #[allow(unsafe_code)] - let plugin = Handle::current() - .spawn_blocking(|| unsafe { libloading::Library::new(filename) }) - .await - // Join Error - .map_err(|err| err.to_string())? - // Libloading Error - .map_err(|err| err.to_string())?; - - // SAFETY This function is exposed by stabby as unsafe - // But we're (hopefully) gonna be safe <3 - #[allow(unsafe_code)] - let plugin_init = unsafe { - plugin.get_stabbied:: BoxedPluginInfo>(b"reaction_plugin") - }.map_err(|err| format!("expected entrypoint `fn reaction_plugin() -> BoxedPluginInfo` is either not present or malformed: {err}"))?; - - let plugin_info = plugin_init(); - - for stream in plugin_info.stream_impls() { - if let Some(name) = self.streams.insert(stream.clone().into(), name.clone()) { - return Err(format!( - "plugin {name} already exposed a stream with type name '{stream}'" - )); - } - } - - // for filter in plugin_info.filter_impls() { - // if let Some(name) = self.filters.insert(filter.clone().into(), name.clone()) { - // return Err(format!( - // "plugin {name} already exposed a filter with type name '{filter}'" - // )); - // } - // } - - for action in plugin_info.action_impls() { - if let Some(name) = self.actions.insert(action.clone().into(), name.clone()) { - return Err(format!( - "plugin {name} already exposed a action with type name '{action}'" - )); - } - } - - self.plugins.insert(name, plugin_info); - - Ok(()) - } - - pub fn finish_plugin_setup(self) -> Result<(), String> { - for mut plugin in self.plugins.into_values() { - // Didn't find a more elegant way to manipulate [`stabby::result::Result`] - let result = plugin.finish_setup(); - if result.is_err() { - return Err(result.unwrap_err().into()); - } - } - Ok(()) - } - - pub fn init_stream_impl( - &mut self, - stream_name: String, - stream_type: String, - config: Value, 
- ) -> Result { - let plugin_name = self - .streams - .get(&stream_type) - .ok_or(format!("No plugin provided a stream type '{stream_type}'"))?; - - let plugin = self.plugins.get_mut(plugin_name).unwrap(); - - let result = plugin.stream_impl( - stream_name.into(), - stream_type.into(), - to_stable_value(config), - ); - - if result.is_ok() { - Ok(result.unwrap()) - } else { - Err(result.err().unwrap().into()) - } - } - - pub fn init_action_impl( - &mut self, - stream_name: String, - filter_name: String, - action_name: String, - action_type: String, - config: Value, - ) -> Result { - let plugin_name = self - .actions - .get(&action_type) - .ok_or(format!("No plugin provided a action type '{action_type}'"))?; - - let plugin = self.plugins.get_mut(plugin_name).unwrap(); - - let result = plugin.action_impl( - stream_name.into(), - filter_name.into(), - action_name.into(), - action_type.into(), - to_stable_value(config), - ); - - if result.is_ok() { - Ok(result.unwrap()) - } else { - Err(result.err().unwrap().into()) - } - } -} From 147a4623b215d06a8c9cf3efcb6560cef351307c Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 5 Oct 2025 12:00:00 +0200 Subject: [PATCH 115/241] First building version of reaction with plugins --- reaction-plugin/src/lib.rs | 10 +-- src/concepts/action.rs | 2 +- src/daemon/filter/mod.rs | 121 +++++++++++++++++++++++++++--------- src/daemon/mod.rs | 42 +++++++------ src/daemon/plugin/mod.rs | 104 +++++++++++++++++++++++++------ src/daemon/stream.rs | 122 +++++++++++++------------------------ src/daemon/utils.rs | 44 +++++++++++++ 7 files changed, 295 insertions(+), 150 deletions(-) create mode 100644 src/daemon/utils.rs diff --git a/reaction-plugin/src/lib.rs b/reaction-plugin/src/lib.rs index 10b7f49..a46343e 100644 --- a/reaction-plugin/src/lib.rs +++ b/reaction-plugin/src/lib.rs @@ -51,6 +51,8 @@ pub trait PluginInfo { /// Notify the plugin that setup is finished, permitting a last occasion to report an error /// (For example if a stream wants a 
companion action but it hasn't been initialized) async fn finish_setup(&mut self) -> RemoteResult>; + + async fn close(mut self) -> RemoteResult<()>; } pub type RemoteResult = Result; @@ -70,7 +72,7 @@ pub enum Value { #[derive(Serialize, Deserialize)] pub struct StreamImpl { - pub stream: rch::lr::Receiver>, + pub stream: rch::mpsc::Receiver>, } // #[derive(Serialize, Deserialize)] @@ -84,15 +86,15 @@ pub struct StreamImpl { // pub result: rch::oneshot::Sender, // } -#[derive(Serialize, Deserialize)] +#[derive(Clone, Serialize, Deserialize)] pub struct ActionImpl { - pub stream: rch::lr::Sender, + pub sender: rch::mpsc::Sender, } #[derive(Serialize, Deserialize)] pub struct Exec { pub match_: Vec, - pub result: rch::oneshot::Sender, + pub result: rch::oneshot::Sender>, } // TODO write main function here? diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 98dc5f8..0252df6 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -58,7 +58,7 @@ fn is_false(b: &bool) -> bool { } impl Action { - fn is_plugin(&self) -> bool { + pub fn is_plugin(&self) -> bool { self.action_type .as_ref() .is_some_and(|action_type| action_type != "cmd") diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index b847586..9bbe66c 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -5,12 +5,14 @@ mod state; use std::{collections::BTreeMap, process::Stdio, sync::Arc}; +use reaction_plugin::ActionImpl; use regex::Regex; use tokio::sync::{Mutex, MutexGuard, Semaphore}; use tracing::{error, info}; use crate::{ concepts::{Action, Duplicate, Filter, Match, Pattern, Time}, + daemon::plugin::Plugins, protocol::{Order, PatternStatus}, treedb::Database, }; @@ -30,6 +32,8 @@ pub struct FilterManager { exec_limit: Option>, /// Permits to run pending actions on shutdown shutdown: ShutdownToken, + /// Action Plugins + action_plugins: BTreeMap<&'static String, ActionImpl>, /// Inner state. 
/// Protected by a [`Mutex`], permitting FilterManager to be cloned /// and concurrently owned by its stream manager, the socket manager, @@ -55,16 +59,31 @@ impl FilterManager { exec_limit: Option>, shutdown: ShutdownToken, db: &mut Database, + plugins: &mut Plugins, now: Time, ) -> Result { + let mut action_plugins = BTreeMap::default(); + for (action_name, action) in filter.actions.iter().filter(|action| action.1.is_plugin()) { + action_plugins.insert( + action_name, + plugins + .init_action_impl( + action.stream_name.clone(), + action.filter_name.clone(), + action.name.clone(), + action.action_type.clone().unwrap(), + action.options.clone(), + ) + .await?, + ); + } let this = Self { filter, exec_limit, shutdown, + action_plugins, state: Arc::new(Mutex::new(State::new(filter, db, now).await?)), }; - this.clear_past_triggers_and_schedule_future_actions(now) - .await; Ok(this) } @@ -221,12 +240,7 @@ impl FilterManager { // Execute the action early if let Order::Flush = order { - exec_now( - &self.exec_limit, - self.shutdown.clone(), - action, - m.clone(), - ).await; + self.exec_now(action, m.clone()); } } } @@ -270,10 +284,11 @@ impl FilterManager { if exec_time <= now { if state.decrement_trigger(&m, t, false).await { - exec_now(&self.exec_limit, self.shutdown.clone(), action, m).await; + self.exec_now(action, m); } } else { let this = self.clone(); + let action_impl = self.action_plugins.get(&action.name).cloned(); tokio::spawn(async move { let dur = (exec_time - now) .to_std() @@ -291,7 +306,7 @@ impl FilterManager { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = this.state.lock().await; if state.decrement_trigger(&m, t, exiting).await { - exec_now(&this.exec_limit, this.shutdown, action, m).await; + exec_now(&this.exec_limit, this.shutdown, action, action_impl, m); } } }); @@ -299,7 +314,8 @@ impl FilterManager { } } - async fn clear_past_triggers_and_schedule_future_actions(&self, now: Time) { + /// Clear past triggers and schedule 
future actions + pub async fn start(&self, now: Time) { let longuest_action_duration = self.filter.longuest_action_duration; let number_of_actions = self .filter @@ -349,37 +365,84 @@ impl FilterManager { } } } + + fn exec_now(&self, action: &'static Action, m: Match) { + let action_impl = self.action_plugins.get(&action.name).cloned(); + exec_now( + &self.exec_limit, + self.shutdown.clone(), + action, + action_impl, + m, + ) + } } -async fn exec_now( +fn exec_now( exec_limit: &Option>, shutdown: ShutdownToken, action: &'static Action, + action_impl: Option, m: Match, ) { let exec_limit = exec_limit.clone(); tokio::spawn(async move { // Move ShutdownToken in task let _shutdown = shutdown; - // Wait for semaphore's permission, if it is Some - let _permit = match exec_limit { - #[allow(clippy::unwrap_used)] // We know the semaphore is not closed - Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()), - None => None, - }; - // Construct command - let mut command = action.exec(&m); + match action_impl { + Some(action_impl) => { + info!( + "{action}: run {}", + action.action_type.clone().unwrap_or_default(), + ); - info!("{}: run [{:?}]", &action, command.as_std()); - if let Err(err) = command - .stdin(Stdio::null()) - .stderr(Stdio::null()) - .stdout(Stdio::piped()) - .status() - .await - { - error!("{}: run [{:?}], code {}", &action, command.as_std(), err); + // Sending action + let (response_tx, response_rx) = remoc::rch::oneshot::channel(); + if let Err(err) = action_impl + .sender + .send(reaction_plugin::Exec { + match_: m, + result: response_tx, + }) + .await + { + error!("{action}: communication with plugin failed: {err}"); + return; + } + + // Receiving response + match response_rx.await { + Ok(Ok(())) => (), + Ok(Err(err)) => error!( + "{action}: run {}: {err}", + action.action_type.clone().unwrap_or_default(), + ), + Err(err) => error!("{action}: communication with plugin failed: {err}"), + } + } + None => { + // Wait for semaphore's permission, 
if it is Some + let _permit = match exec_limit { + #[allow(clippy::unwrap_used)] // We know the semaphore is not closed + Some(semaphore) => Some(semaphore.acquire_owned().await.unwrap()), + None => None, + }; + + // Construct command + let mut command = action.exec(&m); + + info!("{action}: run [{:?}]", command.as_std()); + if let Err(err) = command + .stdin(Stdio::null()) + .stderr(Stdio::null()) + .stdout(Stdio::piped()) + .status() + .await + { + error!("{action}: run [{:?}], code {err}", command.as_std()); + } + } } }); } diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 80307ce..94b416e 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -9,6 +9,7 @@ use std::{ }; use chrono::Local; +use futures::future::join_all; use tokio::{ select, signal::unix::{signal, SignalKind}, @@ -32,6 +33,7 @@ mod plugin; mod shutdown; mod socket; mod stream; +mod utils; pub async fn daemon( config_path: PathBuf, @@ -39,11 +41,11 @@ pub async fn daemon( ) -> Result<(), Box> { let config: &'static Config = Box::leak(Box::new(Config::from_path(&config_path)?)); - let mut plugins = Plugins::new(&config.plugins).await?; - // Cancellation Token let shutdown = ShutdownController::new(); + let mut plugins = Plugins::new(&config.plugins, shutdown.token()).await?; + // Open Database let mut db = Database::open(config, shutdown.token()).await?; @@ -69,40 +71,44 @@ pub async fn daemon( for stream in config.streams.values() { let mut filter_managers = HashMap::new(); for filter in stream.filters.values() { - let manager = - FilterManager::new(filter, exec_limit.clone(), shutdown.token(), &mut db, now) - .await?; + let manager = FilterManager::new( + filter, + exec_limit.clone(), + shutdown.token(), + &mut db, + &mut plugins, + now, + ) + .await?; filter_managers.insert(filter, manager); } state.insert(stream, filter_managers.clone()); - stream_managers.push(StreamManager::new( - stream, - filter_managers, - shutdown.token(), - &mut plugins, - )?); + stream_managers.push( + 
StreamManager::new(stream, filter_managers, shutdown.token(), &mut plugins).await?, + ); } (state, stream_managers) }; + // Finish plugin setup + plugins.finish_setup().await?; + plugins.manager(); + // Run socket task socket.manager(config, state, shutdown.token()); // Start Stream managers - let mut stream_task_handles = Vec::new(); - for stream_manager in stream_managers { - stream_task_handles.push(tokio::spawn(async move { stream_manager.start().await })); - } + let stream_task_handles = stream_managers + .into_iter() + .map(|stream_manager| tokio::spawn(async move { stream_manager.start().await })); // Close streams when we receive a quit signal let signal_received = Arc::new(AtomicBool::new(false)); handle_signals(shutdown.delegate(), signal_received.clone())?; // Wait for all streams to quit - for task_handle in stream_task_handles { - let _ = task_handle.await; - } + join_all(stream_task_handles).await; // Release last db's sender let mut db_status = db.quit(); diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index a9c4e9b..79c2b05 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -1,14 +1,20 @@ use std::{ collections::BTreeMap, ops::{Deref, DerefMut}, + time::Duration, }; +use futures::{future::join_all, FutureExt}; use reaction_plugin::{ActionImpl, PluginInfo, PluginInfoClient, StreamImpl}; use remoc::Connect; use serde_json::Value; -use tokio::process::Child; +use tokio::{process::Child, time::sleep}; +use tracing::error; -use crate::concepts::Plugin; +use crate::{ + concepts::Plugin, + daemon::{utils::kill_child, ShutdownToken}, +}; mod value; @@ -16,6 +22,7 @@ use value::to_stable_value; pub struct PluginManager { child: Child, + shutdown: ShutdownToken, plugin: &'static Plugin, plugin_info: PluginInfoClient, } @@ -34,7 +41,7 @@ impl DerefMut for PluginManager { } impl PluginManager { - async fn new(plugin: &'static Plugin) -> Result { + async fn new(plugin: &'static Plugin, shutdown: ShutdownToken) -> Result { 
let mut child = plugin .launch() .map_err(|err| format!("could not launch plugin: {err}"))?; @@ -48,7 +55,12 @@ impl PluginManager { remoc::rch::base::Receiver, ) = Connect::io_buffered(remoc::Cfg::default(), stdout, stdin, 2048) .await - .map_err(|err| format!("could not init communication with plugin: {err}"))?; + .map_err(|err| { + format!( + "could not init communication with plugin {}: {err}", + plugin.path + ) + })?; tokio::spawn(conn); @@ -60,10 +72,35 @@ impl PluginManager { Ok(Self { child, + shutdown, plugin, plugin_info, }) } + + async fn handle_child(mut self) { + const PLUGIN_STOP_GRACE_TIME: u64 = 15; + + // wait either for the child process to exit on its own or for the shutdown signal + futures::select! { + _ = self.child.wait().fuse() => { + error!("plugin {} exited: its command returned.", self.plugin.path); + return; + } + _ = self.shutdown.wait().fuse() => {} + } + + futures::select! { + _ = self.plugin_info.close().fuse() => { + return; + }, + _ = sleep(Duration::from_secs(PLUGIN_STOP_GRACE_TIME)).fuse() => { + error!("plugin {} did not respond to close request in time, killing", self.plugin.path) + }, + } + + kill_child(self.child, format!("plugin {}", self.plugin.path), 5).await; + } } #[derive(Default)] @@ -74,12 +111,15 @@ pub struct Plugins { } impl Plugins { - pub async fn new(plugins: &'static Vec) -> Result { + pub async fn new( + plugins: &'static Vec, + shutdown: ShutdownToken, + ) -> Result { let mut this = Self::default(); for plugin in plugins { let path = plugin.path.clone(); - this.load_plugin(&plugin) + this.load_plugin(&plugin, shutdown.clone()) .await .map_err(|err| format!("plugin {path}: {err}]"))?; } @@ -87,9 +127,13 @@ impl Plugins { Ok(this) } - async fn load_plugin(&mut self, plugin: &'static Plugin) -> Result<(), String> { + async fn load_plugin( + &mut self, + plugin: &'static Plugin, + shutdown: ShutdownToken, + ) -> Result<(), String> { let path = plugin.path.clone(); - let manager = 
PluginManager::new(plugin).await?; + let manager = PluginManager::new(plugin, shutdown).await?; for stream in manager .stream_impls() @@ -119,17 +163,6 @@ impl Plugins { Ok(()) } - pub async fn finish_plugin_setup(self) -> Result<(), String> { - for mut plugin in self.plugins.into_values() { - plugin - .finish_setup() - .await - .map_err(|err| format!("plugin error: {err}"))? - .map_err(|err| format!("invalid config for plugin: {err}"))?; - } - Ok(()) - } - pub async fn init_stream_impl( &mut self, stream_name: String, @@ -179,4 +212,37 @@ impl Plugins { .await .map_err(|err| format!("plugin error: {err}"))? } + + pub async fn finish_setup(&mut self) -> Result<(), String> { + // Finish setup of all plugins + join_all( + self.plugins + .values_mut() + .map(|plugin_manager| plugin_manager.finish_setup()), + ) + .await + // Convert Vec> into Result + .into_iter() + .zip(self.plugins.values()) + .map(|(result, plugin_manager)| { + result + .map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.path)) + .map_err(|err| { + format!( + "invalid config for plugin {}: {err}", + plugin_manager.plugin.path + ) + }) + .flatten() + }) + .collect::>() + } + + pub fn manager(self) { + for plugin in self.plugins.into_values() { + tokio::spawn(async move { + plugin.handle_child().await; + }); + } + } } diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index f5b2630..bf5c189 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -1,23 +1,21 @@ use std::{ collections::{BTreeMap, BTreeSet, HashMap}, process::Stdio, - time::Duration, }; use chrono::Local; -use futures::{FutureExt, Stream as AsyncStream, StreamExt}; +use futures::{future::join_all, FutureExt, Stream as AsyncStream, StreamExt}; use reaction_plugin::StreamImpl; use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, process::{Child, ChildStderr, ChildStdout, Command}, - time::sleep, }; -use tracing::{error, info, warn}; +use tracing::{error, info}; use crate::{ concepts::{Filter, 
Stream}, - daemon::{filter::FilterManager, plugin::Plugins}, + daemon::{filter::FilterManager, plugin::Plugins, utils::kill_child}, }; use super::shutdown::ShutdownToken; @@ -58,7 +56,7 @@ pub struct StreamManager { } impl StreamManager { - pub fn new( + pub async fn new( stream: &'static Stream, filter_managers: HashMap<&'static Filter, FilterManager>, shutdown: ShutdownToken, @@ -75,11 +73,15 @@ impl StreamManager { .collect(); let stream_plugin = if stream.is_plugin() { - Some(plugins.init_stream_impl( - stream.name.clone(), - stream.stream_type.clone().unwrap(), - stream.options.clone(), - )?) + Some( + plugins + .init_stream_impl( + stream.name.clone(), + stream.stream_type.clone().unwrap(), + stream.options.clone(), + ) + .await?, + ) } else { None }; @@ -94,6 +96,16 @@ impl StreamManager { } pub async fn start(self) { + // First start FilterManagers persisted actions + let now = Local::now(); + join_all( + self.regex_index_to_filter_manager + .iter() + .map(|filter_manager| filter_manager.start(now)), + ) + .await; + + // Then start stream info!("{}: start {:?}", self.stream.name, self.stream.cmd); if self.stream_plugin.is_some() { @@ -103,38 +115,29 @@ impl StreamManager { } } - async fn start_plugin(self) { - let plugin = self.stream_plugin.unwrap(); - - { - let result = plugin.start().await; - if result.is_err() { - error!( - "could not execute stream {}: {}", - self.stream.name, - result.unwrap_err() - ); - return; - } - } + async fn start_plugin(mut self) { + let mut plugin = self.stream_plugin.take().unwrap(); loop { - let result = plugin.next().await; - let result = if result.is_ok() { - let option = result.unwrap(); - if option.is_some() { - self.handle_line(option.unwrap().to_string()).await; - } else { + match plugin.stream.recv().await { + Ok(Some(Ok(line))) => { + self.handle_line(line).await; + } + Ok(Some(Err(err))) => { + error!("stream {} exit with error: {}", self.stream.name, err); return; } - } else { - error!( - "impossible to read output 
from stream {}: {}", - self.stream.name, - result.unwrap_err() - ); - return; - }; + Err(err) => { + error!( + "impossible to read output from stream {}: {}", + self.stream.name, err + ); + return; + } + Ok(None) => { + return; + } + } } } @@ -168,9 +171,6 @@ impl StreamManager { } async fn handle_child(&self, mut child: Child) { - const STREAM_PROCESS_GRACE_TIME_SEC: u64 = 15; - const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5; - // wait either for the child process to exit on its own or for the shutdown signal futures::select! { _ = child.wait().fuse() => { @@ -180,43 +180,7 @@ impl StreamManager { _ = self.shutdown.wait().fuse() => {} } - // first, try to ask nicely the child process to exit - if let Some(pid) = child.id() { - let pid = nix::unistd::Pid::from_raw(pid as i32); - - // the most likely error is that the process does not exist anymore - // but we still need to reclaim it with Child::wait - let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM); - - futures::select! { - _ = child.wait().fuse() => { - return; - }, - _ = sleep(Duration::from_secs(STREAM_PROCESS_GRACE_TIME_SEC)).fuse() => {}, - } - } else { - warn!( - "could not get PID of child process for stream {}", - self.stream.name - ); - // still try to use tokio API to kill and reclaim the child process - } - - // if that fails, or we cannot get the underlying PID, terminate the process. - // NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a - // syscall to a resource no-longer available (a notorious example is a read on a disconnected - // NFS share) - - // as before, the only expected error is that the child process already terminated - // but we still need to reclaim it if that's the case. - let _ = child.start_kill(); - - futures::select! 
{ - _ = child.wait().fuse() => {} - _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => { - error!("child process of stream {} did not terminate", self.stream.name); - } - } + kill_child(child, format!("stream {}", self.stream.name), 15).await; } async fn handle_io(&self, child_stdout: ChildStdout, child_stderr: ChildStderr) { diff --git a/src/daemon/utils.rs b/src/daemon/utils.rs new file mode 100644 index 0000000..0ef3986 --- /dev/null +++ b/src/daemon/utils.rs @@ -0,0 +1,44 @@ +use std::time::Duration; + +use futures::FutureExt; +use tokio::{process::Child, time::sleep}; +use tracing::{error, warn}; + +pub async fn kill_child(mut child: Child, context: String, grace_time_sec: u64) { + const STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC: u64 = 5; + + // first, try to ask nicely the child process to exit + if let Some(pid) = child.id() { + let pid = nix::unistd::Pid::from_raw(pid as i32); + + // the most likely error is that the process does not exist anymore + // but we still need to reclaim it with Child::wait + let _ = nix::sys::signal::kill(pid, nix::sys::signal::SIGTERM); + + futures::select! { + _ = child.wait().fuse() => { + return; + }, + _ = sleep(Duration::from_secs(grace_time_sec)).fuse() => {}, + } + } else { + warn!("could not get PID of child process for {}", context); + // still try to use tokio API to kill and reclaim the child process + } + + // if that fails, or we cannot get the underlying PID, terminate the process. + // NOTE: processes killed with SIGKILL are not guaranteed to exit. They can be locked up in a + // syscall to a resource no-longer available (a notorious example is a read on a disconnected + // NFS share) + + // as before, the only expected error is that the child process already terminated + // but we still need to reclaim it if that's the case. + let _ = child.start_kill(); + + futures::select! 
{ + _ = child.wait().fuse() => {} + _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => { + error!("child process of {} did not terminate", context); + } + } +} From 160d27f13af414285fafb48f8590d801f8bd7125 Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 5 Oct 2025 12:00:00 +0200 Subject: [PATCH 116/241] Fix tests --- src/concepts/action.rs | 4 ++-- src/concepts/stream.rs | 4 ++-- src/daemon/filter/tests.rs | 21 ++++++++++++++------- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 0252df6..70a89bc 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -43,9 +43,9 @@ pub struct Action { pub stream_name: String, // Plugin-specific - #[serde(default, rename = "type")] + #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")] pub action_type: Option, - #[serde(default = "null_value")] + #[serde(default = "null_value", skip_serializing_if = "Value::is_null")] pub options: Value, } diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 40c78d0..84bea87 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -19,9 +19,9 @@ pub struct Stream { pub name: String, // Plugin-specific - #[serde(default, rename = "type")] + #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")] pub stream_type: Option, - #[serde(default = "null_value")] + #[serde(default = "null_value", skip_serializing_if = "Value::is_null")] pub options: Value, } diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index baccfd0..0deb880 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -16,7 +16,7 @@ use super::{ }; use crate::{ concepts::{Action, Duplicate, Filter, Pattern, Patterns, Time}, - daemon::shutdown::ShutdownController, + daemon::{plugin::Plugins, shutdown::ShutdownController}, tests::TempDatabase, }; @@ -89,7 +89,7 @@ impl TestBed { }; let controller = ShutdownController::new(); let 
semaphore = Arc::new(Semaphore::new(1)); - TestBed2 { + let test_bed2 = TestBed2 { _out_path: self._out_path, out_file: self.out_file, az_patterns: self.az_patterns, @@ -100,12 +100,15 @@ impl TestBed { Some(semaphore.clone()), controller.token(), &mut db, + &mut Plugins::default(), now, ) .await .unwrap(), semaphore, - } + }; + test_bed2.manager.start(now).await; + test_bed2 } } @@ -435,7 +438,8 @@ async fn one_db_match_one_runtime_match_one_action() { let db = TempDatabase::from_loaded_db(HashMap::from([( filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])).await; + )])) + .await; // Finish setup let bed = bed.part2(filter, now, Some(db)).await; @@ -503,7 +507,8 @@ async fn one_outdated_db_match() { let db = TempDatabase::from_loaded_db(HashMap::from([( filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])).await; + )])) + .await; // Finish setup let bed = bed.part2(filter, now, Some(db)).await; @@ -633,7 +638,8 @@ async fn trigger_matched_pattern() { let db = TempDatabase::from_loaded_db(HashMap::from([( filter_ordered_times_db_name(filter), HashMap::from([(now1s.to_rfc3339().into(), one.clone().into())]), - )])).await; + )])) + .await; let bed = bed.part2(filter, now, Some(db)).await; bed.manager @@ -720,7 +726,8 @@ async fn trigger_deduplication_on_start() { now2s.to_rfc3339(): 1, }), )]), - )])).await; + )])) + .await; let bed = bed.part2(filter, now, Some(db)).await; // the action executes From f08762c3f37833cab78d96c9346cebc9fc25d07f Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 3 Oct 2025 12:00:00 +0200 Subject: [PATCH 117/241] First shot of "virtual stream" plugin --- Cargo.lock | 10 + Cargo.toml | 6 +- plugins/reaction-plugin-virtual/Cargo.toml | 9 + plugins/reaction-plugin-virtual/src/main.rs | 241 ++++++++++++++++++ .../reaction-plugin}/Cargo.lock | 0 .../reaction-plugin}/Cargo.toml | 1 + .../reaction-plugin}/src/lib.rs | 26 +- 
src/daemon/filter/mod.rs | 7 +- src/daemon/plugin/mod.rs | 1 + 9 files changed, 295 insertions(+), 6 deletions(-) create mode 100644 plugins/reaction-plugin-virtual/Cargo.toml create mode 100644 plugins/reaction-plugin-virtual/src/main.rs rename {reaction-plugin => plugins/reaction-plugin}/Cargo.lock (100%) rename {reaction-plugin => plugins/reaction-plugin}/Cargo.toml (71%) rename {reaction-plugin => plugins/reaction-plugin}/src/lib.rs (81%) diff --git a/Cargo.lock b/Cargo.lock index e1cfd21..ef0928a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1068,6 +1068,16 @@ version = "0.1.0" dependencies = [ "remoc", "serde", + "tokio", +] + +[[package]] +name = "reaction-plugin-virtual" +version = "0.1.0" +dependencies = [ + "reaction-plugin", + "remoc", + "tokio", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6ce6c63..6e953a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,14 +50,14 @@ jrsonnet-evaluator = "0.4.2" thiserror = "1.0.63" # Async runtime & helpers futures = "0.3.30" -tokio = { version = "1.40.0", features = ["full", "tracing"] } +tokio = { workspace = true, features = ["full", "tracing"] } tokio-util = { version = "0.7.12", features = ["codec"] } # Async logging tracing = "0.1.40" tracing-subscriber = "0.3.18" # Reaction plugin system remoc = { workspace = true } -reaction-plugin = { path = "reaction-plugin" } +reaction-plugin = { path = "plugins/reaction-plugin" } [build-dependencies] clap = { version = "4.5.4", features = ["derive"] } @@ -74,7 +74,9 @@ assert_cmd = "2.0.17" predicates = "3.1.3" [workspace] +members = ["plugins/reaction-plugin", "plugins/reaction-plugin-virtual"] [workspace.dependencies] remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } +tokio = { version = "1.40.0" } diff --git a/plugins/reaction-plugin-virtual/Cargo.toml b/plugins/reaction-plugin-virtual/Cargo.toml new file mode 100644 index 0000000..55ce41c --- /dev/null +++ b/plugins/reaction-plugin-virtual/Cargo.toml @@ -0,0 +1,9 @@ 
+[package] +name = "reaction-plugin-virtual" +version = "0.1.0" +edition = "2024" + +[dependencies] +tokio = { workspace = true, features = ["rt-multi-thread"] } +remoc.workspace = true +reaction-plugin.path = "../reaction-plugin" diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs new file mode 100644 index 0000000..330a0b2 --- /dev/null +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -0,0 +1,241 @@ +use std::collections::BTreeMap; + +use reaction_plugin::{ActionImpl, Exec, Line, PluginInfo, RemoteResult, StreamImpl, Value}; +use remoc::rch::mpsc; + +#[tokio::main] +async fn main() { + let plugin = Plugin::default(); + reaction_plugin::main_loop(plugin).await; +} + +#[derive(Default)] +struct Plugin { + streams: BTreeMap, + actions_init: Vec, +} + +impl PluginInfo for Plugin { + async fn stream_impls(&self) -> RemoteResult> { + Ok(vec!["virtual".into()]) + } + + async fn action_impls(&self) -> RemoteResult> { + Ok(vec!["virtual".into()]) + } + + async fn stream_impl( + &mut self, + stream_name: String, + stream_type: String, + config: Value, + ) -> RemoteResult> { + if stream_type != "virtual" { + return Ok(Err( + "This plugin can't handle other stream types than virtual".into(), + )); + } + + let (virtual_stream, receiver) = match VirtualStream::new(config) { + Ok(v) => v, + Err(err) => return Ok(Err(err)), + }; + + if let Some(_) = self.streams.insert(stream_name, virtual_stream) { + return Ok(Err( + "this virtual stream has already been initialized".into() + )); + } + + Ok(Ok(StreamImpl { stream: receiver })) + } + + async fn action_impl( + &mut self, + stream_name: String, + filter_name: String, + action_name: String, + action_type: String, + config: Value, + patterns: Vec, + ) -> RemoteResult> { + if &action_type != "virtual" { + return Ok(Err( + "This plugin can't handle other stream types than virtual".into(), + )); + } + + let (virtual_action_init, tx) = + match VirtualActionInit::new(stream_name, 
filter_name, action_name, config, patterns) { + Ok(v) => v, + Err(err) => return Ok(Err(err)), + }; + + self.actions_init.push(virtual_action_init); + Ok(Ok(ActionImpl { tx })) + } + + async fn finish_setup(&mut self) -> RemoteResult> { + while let Some(action_init) = self.actions_init.pop() { + match self.streams.get(&action_init.to) { + Some(virtual_stream) => { + let virtual_stream = virtual_stream.clone(); + tokio::spawn(async move { + VirtualAction::from(action_init, virtual_stream) + .serve() + .await + }); + } + None => { + return Ok(Err(format!( + "action {}.{}.{}: send \"{}\" matches no stream name", + action_init.stream_name, + action_init.filter_name, + action_init.action_name, + action_init.to + ))); + } + } + } + self.streams = BTreeMap::new(); + self.actions_init = Vec::new(); + + Ok(Ok(())) + } + + async fn close(self) -> RemoteResult<()> { + Ok(()) + } +} + +#[derive(Clone)] +struct VirtualStream { + tx: mpsc::Sender>, +} + +impl VirtualStream { + fn new(config: Value) -> Result<(Self, mpsc::Receiver), String> { + const CONFIG_ERROR: &'static str = "streams of type virtual take no options"; + match config { + Value::Null => (), + Value::Object(map) => { + if map.len() != 0 { + return Err(CONFIG_ERROR.into()); + } + } + _ => return Err(CONFIG_ERROR.into()), + } + + let (tx, rx) = mpsc::channel(0); + Ok((Self { tx }, rx)) + } +} + +struct VirtualActionInit { + stream_name: String, + filter_name: String, + action_name: String, + rx: mpsc::Receiver, + patterns: Vec, + send: String, + to: String, +} + +impl VirtualActionInit { + fn new( + stream_name: String, + filter_name: String, + action_name: String, + config: Value, + patterns: Vec, + ) -> Result<(Self, mpsc::Sender), String> { + let send; + let to; + match config { + Value::Object(mut map) => { + send = match map.remove("send") { + Some(Value::String(value)) => value, + _ => return Err("`send` must be a string to send to the corresponding virtual stream, example: \"ban \"".into()), + }; + + to = 
match map.remove("to") { + Some(Value::String(value)) => value, + _ => return Err("`to` must be the name of the corresponding virtual stream, example: \"my_stream\"".into()), + }; + + if map.len() != 0 { + return Err( + "actions of type virtual accept only `send` and `to` options".into(), + ); + } + } + _ => { + return Err("actions of type virtual require `send` and `to` options".into()); + } + } + + let patterns = patterns + .into_iter() + .map(|pattern| format!("<{pattern}>")) + .collect(); + + let (tx, rx) = mpsc::channel(0); + Ok(( + Self { + stream_name, + filter_name, + action_name, + rx, + patterns, + send, + to, + }, + tx, + )) + } +} + +struct VirtualAction { + rx: mpsc::Receiver, + patterns: Vec, + send: String, + to: VirtualStream, +} + +impl VirtualAction { + fn from(action_init: VirtualActionInit, to: VirtualStream) -> VirtualAction { + VirtualAction { + rx: action_init.rx, + patterns: action_init.patterns, + send: action_init.send, + to, + } + } + + async fn serve(&mut self) { + loop { + match self.rx.recv().await { + Ok(Some(m)) => { + let line = if m.match_.is_empty() { + self.send.clone() + } else { + (0..(m.match_.len())) + .zip(&self.patterns) + .fold(self.send.clone(), |acc, (i, pattern)| { + acc.replace(pattern, &m.match_[i]) + }) + }; + let result = match self.to.tx.send(Line::Ok(line)).await { + Ok(_) => Ok(()), + Err(err) => Err(format!("{err}")), + }; + m.result.send(result).unwrap(); + } + Ok(None) => { + return; + } + Err(_) => panic!(), + } + } + } +} diff --git a/reaction-plugin/Cargo.lock b/plugins/reaction-plugin/Cargo.lock similarity index 100% rename from reaction-plugin/Cargo.lock rename to plugins/reaction-plugin/Cargo.lock diff --git a/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml similarity index 71% rename from reaction-plugin/Cargo.toml rename to plugins/reaction-plugin/Cargo.toml index 96e50e7..94f6d71 100644 --- a/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -6,3 +6,4 @@ edition = 
"2024" [dependencies] remoc.workspace = true serde.workspace = true +tokio = { workspace = true, features = ["io-std"] } diff --git a/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs similarity index 81% rename from reaction-plugin/src/lib.rs rename to plugins/reaction-plugin/src/lib.rs index a46343e..caccca3 100644 --- a/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -8,8 +8,12 @@ use std::collections::BTreeMap; /// To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides /// the entrypoint for a plugin. /// It permits to define 0 to n (stream, filter, action) custom types. -use remoc::{rch, rtc}; +use remoc::{ + Connect, rch, + rtc::{self, Server}, +}; use serde::{Deserialize, Serialize}; +use tokio::io::{stdin, stdout}; /// This is the only trait that **must** be implemented by a plugin. /// It provides lists of stream, filter and action types implemented by a dynamic plugin. @@ -46,6 +50,7 @@ pub trait PluginInfo { action_name: String, action_type: String, config: Value, + patterns: Vec, ) -> RemoteResult>; /// Notify the plugin that setup is finished, permitting a last occasion to report an error @@ -72,9 +77,11 @@ pub enum Value { #[derive(Serialize, Deserialize)] pub struct StreamImpl { - pub stream: rch::mpsc::Receiver>, + pub stream: rch::mpsc::Receiver, } +pub type Line = Result; + // #[derive(Serialize, Deserialize)] // pub struct FilterImpl { // pub stream: rch::lr::Sender, @@ -88,7 +95,7 @@ pub struct StreamImpl { #[derive(Clone, Serialize, Deserialize)] pub struct ActionImpl { - pub sender: rch::mpsc::Sender, + pub tx: rch::mpsc::Sender, } #[derive(Serialize, Deserialize)] @@ -98,3 +105,16 @@ pub struct Exec { } // TODO write main function here? 
+pub async fn main_loop(plugin_info: T) { + let (conn, mut tx, _rx): ( + _, + remoc::rch::base::Sender, + remoc::rch::base::Receiver<()>, + ) = Connect::io_buffered(remoc::Cfg::default(), stdin(), stdout(), 2048) + .await + .unwrap(); + + let (server, client) = PluginInfoServer::new(plugin_info, 1); + + let _ = tokio::join!(tx.send(client), server.serve(), tokio::spawn(conn)); +} diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 9bbe66c..11a4a48 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -73,6 +73,11 @@ impl FilterManager { action.name.clone(), action.action_type.clone().unwrap(), action.options.clone(), + action + .patterns + .iter() + .map(|pattern| pattern.name.clone()) + .collect(), ) .await?, ); @@ -400,7 +405,7 @@ fn exec_now( // Sending action let (response_tx, response_rx) = remoc::rch::oneshot::channel(); if let Err(err) = action_impl - .sender + .tx .send(reaction_plugin::Exec { match_: m, result: response_tx, diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 79c2b05..ef1b70d 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -193,6 +193,7 @@ impl Plugins { action_name: String, action_type: String, config: Value, + patterns: Vec, ) -> Result { let plugin_name = self .actions From 7cbf482e4daf1e1a293cc8e90d499e6487d52c2b Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 10 Oct 2025 12:00:00 +0200 Subject: [PATCH 118/241] plugin improvements - fix panic of channel(0) - cleaner plugin interface with one level of Result - standalone metadata for stream plugins - new test for plugin virtual --- plugins/reaction-plugin-virtual/src/main.rs | 52 ++++++-------- plugins/reaction-plugin/src/lib.rs | 76 ++++++++++++++++++--- src/concepts/action.rs | 1 + src/concepts/plugin.rs | 1 + src/daemon/filter/mod.rs | 3 +- src/daemon/mod.rs | 13 +++- src/daemon/plugin/mod.rs | 47 +++++++------ src/daemon/stream.rs | 30 +++++--- tests/plugin_virtual.rs | 30 ++++++++ 
tests/test-conf/test-virtual.jsonnet | 58 ++++++++++++++++ 10 files changed, 236 insertions(+), 75 deletions(-) create mode 100644 tests/plugin_virtual.rs create mode 100644 tests/test-conf/test-virtual.jsonnet diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 330a0b2..26fdd56 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use reaction_plugin::{ActionImpl, Exec, Line, PluginInfo, RemoteResult, StreamImpl, Value}; -use remoc::rch::mpsc; +use remoc::{rch::mpsc, rtc}; #[tokio::main] async fn main() { @@ -16,11 +16,11 @@ struct Plugin { } impl PluginInfo for Plugin { - async fn stream_impls(&self) -> RemoteResult> { + async fn stream_impls(&self) -> Result, rtc::CallError> { Ok(vec!["virtual".into()]) } - async fn action_impls(&self) -> RemoteResult> { + async fn action_impls(&self) -> Result, rtc::CallError> { Ok(vec!["virtual".into()]) } @@ -29,25 +29,21 @@ impl PluginInfo for Plugin { stream_name: String, stream_type: String, config: Value, - ) -> RemoteResult> { + ) -> RemoteResult { if stream_type != "virtual" { - return Ok(Err( - "This plugin can't handle other stream types than virtual".into(), - )); + return Err("This plugin can't handle other stream types than virtual".into()); } - let (virtual_stream, receiver) = match VirtualStream::new(config) { - Ok(v) => v, - Err(err) => return Ok(Err(err)), - }; + let (virtual_stream, receiver) = VirtualStream::new(config)?; if let Some(_) = self.streams.insert(stream_name, virtual_stream) { - return Ok(Err( - "this virtual stream has already been initialized".into() - )); + return Err("this virtual stream has already been initialized".into()); } - Ok(Ok(StreamImpl { stream: receiver })) + Ok(StreamImpl { + stream: receiver, + standalone: false, + }) } async fn action_impl( @@ -58,24 +54,19 @@ impl PluginInfo for Plugin { action_type: String, config: Value, 
patterns: Vec, - ) -> RemoteResult> { + ) -> RemoteResult { if &action_type != "virtual" { - return Ok(Err( - "This plugin can't handle other stream types than virtual".into(), - )); + return Err("This plugin can't handle other stream types than virtual".into()); } let (virtual_action_init, tx) = - match VirtualActionInit::new(stream_name, filter_name, action_name, config, patterns) { - Ok(v) => v, - Err(err) => return Ok(Err(err)), - }; + VirtualActionInit::new(stream_name, filter_name, action_name, config, patterns)?; self.actions_init.push(virtual_action_init); - Ok(Ok(ActionImpl { tx })) + Ok(ActionImpl { tx }) } - async fn finish_setup(&mut self) -> RemoteResult> { + async fn finish_setup(&mut self) -> RemoteResult<()> { while let Some(action_init) = self.actions_init.pop() { match self.streams.get(&action_init.to) { Some(virtual_stream) => { @@ -87,20 +78,21 @@ impl PluginInfo for Plugin { }); } None => { - return Ok(Err(format!( + return Err(format!( "action {}.{}.{}: send \"{}\" matches no stream name", action_init.stream_name, action_init.filter_name, action_init.action_name, action_init.to - ))); + ) + .into()); } } } self.streams = BTreeMap::new(); self.actions_init = Vec::new(); - Ok(Ok(())) + Ok(()) } async fn close(self) -> RemoteResult<()> { @@ -126,7 +118,7 @@ impl VirtualStream { _ => return Err(CONFIG_ERROR.into()), } - let (tx, rx) = mpsc::channel(0); + let (tx, rx) = mpsc::channel(2); Ok((Self { tx }, rx)) } } @@ -179,7 +171,7 @@ impl VirtualActionInit { .map(|pattern| format!("<{pattern}>")) .collect(); - let (tx, rx) = mpsc::channel(0); + let (tx, rx) = mpsc::channel(1); Ok(( Self { stream_name, diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index caccca3..5b3af6c 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -1,4 +1,4 @@ -use std::collections::BTreeMap; +use std::{collections::BTreeMap, error::Error, fmt::Display}; /// This crate defines the API between 
reaction's core and plugins. /// @@ -20,14 +20,24 @@ use tokio::io::{stdin, stdout}; #[rtc::remote] pub trait PluginInfo { /// Return all stream types that should be made available to reaction users - async fn stream_impls(&self) -> RemoteResult>; + /// ```jsonnet + /// { + /// streams: { + /// my_stream: { + /// type: "..." + /// # ↑ all those exposed types + /// } + /// } + /// } + /// ``` + async fn stream_impls(&self) -> Result, rtc::CallError>; /// Return one stream of a given type if it exists async fn stream_impl( &mut self, stream_name: String, stream_type: String, config: Value, - ) -> RemoteResult>; + ) -> RemoteResult; // /// Return all filter types that should be made available to reaction users // async fn filter_impls(&self) -> RemoteResult>; @@ -41,7 +51,7 @@ pub trait PluginInfo { // ) -> RemoteResult>; /// Return all action types that should be made available to reaction users - async fn action_impls(&self) -> RemoteResult>; + async fn action_impls(&self) -> Result, rtc::CallError>; /// Return one instance of a given type. async fn action_impl( &mut self, @@ -51,17 +61,15 @@ pub trait PluginInfo { action_type: String, config: Value, patterns: Vec, - ) -> RemoteResult>; + ) -> RemoteResult; /// Notify the plugin that setup is finished, permitting a last occasion to report an error /// (For example if a stream wants a companion action but it hasn't been initialized) - async fn finish_setup(&mut self) -> RemoteResult>; + async fn finish_setup(&mut self) -> RemoteResult<()>; async fn close(mut self) -> RemoteResult<()>; } -pub type RemoteResult = Result; - /// Represents a configuration value. /// This is not meant as an efficient type, but as a very flexible one. #[derive(Serialize, Deserialize)] @@ -78,6 +86,15 @@ pub enum Value { #[derive(Serialize, Deserialize)] pub struct StreamImpl { pub stream: rch::mpsc::Receiver, + /// Whether this stream works standalone, or if it needs other streams to be fed. + /// Defaults to true. 
+ /// When false, reaction will exit if it's the last one standing. + #[serde(default = "_true")] + pub standalone: bool, +} + +fn _true() -> bool { + true } pub type Line = Result; @@ -110,7 +127,7 @@ pub async fn main_loop(plugin_info: T) { _, remoc::rch::base::Sender, remoc::rch::base::Receiver<()>, - ) = Connect::io_buffered(remoc::Cfg::default(), stdin(), stdout(), 2048) + ) = Connect::io(remoc::Cfg::default(), stdin(), stdout()) .await .unwrap(); @@ -118,3 +135,44 @@ pub async fn main_loop(plugin_info: T) { let _ = tokio::join!(tx.send(client), server.serve(), tokio::spawn(conn)); } + +// Errors + +pub type RemoteResult = Result; + +/// A Plugin Error +/// It's either a connection error or a free String for plugin-specific errors +#[derive(Debug, Serialize, Deserialize)] +pub enum RemoteError { + Remoc(rtc::CallError), + Plugin(String), +} + +impl Display for RemoteError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RemoteError::Remoc(call_error) => write!(f, "communication error: {call_error}"), + RemoteError::Plugin(err) => write!(f, "{err}"), + } + } +} + +impl Error for RemoteError {} + +impl From for RemoteError { + fn from(value: String) -> Self { + Self::Plugin(value) + } +} + +impl From<&str> for RemoteError { + fn from(value: &str) -> Self { + Self::Plugin(value.into()) + } +} + +impl From for RemoteError { + fn from(value: rtc::CallError) -> Self { + Self::Remoc(value) + } +} diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 70a89bc..00754d1 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -11,6 +11,7 @@ use super::{null_value, parse_duration::*, Match, Pattern, PatternType}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct Action { + #[serde(default)] pub cmd: Vec, // TODO one shot time deserialization diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index f4da976..fb365d3 100644 --- a/src/concepts/plugin.rs 
+++ b/src/concepts/plugin.rs @@ -40,6 +40,7 @@ impl Plugin { Command::new(&self.path) .stdin(Stdio::piped()) .stdout(Stdio::piped()) + .env("RUST_BACKTRACE", "1") .spawn() } } diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 11a4a48..a71bcd5 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -398,8 +398,9 @@ fn exec_now( match action_impl { Some(action_impl) => { info!( - "{action}: run {}", + "{action}: run {} [{:?}]", action.action_type.clone().unwrap_or_default(), + &m, ); // Sending action diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 94b416e..70f21cb 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -99,9 +99,16 @@ pub async fn daemon( socket.manager(config, state, shutdown.token()); // Start Stream managers - let stream_task_handles = stream_managers - .into_iter() - .map(|stream_manager| tokio::spawn(async move { stream_manager.start().await })); + let stream_task_handles = stream_managers.into_iter().filter_map(|stream_manager| { + let standalone = stream_manager.is_standalone(); + let handle = tokio::spawn(async move { stream_manager.start().await }); + // Only wait for standalone streams + if standalone { + Some(handle) + } else { + None + } + }); // Close streams when we receive a quit signal let signal_received = Arc::new(AtomicBool::new(false)); diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index ef1b70d..9d8520c 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -25,6 +25,8 @@ pub struct PluginManager { shutdown: ShutdownToken, plugin: &'static Plugin, plugin_info: PluginInfoClient, + stream_impls: Vec, + action_impls: Vec, } impl Deref for PluginManager { @@ -53,7 +55,7 @@ impl PluginManager { _, remoc::rch::base::Sender<()>, remoc::rch::base::Receiver, - ) = Connect::io_buffered(remoc::Cfg::default(), stdout, stdin, 2048) + ) = Connect::io(remoc::Cfg::default(), stdout, stdin) .await .map_err(|err| { format!( @@ -70,11 +72,23 @@ impl PluginManager { 
.map_err(|err| format!("could not retrieve initial information from plugin: {err}"))? .ok_or("could not retrieve initial information from plugin: no data")?; + let stream_impls = plugin_info + .stream_impls() + .await + .map_err(|err| format!("plugin error while retrieving stream types: {err}"))?; + + let action_impls = plugin_info + .action_impls() + .await + .map_err(|err| format!("plugin error while retrieving action types: {err}"))?; + Ok(Self { child, shutdown, plugin, plugin_info, + stream_impls, + action_impls, }) } @@ -135,26 +149,18 @@ impl Plugins { let path = plugin.path.clone(); let manager = PluginManager::new(plugin, shutdown).await?; - for stream in manager - .stream_impls() - .await - .map_err(|err| format!("plugin error: {err}"))? - { + for stream in &manager.stream_impls { if let Some(path) = self.streams.insert(stream.clone().into(), path.clone()) { return Err(format!( - "plugin {path} already exposed a stream with type name '{stream}'" + "plugin {path} already exposed a stream with type name '{stream}'", )); } } - for action in manager - .action_impls() - .await - .map_err(|err| format!("plugin error: {err}"))? - { + for action in &manager.action_impls { if let Some(path) = self.actions.insert(action.clone().into(), path.clone()) { return Err(format!( - "plugin {path} already exposed a action with type name '{action}'" + "plugin {path} already exposed a action with type name '{action}'", )); } } @@ -183,7 +189,7 @@ impl Plugins { to_stable_value(config), ) .await - .map_err(|err| format!("plugin error: {err}"))? + .map_err(|err| format!("plugin error while initializing stream: {err}")) } pub async fn init_action_impl( @@ -209,9 +215,10 @@ impl Plugins { action_name.into(), action_type.into(), to_stable_value(config), + patterns, ) .await - .map_err(|err| format!("plugin error: {err}"))? 
+ .map_err(|err| format!("plugin error while initializing action: {err}")) } pub async fn finish_setup(&mut self) -> Result<(), String> { @@ -226,15 +233,7 @@ impl Plugins { .into_iter() .zip(self.plugins.values()) .map(|(result, plugin_manager)| { - result - .map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.path)) - .map_err(|err| { - format!( - "invalid config for plugin {}: {err}", - plugin_manager.plugin.path - ) - }) - .flatten() + result.map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.path)) }) .collect::>() } diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index bf5c189..f9e8eec 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -95,7 +95,14 @@ impl StreamManager { }) } - pub async fn start(self) { + pub fn is_standalone(&self) -> bool { + match &self.stream_plugin { + Some(plugin) => plugin.standalone, + None => true, + } + } + + pub async fn start(mut self) { // First start FilterManagers persisted actions let now = Local::now(); join_all( @@ -115,7 +122,7 @@ impl StreamManager { } } - async fn start_plugin(mut self) { + async fn start_plugin(&mut self) { let mut plugin = self.stream_plugin.take().unwrap(); loop { @@ -128,11 +135,18 @@ impl StreamManager { return; } Err(err) => { - error!( - "impossible to read output from stream {}: {}", - self.stream.name, err - ); - return; + if err.is_final() { + error!( + "error reading from plugin stream {}: {}", + self.stream.name, err + ); + return; + } else { + error!( + "temporary error reading from plugin stream {}: {}", + self.stream.name, err + ); + } } Ok(None) => { return; @@ -141,7 +155,7 @@ impl StreamManager { } } - async fn start_cmd(self) { + async fn start_cmd(&self) { let mut child = match Command::new(&self.stream.cmd[0]) .args(&self.stream.cmd[1..]) .stdin(Stdio::null()) diff --git a/tests/plugin_virtual.rs b/tests/plugin_virtual.rs new file mode 100644 index 0000000..cf546f3 --- /dev/null +++ b/tests/plugin_virtual.rs @@ -0,0 +1,30 @@ 
+use std::{path::Path, time::Duration}; + +use assert_cmd::Command; +use assert_fs::prelude::*; +use predicates::prelude::predicate; + +#[test] +fn plugin_virtual() { + let tmp_dir = assert_fs::TempDir::new().unwrap(); + tmp_dir + .child("config.jsonnet") + .write_file(Path::new("tests/test-conf/test-virtual.jsonnet")) + .unwrap(); + + Command::cargo_bin("reaction") + .unwrap() + .args(["start", "--socket", "./s", "--config", "./config.jsonnet"]) + .current_dir(tmp_dir.path()) + .timeout(Duration::from_secs(5)) + // Expected exit 1: all stream exited + .assert() + .code(predicate::eq(1)); + + // Expected output + let output = [ + "a0 1", "a0 2", "a0 3", "a0 4", "b0 1", "b0 2", "b0 3", "b0 4", "", + ]; + tmp_dir.child("log").assert(&output.join("\n")); + tmp_dir.child("log").write_str("").unwrap(); +} diff --git a/tests/test-conf/test-virtual.jsonnet b/tests/test-conf/test-virtual.jsonnet new file mode 100644 index 0000000..f253319 --- /dev/null +++ b/tests/test-conf/test-virtual.jsonnet @@ -0,0 +1,58 @@ +{ + patterns: { + num: { + regex: @"[0-9]+", + }, + all: { + regex: @".*" + } + }, + + plugins: [ + { + path: "/home/ppom/prg/reaction/target/debug/reaction-plugin-virtual", + } + ], + + streams: { + s0: { + cmd: ["bash", "-c", "for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2"], + filters: { + f0: { + regex: ["^$"], + actions: { + a0: { + type: "virtual", + options: { + send: "a0 ", + to: "s1", + } + }, + b0: { + type: "virtual", + options: { + send: "b0 ", + to: "s1", + }, + after: "600ms", + }, + }, + }, + }, + }, + s1: { + type: "virtual", + options: {}, + filters: { + f1: { + regex: ["^$"], + actions: { + a1: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} From b44800ed30bedca81d8ac13a7bf3360150136057 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 10 Oct 2025 12:00:00 +0200 Subject: [PATCH 119/241] `cargo build` builds plugin And benchmark for virtual plugin --- Cargo.toml | 4 ++ bench/bench.sh | 2 +- 
bench/small-heavy-load-virtual.yml | 86 ++++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+), 1 deletion(-) create mode 100644 bench/small-heavy-load-virtual.yml diff --git a/Cargo.toml b/Cargo.toml index 6e953a6..926b3b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,3 +80,7 @@ members = ["plugins/reaction-plugin", "plugins/reaction-plugin-virtual"] remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } tokio = { version = "1.40.0" } + +[[bin]] +name = "reaction-plugin-virtual" +path = "plugins/reaction-plugin-virtual/src/main.rs" diff --git a/bench/bench.sh b/bench/bench.sh index 59d4894..9f42ec2 100755 --- a/bench/bench.sh +++ b/bench/bench.sh @@ -14,7 +14,7 @@ then fi rm -f reaction.db -cargo build --release +cargo build --release --bins sudo systemd-run --wait \ -p User="$(id -nu)" \ -p MemoryAccounting=yes \ diff --git a/bench/small-heavy-load-virtual.yml b/bench/small-heavy-load-virtual.yml new file mode 100644 index 0000000..0f074b9 --- /dev/null +++ b/bench/small-heavy-load-virtual.yml @@ -0,0 +1,86 @@ +--- +# This configuration permits to test reaction's performance +# under a very high load +# +# It keeps regexes super simple, to avoid benchmarking the `regex` crate, +# and benchmark reaction's internals instead. 
+concurrency: 32 + +plugins: + - path: "/home/ppom/prg/reaction/target/release/reaction-plugin-virtual" + +patterns: + num: + regex: '[0-9]{3}' + ip: + regex: '(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3})|(?:[0-9a-fA-F:]{2,90})' + ignore: + - 1.0.0.1 + +streams: + virtual: + type: virtual + filters: + find0: + regex: + - '^$' + actions: + damn: + cmd: [ 'sleep', '0.0' ] + undamn: + cmd: [ 'sleep', '0.0' ] + after: 1m + onexit: false + tailDown1: + cmd: [ 'sh', '-c', 'sleep 2; seq 1001 | while read i; do echo found $i; done' ] + filters: + find1: + regex: + - '^found ' + retry: 9 + retryperiod: 6m + actions: + virtual: + type: virtual + options: + send: '' + to: virtual + tailDown2: + cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ] + filters: + find2: + regex: + - '^found ' + retry: 480 + retryperiod: 6m + actions: + virtual: + type: virtual + options: + send: '' + to: virtual + tailDown3: + cmd: [ 'sh', '-c', 'sleep 2; seq 100100 | while read i; do echo found $i; echo trouvé $i; done' ] + filters: + find3: + regex: + - '^found ' + retry: 480 + retryperiod: 6m + actions: + virtual: + type: virtual + options: + send: '' + to: virtual + find4: + regex: + - '^trouvé ' + retry: 480 + retryperiod: 6m + actions: + virtual: + type: virtual + options: + send: '' + to: virtual From 76bc551043346b21134e10005503c59137aebe76 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 11 Oct 2025 12:00:00 +0200 Subject: [PATCH 120/241] Specify `reaction` as default bin --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 926b3b3..b504b22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ homepage = "https://reaction.ppom.me" repository = "https://framagit.org/ppom/reaction" keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"] build = "build.rs" +default-run = "reaction" [package.metadata.deb] section = "net" From a5c563d55f50cc5ab9123bf18a16d4fa1431887d Mon Sep 17 00:00:00 2001 From: 
ppom Date: Sat, 11 Oct 2025 12:00:00 +0200 Subject: [PATCH 121/241] WIP systemd support The logic seems to be fine. Still need to think what security defaults are pertinent. --- TODO | 2 +- plugins/reaction-plugin-virtual/src/main.rs | 2 +- plugins/reaction-plugin/src/lib.rs | 23 ++++ src/concepts/plugin.rs | 145 ++++++++++++++++++-- src/daemon/plugin/mod.rs | 2 +- tests/test-conf/test-virtual.jsonnet | 6 +- 6 files changed, 161 insertions(+), 19 deletions(-) diff --git a/TODO b/TODO index 9d5462a..1827fa9 100644 --- a/TODO +++ b/TODO @@ -1,3 +1,3 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) -stream: test regex ending with $ DB: add tests on stress testing (lines should always be in order) +plugins: pipe stderr too and wrap errors in logs diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 26fdd56..7c24cd3 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -118,7 +118,7 @@ impl VirtualStream { _ => return Err(CONFIG_ERROR.into()), } - let (tx, rx) = mpsc::channel(2); + let (tx, rx) = mpsc::channel(1); Ok((Self { tx }, rx)) } } diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 5b3af6c..64f19c8 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -8,6 +8,25 @@ use std::{collections::BTreeMap, error::Error, fmt::Display}; /// To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides /// the entrypoint for a plugin. /// It permits to define 0 to n (stream, filter, action) custom types. +/// +/// Minimal example: +/// ```rust +/// use reaction_plugin::PluginInfo; +/// +/// #[tokio::main] +/// async fn main() { +/// let plugin = MyPlugin::default(); +/// reaction_plugin::main_loop(plugin).await; +/// } +/// +/// #[derive(Default)] +/// struct MyPlugin {} +/// +/// impl PluginInfo for Plugin { +/// // ... 
+/// } +/// ``` +/// use remoc::{ Connect, rch, rtc::{self, Server}, @@ -65,8 +84,12 @@ pub trait PluginInfo { /// Notify the plugin that setup is finished, permitting a last occasion to report an error /// (For example if a stream wants a companion action but it hasn't been initialized) + /// All initialization (opening remote connections, starting streams, etc) should happen here. async fn finish_setup(&mut self) -> RemoteResult<()>; + /// Notify the plugin that reaction is quitting and that the plugin should quit too. + /// A few seconds later, the plugin will receive SIGTERM. + /// A few seconds later, the plugin will receive SIGKILL. async fn close(mut self) -> RemoteResult<()>; } diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index fb365d3..1fcda1f 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -1,21 +1,35 @@ -use std::{collections::BTreeMap, process::Stdio}; +use std::{ + collections::BTreeMap, + io::{Error, ErrorKind}, + os::linux::fs::MetadataExt, + process::Stdio, +}; use serde::{Deserialize, Serialize}; -use tokio::process::{Child, Command}; +use tokio::{ + process::{Child, Command}, + runtime::Handle, +}; +use tracing::warn; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] #[serde(deny_unknown_fields)] pub struct Plugin { pub path: String, - /// If empty, defaults to root - pub file_owner: Option, - /// If empty, defaults to root - pub exec_user: Option, - /// If empty, hash is not performed - pub sha256: Option, - /// Option for `run0`. Do not provide User. 
- pub systemd_options: Option>>, + /// Check that plugin file owner is root + #[serde(default = "_true")] + pub check_root: bool, + /// Enable systemd containerization + #[serde(default = "_true")] + pub systemd: bool, + /// Options for `run0` + #[serde(default)] + pub systemd_options: BTreeMap>, +} + +fn _true() -> bool { + true } // NOTE @@ -27,20 +41,121 @@ impl Plugin { if self.path.is_empty() { return Err("can't specify empty plugin path".into()); } + + #[cfg(not(debug_assertions))] if !self.path.starts_with("/") { return Err(format!("plugin paths must be absolute: {}", self.path)); } + + if self.systemd { + self.systemd_setup(); + } Ok(()) } - pub fn launch(&self) -> Result { - // TODO owner check - // TODO hash check - // TODO run0 options - Command::new(&self.path) + /// Override default options with user-defined options, when defined. + pub fn systemd_setup(&mut self) { + let mut new_options = systemd_default_options(); + while let Some((option, value)) = self.systemd_options.pop_first() { + new_options.insert(option, value); + } + self.systemd_options = new_options; + } + + pub async fn launch(&self) -> Result { + // owner check + if self.check_root { + let path = self.path.clone(); + let stat = Handle::current() + .spawn_blocking(|| std::fs::metadata(path)) + .await + .unwrap()?; + + if stat.st_uid() != 0 { + return Err(Error::new( + ErrorKind::Other, + "plugin file is not owned by root", + )); + } + } + + let self_uid = if self.systemd { + // Well well we want to check if we're root + #[allow(unsafe_code)] + unsafe { + nix::libc::geteuid() + } + } else { + 0 + }; + + let mut command = if self.systemd && self_uid == 0 { + let mut command = Command::new("run0"); + // --pipe gives direct, non-emulated stdio access, for better performance. 
+ command.arg("--pipe"); + // run0 options + for (option, values) in self.systemd_options.iter() { + for value in values.iter() { + command.arg("--property").arg(format!("{option}={value}")); + } + } + command.arg(&self.path); + command + } else { + if self.systemd { + warn!("Disabling systemd because reaction does not run as root"); + } + Command::new(&self.path) + }; + dbg!(&command); + command .stdin(Stdio::piped()) .stdout(Stdio::piped()) .env("RUST_BACKTRACE", "1") .spawn() } } + +// TODO commented options block execution of program, +// while developping in my home directory. +// Some options may still be useful in production environments. +fn systemd_default_options() -> BTreeMap> { + BTreeMap::from( + [ + // No file access + ("ReadWritePaths", vec![]), + ("ReadOnlyPaths", vec![]), + // ("NoExecPaths", vec!["/"]), + ("InaccessiblePaths", vec!["/boot", "/etc"]), + // Protect special filesystems + ("PrivateDevices", vec!["true"]), + ("PrivateMounts", vec!["true"]), + ("PrivateTmp", vec!["true"]), + // ("PrivateUsers", vec!["true"]), + ("ProcSubset", vec!["pid"]), + ("ProtectClock", vec!["true"]), + ("ProtectControlGroups", vec!["true"]), + // ("ProtectHome", vec!["true"]), + ("ProtectHostname", vec!["true"]), + ("ProtectKernelLogs", vec!["true"]), + ("ProtectKernelModules", vec!["true"]), + ("ProtectKernelTunables", vec!["true"]), + ("ProtectProc", vec!["invisible"]), + ("ProtectSystem", vec!["strict"]), + // Dynamic User + ("DynamicUser", vec!["true"]), + // Various Protections + ("CapabilityBoundingSet", vec![""]), + ("LockPersonality", vec!["true"]), + ("NoNewPrivileges", vec!["true"]), + // Isolate File + ("RemoveIPC", vec!["true"]), + ("RestrictAddressFamilies", vec![""]), + ("RestrictNamespaces", vec!["true"]), + ("RestrictSUIDSGID", vec!["true"]), + ("SystemCallArchitectures", vec!["native"]), + ("SystemCallFilter", vec!["@system-service", "~@privileged"]), + ] + .map(|(k, v)| (k.into(), v.into_iter().map(|v| v.into()).collect())), + ) +} diff --git 
a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 9d8520c..bfad52f 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -35,7 +35,6 @@ impl Deref for PluginManager { &self.plugin_info } } - impl DerefMut for PluginManager { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.plugin_info @@ -46,6 +45,7 @@ impl PluginManager { async fn new(plugin: &'static Plugin, shutdown: ShutdownToken) -> Result { let mut child = plugin .launch() + .await .map_err(|err| format!("could not launch plugin: {err}"))?; let stdin = child.stdin.take().unwrap(); diff --git a/tests/test-conf/test-virtual.jsonnet b/tests/test-conf/test-virtual.jsonnet index f253319..8e9a24f 100644 --- a/tests/test-conf/test-virtual.jsonnet +++ b/tests/test-conf/test-virtual.jsonnet @@ -10,7 +10,11 @@ plugins: [ { - path: "/home/ppom/prg/reaction/target/debug/reaction-plugin-virtual", + path: "./target/debug/reaction-plugin-virtual", + check_root: false, + systemd_options: { + DynamicUser: ["false"], + } } ], From 9f56e5d8d2dffc1f857c1ff250a69fc8788552a1 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 16 Oct 2025 12:00:00 +0200 Subject: [PATCH 122/241] fmt --- src/concepts/mod.rs | 2 +- src/treedb/mod.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index 44b999d..ddec710 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -13,7 +13,7 @@ pub use config::{Config, Patterns}; pub use filter::{Duplicate, Filter}; use parse_duration::parse_duration; pub use pattern::{Pattern, PatternType}; -pub use plugin::{Plugin}; +pub use plugin::Plugin; use serde::{Deserialize, Serialize}; use serde_json::Value; pub use stream::Stream; diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index ed54a7a..ee86850 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -529,7 +529,10 @@ mod tests { manager.set_loaded_db(loaded_db) } let error_rx = manager.manager(ShutdownController::new().token()); - Ok(Self { entry_tx: 
Some(entry_tx), error_rx }) + Ok(Self { + entry_tx: Some(entry_tx), + error_rx, + }) } } From 0c4d19a4d7bd2072d253f1abf93a277125ef4c98 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 21 Oct 2025 12:00:00 +0200 Subject: [PATCH 123/241] plugins are now named and fixed the virtual test --- TODO | 1 + src/concepts/config.rs | 8 ++++---- src/concepts/plugin.rs | 7 ++++++- src/daemon/plugin/mod.rs | 30 ++++++++++++++-------------- tests/plugin_virtual.rs | 6 ++++++ tests/test-conf/test-virtual.jsonnet | 6 +++--- 6 files changed, 35 insertions(+), 23 deletions(-) diff --git a/TODO b/TODO index 1827fa9..e01932a 100644 --- a/TODO +++ b/TODO @@ -1,3 +1,4 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) plugins: pipe stderr too and wrap errors in logs +plugins: provide tree storage? omg diff --git a/src/concepts/config.rs b/src/concepts/config.rs index 94e0df7..f351d36 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -24,8 +24,8 @@ pub struct Config { #[serde(default = "dot", skip_serializing_if = "String::is_empty")] pub state_directory: String, - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub plugins: Vec, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub plugins: BTreeMap, #[serde(default)] pub patterns: Patterns, @@ -101,8 +101,8 @@ impl Config { // Nullify this useless field self._definitions = serde_json::Value::Null; - for plugin in &mut self.plugins { - plugin.setup()?; + for (key, value) in &mut self.plugins { + value.setup(key)?; } if self.patterns.is_empty() { diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 1fcda1f..0869fb4 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -16,6 +16,9 @@ use tracing::warn; #[cfg_attr(test, derive(Default))] #[serde(deny_unknown_fields)] pub struct Plugin { + #[serde(skip)] + pub name: String, + pub path: String, /// Check that plugin file owner is 
root #[serde(default = "_true")] @@ -37,7 +40,9 @@ fn _true() -> bool { // with the --pipe option, raw stdio fd are transmitted to the underlying command, so there is no overhead. impl Plugin { - pub fn setup(&mut self) -> Result<(), String> { + pub fn setup(&mut self, name: &str) -> Result<(), String> { + self.name = name.to_string(); + if self.path.is_empty() { return Err("can't specify empty plugin path".into()); } diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index bfad52f..e7f63da 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -60,7 +60,7 @@ impl PluginManager { .map_err(|err| { format!( "could not init communication with plugin {}: {err}", - plugin.path + plugin.name ) })?; @@ -98,7 +98,7 @@ impl PluginManager { // wait either for the child process to exit on its own or for the shutdown signal futures::select! { _ = self.child.wait().fuse() => { - error!("plugin {} exited: its command returned.", self.plugin.path); + error!("plugin {} exited: its command returned.", self.plugin.name); return; } _ = self.shutdown.wait().fuse() => {} @@ -109,11 +109,11 @@ impl PluginManager { return; }, _ = sleep(Duration::from_secs(PLUGIN_STOP_GRACE_TIME)).fuse() => { - error!("plugin {} did not respond to close request in time, killing", self.plugin.path) + error!("plugin {} did not respond to close request in time, killing", self.plugin.name) }, } - kill_child(self.child, format!("plugin {}", self.plugin.path), 5).await; + kill_child(self.child, format!("plugin {}", self.plugin.name), 5).await; } } @@ -126,16 +126,16 @@ pub struct Plugins { impl Plugins { pub async fn new( - plugins: &'static Vec, + plugins: &'static BTreeMap, shutdown: ShutdownToken, ) -> Result { let mut this = Self::default(); - for plugin in plugins { - let path = plugin.path.clone(); + for plugin in plugins.values() { + let name = plugin.name.clone(); this.load_plugin(&plugin, shutdown.clone()) .await - .map_err(|err| format!("plugin {path}: {err}]"))?; + 
.map_err(|err| format!("plugin {name}: {err}]"))?; } Ok(this) @@ -146,26 +146,26 @@ impl Plugins { plugin: &'static Plugin, shutdown: ShutdownToken, ) -> Result<(), String> { - let path = plugin.path.clone(); + let name = plugin.name.clone(); let manager = PluginManager::new(plugin, shutdown).await?; for stream in &manager.stream_impls { - if let Some(path) = self.streams.insert(stream.clone().into(), path.clone()) { + if let Some(name) = self.streams.insert(stream.clone().into(), name.clone()) { return Err(format!( - "plugin {path} already exposed a stream with type name '{stream}'", + "plugin {name} already exposed a stream with type name '{stream}'", )); } } for action in &manager.action_impls { - if let Some(path) = self.actions.insert(action.clone().into(), path.clone()) { + if let Some(name) = self.actions.insert(action.clone().into(), name.clone()) { return Err(format!( - "plugin {path} already exposed a action with type name '{action}'", + "plugin {name} already exposed a action with type name '{action}'", )); } } - self.plugins.insert(path, manager); + self.plugins.insert(name, manager); Ok(()) } @@ -233,7 +233,7 @@ impl Plugins { .into_iter() .zip(self.plugins.values()) .map(|(result, plugin_manager)| { - result.map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.path)) + result.map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.name)) }) .collect::>() } diff --git a/tests/plugin_virtual.rs b/tests/plugin_virtual.rs index cf546f3..c9bb385 100644 --- a/tests/plugin_virtual.rs +++ b/tests/plugin_virtual.rs @@ -12,6 +12,12 @@ fn plugin_virtual() { .write_file(Path::new("tests/test-conf/test-virtual.jsonnet")) .unwrap(); + // Copy virtual plugin + tmp_dir + .child("./target/debug/reaction-plugin-virtual") + .write_file(Path::new("./target/debug/reaction-plugin-virtual")) + .unwrap(); + Command::cargo_bin("reaction") .unwrap() .args(["start", "--socket", "./s", "--config", "./config.jsonnet"]) diff --git 
a/tests/test-conf/test-virtual.jsonnet b/tests/test-conf/test-virtual.jsonnet index 8e9a24f..e68a4fa 100644 --- a/tests/test-conf/test-virtual.jsonnet +++ b/tests/test-conf/test-virtual.jsonnet @@ -8,15 +8,15 @@ } }, - plugins: [ - { + plugins: { + virtual: { path: "./target/debug/reaction-plugin-virtual", check_root: false, systemd_options: { DynamicUser: ["false"], } } - ], + }, streams: { s0: { From fa350310fd5b168fca870d9efc289edc391d62ea Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 20 Oct 2025 12:00:00 +0200 Subject: [PATCH 124/241] plugin protocol: add manifest with version --- Cargo.lock | 2 +- plugins/reaction-plugin-virtual/src/main.rs | 56 ++++--- plugins/reaction-plugin/Cargo.toml | 2 +- plugins/reaction-plugin/src/lib.rs | 158 +++++++++++++------- src/concepts/plugin.rs | 8 +- src/daemon/plugin/mod.rs | 37 +++-- 6 files changed, 160 insertions(+), 103 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef0928a..9ea2963 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1064,7 +1064,7 @@ dependencies = [ [[package]] name = "reaction-plugin" -version = "0.1.0" +version = "1.0.0" dependencies = [ "remoc", "serde", diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 7c24cd3..ee551ec 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,6 +1,8 @@ -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; -use reaction_plugin::{ActionImpl, Exec, Line, PluginInfo, RemoteResult, StreamImpl, Value}; +use reaction_plugin::{ + ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, +}; use remoc::{rch::mpsc, rtc}; #[tokio::main] @@ -16,12 +18,12 @@ struct Plugin { } impl PluginInfo for Plugin { - async fn stream_impls(&self) -> Result, rtc::CallError> { - Ok(vec!["virtual".into()]) - } - - async fn action_impls(&self) -> Result, rtc::CallError> { - Ok(vec!["virtual".into()]) + async fn manifest(&self) 
-> Result { + Ok(Manifest { + hello: Hello::hello(), + streams: BTreeSet::from(["virtual".into()]), + actions: BTreeSet::from(["virtual".into()]), + }) } async fn stream_impl( @@ -205,29 +207,21 @@ impl VirtualAction { } async fn serve(&mut self) { - loop { - match self.rx.recv().await { - Ok(Some(m)) => { - let line = if m.match_.is_empty() { - self.send.clone() - } else { - (0..(m.match_.len())) - .zip(&self.patterns) - .fold(self.send.clone(), |acc, (i, pattern)| { - acc.replace(pattern, &m.match_[i]) - }) - }; - let result = match self.to.tx.send(Line::Ok(line)).await { - Ok(_) => Ok(()), - Err(err) => Err(format!("{err}")), - }; - m.result.send(result).unwrap(); - } - Ok(None) => { - return; - } - Err(_) => panic!(), - } + while let Ok(Some(m)) = self.rx.recv().await { + let line = if m.match_.is_empty() { + self.send.clone() + } else { + (0..(m.match_.len())) + .zip(&self.patterns) + .fold(self.send.clone(), |acc, (i, pattern)| { + acc.replace(pattern, &m.match_[i]) + }) + }; + let result = match self.to.tx.send(Line::Ok(line)).await { + Ok(_) => Ok(()), + Err(err) => Err(format!("{err}")), + }; + m.result.send(result).unwrap(); } } } diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index 94f6d71..3bd74e9 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction-plugin" -version = "0.1.0" +version = "1.0.0" edition = "2024" [dependencies] diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 64f19c8..fcf8950 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -1,32 +1,39 @@ -use std::{collections::BTreeMap, error::Error, fmt::Display}; +//! This crate defines the API between reaction's core and plugins. +//! +//! It's based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait +//! calls over a single transport channel. +//! +//! 
Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql` +//! +//! To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides +//! the entrypoint for a plugin. +//! It permits to define 0 to n (stream, filter, action) custom types. +//! +//! Minimal example: +//! `src/main.rs` +//! ```rust +//! use reaction_plugin::PluginInfo; +//! +//! #[tokio::main] +//! async fn main() { +//! let plugin = MyPlugin::default(); +//! reaction_plugin::main_loop(plugin).await; +//! } +//! +//! #[derive(Default)] +//! struct MyPlugin {} +//! +//! impl PluginInfo for Plugin { +//! // ... +//! } +//! ``` + +use std::{ + collections::{BTreeMap, BTreeSet}, + error::Error, + fmt::Display, +}; -/// This crate defines the API between reaction's core and plugins. -/// -/// It's based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait -/// calls over a single transport channel. -/// -/// To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides -/// the entrypoint for a plugin. -/// It permits to define 0 to n (stream, filter, action) custom types. -/// -/// Minimal example: -/// ```rust -/// use reaction_plugin::PluginInfo; -/// -/// #[tokio::main] -/// async fn main() { -/// let plugin = MyPlugin::default(); -/// reaction_plugin::main_loop(plugin).await; -/// } -/// -/// #[derive(Default)] -/// struct MyPlugin {} -/// -/// impl PluginInfo for Plugin { -/// // ... -/// } -/// ``` -/// use remoc::{ Connect, rch, rtc::{self, Server}, @@ -38,18 +45,9 @@ use tokio::io::{stdin, stdout}; /// It provides lists of stream, filter and action types implemented by a dynamic plugin. #[rtc::remote] pub trait PluginInfo { - /// Return all stream types that should be made available to reaction users - /// ```jsonnet - /// { - /// streams: { - /// my_stream: { - /// type: "..." 
- /// # ↑ all those exposed types - /// } - /// } - /// } - /// ``` - async fn stream_impls(&self) -> Result, rtc::CallError>; + /// Return the manifest of the plugin. + async fn manifest(&self) -> Result; + /// Return one stream of a given type if it exists async fn stream_impl( &mut self, @@ -58,19 +56,6 @@ pub trait PluginInfo { config: Value, ) -> RemoteResult; - // /// Return all filter types that should be made available to reaction users - // async fn filter_impls(&self) -> RemoteResult>; - // /// Return one stream of a given type if it exists - // async fn filter_impl( - // &mut self, - // stream_name: String, - // filter_name: String, - // stream_type: String, - // config: Value, - // ) -> RemoteResult>; - - /// Return all action types that should be made available to reaction users - async fn action_impls(&self) -> Result, rtc::CallError>; /// Return one instance of a given type. async fn action_impl( &mut self, @@ -93,6 +78,73 @@ pub trait PluginInfo { async fn close(mut self) -> RemoteResult<()>; } +#[derive(Serialize, Deserialize)] +pub struct Manifest { + // Protocol version. available as the [`hello!`] macro. + pub hello: Hello, + /// stream types that should be made available to reaction users + /// ```jsonnet + /// { + /// streams: { + /// my_stream: { + /// type: "..." 
+ /// # ↑ all those exposed types + /// } + /// } + /// } + /// ``` + pub streams: BTreeSet, + /// All action types that should be made available to reaction users + pub actions: BTreeSet, +} + +#[derive(Default, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct Hello { + /// Major version of the protocol + /// Increment means breaking change + pub version_major: u32, + /// Minor version of the protocol + /// Increment means reaction core can handle older version plugins + pub version_minor: u32, +} + +impl Hello { + pub fn hello() -> Hello { + Hello { + version_major: env!("CARGO_PKG_VERSION") + .split(".") + .next() + .unwrap() + .parse() + .unwrap(), + version_minor: env!("CARGO_PKG_VERSION") + .split(".") + .skip(1) + .next() + .unwrap() + .parse() + .unwrap(), + } + } + + pub fn is_compatible(server: &Hello, plugin: &Hello) -> std::result::Result<(), String> { + if server.version_major == plugin.version_major + && server.version_minor >= plugin.version_minor + { + Ok(()) + } else { + if plugin.version_major > server.version_major + || (plugin.version_major == server.version_major + && plugin.version_minor > server.version_minor) + { + Err("consider upgrading reaction".into()) + } else { + Err("consider upgrading the plugin".into()) + } + } + } +} + /// Represents a configuration value. /// This is not meant as an efficient type, but as a very flexible one. 
#[derive(Serialize, Deserialize)] diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 0869fb4..4884046 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -10,7 +10,7 @@ use tokio::{ process::{Child, Command}, runtime::Handle, }; -use tracing::warn; +use tracing::{debug, warn}; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] @@ -112,7 +112,11 @@ impl Plugin { } Command::new(&self.path) }; - dbg!(&command); + debug!( + "plugin {}: running command: {:?}", + self.name, + command.as_std() + ); command .stdin(Stdio::piped()) .stdout(Stdio::piped()) diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index e7f63da..2ebc867 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -1,11 +1,11 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, BTreeSet}, ops::{Deref, DerefMut}, time::Duration, }; use futures::{future::join_all, FutureExt}; -use reaction_plugin::{ActionImpl, PluginInfo, PluginInfoClient, StreamImpl}; +use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; use remoc::Connect; use serde_json::Value; use tokio::{process::Child, time::sleep}; @@ -25,8 +25,8 @@ pub struct PluginManager { shutdown: ShutdownToken, plugin: &'static Plugin, plugin_info: PluginInfoClient, - stream_impls: Vec, - action_impls: Vec, + streams: BTreeSet, + actions: BTreeSet, } impl Deref for PluginManager { @@ -72,23 +72,30 @@ impl PluginManager { .map_err(|err| format!("could not retrieve initial information from plugin: {err}"))? 
.ok_or("could not retrieve initial information from plugin: no data")?; - let stream_impls = plugin_info - .stream_impls() + let manifest = plugin_info + .manifest() .await - .map_err(|err| format!("plugin error while retrieving stream types: {err}"))?; + .map_err(|err| format!("plugin error while retrieving its manifest: {err}"))?; - let action_impls = plugin_info - .action_impls() - .await - .map_err(|err| format!("plugin error while retrieving action types: {err}"))?; + let my_hello = Hello::hello(); + + if let Err(hint) = Hello::is_compatible(&my_hello, &manifest.hello) { + return Err(format!( + "reaction can't handle plugin {} with incompatible version {}.{}: current version: {}.{}. {}", + plugin.name, + manifest.hello.version_major, manifest.hello.version_minor, + my_hello.version_major, my_hello.version_minor, + hint + )); + } Ok(Self { child, shutdown, plugin, plugin_info, - stream_impls, - action_impls, + streams: manifest.streams, + actions: manifest.actions, }) } @@ -149,7 +156,7 @@ impl Plugins { let name = plugin.name.clone(); let manager = PluginManager::new(plugin, shutdown).await?; - for stream in &manager.stream_impls { + for stream in &manager.streams { if let Some(name) = self.streams.insert(stream.clone().into(), name.clone()) { return Err(format!( "plugin {name} already exposed a stream with type name '{stream}'", @@ -157,7 +164,7 @@ impl Plugins { } } - for action in &manager.action_impls { + for action in &manager.actions { if let Some(name) = self.actions.insert(action.clone().into(), name.clone()) { return Err(format!( "plugin {name} already exposed a action with type name '{action}'", From 8d864b1fb9aa49a8c1f9f4cedf358f33df6390f9 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 20 Oct 2025 12:00:00 +0200 Subject: [PATCH 125/241] Add PersistData to trait --- plugins/reaction-plugin-virtual/src/main.rs | 5 +++-- plugins/reaction-plugin/src/lib.rs | 15 +++++++++++++-- src/concepts/plugin.rs | 1 + 3 files changed, 17 insertions(+), 4 
deletions(-) diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index ee551ec..3b06cbc 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,7 +1,8 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + ActionImpl, Exec, Hello, Line, Manifest, PersistData, PluginInfo, RemoteResult, StreamImpl, + Value, }; use remoc::{rch::mpsc, rtc}; @@ -18,7 +19,7 @@ struct Plugin { } impl PluginInfo for Plugin { - async fn manifest(&self) -> Result { + async fn manifest(&mut self, _data: PersistData) -> Result { Ok(Manifest { hello: Hello::hello(), streams: BTreeSet::from(["virtual".into()]), diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index fcf8950..036f43b 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -35,7 +35,8 @@ use std::{ }; use remoc::{ - Connect, rch, + Connect, + rch::{self, mpsc}, rtc::{self, Server}, }; use serde::{Deserialize, Serialize}; @@ -46,7 +47,7 @@ use tokio::io::{stdin, stdout}; #[rtc::remote] pub trait PluginInfo { /// Return the manifest of the plugin. - async fn manifest(&self) -> Result; + async fn manifest(&mut self, data: PersistData) -> Result; /// Return one stream of a given type if it exists async fn stream_impl( @@ -158,6 +159,16 @@ pub enum Value { Object(BTreeMap), } +/// Data persisted by reaction for the plugin. +/// This is persisted as a single JSON file by reaction, so it is not suitable for big sizes of data. 
+#[derive(Serialize, Deserialize)] +pub struct PersistData { + /// Data persisted by the plugin in a previous run + pub persisted_data: Value, + /// Sender of data to be persisted by the plugin for a previous run + pub persist_data: mpsc::Sender, +} + #[derive(Serialize, Deserialize)] pub struct StreamImpl { pub stream: rch::mpsc::Receiver, diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 4884046..fa9b7fb 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -112,6 +112,7 @@ impl Plugin { } Command::new(&self.path) }; + command.arg("serve"); debug!( "plugin {}: running command: {:?}", self.name, From 61fe405b857f2ef734ccc93c92143a4823d16f6b Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 20 Oct 2025 12:00:00 +0200 Subject: [PATCH 126/241] Add cluster plugin skeleton --- Cargo.lock | 9 +++ Cargo.toml | 2 +- TODO | 12 ++++ plugins/reaction-plugin-cluster/Cargo.toml | 9 +++ plugins/reaction-plugin-cluster/src/main.rs | 58 +++++++++++++++++ tests/test-conf/test-cluster.jsonnet | 72 +++++++++++++++++++++ 6 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 plugins/reaction-plugin-cluster/Cargo.toml create mode 100644 plugins/reaction-plugin-cluster/src/main.rs create mode 100644 tests/test-conf/test-cluster.jsonnet diff --git a/Cargo.lock b/Cargo.lock index 9ea2963..8a63b3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1071,6 +1071,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "reaction-plugin-cluster" +version = "0.1.0" +dependencies = [ + "reaction-plugin", + "remoc", + "tokio", +] + [[package]] name = "reaction-plugin-virtual" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index b504b22..fcec4c0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,7 @@ assert_cmd = "2.0.17" predicates = "3.1.3" [workspace] -members = ["plugins/reaction-plugin", "plugins/reaction-plugin-virtual"] +members = ["plugins/reaction-plugin", "plugins/reaction-plugin-cluster", "plugins/reaction-plugin-virtual"] 
[workspace.dependencies] remoc = { version = "0.18.3" } diff --git a/TODO b/TODO index e01932a..b81282c 100644 --- a/TODO +++ b/TODO @@ -2,3 +2,15 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) plugins: pipe stderr too and wrap errors in logs plugins: provide tree storage? omg + +questionnements: +- quelle cli pour les plugins ? + - Directement en appelant le plugin ? reaction-plugin-cluster gen-id ? 🟢 + → Demande de savoir où stocker tout ça + - Via moult IPC ? reaction plugin cluster gen-id ? 🔴 + → Mais du coup c'est l'oeuf ou la poule entre avoir un serveur qui fonctionne et avoir un +- Stockage ? + - uniquement dans la db reaction + → Faut pas que ce soit trop gros, un peu d'overhead, risque de perdre la donnée + - à part dans le configuration directory + → Pas mal en vrai diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml new file mode 100644 index 0000000..4eb351e --- /dev/null +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "reaction-plugin-cluster" +version = "0.1.0" +edition = "2024" + +[dependencies] +tokio = { workspace = true, features = ["rt-multi-thread"] } +remoc.workspace = true +reaction-plugin.path = "../reaction-plugin" diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs new file mode 100644 index 0000000..cae9109 --- /dev/null +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -0,0 +1,58 @@ +use std::collections::BTreeSet; + +use reaction_plugin::{ + ActionImpl, Hello, Manifest, PersistData, PluginInfo, RemoteResult, StreamImpl, Value, + main_loop, +}; +use remoc::rtc; + +#[tokio::main] +async fn main() { + let plugin = Plugin::default(); + main_loop(plugin).await; +} + +#[derive(Default)] +struct Plugin { + data: Option, +} + +impl PluginInfo for Plugin { + async fn manifest(&mut self, data: PersistData) -> 
Result { + self.data = Some(data); + Ok(Manifest { + hello: Hello::hello(), + streams: BTreeSet::from(["cluster".into()]), + actions: BTreeSet::from(["cluster_send".into()]), + }) + } + + async fn stream_impl( + &mut self, + stream_name: String, + stream_type: String, + config: Value, + ) -> RemoteResult { + todo!() + } + + async fn action_impl( + &mut self, + stream_name: String, + filter_name: String, + action_name: String, + action_type: String, + config: Value, + patterns: Vec, + ) -> RemoteResult { + todo!() + } + + async fn finish_setup(&mut self) -> RemoteResult<()> { + todo!() + } + + async fn close(self) -> RemoteResult<()> { + todo!() + } +} diff --git a/tests/test-conf/test-cluster.jsonnet b/tests/test-conf/test-cluster.jsonnet new file mode 100644 index 0000000..9ec8f32 --- /dev/null +++ b/tests/test-conf/test-cluster.jsonnet @@ -0,0 +1,72 @@ +{ + patterns: { + num: { + regex: @"[0-9]+", + }, + all: { + regex: @".*", + }, + }, + + plugins: { + cluster: { + path: "./target/debug/reaction-plugin-cluster", + check_root: false, + systemd_options: { + DynamicUser: ["false"], + }, + options: { + clusters: { + org1: { + listen_port: 9000, + bootstrap_nodes: { + "public_key": ["127.0.0.1:9001"], + }, + }, + }, + }, + }, + }, + + streams: { + s0: { + cmd: ["bash", "-c", "for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2"], + filters: { + f0: { + regex: ["^$"], + actions: { + a0: { + type: "virtual", + options: { + send: "a0 ", + to: "s1", + } + }, + b0: { + type: "virtual", + options: { + send: "b0 ", + to: "s1", + }, + after: "600ms", + }, + }, + }, + }, + }, + s1: { + type: "cluster", + options: {}, + filters: { + f1: { + regex: ["^$"], + actions: { + a1: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} From c91891045336500c9d97774050d703948be31f6b Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 22 Oct 2025 12:00:00 +0200 Subject: [PATCH 127/241] plugin: add simple way to store small data for plugins --- 
plugins/reaction-plugin/src/lib.rs | 19 +++++-- src/daemon/mod.rs | 2 +- src/daemon/plugin/mod.rs | 83 ++++++++++++++++++++++++------ 3 files changed, 84 insertions(+), 20 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 036f43b..0bf602e 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -3,8 +3,6 @@ //! It's based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait //! calls over a single transport channel. //! -//! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql` -//! //! To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides //! the entrypoint for a plugin. //! It permits to define 0 to n (stream, filter, action) custom types. @@ -27,6 +25,21 @@ //! // ... //! } //! ``` +//! +//! ## Naming & calling conventions +//! +//! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`. +//! It will be invoked with one positional argument "serve". +//! ``` +//! reaction-plugin-$NAME serve +//! ``` +//! This can be useful if you want to provide CLI functionnality to your users. +//! +//! ## Examples +//! +//! Core plugins can be found here: +//! The "virtual" plugin is the simplest and can serve as a template. +//! You'll have to adjust dependencies versions in `Cargo.toml`. use std::{ collections::{BTreeMap, BTreeSet}, @@ -148,7 +161,7 @@ impl Hello { /// Represents a configuration value. /// This is not meant as an efficient type, but as a very flexible one. 
-#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub enum Value { Null, Bool(bool), diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 70f21cb..a4ca765 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -44,7 +44,7 @@ pub async fn daemon( // Cancellation Token let shutdown = ShutdownController::new(); - let mut plugins = Plugins::new(&config.plugins, shutdown.token()).await?; + let mut plugins = Plugins::new(config, shutdown.token()).await?; // Open Database let mut db = Database::open(config, shutdown.token()).await?; diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 2ebc867..6c2762d 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -5,14 +5,14 @@ use std::{ }; use futures::{future::join_all, FutureExt}; -use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; -use remoc::Connect; +use reaction_plugin::{ActionImpl, Hello, PersistData, PluginInfo, PluginInfoClient, StreamImpl}; +use remoc::{rch::mpsc, Connect}; use serde_json::Value; -use tokio::{process::Child, time::sleep}; +use tokio::{fs, process::Child, time::sleep}; use tracing::error; use crate::{ - concepts::Plugin, + concepts::{Config, Plugin}, daemon::{utils::kill_child, ShutdownToken}, }; @@ -42,7 +42,11 @@ impl DerefMut for PluginManager { } impl PluginManager { - async fn new(plugin: &'static Plugin, shutdown: ShutdownToken) -> Result { + async fn new( + plugin: &'static Plugin, + state_directory: &str, + shutdown: ShutdownToken, + ) -> Result { let mut child = plugin .launch() .await @@ -66,16 +70,20 @@ impl PluginManager { tokio::spawn(conn); - let plugin_info = rx + let mut plugin_info = rx .recv() .await .map_err(|err| format!("could not retrieve initial information from plugin: {err}"))? 
.ok_or("could not retrieve initial information from plugin: no data")?; - let manifest = plugin_info - .manifest() + let persist_data = data_persistence(&plugin.name, state_directory, shutdown.clone()) .await - .map_err(|err| format!("plugin error while retrieving its manifest: {err}"))?; + .map_err(|err| format!("error while reading plugin {} data: {err}", plugin.name))?; + + let manifest = plugin_info + .manifest(persist_data) + .await + .map_err(|err| format!("error while getting plugin {} manifest: {err}", plugin.name))?; let my_hello = Hello::hello(); @@ -132,15 +140,12 @@ pub struct Plugins { } impl Plugins { - pub async fn new( - plugins: &'static BTreeMap, - shutdown: ShutdownToken, - ) -> Result { + pub async fn new(config: &'static Config, shutdown: ShutdownToken) -> Result { let mut this = Self::default(); - for plugin in plugins.values() { + for plugin in config.plugins.values() { let name = plugin.name.clone(); - this.load_plugin(&plugin, shutdown.clone()) + this.load_plugin(&plugin, &config.state_directory, shutdown.clone()) .await .map_err(|err| format!("plugin {name}: {err}]"))?; } @@ -151,10 +156,11 @@ impl Plugins { async fn load_plugin( &mut self, plugin: &'static Plugin, + state_directory: &str, shutdown: ShutdownToken, ) -> Result<(), String> { let name = plugin.name.clone(); - let manager = PluginManager::new(plugin, shutdown).await?; + let manager = PluginManager::new(plugin, state_directory, shutdown).await?; for stream in &manager.streams { if let Some(name) = self.streams.insert(stream.clone().into(), name.clone()) { @@ -253,3 +259,48 @@ impl Plugins { } } } + +async fn data_persistence( + plugin_name: &str, + state_directory: &str, + shutdown: ShutdownToken, +) -> Result { + let dir_path = format!("{state_directory}/plugin_data/"); + fs::create_dir_all(&dir_path).await?; + + let file_path = format!("{dir_path}/{plugin_name}.json"); + + let data = if fs::try_exists(&file_path).await? 
{ + let txt = fs::read_to_string(&file_path).await?; + serde_json::from_str::(&txt)? + } else { + Value::Null + }; + + let (tx, mut rx) = mpsc::channel(1); + + tokio::spawn(async move { + loop { + let value = tokio::select! { + _ = shutdown.wait() => break, + value = rx.recv() => value, + }; + if let Ok(Some(value)) = value { + // unwrap: serializing a [`serde_json::Value`] does not fail + let json = serde_json::to_string_pretty(&value).unwrap(); + + if let Err(err) = fs::write(&file_path, json).await { + error!("could not store plugin data at {file_path}: {err}"); + break; + } + } else { + break; + } + } + }); + + Ok(PersistData { + persisted_data: to_stable_value(data), + persist_data: tx, + }) +} From e3060d04046e3bf0bf0fce0b4d3d8ec73a79bbe6 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 22 Oct 2025 12:00:00 +0200 Subject: [PATCH 128/241] cluster: retrieve, generate and store iroh SecretKey --- Cargo.lock | 3217 ++++++++++++++++++- plugins/reaction-plugin-cluster/Cargo.toml | 4 + plugins/reaction-plugin-cluster/src/main.rs | 52 +- 3 files changed, 3219 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a63b3d..edc74cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,17 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "aead" +version = "0.6.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac8202ab55fcbf46ca829833f347a82a2a4ce0596f0304ac322c2d100030cd56" +dependencies = [ + "bytes", + "crypto-common", + "inout", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -26,6 +37,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "android-tzdata" version = 
"0.1.1" @@ -101,6 +118,27 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +dependencies = [ + "backtrace", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "assert_cmd" version = "2.0.17" @@ -132,12 +170,85 @@ dependencies = [ "tempfile", ] +[[package]] +name = "async-compat" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ba85bc55464dcbf728b56d97e119d673f4cf9062be330a9a26f3acf504a590" +dependencies = [ + "futures-core", + "futures-io", + "once_cell", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version", +] + +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "attohttpc" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" +dependencies = [ + "base64 0.22.1", + "http", + "log", + "url", +] + [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "backon" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +dependencies = [ + "fastrand", + "gloo-timers", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.75" @@ -150,15 +261,39 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets", + "windows-targets 0.52.6", ] +[[package]] +name = "base16ct" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8b59d472eab27ade8d770dcb11da7201c11234bef9f82ce7aa517be028d462b" + +[[package]] +name = "base32" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "022dfe9eb35f19ebbcb51e0b40a5ab759f46ad60cadf7297e0bd085afb50e076" + [[package]] name = "base64" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + [[package]] name = "bincode" version = "1.3.3" @@ -174,6 +309,29 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +[[package]] +name = "blake3" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.11.0-rc.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ef36a6fcdb072aa548f3da057640ec10859eb4e91ddf526ee648d50c76a949" +dependencies = [ + "hybrid-array", + "zeroize", +] + [[package]] name = "bstr" version = "1.12.0" @@ -181,10 +339,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", - "regex-automata", + "regex-automata 0.4.9", "serde", ] +[[package]] +name = "btparse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387e80962b798815a2b5c4bcfdb6bf626fa922ffe9f74e373103b858738e9f31" + [[package]] name = "bumpalo" version = "3.17.0" @@ -212,6 +376,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cfg-if" version = "1.0.0" @@ -224,6 +394,18 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20" +version = "0.10.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9bd162f2b8af3e0639d83f28a637e4e55657b7a74508dba5a9bf4da523d5c9e9" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", + "zeroize", +] + [[package]] name = "chrono" version = "0.4.41" @@ -236,7 +418,19 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link", + "windows-link 0.1.1", +] + +[[package]] +name = "cipher" +version = "0.5.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e12a13eb01ded5d32ee9658d94f553a19e804204f2dc811df69ab4d9e0cb8c7" +dependencies = [ + "block-buffer", + "crypto-common", + "inout", + "zeroize", ] [[package]] @@ -298,18 +492,114 @@ dependencies = [ "roff", ] +[[package]] +name = "cobs" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" +dependencies = [ + "thiserror 2.0.17", +] + +[[package]] +name = "color-backtrace" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e49b1973af2a47b5b44f7dd0a344598da95c872e1556b045607888784e973b91" +dependencies = [ + "backtrace", + "btparse", + "termcolor", +] + [[package]] name = "colorchoice" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "const-oid" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dabb6555f92fb9ee4140454eb5dcd14c7960e1225c6d1a6cc361f032947713e" + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "cordyceps" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" +dependencies = [ + "loom", + "tracing", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -335,18 +625,262 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = 
"crypto-common" +version = "0.2.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8235645834fbc6832939736ce2f2d08192652269e11010a6240f61b908a1c6" +dependencies = [ + "hybrid-array", + "rand_core 0.9.3", +] + +[[package]] +name = "crypto_box" +version = "0.10.0-pre.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bda4de3e070830cf3a27a394de135b6709aefcc54d1e16f2f029271254a6ed9" +dependencies = [ + "aead", + "chacha20", + "crypto_secretbox", + "curve25519-dalek", + "salsa20", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto_secretbox" +version = "0.2.0-pre.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54532aae6546084a52cef855593daf9555945719eeeda9974150e0def854873e" +dependencies = [ + "aead", + "chacha20", + "cipher", + "hybrid-array", + "poly1305", + "salsa20", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "5.0.0-pre.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f9200d1d13637f15a6acb71e758f64624048d85b31a5fdbfd8eca1e2687d0b7" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rand_core 0.9.3", + "rustc_version", + "serde", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "der" +version = "0.8.0-rc.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e9d8dd2f26c86b27a2a8ea2767ec7f9df7a89516e4794e54ac01ee618dda3aa4" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "unicode-xid", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "unicode-xid", +] + +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "difflib" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" +[[package]] +name = "digest" +version = "0.11.0-rc.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"dac89f8a64533a9b0eaa73a68e424db0fb1fd6271c74cc0125336a05f090568d" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "dlopen2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" +dependencies = [ + "libc", + "once_cell", + "winapi", +] + [[package]] name = "doc-comment" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "document-features" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" +dependencies = [ + "litrs", +] + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "ed25519" +version = "3.0.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef49c0b20c0ad088893ad2a790a29c06a012b3f05bcfc66661fd22a94b32129" +dependencies = [ + "pkcs8", + "serde", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "3.0.0-pre.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad207ed88a133091f83224265eac21109930db09bedcad05d5252f2af2de20a1" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.9.3", + "serde", + "sha2", + "signature", + "subtle", + "zeroize", +] + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -369,6 +903,12 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "fiat-crypto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" + [[package]] name = "float-cmp" version = "0.10.0" @@ -378,6 +918,27 @@ dependencies = [ "num-traits", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + [[package]] name = "futures" version = "0.3.31" @@ -393,6 +954,19 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-buffered" +version = "0.2.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin 0.10.0", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -426,6 +1000,19 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -467,6 +1054,20 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.61.1", +] + [[package]] name = "getrandom" version = "0.2.16" @@ -474,8 +1075,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -485,9 +1088,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi", "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] @@ -505,8 +1110,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -520,12 +1125,77 @@ 
dependencies = [ "walkdir", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "heapless" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" +dependencies = [ + "atomic-polyfill", + "hash32", + "rustc_version", + "serde", + "spin 0.9.8", + "stable_deref_trait", +] + [[package]] name = "heck" version = "0.5.0" @@ -538,6 +1208,179 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hickory-proto" +version = "0.25.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" +dependencies = [ + "async-trait", + "bytes", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "h2", + "http", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "ring", + "rustls", + "thiserror 2.0.17", + "tinyvec", + "tokio", + "tokio-rustls", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.2", + "resolv-conf", + "rustls", + "smallvec", + "thiserror 2.0.17", + "tokio", + "tokio-rustls", + "tracing", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hybrid-array" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f471e0a81b2f90ffc0cb2f951ae04da57de8baa46fa99112b062a5173a5088d0" +dependencies = [ + "typenum", + "zeroize", +] + +[[package]] +name = "hyper" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-util" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", +] + [[package]] name = "iana-time-zone" version = "0.1.63" @@ -550,7 +1393,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core", + "windows-core 0.61.2", ] [[package]] @@ -562,6 +1405,134 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", 
+ "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "igd-next" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516893339c97f6011282d5825ac94fc1c7aad5cad26bdc2d0cee068c0bf97f97" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http", + "http-body-util", + "hyper", + "hyper-util", + "log", + "rand 0.9.2", + "tokio", + "url", + "xmltree", +] + [[package]] name = "ignore" version = "0.4.23" @@ -572,7 +1543,7 @@ dependencies = [ "globset", "log", "memchr", - "regex-automata", + "regex-automata 0.4.9", "same-file", "walkdir", "winapi-util", @@ -585,7 +1556,263 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.3", +] + +[[package]] +name = "inout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7357b6e7aa75618c7864ebd0634b115a7218b0615f4cb1df33ac3eca23943d4" +dependencies = [ + "hybrid-array", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.10", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "iroh" +version = "0.94.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9428cef1eafd2eac584269986d1949e693877ac12065b401dfde69f664b07ac" +dependencies = [ + "aead", + "backon", + "bytes", + "cfg_aliases", + "crypto_box", + "data-encoding", + "derive_more 2.0.1", + "ed25519-dalek", + "futures-util", + "getrandom 0.3.3", + "hickory-resolver", + "http", + "igd-next", + "instant", + "iroh-base", + "iroh-metrics", + "iroh-quinn", + "iroh-quinn-proto", + "iroh-quinn-udp", + "iroh-relay", + "n0-future", + "n0-snafu", + "n0-watcher", + "nested_enum_utils", + "netdev", + "netwatch", + "pin-project", + "pkarr", + "pkcs8", + "portmapper", + "rand 0.9.2", + "reqwest", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "rustls-webpki", + "serde", + "smallvec", + "snafu", + "strum", + "time", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "url", + "wasm-bindgen-futures", + "webpki-roots", + "z32", +] + +[[package]] +name = "iroh-base" +version = "0.94.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7db6dfffe81a58daae02b72c7784c20feef5b5d3849b190ed1c96a8fa0b3cae8" +dependencies = [ + "curve25519-dalek", + "data-encoding", + "derive_more 2.0.1", + "ed25519-dalek", + "n0-snafu", + "nested_enum_utils", + 
"rand_core 0.9.3", + "serde", + "snafu", + "url", + "zeroize", + "zeroize_derive", +] + +[[package]] +name = "iroh-metrics" +version = "0.36.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c84c167b59ae22f940e78eb347ca5f02aa25608e994cb5a7cc016ac2d5eada18" +dependencies = [ + "iroh-metrics-derive", + "itoa", + "postcard", + "ryu", + "serde", + "snafu", + "tracing", +] + +[[package]] +name = "iroh-metrics-derive" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "748d380f26f7c25307c0a7acd181b84b977ddc2a1b7beece1e5998623c323aa1" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "iroh-quinn" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde160ebee7aabede6ae887460cd303c8b809054224815addf1469d54a6fcf7" +dependencies = [ + "bytes", + "cfg_aliases", + "iroh-quinn-proto", + "iroh-quinn-udp", + "pin-project-lite", + "rustc-hash 2.1.1", + "rustls", + "socket2 0.5.10", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "iroh-quinn-proto" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "929d5d8fa77d5c304d3ee7cae9aede31f13908bd049f9de8c7c0094ad6f7c535" +dependencies = [ + "bytes", + "getrandom 0.2.16", + "rand 0.8.5", + "ring", + "rustc-hash 2.1.1", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "iroh-quinn-udp" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c53afaa1049f7c83ea1331f5ebb9e6ebc5fdd69c468b7a22dd598b02c9bcc973" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "iroh-relay" +version = "0.94.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"360e201ab1803201de9a125dd838f7a4d13e6ba3a79aeb46c7fbf023266c062e" +dependencies = [ + "blake3", + "bytes", + "cfg_aliases", + "data-encoding", + "derive_more 2.0.1", + "getrandom 0.3.3", + "hickory-resolver", + "http", + "http-body-util", + "hyper", + "hyper-util", + "iroh-base", + "iroh-metrics", + "iroh-quinn", + "iroh-quinn-proto", + "lru 0.16.2", + "n0-future", + "n0-snafu", + "nested_enum_utils", + "num_enum", + "pin-project", + "pkarr", + "postcard", + "rand 0.9.2", + "reqwest", + "rustls", + "rustls-pki-types", + "serde", + "serde_bytes", + "sha1", + "snafu", + "strum", + "tokio", + "tokio-rustls", + "tokio-util", + "tokio-websockets", + "tracing", + "url", + "webpki-roots", + "ws_stream_wasm", + "z32", ] [[package]] @@ -600,6 +1827,28 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jrsonnet-evaluator" version = "0.4.2" @@ -607,7 +1856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee60406dac44a01b37e120b43adb062047251e195db15392b825f6bdc948712" dependencies = [ "annotate-snippets", - "base64", + "base64 0.13.1", "bincode", "jrsonnet-gc", "jrsonnet-interner", @@ -616,10 +1865,10 @@ dependencies = [ "jrsonnet-types", "md5", "pathdiff", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -640,7 +1889,7 @@ 
dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -650,7 +1899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ff75843e778244f3476800e6f492950a6ecee1d9308019764983d311620bf9" dependencies = [ "jrsonnet-gc", - "rustc-hash", + "rustc-hash 1.1.0", "serde", ] @@ -685,9 +1934,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -711,6 +1960,18 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "litrs" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5e54036fe321fd421e10d732f155734c4e4afd610dd556d9a82833ab3ee0bed" + [[package]] name = "lock_api" version = "0.4.12" @@ -727,6 +1988,49 @@ version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" + +[[package]] +name = "lru" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96051b46fc183dc9cd4a223960ef37b9af631b55191852a8274bfef064cda20f" +dependencies = [ + "hashbrown 0.16.0", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "md5" version = "0.7.0" @@ -759,6 +2063,181 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "moka" +version = "0.12.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "uuid", +] + +[[package]] +name = "n0-future" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439e746b307c1fd0c08771c3cafcd1746c3ccdb0d9c7b859d3caded366b6da76" +dependencies = [ + "cfg_aliases", + "derive_more 1.0.0", + "futures-buffered", + "futures-lite", + "futures-util", + "js-sys", + "pin-project", + "send_wrapper", + "tokio", + "tokio-util", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-time", +] + +[[package]] +name = "n0-snafu" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1815107e577a95bfccedb4cfabc73d709c0db6d12de3f14e0f284a8c5036dc4f" +dependencies = [ + "anyhow", + "btparse", + "color-backtrace", + "snafu", + "tracing-error", +] + 
+[[package]] +name = "n0-watcher" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34c65e127e06e5a2781b28df6a33ea474a7bddc0ac0cfea888bd20c79a1b6516" +dependencies = [ + "derive_more 2.0.1", + "n0-future", + "snafu", +] + +[[package]] +name = "nested_enum_utils" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d5475271bdd36a4a2769eac1ef88df0f99428ea43e52dfd8b0ee5cb674695f" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "netdev" +version = "0.38.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ab878b4c90faf36dab10ea51d48c69ae9019bcca47c048a7c9b273d5d7a823" +dependencies = [ + "dlopen2", + "ipnet", + "libc", + "netlink-packet-core", + "netlink-packet-route", + "netlink-sys", + "once_cell", + "system-configuration", + "windows-sys 0.59.0", +] + +[[package]] +name = "netlink-packet-core" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3463cbb78394cb0141e2c926b93fc2197e473394b761986eca3b9da2c63ae0f4" +dependencies = [ + "paste", +] + +[[package]] +name = "netlink-packet-route" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ec2f5b6839be2a19d7fa5aab5bc444380f6311c2b693551cb80f45caaa7b5ef" +dependencies = [ + "bitflags", + "libc", + "log", + "netlink-packet-core", +] + +[[package]] +name = "netlink-proto" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b65d130ee111430e47eed7896ea43ca693c387f097dd97376bffafbf25812128" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror 2.0.17", +] + +[[package]] +name = "netlink-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" +dependencies = [ + "bytes", + "futures", + "libc", + "log", + "tokio", +] + +[[package]] +name = "netwatch" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98d7ec7abdbfe67ee70af3f2002326491178419caea22254b9070e6ff0c83491" +dependencies = [ + "atomic-waker", + "bytes", + "cfg_aliases", + "derive_more 2.0.1", + "iroh-quinn-udp", + "js-sys", + "libc", + "n0-future", + "n0-watcher", + "nested_enum_utils", + "netdev", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", + "pin-project-lite", + "serde", + "snafu", + "socket2 0.6.1", + "time", + "tokio", + "tokio-util", + "tracing", + "web-sys", + "windows 0.62.2", + "windows-result 0.4.1", + "wmi", +] + [[package]] name = "nix" version = "0.29.0" @@ -777,6 +2256,21 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" +[[package]] +name = "ntimestamp" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c50f94c405726d3e0095e89e72f75ce7f6587b94a8bd8dc8054b73f65c0fd68c" +dependencies = [ + "base32", + "document-features", + "getrandom 0.2.16", + "httpdate", + "js-sys", + "once_cell", + "serde", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -787,6 +2281,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-traits" version = "0.2.19" @@ -806,6 +2306,28 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + 
"num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "object" version = "0.36.7" @@ -820,6 +2342,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "once_cell_polyfill" @@ -827,12 +2353,24 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.12.3" @@ -853,9 +2391,15 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.52.6", ] +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "pathdiff" version = "0.2.3" @@ -889,6 +2433,51 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "c719dcf55f09a3a7e764c6649ab594c18a177e3599c467983cdf644bfc0a4088" +[[package]] +name = "pem-rfc7468" +version = "1.0.0-rc.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e58fab693c712c0d4e88f8eb3087b6521d060bcaf76aeb20cb192d809115ba" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -901,6 +2490,94 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkarr" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "792c1328860f6874e90e3b387b4929819cc7783a6bd5a4728e918706eb436a48" +dependencies = [ + "async-compat", + "base32", + "bytes", + "cfg_aliases", + "document-features", + "dyn-clone", + "ed25519-dalek", + "futures-buffered", + "futures-lite", + "getrandom 0.3.3", + "log", + "lru 0.13.0", + "ntimestamp", + "reqwest", + "self_cell", + 
"serde", + "sha1_smol", + "simple-dns", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "pkcs8" +version = "0.11.0-rc.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93eac55f10aceed84769df670ea4a32d2ffad7399400d41ee1c13b1cd8e1b478" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "poly1305" +version = "0.9.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb78a635f75d76d856374961deecf61031c0b6f928c83dc9c0924ab6c019c298" +dependencies = [ + "cpufeatures", + "universal-hash", +] + +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portmapper" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d73aa9bd141e0ff6060fea89a5437883f3b9ceea1cda71c790b90e17d072a3b3" +dependencies = [ + "base64 0.22.1", + "bytes", + "derive_more 2.0.1", + "futures-lite", + "futures-util", + "hyper-util", + "igd-next", + "iroh-metrics", + "libc", + "nested_enum_utils", + "netwatch", + "num_enum", + "rand 0.9.2", + "serde", + "smallvec", + "snafu", + "socket2 0.6.1", + "time", + "tokio", + "tokio-util", + "tower-layer", + "tracing", + "url", +] + [[package]] name = "postbag" version = "0.4.1" @@ -910,6 +2587,46 @@ dependencies = [ "serde", ] +[[package]] +name = "postcard" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "heapless", + "postcard-derive", + "serde", +] + +[[package]] +name = "postcard-derive" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e0232bd009a197ceec9cc881ba46f727fcd8060a2d8d6a9dde7a69030a6fe2bb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -949,6 +2666,15 @@ dependencies = [ "termtree", ] +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit", +] + [[package]] name = "proc-macro2" version = "1.0.95" @@ -958,6 +2684,61 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.1.1", + "rustls", + "socket2 0.5.10", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash 2.1.1", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "quote" version = "1.0.40" @@ -1055,7 +2836,7 @@ dependencies = [ "serde_json", "serde_yaml", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-util", "tracing", @@ -1075,6 +2856,10 @@ dependencies = [ name = "reaction-plugin-cluster" version = "0.1.0" dependencies = [ + "base64 0.22.1", + "iroh", + "rand 0.9.2", + "rand_core 0.9.3", "reaction-plugin", "remoc", "tokio", @@ -1106,8 +2891,17 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", ] [[package]] @@ -1118,9 +2912,15 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.8.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -1157,6 +2957,67 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "reqwest" +version = "0.12.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "js-sys", + "log", + 
"percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-util", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "resolv-conf" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + [[package]] name = "roff" version = "0.2.2" @@ -1175,6 +3036,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "1.0.7" @@ -1188,6 +3064,81 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "rustls" +version = "0.23.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" 
+version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs 0.26.11", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.21" @@ -1200,6 +3151,16 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "salsa20" +version = "0.11.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3ff3b81c8a6e381bc1673768141383f9328048a60edddcfc752a8291a138443" +dependencies = [ + 
"cfg-if", + "cipher", +] + [[package]] name = "same-file" version = "1.0.6" @@ -1209,12 +3170,68 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "security-framework" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +dependencies = [ + "bitflags", + "core-foundation 0.10.1", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "self_cell" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c2f82143577edb4921b71ede051dac62ca3c16084e918bf7b40c96ae10eb33" + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "serde" 
version = "1.0.227" @@ -1225,6 +3242,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_bytes" +version = "0.11.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" +dependencies = [ + "serde", + "serde_core", +] + [[package]] name = "serde_core" version = "1.0.227" @@ -1257,6 +3284,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "serde_yaml" version = "0.9.34+deprecated" @@ -1270,6 +3309,44 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "serdect" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3ef0e35b322ddfaecbc60f34ab448e157e48531288ee49fafbb053696b8ffe2" +dependencies = [ + "base16ct", + "serde", +] + +[[package]] +name = "sha1" +version = "0.11.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e046edf639aa2e7afb285589e5405de2ef7e61d4b0ac1e30256e3eab911af9" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + +[[package]] +name = "sha2" +version = "0.11.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1e3878ab0f98e35b2df35fe53201d088299b41a6bb63e3e34dada2ac4abd924" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -1294,6 +3371,27 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "3.0.0-rc.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc280a6ff65c79fbd6622f64d7127f32b85563bca8c53cd2e9141d6744a9056d" + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "simple-dns" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dee851d0e5e7af3721faea1843e8015e820a234f81fda3dea9247e15bac9a86a" +dependencies = [ + "bitflags", +] + [[package]] name = "slab" version = "0.4.9" @@ -1309,6 +3407,28 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +[[package]] +name = "snafu" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e84b3f4eacbf3a1ce05eac6763b4d629d60cbc94d632e4092c54ade71f1e1a2" +dependencies = [ + "backtrace", + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "socket2" version = "0.5.10" @@ -1319,12 +3439,80 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spin" +version = "0.10.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + +[[package]] +name = "spki" +version = "0.8.0-rc.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8baeff88f34ed0691978ec34440140e1572b68c7dd4a495fd14a3dc1944daa80" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "1.0.109" @@ -1347,6 +3535,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + [[package]] name = "synstructure" version = "0.12.6" @@ -1359,6 +3556,44 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "synstructure" +version = "0.13.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tempfile" version = "3.20.0" @@ -1372,6 +3607,15 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + [[package]] name = "termtree" version = "0.5.1" @@ -1384,7 +3628,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", ] [[package]] @@ -1398,6 +3651,17 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "thiserror-impl" +version = 
"2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "thread_local" version = "1.1.8" @@ -1408,6 +3672,51 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "js-sys", + "num-conv", + "powerfmt", + "serde", + "time-core", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.45.1" @@ -1421,7 +3730,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.10", "tokio-macros", "tracing", "windows-sys 0.52.0", @@ -1438,6 +3747,28 @@ dependencies = [ "syn 2.0.101", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" 
+dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + [[package]] name = "tokio-util" version = "0.7.15" @@ -1447,16 +3778,116 @@ dependencies = [ "bytes", "futures-core", "futures-sink", + "futures-util", + "hashbrown 0.15.3", "pin-project-lite", "tokio", ] +[[package]] +name = "tokio-websockets" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1b6348ebfaaecd771cecb69e832961d277f59845d4220a584701f72728152b7" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-sink", + "getrandom 0.3.3", + "http", + "httparse", + "rand 0.9.2", + "ring", + "rustls-pki-types", + "simdutf8", + "tokio", + "tokio-rustls", + "tokio-util", +] + +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + 
"pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + [[package]] name = "tracing" version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -1483,6 +3914,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-error" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +dependencies = [ + "tracing", + "tracing-subscriber", +] + [[package]] name = "tracing-log" version = "0.2.0" @@ -1500,14 +3941,30 @@ version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", ] +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + [[package]] name = "unescape" version = "0.1.0" @@ -1532,12 +3989,46 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "universal-hash" +version = "0.6.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55be643b40a21558f44806b53ee9319595bc7ca6896372e4e08e5d7d83c9cd6" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsafe-libyaml" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -1581,6 +4072,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + [[package]] name = "wasi" 
version = "0.11.0+wasi-snapshot-preview1" @@ -1598,21 +4098,22 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" dependencies = [ "bumpalo", "log", @@ -1623,10 +4124,23 @@ dependencies = [ ] [[package]] -name = "wasm-bindgen-macro" -version = "0.2.100" +name = "wasm-bindgen-futures" +version = "0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1634,9 +4148,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", @@ -1647,13 +4161,79 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "web-sys" +version = "0.3.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" +dependencies = [ + "webpki-root-certs 1.0.3", +] + +[[package]] +name = "webpki-root-certs" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "webpki-roots" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "widestring" +version = "1.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" + [[package]] name = "winapi" version = "0.3.9" @@ -1685,6 +4265,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.61.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +dependencies = [ + "windows-collections 0.2.0", + "windows-core 0.61.2", + "windows-future 0.2.1", + "windows-link 0.1.1", + "windows-numerics 0.2.0", +] + +[[package]] +name = "windows" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" +dependencies = [ + "windows-collections 0.3.2", + "windows-core 0.62.2", + "windows-future 0.3.2", + "windows-numerics 0.3.1", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core 0.61.2", +] + +[[package]] +name = "windows-collections" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" +dependencies = [ + "windows-core 0.62.2", +] + [[package]] name = "windows-core" version = "0.61.2" @@ -1693,16 +4316,51 @@ checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", - "windows-link", - "windows-result", - "windows-strings", + "windows-link 0.1.1", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core 0.61.2", + "windows-link 0.1.1", + "windows-threading 0.1.0", +] + +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", + "windows-threading 0.2.1", ] [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", @@ -1711,9 +4369,9 @@ dependencies = [ [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", @@ -1726,13 +4384,48 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + 
+[[package]] +name = "windows-numerics" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" +dependencies = [ + "windows-core 0.61.2", + "windows-link 0.1.1", +] + +[[package]] +name = "windows-numerics" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", +] + [[package]] name = "windows-result" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ - "windows-link", + "windows-link 0.1.1", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link 0.2.1", ] [[package]] @@ -1741,7 +4434,34 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ - "windows-link", + "windows-link 0.1.1", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link 0.2.1", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", ] [[package]] @@ -1750,7 +4470,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1759,7 +4479,55 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link 0.2.1", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + 
"windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -1768,64 +4536,250 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link 0.2.1", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link 0.1.1", +] + +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link 0.2.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = 
"windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "wit-bindgen-rt" version = "0.39.0" @@ -1835,6 +4789,61 @@ dependencies = [ "bitflags", ] +[[package]] +name = "wmi" +version = "0.17.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120d8c2b6a7c96c27bf4a7947fd7f02d73ca7f5958b8bd72a696e46cb5521ee6" +dependencies = [ + "chrono", + "futures", + "log", + "serde", + "thiserror 2.0.17", + "windows 0.62.2", + "windows-core 0.62.2", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "ws_stream_wasm" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version", + "send_wrapper", + "thiserror 2.0.17", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "xml-rs" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" 
+dependencies = [ + "xml-rs", +] + [[package]] name = "yansi-term" version = "0.1.2" @@ -1844,6 +4853,36 @@ dependencies = [ "winapi", ] +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure 0.13.2", +] + +[[package]] +name = "z32" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2164e798d9e3d84ee2c91139ace54638059a3b23e361f5c11781c2c6459bde0f" + [[package]] name = "zerocopy" version = "0.8.25" @@ -1863,3 +4902,77 @@ dependencies = [ "quote", "syn 2.0.101", ] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure 0.13.2", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 4eb351e..3d45475 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -7,3 +7,7 @@ edition = "2024" tokio = { workspace = true, features = ["rt-multi-thread"] } remoc.workspace = true reaction-plugin.path = "../reaction-plugin" +iroh = "0.94.0" +base64 = "0.22.1" +rand_core = { version = "0.9.3", features = ["os_rng"] } +rand = "0.9.2" diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index cae9109..0b24543 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -1,10 +1,12 @@ -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; +use base64::{Engine, prelude::BASE64_STANDARD}; +use iroh::{SecretKey, defaults}; use reaction_plugin::{ ActionImpl, Hello, Manifest, PersistData, PluginInfo, RemoteResult, StreamImpl, Value, main_loop, }; -use remoc::rtc; +use remoc::{chmux::SendError, rtc}; #[tokio::main] async fn main() { @@ -49,6 +51,8 @@ 
impl PluginInfo for Plugin { } async fn finish_setup(&mut self) -> RemoteResult<()> { + let data = self.data.as_mut().unwrap(); + let secret_key = secret_key(data).await; todo!() } @@ -56,3 +60,47 @@ impl PluginInfo for Plugin { todo!() } } + +async fn secret_key(data: &mut PersistData) -> SecretKey { + if let Some(key) = get_secret_key(data) { + key + } else { + let key = SecretKey::generate(&mut rand::rng()); + set_secret_key(data, &key).await; + key + } +} + +fn get_secret_key(data: &PersistData) -> Option { + match &data.persisted_data { + Value::Object(map) => map.get("secret_key").and_then(|value| { + if let Value::String(str) = value { + let vec = BASE64_STANDARD.decode(str).ok()?; + if vec.len() != 32 { + return None; + } + let mut bytes = [0u8; 32]; + for i in 0..32 { + bytes[i] = vec[i]; + } + Some(SecretKey::from_bytes(&bytes)) + } else { + None + } + }), + _ => None, + } +} + +async fn set_secret_key(data: &mut PersistData, key: &SecretKey) { + let mut current = match &data.persisted_data { + Value::Object(map) => map.clone(), + _ => BTreeMap::default(), + }; + let base64 = BASE64_STANDARD.encode(key.to_bytes()); + current.insert("secret_key".into(), Value::String(base64)); + data.persist_data + .send(Value::Object(current)) + .await + .unwrap(); +} From 124a2827d9e96e1ee5cfe23fad7030df62556eff Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 27 Oct 2025 12:00:00 +0100 Subject: [PATCH 129/241] Cluster plugin init - Remove PersistData utility - Provide plugins a state directory instead, by starting them inside. - Store the secret key as a file inside this directory. - Use iroh's crate for base64 encoding, thus removing one dependency. - Implement plugin's stream_impl and action_impl functions, creating all necessary data structures. 
--- Cargo.lock | 7 +- Cargo.toml | 3 +- TODO | 14 +- plugins/reaction-plugin-cluster/Cargo.toml | 12 +- plugins/reaction-plugin-cluster/src/main.rs | 212 +++++++++++++----- .../reaction-plugin-cluster/src/secret_key.rs | 70 ++++++ plugins/reaction-plugin-virtual/Cargo.toml | 1 + plugins/reaction-plugin-virtual/src/main.rs | 47 ++-- plugins/reaction-plugin/Cargo.toml | 7 +- plugins/reaction-plugin/src/lib.rs | 37 +-- src/concepts/plugin.rs | 65 ++++-- src/daemon/plugin/mod.rs | 97 ++------ src/daemon/plugin/value.rs | 35 --- tests/test-conf/cluster-a.jsonnet | 64 ++++++ 14 files changed, 393 insertions(+), 278 deletions(-) create mode 100644 plugins/reaction-plugin-cluster/src/secret_key.rs delete mode 100644 src/daemon/plugin/value.rs create mode 100644 tests/test-conf/cluster-a.jsonnet diff --git a/Cargo.lock b/Cargo.lock index edc74cb..13d51f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2849,6 +2849,7 @@ version = "1.0.0" dependencies = [ "remoc", "serde", + "serde_json", "tokio", ] @@ -2856,12 +2857,13 @@ dependencies = [ name = "reaction-plugin-cluster" version = "0.1.0" dependencies = [ - "base64 0.22.1", + "data-encoding", "iroh", "rand 0.9.2", - "rand_core 0.9.3", "reaction-plugin", "remoc", + "serde", + "serde_json", "tokio", ] @@ -2871,6 +2873,7 @@ version = "0.1.0" dependencies = [ "reaction-plugin", "remoc", + "serde_json", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index fcec4c0..08daff8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,7 @@ num_cpus = "1.16.0" regex = "1.10.4" # Configuration languages, ser/deserialisation serde.workspace = true -serde_json = "1.0.117" +serde_json.workspace = true serde_yaml = "0.9.34" jrsonnet-evaluator = "0.4.2" # Error macro @@ -80,6 +80,7 @@ members = ["plugins/reaction-plugin", "plugins/reaction-plugin-cluster", "plugin [workspace.dependencies] remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } +serde_json = "1.0.117" tokio = { version = "1.40.0" } [[bin]] diff --git 
a/TODO b/TODO index b81282c..557559e 100644 --- a/TODO +++ b/TODO @@ -1,16 +1,4 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) plugins: pipe stderr too and wrap errors in logs -plugins: provide tree storage? omg - -questionnements: -- quelle cli pour les plugins ? - - Directement en appelant le plugin ? reaction-plugin-cluster gen-id ? 🟢 - → Demande de savoir où stocker tout ça - - Via moult IPC ? reaction plugin cluster gen-id ? 🔴 - → Mais du coup c'est l'oeuf ou la poule entre avoir un serveur qui fonctionne et avoir un -- Stockage ? - - uniquement dans la db reaction - → Faut pas que ce soit trop gros, un peu d'overhead, risque de perdre la donnée - - à part dans le configuration directory - → Pas mal en vrai +plugins: provide treedb storage? omg (add an enum that's either remoc::rch::mpsc or tokio::mpsc) diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 3d45475..902804d 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -4,10 +4,14 @@ version = "0.1.0" edition = "2024" [dependencies] -tokio = { workspace = true, features = ["rt-multi-thread"] } -remoc.workspace = true reaction-plugin.path = "../reaction-plugin" + +tokio.workspace = true +tokio.features = ["rt-multi-thread"] +remoc.workspace = true +serde.workspace = true +serde_json.workspace = true + +data-encoding = "2.9.0" iroh = "0.94.0" -base64 = "0.22.1" -rand_core = { version = "0.9.3", features = ["os_rng"] } rand = "0.9.2" diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 0b24543..77230b1 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -1,12 +1,21 @@ -use std::collections::{BTreeMap, BTreeSet}; - -use base64::{Engine, prelude::BASE64_STANDARD}; -use iroh::{SecretKey, 
defaults}; -use reaction_plugin::{ - ActionImpl, Hello, Manifest, PersistData, PluginInfo, RemoteResult, StreamImpl, Value, - main_loop, +use std::{ + collections::{BTreeMap, BTreeSet}, + net::SocketAddr, }; -use remoc::{chmux::SendError, rtc}; + +use iroh::PublicKey; +use reaction_plugin::{ + ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, main_loop, +}; +use remoc::{rch::mpsc, rtc}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use tokio::fs; + +mod secret_key; +use secret_key::secret_key; + +use crate::secret_key::{key_b64_to_bytes, key_bytes_to_b64}; #[tokio::main] async fn main() { @@ -16,12 +25,63 @@ async fn main() { #[derive(Default)] struct Plugin { - data: Option, + streams: BTreeMap, + actions: Vec, +} + +/// Stream options as defined by the user +#[derive(Serialize, Deserialize)] +struct StreamOptions { + /// The secret that permits to join the cluster. + shared_secret: Option, + /// The secret that permits to join the cluster, as a file. + /// Beginning and ending whitespace will be trimmed. + shared_secret_file: Option, + /// Other nodes which are part of the cluster. 
+ nodes: Vec, +} + +/// Stream information before start +struct StreamInit { + shared_secret: String, + nodes: Vec, + tx: mpsc::Sender>, +} + +#[derive(Serialize, Deserialize)] +struct NodeOption { + public_key: String, + addresses: Vec, +} + +#[derive(Serialize, Deserialize)] +struct NodeInit { + public_key: PublicKey, + addresses: Vec, +} + +#[derive(Serialize, Deserialize)] +struct ActionOptions { + /// The line to send to the corresponding cluster, example: "ban \" + send: String, + /// The name of the corresponding cluster, example: "my_cluster_stream" + to: String, + /// Whether the stream of this node also receives the line + #[serde(default, rename = "self")] + self_: bool, +} + +#[derive(Serialize, Deserialize)] +struct ActionInit { + send: String, + to: String, + self_: bool, + patterns: Vec, + rx: mpsc::Receiver, } impl PluginInfo for Plugin { - async fn manifest(&mut self, data: PersistData) -> Result { - self.data = Some(data); + async fn manifest(&mut self) -> Result { Ok(Manifest { hello: Hello::hello(), streams: BTreeSet::from(["cluster".into()]), @@ -35,24 +95,96 @@ impl PluginInfo for Plugin { stream_type: String, config: Value, ) -> RemoteResult { - todo!() + if &stream_type != "cluster" { + return Err("This plugin can't handle other stream types than cluster".into()); + } + + let options: StreamOptions = + serde_json::from_value(config).map_err(|err| format!("invalid options: {err}"))?; + + let shared_secret = if let Some(shared_secret) = options.shared_secret { + shared_secret + } else if let Some(shared_secret_file) = &options.shared_secret_file { + fs::read_to_string(shared_secret_file) + .await + .map_err(|err| { + format!("can't access shared_secret_file {shared_secret_file}: {err}") + })? 
+ .trim() + .to_owned() + } else { + return Err("missing shared secret: either shared_secret or shared_secret_file must be provided".into()); + }; + + let mut init_nodes = Vec::default(); + for node in options.nodes.into_iter() { + let bytes = key_b64_to_bytes(&node.public_key) + .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; + + let public_key = PublicKey::from_bytes(&bytes) + .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; + + init_nodes.push(NodeInit { + public_key, + addresses: node.addresses, + }); + } + + let (tx, rx) = mpsc::channel(1); + + let stream = StreamInit { + shared_secret, + nodes: init_nodes, + tx, + }; + + if let Some(_) = self.streams.insert(stream_name, stream) { + return Err("this virtual stream has already been initialized".into()); + } + + Ok(StreamImpl { + stream: rx, + standalone: true, + }) } async fn action_impl( &mut self, - stream_name: String, - filter_name: String, - action_name: String, + _stream_name: String, + _filter_name: String, + _action_name: String, action_type: String, config: Value, patterns: Vec, ) -> RemoteResult { - todo!() + if &action_type != "cluster" { + return Err("This plugin can't handle other action types than cluster".into()); + } + + let options: ActionOptions = + serde_json::from_value(config).map_err(|err| format!("invalid options: {err}"))?; + + let (tx, rx) = mpsc::channel(1); + + let init_action = ActionInit { + send: options.send, + to: options.to, + self_: options.self_, + patterns, + rx, + }; + + self.actions.push(init_action); + + Ok(ActionImpl { tx }) } async fn finish_setup(&mut self) -> RemoteResult<()> { - let data = self.data.as_mut().unwrap(); - let secret_key = secret_key(data).await; + let secret_key = secret_key().await?; + eprintln!( + "public key of this node: {}", + key_bytes_to_b64(secret_key.public().as_bytes()) + ); todo!() } @@ -60,47 +192,3 @@ impl PluginInfo for Plugin { todo!() } } - -async fn secret_key(data: &mut PersistData) 
-> SecretKey { - if let Some(key) = get_secret_key(data) { - key - } else { - let key = SecretKey::generate(&mut rand::rng()); - set_secret_key(data, &key).await; - key - } -} - -fn get_secret_key(data: &PersistData) -> Option { - match &data.persisted_data { - Value::Object(map) => map.get("secret_key").and_then(|value| { - if let Value::String(str) = value { - let vec = BASE64_STANDARD.decode(str).ok()?; - if vec.len() != 32 { - return None; - } - let mut bytes = [0u8; 32]; - for i in 0..32 { - bytes[i] = vec[i]; - } - Some(SecretKey::from_bytes(&bytes)) - } else { - None - } - }), - _ => None, - } -} - -async fn set_secret_key(data: &mut PersistData, key: &SecretKey) { - let mut current = match &data.persisted_data { - Value::Object(map) => map.clone(), - _ => BTreeMap::default(), - }; - let base64 = BASE64_STANDARD.encode(key.to_bytes()); - current.insert("secret_key".into(), Value::String(base64)); - data.persist_data - .send(Value::Object(current)) - .await - .unwrap(); -} diff --git a/plugins/reaction-plugin-cluster/src/secret_key.rs b/plugins/reaction-plugin-cluster/src/secret_key.rs new file mode 100644 index 0000000..ca7e67a --- /dev/null +++ b/plugins/reaction-plugin-cluster/src/secret_key.rs @@ -0,0 +1,70 @@ +use std::io; + +use data_encoding::DecodeError; +use iroh::SecretKey; +use tokio::{ + fs::{self, File}, + io::AsyncWriteExt, +}; + +const SECRET_KEY_PATH: &str = "./secret_key.txt"; + +pub async fn secret_key() -> Result { + if let Some(key) = get_secret_key().await? 
{ + Ok(key) + } else { + let key = SecretKey::generate(&mut rand::rng()); + set_secret_key(&key).await?; + Ok(key) + } +} + +async fn get_secret_key() -> Result, String> { + let key = match fs::read_to_string(SECRET_KEY_PATH).await { + Ok(key) => Ok(key), + Err(err) => match err.kind() { + io::ErrorKind::NotFound => return Ok(None), + _ => Err(format!("can't read secret key file: {err}")), + }, + }?; + let bytes = match key_b64_to_bytes(&key) { + Ok(key) => Ok(key), + Err(err) => Err(format!( + "invalid secret key read from file: {err}. Please remove the `{SECRET_KEY_PATH}` file from plugin directory." + )), + }?; + Ok(Some(SecretKey::from_bytes(&bytes))) +} + +async fn set_secret_key(key: &SecretKey) -> Result<(), String> { + let secret_key = key_bytes_to_b64(&key.to_bytes()); + File::options() + .mode(0o600) + .write(true) + .create(true) + .open(SECRET_KEY_PATH) + .await + .map_err(|err| format!("can't open `{SECRET_KEY_PATH}` in plugin directory: {err}"))? + .write_all(secret_key.as_bytes()) + .await + .map_err(|err| format!("can't write to `{SECRET_KEY_PATH}` in plugin directory: {err}")) +} + +pub fn key_b64_to_bytes(key: &str) -> Result<[u8; 32], DecodeError> { + let vec = data_encoding::BASE64URL.decode(key.as_bytes())?; + if vec.len() != 32 { + return Err(DecodeError { + position: vec.len(), + kind: data_encoding::DecodeKind::Length, + }); + } + let mut bytes = [0u8; 32]; + for i in 0..32 { + bytes[i] = vec[i]; + } + Ok(bytes) +} + +pub fn key_bytes_to_b64(key: &[u8; 32]) -> String { + data_encoding::BASE64URL.encode(key) +} diff --git a/plugins/reaction-plugin-virtual/Cargo.toml b/plugins/reaction-plugin-virtual/Cargo.toml index 55ce41c..d0d65c5 100644 --- a/plugins/reaction-plugin-virtual/Cargo.toml +++ b/plugins/reaction-plugin-virtual/Cargo.toml @@ -7,3 +7,4 @@ edition = "2024" tokio = { workspace = true, features = ["rt-multi-thread"] } remoc.workspace = true reaction-plugin.path = "../reaction-plugin" +serde_json.workspace = true diff --git 
a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 3b06cbc..5d4e7de 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,10 +1,10 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Line, Manifest, PersistData, PluginInfo, RemoteResult, StreamImpl, - Value, + ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, }; use remoc::{rch::mpsc, rtc}; +use serde::{Deserialize, Serialize}; #[tokio::main] async fn main() { @@ -19,7 +19,7 @@ struct Plugin { } impl PluginInfo for Plugin { - async fn manifest(&mut self, _data: PersistData) -> Result { + async fn manifest(&mut self) -> Result { Ok(Manifest { hello: Hello::hello(), streams: BTreeSet::from(["virtual".into()]), @@ -59,7 +59,7 @@ impl PluginInfo for Plugin { patterns: Vec, ) -> RemoteResult { if &action_type != "virtual" { - return Err("This plugin can't handle other stream types than virtual".into()); + return Err("This plugin can't handle other action types than virtual".into()); } let (virtual_action_init, tx) = @@ -126,6 +126,14 @@ impl VirtualStream { } } +#[derive(Serialize, Deserialize)] +struct ActionOptions { + /// The line to send to the corresponding virtual stream, example: "ban \" + send: String, + /// The name of the corresponding virtual stream, example: "my_stream" + to: String, +} + struct VirtualActionInit { stream_name: String, filter_name: String, @@ -144,30 +152,9 @@ impl VirtualActionInit { config: Value, patterns: Vec, ) -> Result<(Self, mpsc::Sender), String> { - let send; - let to; - match config { - Value::Object(mut map) => { - send = match map.remove("send") { - Some(Value::String(value)) => value, - _ => return Err("`send` must be a string to send to the corresponding virtual stream, example: \"ban \"".into()), - }; - - to = match map.remove("to") { - Some(Value::String(value)) => value, - _ => return 
Err("`to` must be the name of the corresponding virtual stream, example: \"my_stream\"".into()), - }; - - if map.len() != 0 { - return Err( - "actions of type virtual accept only `send` and `to` options".into(), - ); - } - } - _ => { - return Err("actions of type virtual require `send` and `to` options".into()); - } - } + let options: ActionOptions = serde_json::from_value(config).map_err(|err| { + format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") + })?; let patterns = patterns .into_iter() @@ -182,8 +169,8 @@ impl VirtualActionInit { action_name, rx, patterns, - send, - to, + send: options.send, + to: options.to, }, tx, )) diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index 3bd74e9..4a23e2c 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -5,5 +5,10 @@ edition = "2024" [dependencies] remoc.workspace = true + serde.workspace = true -tokio = { workspace = true, features = ["io-std"] } + +serde_json.workspace = true + +tokio.workspace = true +tokio.features = ["io-std"] diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 0bf602e..9f092f9 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -35,24 +35,22 @@ //! ``` //! This can be useful if you want to provide CLI functionnality to your users. //! +//! It will be run in its own directory, in which it should have write access. +//! //! ## Examples //! //! Core plugins can be found here: //! The "virtual" plugin is the simplest and can serve as a template. //! You'll have to adjust dependencies versions in `Cargo.toml`. 
-use std::{ - collections::{BTreeMap, BTreeSet}, - error::Error, - fmt::Display, -}; +use std::{collections::BTreeSet, error::Error, fmt::Display}; use remoc::{ - Connect, - rch::{self, mpsc}, + Connect, rch, rtc::{self, Server}, }; use serde::{Deserialize, Serialize}; +pub use serde_json::Value; use tokio::io::{stdin, stdout}; /// This is the only trait that **must** be implemented by a plugin. @@ -60,7 +58,7 @@ use tokio::io::{stdin, stdout}; #[rtc::remote] pub trait PluginInfo { /// Return the manifest of the plugin. - async fn manifest(&mut self, data: PersistData) -> Result; + async fn manifest(&mut self) -> Result; /// Return one stream of a given type if it exists async fn stream_impl( @@ -159,29 +157,6 @@ impl Hello { } } -/// Represents a configuration value. -/// This is not meant as an efficient type, but as a very flexible one. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub enum Value { - Null, - Bool(bool), - Integer(i64), - Float(f64), - String(String), - Array(Vec), - Object(BTreeMap), -} - -/// Data persisted by reaction for the plugin. -/// This is persisted as a single JSON file by reaction, so it is not suitable for big sizes of data. 
-#[derive(Serialize, Deserialize)] -pub struct PersistData { - /// Data persisted by the plugin in a previous run - pub persisted_data: Value, - /// Sender of data to be persisted by the plugin for a previous run - pub persist_data: mpsc::Sender, -} - #[derive(Serialize, Deserialize)] pub struct StreamImpl { pub stream: rch::mpsc::Receiver, diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index fa9b7fb..f5331a0 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -7,6 +7,7 @@ use std::{ use serde::{Deserialize, Serialize}; use tokio::{ + fs, process::{Child, Command}, runtime::Handle, }; @@ -51,23 +52,19 @@ impl Plugin { if !self.path.starts_with("/") { return Err(format!("plugin paths must be absolute: {}", self.path)); } - - if self.systemd { - self.systemd_setup(); - } Ok(()) } /// Override default options with user-defined options, when defined. - pub fn systemd_setup(&mut self) { - let mut new_options = systemd_default_options(); - while let Some((option, value)) = self.systemd_options.pop_first() { - new_options.insert(option, value); + pub fn systemd_setup(&self, working_directory: &str) -> BTreeMap> { + let mut new_options = systemd_default_options(working_directory); + for (option, value) in self.systemd_options.iter() { + new_options.insert(option.clone(), value.clone()); } - self.systemd_options = new_options; + new_options } - pub async fn launch(&self) -> Result { + pub async fn launch(&self, state_directory: &str) -> Result { // owner check if self.check_root { let path = self.path.clone(); @@ -85,19 +82,26 @@ impl Plugin { } let self_uid = if self.systemd { - // Well well we want to check if we're root - #[allow(unsafe_code)] - unsafe { - nix::libc::geteuid() - } + Some( + // Well well we want to check if we're root + #[allow(unsafe_code)] + unsafe { + nix::libc::geteuid() + }, + ) } else { - 0 + None }; - let mut command = if self.systemd && self_uid == 0 { + // Create plugin working directory (also state directory) + let 
plugin_working_directory = plugin_working_directory(&self.name, state_directory)?; + fs::create_dir(&plugin_working_directory).await?; + + let mut command = if self_uid.is_some_and(|self_uid| self_uid == 0) { let mut command = Command::new("run0"); // --pipe gives direct, non-emulated stdio access, for better performance. command.arg("--pipe"); + self.systemd_setup(&plugin_working_directory); // run0 options for (option, values) in self.systemd_options.iter() { for value in values.iter() { @@ -110,7 +114,9 @@ impl Plugin { if self.systemd { warn!("Disabling systemd because reaction does not run as root"); } - Command::new(&self.path) + let mut command = Command::new(&self.path); + command.current_dir(plugin_working_directory); + command }; command.arg("serve"); debug!( @@ -126,14 +132,31 @@ impl Plugin { } } +fn plugin_working_directory(plugin_name: &str, state_directory: &str) -> Result { + std::fs::canonicalize(format!("{state_directory}/plugin_data/{plugin_name}")).and_then(|path| { + path.to_str() + .ok_or_else(|| { + Error::new( + ErrorKind::Other, + "state_directory is not UTF-8. please run reaction at an UTF-8 named path", + ) + }) + .map(str::to_owned) + }) +} + // TODO commented options block execution of program, // while developping in my home directory. // Some options may still be useful in production environments. 
-fn systemd_default_options() -> BTreeMap> { +fn systemd_default_options(working_directory: &str) -> BTreeMap> { BTreeMap::from( [ - // No file access - ("ReadWritePaths", vec![]), + // reaction slice (does nothing if inexistent) + ("Slice", vec!["reaction.slice"]), + // Started in its own directory + ("WorkingDirectory", vec![working_directory]), + // No file access except own directory + ("ReadWritePaths", vec![working_directory]), ("ReadOnlyPaths", vec![]), // ("NoExecPaths", vec!["/"]), ("InaccessiblePaths", vec!["/boot", "/etc"]), diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 6c2762d..02fe61b 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -5,10 +5,10 @@ use std::{ }; use futures::{future::join_all, FutureExt}; -use reaction_plugin::{ActionImpl, Hello, PersistData, PluginInfo, PluginInfoClient, StreamImpl}; -use remoc::{rch::mpsc, Connect}; +use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; +use remoc::Connect; use serde_json::Value; -use tokio::{fs, process::Child, time::sleep}; +use tokio::{process::Child, time::sleep}; use tracing::error; use crate::{ @@ -16,10 +16,6 @@ use crate::{ daemon::{utils::kill_child, ShutdownToken}, }; -mod value; - -use value::to_stable_value; - pub struct PluginManager { child: Child, shutdown: ShutdownToken, @@ -48,7 +44,7 @@ impl PluginManager { shutdown: ShutdownToken, ) -> Result { let mut child = plugin - .launch() + .launch(state_directory) .await .map_err(|err| format!("could not launch plugin: {err}"))?; @@ -76,12 +72,8 @@ impl PluginManager { .map_err(|err| format!("could not retrieve initial information from plugin: {err}"))? 
.ok_or("could not retrieve initial information from plugin: no data")?; - let persist_data = data_persistence(&plugin.name, state_directory, shutdown.clone()) - .await - .map_err(|err| format!("error while reading plugin {} data: {err}", plugin.name))?; - let manifest = plugin_info - .manifest(persist_data) + .manifest() .await .map_err(|err| format!("error while getting plugin {} manifest: {err}", plugin.name))?; @@ -188,21 +180,16 @@ impl Plugins { stream_type: String, config: Value, ) -> Result { - let plugin_name = self - .streams - .get(&stream_type) - .ok_or(format!("No plugin provided a stream type '{stream_type}'"))?; + let plugin_name = self.streams.get(&stream_type).ok_or(format!( + "No plugin provided the stream type '{stream_type}'" + ))?; let plugin = self.plugins.get_mut(plugin_name).unwrap(); plugin - .stream_impl( - stream_name.into(), - stream_type.into(), - to_stable_value(config), - ) + .stream_impl(stream_name.clone(), stream_type, config) .await - .map_err(|err| format!("plugin error while initializing stream: {err}")) + .map_err(|err| format!("plugin error while initializing stream {stream_name}: {err}")) } pub async fn init_action_impl( @@ -214,24 +201,23 @@ impl Plugins { config: Value, patterns: Vec, ) -> Result { - let plugin_name = self - .actions - .get(&action_type) - .ok_or(format!("No plugin provided a action type '{action_type}'"))?; + let plugin_name = self.actions.get(&action_type).ok_or(format!( + "No plugin provided the action type '{action_type}'" + ))?; let plugin = self.plugins.get_mut(plugin_name).unwrap(); plugin .action_impl( - stream_name.into(), - filter_name.into(), - action_name.into(), - action_type.into(), - to_stable_value(config), + stream_name.clone(), + filter_name.clone(), + action_name.clone(), + action_type, + config, patterns, ) .await - .map_err(|err| format!("plugin error while initializing action: {err}")) + .map_err(|err| format!("plugin error while initializing action 
{stream_name}.{filter_name}.{action_name}: {err}")) } pub async fn finish_setup(&mut self) -> Result<(), String> { @@ -259,48 +245,3 @@ impl Plugins { } } } - -async fn data_persistence( - plugin_name: &str, - state_directory: &str, - shutdown: ShutdownToken, -) -> Result { - let dir_path = format!("{state_directory}/plugin_data/"); - fs::create_dir_all(&dir_path).await?; - - let file_path = format!("{dir_path}/{plugin_name}.json"); - - let data = if fs::try_exists(&file_path).await? { - let txt = fs::read_to_string(&file_path).await?; - serde_json::from_str::(&txt)? - } else { - Value::Null - }; - - let (tx, mut rx) = mpsc::channel(1); - - tokio::spawn(async move { - loop { - let value = tokio::select! { - _ = shutdown.wait() => break, - value = rx.recv() => value, - }; - if let Ok(Some(value)) = value { - // unwrap: serializing a [`serde_json::Value`] does not fail - let json = serde_json::to_string_pretty(&value).unwrap(); - - if let Err(err) = fs::write(&file_path, json).await { - error!("could not store plugin data at {file_path}: {err}"); - break; - } - } else { - break; - } - } - }); - - Ok(PersistData { - persisted_data: to_stable_value(data), - persist_data: tx, - }) -} diff --git a/src/daemon/plugin/value.rs b/src/daemon/plugin/value.rs deleted file mode 100644 index e62c498..0000000 --- a/src/daemon/plugin/value.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::collections::BTreeMap; - -use reaction_plugin::Value as RValue; -use serde_json::Value as JValue; - -pub fn to_stable_value(val: JValue) -> RValue { - match val { - JValue::Null => RValue::Null, - JValue::Bool(b) => RValue::Bool(b), - JValue::Number(number) => { - if let Some(number) = number.as_i64() { - RValue::Integer(number) - } else if let Some(number) = number.as_f64() { - RValue::Float(number) - } else { - RValue::Null - } - } - JValue::String(s) => RValue::String(s.into()), - JValue::Array(v) => RValue::Array({ - let mut vec = Vec::with_capacity(v.len()); - for val in v { - 
vec.push(to_stable_value(val)); - } - vec - }), - JValue::Object(m) => RValue::Object({ - let mut map = BTreeMap::new(); - for (key, val) in m { - map.insert(key.into(), to_stable_value(val)); - } - map - }), - } -} diff --git a/tests/test-conf/cluster-a.jsonnet b/tests/test-conf/cluster-a.jsonnet new file mode 100644 index 0000000..ed36d95 --- /dev/null +++ b/tests/test-conf/cluster-a.jsonnet @@ -0,0 +1,64 @@ +{ + patterns: { + num: { + regex: @"[0-9]+", + }, + all: { + regex: @".*" + } + }, + + plugins: { + cluster: { + path: "./target/debug/reaction-plugin-cluster", + check_root: false, + systemd_options: { + DynamicUser: ["false"], + } + } + }, + + streams: { + s0: { + cmd: ["bash", "-c", "for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2"], + filters: { + f0: { + regex: ["^$"], + actions: { + a0: { + type: "virtual", + options: { + send: "a0 ", + to: "s1", + } + }, + b0: { + type: "cluster", + options: { + send: "b0 ", + to: "s1", + }, + after: "600ms", + }, + }, + }, + }, + }, + s1: { + type: "cluster", + options: { + + }, + filters: { + f1: { + regex: ["^$"], + actions: { + a1: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} From a7604ca8d5435cf8cbb812d04b2144faaecc1128 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 30 Oct 2025 12:00:00 +0100 Subject: [PATCH 130/241] WIP allow plugin to print error to stderr and capture them I have a race condition where reaction quits before printing process' stderr. 
This will be the occasion to rework (again) reaction's daemon startup --- Cargo.lock | 1 + TODO | 1 + plugins/reaction-plugin-cluster/src/main.rs | 2 +- plugins/reaction-plugin-virtual/Cargo.toml | 1 + plugins/reaction-plugin-virtual/src/main.rs | 8 +-- plugins/reaction-plugin/src/lib.rs | 55 +++++++++++++++++---- src/concepts/plugin.rs | 36 ++++++-------- src/daemon/mod.rs | 16 +++--- src/daemon/plugin/mod.rs | 45 +++++++++++++++-- src/daemon/stream.rs | 9 ++-- 10 files changed, 121 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13d51f4..06394fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2873,6 +2873,7 @@ version = "0.1.0" dependencies = [ "reaction-plugin", "remoc", + "serde", "serde_json", "tokio", ] diff --git a/TODO b/TODO index 557559e..cc7658d 100644 --- a/TODO +++ b/TODO @@ -2,3 +2,4 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) plugins: pipe stderr too and wrap errors in logs plugins: provide treedb storage? omg (add an enum that's either remoc::rch::mpsc or tokio::mpsc) +plugins: implement a tracing subscriber over stderr? or more simply, capture lines and reprint them prefixed by the plugin name? 
diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 77230b1..b738e1c 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -45,7 +45,7 @@ struct StreamOptions { struct StreamInit { shared_secret: String, nodes: Vec, - tx: mpsc::Sender>, + tx: mpsc::Sender, } #[derive(Serialize, Deserialize)] diff --git a/plugins/reaction-plugin-virtual/Cargo.toml b/plugins/reaction-plugin-virtual/Cargo.toml index d0d65c5..46e3430 100644 --- a/plugins/reaction-plugin-virtual/Cargo.toml +++ b/plugins/reaction-plugin-virtual/Cargo.toml @@ -7,4 +7,5 @@ edition = "2024" tokio = { workspace = true, features = ["rt-multi-thread"] } remoc.workspace = true reaction-plugin.path = "../reaction-plugin" +serde.workspace = true serde_json.workspace = true diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 5d4e7de..b33774b 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -105,11 +105,11 @@ impl PluginInfo for Plugin { #[derive(Clone)] struct VirtualStream { - tx: mpsc::Sender>, + tx: mpsc::Sender, } impl VirtualStream { - fn new(config: Value) -> Result<(Self, mpsc::Receiver), String> { + fn new(config: Value) -> Result<(Self, mpsc::Receiver), String> { const CONFIG_ERROR: &'static str = "streams of type virtual take no options"; match config { Value::Null => (), @@ -205,7 +205,7 @@ impl VirtualAction { acc.replace(pattern, &m.match_[i]) }) }; - let result = match self.to.tx.send(Line::Ok(line)).await { + let result = match self.to.tx.send(line).await { 
Ok(_) => Ok(()), Err(err) => Err(format!("{err}")), }; diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 9f092f9..099eb5b 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -1,8 +1,5 @@ //! This crate defines the API between reaction's core and plugins. //! -//! It's based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait -//! calls over a single transport channel. -//! //! To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides //! the entrypoint for a plugin. //! It permits to define 0 to n (stream, filter, action) custom types. @@ -37,13 +34,38 @@ //! //! It will be run in its own directory, in which it should have write access. //! -//! ## Examples +//! ## Communication +//! +//! Communication between the plugin and reaction is based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait +//! calls over a single transport channel. +//! The channels used are stdin and stdout, so you can't use them for something else. +//! +//! ### Errors +//! +//! Errors can be printed to stderr. +//! They'll be captured line by line and re-printed by reaction, with the plugin name prepended. +//! +//! A line can start with `DEBUG `, `INFO `, `WARN `, `ERROR `. +//! If the starts with none of the above, the line is assumed to be an error. +//! +//! Examples: +//! ```log +//! WARN This is an official warning from the plugin +//! # will become: +//! WARN plugin test: This is an official warning from the plugin +//! +//! Freeeee errrooooorrr +//! # will become: +//! ERROR plugin test: Freeeee errrooooorrr +//! ``` +//! +//! ## Starting template //! //! Core plugins can be found here: //! The "virtual" plugin is the simplest and can serve as a template. //! You'll have to adjust dependencies versions in `Cargo.toml`. 
-use std::{collections::BTreeSet, error::Error, fmt::Display}; +use std::{collections::BTreeSet, error::Error, fmt::Display, process::exit}; use remoc::{ Connect, rch, @@ -159,7 +181,7 @@ impl Hello { #[derive(Serialize, Deserialize)] pub struct StreamImpl { - pub stream: rch::mpsc::Receiver, + pub stream: rch::mpsc::Receiver, /// Whether this stream works standalone, or if it needs other streams to be fed. /// Defaults to true. /// When false, reaction will exit if it's the last one standing. @@ -171,8 +193,6 @@ fn _true() -> bool { true } -pub type Line = Result; - // #[derive(Serialize, Deserialize)] // pub struct FilterImpl { // pub stream: rch::lr::Sender, @@ -207,7 +227,24 @@ pub async fn main_loop(plugin_info: T) { let (server, client) = PluginInfoServer::new(plugin_info, 1); - let _ = tokio::join!(tx.send(client), server.serve(), tokio::spawn(conn)); + let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), tokio::spawn(conn)); + let mut exit_code = 0; + if let Err(err) = res1 { + eprintln!("ERROR could not send plugin info to reaction: {err}"); + exit_code = 1; + } + if let Err(err) = res2 { + eprintln!("ERROR could not launch plugin service for reaction: {err}"); + exit_code = 2; + } + if let Err(err) = res3 { + eprintln!("ERROR could not setup connection with reaction: {err}"); + exit_code = 3; + } else if let Ok(Err(err)) = res3 { + eprintln!("ERROR could not setup connection with reaction: {err}"); + exit_code = 3; + } + exit(exit_code); } // Errors diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index f5331a0..bc51bf2 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -9,7 +9,6 @@ use serde::{Deserialize, Serialize}; use tokio::{ fs, process::{Child, Command}, - runtime::Handle, }; use tracing::{debug, warn}; @@ -48,7 +47,16 @@ impl Plugin { return Err("can't specify empty plugin path".into()); } - #[cfg(not(debug_assertions))] + // Only when testing, make relative paths absolute + 
#[cfg(debug_assertions)] + if !self.path.starts_with("/") { + self.path = std::fs::canonicalize(&self.path) + .unwrap() + .to_string_lossy() + .to_string(); + } + + // Disallow relative paths if !self.path.starts_with("/") { return Err(format!("plugin paths must be absolute: {}", self.path)); } @@ -68,10 +76,7 @@ impl Plugin { // owner check if self.check_root { let path = self.path.clone(); - let stat = Handle::current() - .spawn_blocking(|| std::fs::metadata(path)) - .await - .unwrap()?; + let stat = fs::metadata(path).await?; if stat.st_uid() != 0 { return Err(Error::new( @@ -94,8 +99,9 @@ impl Plugin { }; // Create plugin working directory (also state directory) - let plugin_working_directory = plugin_working_directory(&self.name, state_directory)?; - fs::create_dir(&plugin_working_directory).await?; + let plugin_working_directory = format!("{state_directory}/plugin_data/{}", self.name); + dbg!(&plugin_working_directory); + fs::create_dir_all(&plugin_working_directory).await?; let mut command = if self_uid.is_some_and(|self_uid| self_uid == 0) { let mut command = Command::new("run0"); @@ -127,24 +133,12 @@ impl Plugin { command .stdin(Stdio::piped()) .stdout(Stdio::piped()) + .stderr(Stdio::piped()) .env("RUST_BACKTRACE", "1") .spawn() } } -fn plugin_working_directory(plugin_name: &str, state_directory: &str) -> Result { - std::fs::canonicalize(format!("{state_directory}/plugin_data/{plugin_name}")).and_then(|path| { - path.to_str() - .ok_or_else(|| { - Error::new( - ErrorKind::Other, - "state_directory is not UTF-8. please run reaction at an UTF-8 named path", - ) - }) - .map(str::to_owned) - }) -} - // TODO commented options block execution of program, // while developping in my home directory. // Some options may still be useful in production environments. 
diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index a4ca765..2388088 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -49,14 +49,6 @@ pub async fn daemon( // Open Database let mut db = Database::open(config, shutdown.token()).await?; - // Open Socket - let socket = Socket::open(socket).await?; - - // reaction won't abort on startup anymore, we can run start commands - if !config.start() { - return Err("a start command failed, exiting.".into()); - } - let (state, stream_managers) = { // Semaphore limiting action execution concurrency let exec_limit = match config.concurrency { @@ -95,9 +87,15 @@ pub async fn daemon( plugins.finish_setup().await?; plugins.manager(); - // Run socket task + // Open socket and run task + let socket = Socket::open(socket).await?; socket.manager(config, state, shutdown.token()); + // reaction won't abort on startup anymore, we can run start commands + if !config.start() { + return Err("a start command failed, exiting.".into()); + } + // Start Stream managers let stream_task_handles = stream_managers.into_iter().filter_map(|stream_manager| { let standalone = stream_manager.is_standalone(); diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 02fe61b..c0949ee 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -4,16 +4,19 @@ use std::{ time::Duration, }; -use futures::{future::join_all, FutureExt}; +use futures::{future::join_all, FutureExt, StreamExt}; use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; use remoc::Connect; use serde_json::Value; -use tokio::{process::Child, time::sleep}; +use tokio::{ + process::{Child, ChildStderr}, + time::sleep, +}; use tracing::error; use crate::{ concepts::{Config, Plugin}, - daemon::{utils::kill_child, ShutdownToken}, + daemon::{stream::reader_to_stream, utils::kill_child, ShutdownToken}, }; pub struct PluginManager { @@ -48,6 +51,12 @@ impl PluginManager { .await .map_err(|err| format!("could not launch plugin: 
{err}"))?; + { + let stderr = child.stderr.take().unwrap(); + let shutdown = shutdown.clone(); + tokio::spawn(async move { handle_stderr(stderr, plugin.name.clone(), shutdown).await }); + } + let stdin = child.stdin.take().unwrap(); let stdout = child.stdout.take().unwrap(); @@ -124,6 +133,36 @@ impl PluginManager { } } +async fn handle_stderr(stderr: ChildStderr, plugin_name: String, _shutdown: ShutdownToken) { + let lines = reader_to_stream(stderr); + tokio::pin!(lines); + loop { + match lines.next().await { + Some(Ok(line)) => { + // dumb: I can't factorize this because tracing::event! + // requires its log level to be a constant. + if line.starts_with("DEBUG ") { + tracing::debug!("plugin {plugin_name}: {}", line.split_at(5).1) + } else if line.starts_with("INFO ") { + tracing::info!("plugin {plugin_name}: {}", line.split_at(4).1) + } else if line.starts_with("WARN ") { + tracing::warn!("plugin {plugin_name}: {}", line.split_at(4).1) + } else if line.starts_with("ERROR ") { + tracing::error!("plugin {plugin_name}: {}", line.split_at(5).1) + } else { + // If there is no log level, we suppose it's an error (panic or something) + tracing::error!("plugin {plugin_name}: {}", line) + } + } + Some(Err(err)) => { + error!("while trying to read plugin {plugin_name} stderr: {err}"); + break; + } + None => break, + } + } +} + #[derive(Default)] pub struct Plugins { plugins: BTreeMap, diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index f9e8eec..42c9b54 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -27,7 +27,7 @@ fn to_line(data: &[u8]) -> String { .replace(std::char::REPLACEMENT_CHARACTER, "") } -fn reader_to_stream( +pub fn reader_to_stream( reader: impl tokio::io::AsyncRead + Unpin, ) -> impl AsyncStream> { let buf_reader = BufReader::new(reader); @@ -127,13 +127,9 @@ impl StreamManager { loop { match plugin.stream.recv().await { - Ok(Some(Ok(line))) => { + Ok(Some(line)) => { self.handle_line(line).await; } - Ok(Some(Err(err))) => { - 
error!("stream {} exit with error: {}", self.stream.name, err); - return; - } Err(err) => { if err.is_final() { error!( @@ -149,6 +145,7 @@ impl StreamManager { } } Ok(None) => { + error!("stream {} has exited", self.stream.name); return; } } From 20921be07d600c9c3f4d2017238018d468514d44 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 30 Oct 2025 12:00:00 +0100 Subject: [PATCH 131/241] Fix daemon startup: all subsystems will cleanly exit Regardless of which startup error makes reaction exit. Also made plugin stderr task exit when the ShutdownToken asks for it. Also updated Rust edition to 2024. --- Cargo.toml | 2 +- src/daemon/mod.rs | 130 ++++++++++++++++++++++++++------------- src/daemon/plugin/mod.rs | 22 ++++--- src/main.rs | 99 ++++++++++++++--------------- src/treedb/mod.rs | 20 +++--- 5 files changed, 159 insertions(+), 114 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 08daff8..9a9cff7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "reaction" version = "2.2.1" -edition = "2021" +edition = "2024" authors = ["ppom "] license = "AGPL-3.0" description = "Scan logs and take action" diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 2388088..06170ae 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -2,9 +2,10 @@ use std::{ collections::HashMap, error::Error, path::PathBuf, + process::exit, sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, }; @@ -12,10 +13,10 @@ use chrono::Local; use futures::future::join_all; use tokio::{ select, - signal::unix::{signal, SignalKind}, + signal::unix::{SignalKind, signal}, sync::Semaphore, }; -use tracing::{debug, info}; +use tracing::{debug, error, info}; use crate::{concepts::Config, treedb::Database}; use filter::FilterManager; @@ -35,19 +36,87 @@ mod socket; mod stream; mod utils; -pub async fn daemon( - config_path: PathBuf, - socket: PathBuf, -) -> Result<(), Box> { - let config: &'static Config = 
Box::leak(Box::new(Config::from_path(&config_path)?)); +pub async fn daemon(config_path: PathBuf, socket: PathBuf) { + // Load config or quit + let config: &'static Config = Box::leak(Box::new(match Config::from_path(&config_path) { + Ok(config) => config, + Err(err) => { + error!("{err}"); + return; + } + })); // Cancellation Token let shutdown = ShutdownController::new(); - let mut plugins = Plugins::new(config, shutdown.token()).await?; + // Cancel when we receive a quit signal + let signal_received = Arc::new(AtomicBool::new(false)); + if let Err(err) = handle_signals(shutdown.delegate(), signal_received.clone()) { + error!("{err}"); + return; + } + + let mut db = None; + let mut config_started = false; + let mut daemon_err = false; + + // Start the real daemon 👹 + if let Err(err) = daemon_start( + config, + socket, + shutdown.token(), + &mut db, + &mut config_started, + ) + .await + { + error!("{err}"); + daemon_err = true; + } + + // Release last db's sender + let mut db_status = None; + if let Some(db) = db { + db_status = Some(db.quit()); + } + + debug!("Asking for all tasks to quit..."); + shutdown.ask_shutdown(); + + debug!("Waiting for all tasks to quit..."); + shutdown.wait_shutdown().await; + + let mut stop_ok = true; + if config_started { + stop_ok = config.stop(); + } + + if daemon_err || !stop_ok { + exit(1); + } else if let Some(mut db_status) = db_status + && let Ok(Err(err)) = db_status.try_recv() + { + error!("database error: {}", err); + exit(1); + } else if !signal_received.load(Ordering::SeqCst) { + error!("quitting because all streams finished"); + exit(1); + } else { + exit(0); + } +} + +async fn daemon_start( + config: &'static Config, + socket: PathBuf, + shutdown: ShutdownToken, + db: &mut Option, + config_started: &mut bool, +) -> Result<(), Box> { + let mut plugins = Plugins::new(config, shutdown.clone()).await?; // Open Database - let mut db = Database::open(config, shutdown.token()).await?; + *db = Some(Database::open(config, 
shutdown.clone()).await?); let (state, stream_managers) = { // Semaphore limiting action execution concurrency @@ -66,8 +135,8 @@ pub async fn daemon( let manager = FilterManager::new( filter, exec_limit.clone(), - shutdown.token(), - &mut db, + shutdown.clone(), + db.as_mut().unwrap(), &mut plugins, now, ) @@ -77,7 +146,7 @@ pub async fn daemon( state.insert(stream, filter_managers.clone()); stream_managers.push( - StreamManager::new(stream, filter_managers, shutdown.token(), &mut plugins).await?, + StreamManager::new(stream, filter_managers, shutdown.clone(), &mut plugins).await?, ); } (state, stream_managers) @@ -89,9 +158,10 @@ pub async fn daemon( // Open socket and run task let socket = Socket::open(socket).await?; - socket.manager(config, state, shutdown.token()); + socket.manager(config, state, shutdown.clone()); // reaction won't abort on startup anymore, we can run start commands + *config_started = true; if !config.start() { return Err("a start command failed, exiting.".into()); } @@ -101,40 +171,12 @@ pub async fn daemon( let standalone = stream_manager.is_standalone(); let handle = tokio::spawn(async move { stream_manager.start().await }); // Only wait for standalone streams - if standalone { - Some(handle) - } else { - None - } + if standalone { Some(handle) } else { None } }); - // Close streams when we receive a quit signal - let signal_received = Arc::new(AtomicBool::new(false)); - handle_signals(shutdown.delegate(), signal_received.clone())?; - // Wait for all streams to quit join_all(stream_task_handles).await; - - // Release last db's sender - let mut db_status = db.quit(); - - debug!("Asking for all tasks to quit..."); - shutdown.ask_shutdown(); - - debug!("Waiting for all tasks to quit..."); - shutdown.wait_shutdown().await; - - let stop_ok = config.stop(); - - if let Ok(Err(err)) = db_status.try_recv() { - Err(format!("database error: {}", err).into()) - } else if !signal_received.load(Ordering::SeqCst) { - Err("quitting because all streams 
finished".into()) - } else if !stop_ok { - Err("while executing stop command".into()) - } else { - Ok(()) - } + Ok(()) } fn handle_signals( diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index c0949ee..708e7a1 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use futures::{future::join_all, FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, future::join_all}; use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; use remoc::Connect; use serde_json::Value; @@ -16,7 +16,7 @@ use tracing::error; use crate::{ concepts::{Config, Plugin}, - daemon::{stream::reader_to_stream, utils::kill_child, ShutdownToken}, + daemon::{ShutdownToken, stream::reader_to_stream, utils::kill_child}, }; pub struct PluginManager { @@ -92,8 +92,10 @@ impl PluginManager { return Err(format!( "reaction can't handle plugin {} with incompatible version {}.{}: current version: {}.{}. {}", plugin.name, - manifest.hello.version_major, manifest.hello.version_minor, - my_hello.version_major, my_hello.version_minor, + manifest.hello.version_major, + manifest.hello.version_minor, + my_hello.version_major, + my_hello.version_minor, hint )); } @@ -133,13 +135,17 @@ impl PluginManager { } } -async fn handle_stderr(stderr: ChildStderr, plugin_name: String, _shutdown: ShutdownToken) { +async fn handle_stderr(stderr: ChildStderr, plugin_name: String, shutdown: ShutdownToken) { let lines = reader_to_stream(stderr); tokio::pin!(lines); loop { - match lines.next().await { + let event = tokio::select! { + line = lines.next() => line, + _ = shutdown.wait() => None, + }; + match event { Some(Ok(line)) => { - // dumb: I can't factorize this because tracing::event! + // sad: I can't factorize this because the tracing::event! macro // requires its log level to be a constant. 
if line.starts_with("DEBUG ") { tracing::debug!("plugin {plugin_name}: {}", line.split_at(5).1) @@ -150,7 +156,7 @@ async fn handle_stderr(stderr: ChildStderr, plugin_name: String, _shutdown: Shut } else if line.starts_with("ERROR ") { tracing::error!("plugin {plugin_name}: {}", line.split_at(5).1) } else { - // If there is no log level, we suppose it's an error (panic or something) + // If there is no log level, we suppose it's an error (may be a panic or something) tracing::error!("plugin {plugin_name}: {}", line) } } diff --git a/src/main.rs b/src/main.rs index a6e22c6..8adbf26 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,6 @@ use reaction::{ daemon::daemon, protocol::Order, }; -use tracing::{error, Level}; #[tokio::main] async fn main() { @@ -28,68 +27,64 @@ async fn main() { let cli = Cli::parse(); - let (is_daemon, level) = if let SubCommand::Start { loglevel, .. } = cli.command { - (true, loglevel) - } else { - (false, Level::DEBUG) - }; - - if is_daemon { - // Set log level + if let SubCommand::Start { + loglevel, + config, + socket, + } = cli.command + { if let Err(err) = tracing_subscriber::fmt::fmt() .without_time() .with_target(false) .with_ansi(std::io::stdout().is_terminal()) - .with_max_level(level) + .with_max_level(loglevel) // .with_max_level(Level::TRACE) .try_init() { eprintln!("ERROR could not initialize logging: {err}"); exit(1); } - } - - let result = match cli.command { - SubCommand::Start { config, socket, .. 
} => daemon(config, socket).await, - SubCommand::Show { - socket, - format, - limit, - patterns, - } => request(socket, format, limit, patterns, Order::Show).await, - SubCommand::Flush { - socket, - format, - limit, - patterns, - } => request(socket, format, limit, patterns, Order::Flush).await, - SubCommand::Trigger { - socket, - limit, - patterns, - } => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await, - SubCommand::TestRegex { - config, - regex, - line, - } => test_regex(config, regex, line), - SubCommand::TestConfig { - config, - format, - verbose, - } => test_config(config, format, verbose), - }; - match result { - Ok(()) => { - exit(0); - } - Err(err) => { - if is_daemon { - error!("{err}"); - } else { - eprintln!("ERROR {err}"); + daemon(config, socket).await; + } else { + let result = match cli.command { + SubCommand::Show { + socket, + format, + limit, + patterns, + } => request(socket, format, limit, patterns, Order::Show).await, + SubCommand::Flush { + socket, + format, + limit, + patterns, + } => request(socket, format, limit, patterns, Order::Flush).await, + SubCommand::Trigger { + socket, + limit, + patterns, + } => request(socket, Format::JSON, Some(limit), patterns, Order::Trigger).await, + SubCommand::TestRegex { + config, + regex, + line, + } => test_regex(config, regex, line), + SubCommand::TestConfig { + config, + format, + verbose, + } => test_config(config, format, verbose), + // Can't be daemon + _ => Ok(()), + }; + match result { + Ok(()) => { + exit(0); + } + Err(err) => { + eprintln!("ERROR {err}"); + exit(1); } - exit(1); } } } diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index ee86850..9234274 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -18,12 +18,12 @@ use std::{ }; use chrono::{Local, TimeDelta}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::{Deserialize, Serialize, de::DeserializeOwned}; use serde_json::Value; use tokio::{ - fs::{rename, File}, + fs::{File, 
rename}, sync::{mpsc, oneshot}, - time::{interval, MissedTickBehavior}, + time::{MissedTickBehavior, interval}, }; use crate::{ @@ -76,10 +76,12 @@ impl Config { } } +pub type DatabaseErrorReceiver = oneshot::Receiver>; + /// Public-facing API for a treedb Database pub struct Database { entry_tx: Option>, - error_rx: oneshot::Receiver>, + error_rx: DatabaseErrorReceiver, } impl Database { @@ -101,7 +103,7 @@ impl Database { /// Permit to close DB's channel. /// Without this function manually called, the DB can't close. - pub fn quit(self) -> oneshot::Receiver> { + pub fn quit(self) -> DatabaseErrorReceiver { self.error_rx } } @@ -279,7 +281,7 @@ async fn rotate_db( // No need to rotate the database when it is new, // we return here (true, ErrorKind::NotFound) => { - return Ok((WriteDB::new(File::create(path).await?), HashMap::default())) + return Ok((WriteDB::new(File::create(path).await?), HashMap::default())); } (_, _) => return Err(err), }, @@ -480,13 +482,13 @@ mod tests { use chrono::{Local, TimeDelta}; use serde_json::Value; use tempfile::{NamedTempFile, TempDir}; - use tokio::fs::{write, File}; + use tokio::fs::{File, write}; use crate::{concepts::Config, daemon::ShutdownController}; use super::{ - helpers::*, raw::WriteDB, rotate_db, Database, DatabaseManager, Entry, KeyType, LoadedDB, - Tree, ValueType, DB_NAME, + DB_NAME, Database, DatabaseManager, Entry, KeyType, LoadedDB, Tree, ValueType, helpers::*, + raw::WriteDB, rotate_db, }; impl DatabaseManager { From 58180fe6091b9ca54e50fe7893aa617ecb60d883 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 30 Oct 2025 12:00:00 +0100 Subject: [PATCH 132/241] fmt, clippy, tests, fix some tests after startup refacto --- plugins/reaction-plugin-cluster/src/main.rs | 2 +- plugins/reaction-plugin-virtual/src/main.rs | 2 +- plugins/reaction-plugin/src/lib.rs | 32 ++---- src/concepts/action.rs | 8 +- src/concepts/filter.rs | 118 +++++++++++--------- src/concepts/mod.rs | 2 +- src/concepts/plugin.rs | 25 ++--- 
src/concepts/stream.rs | 22 ++-- src/daemon/mod.rs | 15 ++- src/daemon/plugin/mod.rs | 11 +- src/daemon/stream.rs | 2 +- src/lib.rs | 8 +- src/main.rs | 2 +- src/treedb/mod.rs | 1 + tests/end_to_end.rs | 6 +- tests/plugin_virtual.rs | 2 +- tests/simple.rs | 9 +- tests/start_stop.rs | 4 +- tests/test-conf/test-cluster.jsonnet | 34 +++--- tests/test-conf/test-virtual.jsonnet | 36 +++--- 20 files changed, 166 insertions(+), 175 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index b738e1c..3a804c5 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -83,7 +83,7 @@ struct ActionInit { impl PluginInfo for Plugin { async fn manifest(&mut self) -> Result { Ok(Manifest { - hello: Hello::hello(), + hello: Hello::new(), streams: BTreeSet::from(["cluster".into()]), actions: BTreeSet::from(["cluster_send".into()]), }) diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index b33774b..fe30d33 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -21,7 +21,7 @@ struct Plugin { impl PluginInfo for Plugin { async fn manifest(&mut self) -> Result { Ok(Manifest { - hello: Hello::hello(), + hello: Hello::new(), streams: BTreeSet::from(["virtual".into()]), actions: BTreeSet::from(["virtual".into()]), }) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 099eb5b..ccc9355 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -143,21 +143,11 @@ pub struct Hello { } impl Hello { - pub fn hello() -> Hello { + pub fn new() -> Hello { + let mut version = env!("CARGO_PKG_VERSION").split("."); Hello { - version_major: env!("CARGO_PKG_VERSION") - .split(".") - .next() - .unwrap() - .parse() - .unwrap(), - version_minor: env!("CARGO_PKG_VERSION") - .split(".") - .skip(1) - .next() - 
.unwrap() - .parse() - .unwrap(), + version_major: version.next().unwrap().parse().unwrap(), + version_minor: version.next().unwrap().parse().unwrap(), } } @@ -166,15 +156,13 @@ impl Hello { && server.version_minor >= plugin.version_minor { Ok(()) + } else if plugin.version_major > server.version_major + || (plugin.version_major == server.version_major + && plugin.version_minor > server.version_minor) + { + Err("consider upgrading reaction".into()) } else { - if plugin.version_major > server.version_major - || (plugin.version_major == server.version_major - && plugin.version_minor > server.version_minor) - { - Err("consider upgrading reaction".into()) - } else { - Err("consider upgrading the plugin".into()) - } + Err("consider upgrading the plugin".into()) } } } diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 00754d1..b815d82 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::process::Command; -use super::{null_value, parse_duration::*, Match, Pattern, PatternType}; +use super::{Match, Pattern, PatternType, null_value, parse_duration::*}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] @@ -102,10 +102,8 @@ impl Action { if self.cmd[0].is_empty() { return Err("cmd's first item is empty".into()); } - } else { - if !self.cmd.is_empty() { - return Err("can't define a cmd and a plugin type".into()); - } + } else if !self.cmd.is_empty() { + return Err("can't define a cmd and a plugin type".into()); } if let Some(after) = &self.after { diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index cf5e0d4..171304f 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -10,7 +10,7 @@ use chrono::TimeDelta; use regex::Regex; use serde::{Deserialize, Serialize}; -use super::{parse_duration, Action, Match, Pattern, PatternType, Patterns}; +use super::{Action, Match, Pattern, PatternType, Patterns, 
parse_duration}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum Duplicate { @@ -159,9 +159,9 @@ impl Filter { } } else if !first && new_patterns.contains(pattern) { return Err(format!( - "pattern {} is present in the first regex but is not present in a following regex. all regexes should contain the same set of regexes", - &pattern.name_with_braces() - )); + "pattern {} is present in the first regex but is not present in a following regex. all regexes should contain the same set of regexes", + &pattern.name_with_braces() + )); } regex_buf = regex_buf.replacen(pattern.name_with_braces(), &pattern.regex, 1); } @@ -202,12 +202,12 @@ impl Filter { for pattern in self.patterns.as_ref() { // if the pattern is in an optional part of the regex, // there may be no captured group for it. - if let Some(match_) = matches.name(&pattern.name) { - if !pattern.is_ignore(match_.as_str()) { - let mut match_ = match_.as_str().to_string(); - pattern.normalize(&mut match_); - result.push(match_); - } + if let Some(match_) = matches.name(&pattern.name) + && !pattern.is_ignore(match_.as_str()) + { + let mut match_ = match_.as_str().to_string(); + pattern.normalize(&mut match_); + result.push(match_); } } if result.len() == self.patterns.len() { @@ -408,10 +408,10 @@ impl Filter { #[cfg(test)] pub mod tests { use crate::concepts::action::tests::{ok_action, ok_action_with_after}; + use crate::concepts::pattern::PatternIp; use crate::concepts::pattern::tests::{ boubou_pattern_with_ignore, default_pattern, number_pattern, ok_pattern_with_ignore, }; - use crate::concepts::pattern::PatternIp; use super::*; @@ -707,24 +707,32 @@ pub mod tests { Ok(vec!("b".into())) ); // Doesn't match - assert!(filter - .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "abc".into())])) + .is_err() + ); // Ignored match - assert!(filter - 
.get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([(pattern.clone(), "a".into())])) + .is_err() + ); // Bad pattern - assert!(filter - .get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([(boubou.clone(), "bou".into())])) + .is_err() + ); // Bad number of patterns - assert!(filter - .get_match_from_patterns(BTreeMap::from([ - (pattern.clone(), "b".into()), - (boubou.clone(), "bou".into()), - ])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (boubou.clone(), "bou".into()), + ])) + .is_err() + ); // Bad number of patterns assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err()); @@ -752,34 +760,42 @@ pub mod tests { Ok(vec!("bou".into(), "b".into())) ); // Doesn't match - assert!(filter - .get_match_from_patterns(BTreeMap::from([ - (pattern.clone(), "abc".into()), - (boubou.clone(), "bou".into()), - ])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "abc".into()), + (boubou.clone(), "bou".into()), + ])) + .is_err() + ); // Ignored match - assert!(filter - .get_match_from_patterns(BTreeMap::from([ - (pattern.clone(), "b".into()), - (boubou.clone(), "boubou".into()), - ])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (boubou.clone(), "boubou".into()), + ])) + .is_err() + ); // Bad pattern - assert!(filter - .get_match_from_patterns(BTreeMap::from([ - (pattern.clone(), "b".into()), - (number_pattern.clone(), "1".into()), - ])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (number_pattern.clone(), "1".into()), + ])) + .is_err() + ); // Bad number of patterns - assert!(filter - 
.get_match_from_patterns(BTreeMap::from([ - (pattern.clone(), "b".into()), - (boubou.clone(), "bou".into()), - (number_pattern.clone(), "1".into()), - ])) - .is_err()); + assert!( + filter + .get_match_from_patterns(BTreeMap::from([ + (pattern.clone(), "b".into()), + (boubou.clone(), "bou".into()), + (number_pattern.clone(), "1".into()), + ])) + .is_err() + ); // Bad number of patterns assert!(filter.get_match_from_patterns(BTreeMap::from([])).is_err()); diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index ddec710..6bd72a8 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -43,7 +43,7 @@ fn merge_attrs( if this == default { return Ok(other); } - return Ok(this); + Ok(this) } fn null_value() -> Value { diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index bc51bf2..62d81a4 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -1,9 +1,4 @@ -use std::{ - collections::BTreeMap, - io::{Error, ErrorKind}, - os::linux::fs::MetadataExt, - process::Stdio, -}; +use std::{collections::BTreeMap, io::Error, os::linux::fs::MetadataExt, process::Stdio}; use serde::{Deserialize, Serialize}; use tokio::{ @@ -50,10 +45,15 @@ impl Plugin { // Only when testing, make relative paths absolute #[cfg(debug_assertions)] if !self.path.starts_with("/") { - self.path = std::fs::canonicalize(&self.path) - .unwrap() - .to_string_lossy() - .to_string(); + use std::env::current_dir; + + self.path = format!( + "{}/{}", + current_dir() + .map_err(|err| format!("error on working directory: {err}"))? 
+ .to_string_lossy(), + self.path + ); } // Disallow relative paths @@ -79,10 +79,7 @@ impl Plugin { let stat = fs::metadata(path).await?; if stat.st_uid() != 0 { - return Err(Error::new( - ErrorKind::Other, - "plugin file is not owned by root", - )); + return Err(Error::other("plugin file is not owned by root")); } } diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 84bea87..8830c66 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -3,7 +3,7 @@ use std::{cmp::Ordering, collections::BTreeMap, hash::Hash}; use serde::{Deserialize, Serialize}; use serde_json::Value; -use super::{merge_attrs, null_value, Filter, Patterns}; +use super::{Filter, Patterns, merge_attrs, null_value}; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] @@ -36,7 +36,10 @@ impl Stream { for (key, filter) in other.filters.into_iter() { if self.filters.insert(key.clone(), filter).is_some() { - return Err(format!("filter {} is already defined. filter definitions can't be spread accross multiple files.", key)); + return Err(format!( + "filter {} is already defined. 
filter definitions can't be spread accross multiple files.", + key + )); } } @@ -71,10 +74,8 @@ impl Stream { if self.cmd[0].is_empty() { return Err("cmd's first item is empty".into()); } - } else { - if !self.cmd.is_empty() { - return Err("can't define cmd and a plugin type".into()); - } + } else if !self.cmd.is_empty() { + return Err("can't define cmd and a plugin type".into()); } if self.filters.is_empty() { @@ -118,10 +119,11 @@ mod tests { use crate::concepts::filter::tests::ok_filter; fn ok_stream() -> Stream { - let mut stream = Stream::default(); - stream.cmd = vec!["command".into()]; - stream.filters.insert("name".into(), ok_filter()); - stream + Stream { + cmd: vec!["command".into()], + filters: BTreeMap::from([("name".into(), ok_filter())]), + ..Default::default() + } } #[test] diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 06170ae..1a29310 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -2,7 +2,6 @@ use std::{ collections::HashMap, error::Error, path::PathBuf, - process::exit, sync::{ Arc, atomic::{AtomicBool, Ordering}, @@ -36,13 +35,13 @@ mod socket; mod stream; mod utils; -pub async fn daemon(config_path: PathBuf, socket: PathBuf) { +pub async fn daemon(config_path: PathBuf, socket: PathBuf) -> i32 { // Load config or quit let config: &'static Config = Box::leak(Box::new(match Config::from_path(&config_path) { Ok(config) => config, Err(err) => { error!("{err}"); - return; + return 1; } })); @@ -53,7 +52,7 @@ pub async fn daemon(config_path: PathBuf, socket: PathBuf) { let signal_received = Arc::new(AtomicBool::new(false)); if let Err(err) = handle_signals(shutdown.delegate(), signal_received.clone()) { error!("{err}"); - return; + return 1; } let mut db = None; @@ -92,17 +91,17 @@ pub async fn daemon(config_path: PathBuf, socket: PathBuf) { } if daemon_err || !stop_ok { - exit(1); + return 1; } else if let Some(mut db_status) = db_status && let Ok(Err(err)) = db_status.try_recv() { error!("database error: {}", err); - exit(1); + 
return 1; } else if !signal_received.load(Ordering::SeqCst) { error!("quitting because all streams finished"); - exit(1); + return 1; } else { - exit(0); + return 0; } } diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 708e7a1..a332c7a 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -86,7 +86,7 @@ impl PluginManager { .await .map_err(|err| format!("error while getting plugin {} manifest: {err}", plugin.name))?; - let my_hello = Hello::hello(); + let my_hello = Hello::new(); if let Err(hint) = Hello::is_compatible(&my_hello, &manifest.hello) { return Err(format!( @@ -182,7 +182,7 @@ impl Plugins { for plugin in config.plugins.values() { let name = plugin.name.clone(); - this.load_plugin(&plugin, &config.state_directory, shutdown.clone()) + this.load_plugin(plugin, &config.state_directory, shutdown.clone()) .await .map_err(|err| format!("plugin {name}: {err}]"))?; } @@ -200,7 +200,7 @@ impl Plugins { let manager = PluginManager::new(plugin, state_directory, shutdown).await?; for stream in &manager.streams { - if let Some(name) = self.streams.insert(stream.clone().into(), name.clone()) { + if let Some(name) = self.streams.insert(stream.clone(), name.clone()) { return Err(format!( "plugin {name} already exposed a stream with type name '{stream}'", )); @@ -208,7 +208,7 @@ impl Plugins { } for action in &manager.actions { - if let Some(name) = self.actions.insert(action.clone().into(), name.clone()) { + if let Some(name) = self.actions.insert(action.clone(), name.clone()) { return Err(format!( "plugin {name} already exposed a action with type name '{action}'", )); @@ -276,10 +276,9 @@ impl Plugins { // Convert Vec> into Result .into_iter() .zip(self.plugins.values()) - .map(|(result, plugin_manager)| { + .try_for_each(|(result, plugin_manager)| { result.map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.name)) }) - .collect::>() } pub fn manager(self) { diff --git a/src/daemon/stream.rs 
b/src/daemon/stream.rs index 42c9b54..c15fd3d 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -4,7 +4,7 @@ use std::{ }; use chrono::Local; -use futures::{future::join_all, FutureExt, Stream as AsyncStream, StreamExt}; +use futures::{FutureExt, Stream as AsyncStream, StreamExt, future::join_all}; use reaction_plugin::StreamImpl; use regex::RegexSet; use tokio::{ diff --git a/src/lib.rs b/src/lib.rs index 3619f1a..efc4595 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,4 @@ -#![warn( - clippy::panic, - clippy::todo, - clippy::unimplemented, - clippy::unwrap_used, - unsafe_code -)] +#![warn(clippy::panic, clippy::todo, clippy::unimplemented, unsafe_code)] #![allow(clippy::upper_case_acronyms, clippy::mutable_key_type)] // Allow unwrap in tests #![cfg_attr(test, allow(clippy::unwrap_used))] diff --git a/src/main.rs b/src/main.rs index 8adbf26..e1db591 100644 --- a/src/main.rs +++ b/src/main.rs @@ -44,7 +44,7 @@ async fn main() { eprintln!("ERROR could not initialize logging: {err}"); exit(1); } - daemon(config, socket).await; + exit(daemon(config, socket).await); } else { let result = match cli.command { SubCommand::Show { diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index 9234274..173b1db 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -89,6 +89,7 @@ impl Database { /// You'll have to: /// - drop all [`Tree`]s, /// - call [`Self::quit`], + /// /// to have the Database properly quit. /// /// You can wait for [`Self::quit`] returned channel to know how it went. 
diff --git a/tests/end_to_end.rs b/tests/end_to_end.rs index 7d68890..89e52e0 100644 --- a/tests/end_to_end.rs +++ b/tests/end_to_end.rs @@ -84,7 +84,7 @@ fn kill_stream_on_exit() -> Result<(), Box> { let _ = signal::kill(pid, signal::SIGKILL); let _ = child.wait(); - assert!(false, "Test timed out"); + panic!("Test timed out"); } } @@ -173,7 +173,7 @@ fn manualy_trigger_filter() -> Result<(), Box> { if elapsed > Duration::from_secs(1) { let _ = daemon.kill(); let _ = daemon.wait(); - assert!(false, "Daemon did not create socket"); + panic!("Daemon did not create socket"); } } @@ -204,7 +204,7 @@ fn manualy_trigger_filter() -> Result<(), Box> { if elapsed > Duration::from_secs(2) { let _ = daemon.kill(); let _ = daemon.wait(); - assert!(false, "Daemon did not exit"); + panic!("Daemon did not exit"); } } diff --git a/tests/plugin_virtual.rs b/tests/plugin_virtual.rs index c9bb385..008624a 100644 --- a/tests/plugin_virtual.rs +++ b/tests/plugin_virtual.rs @@ -31,6 +31,6 @@ fn plugin_virtual() { let output = [ "a0 1", "a0 2", "a0 3", "a0 4", "b0 1", "b0 2", "b0 3", "b0 4", "", ]; - tmp_dir.child("log").assert(&output.join("\n")); + tmp_dir.child("log").assert(output.join("\n")); tmp_dir.child("log").write_str("").unwrap(); } diff --git a/tests/simple.rs b/tests/simple.rs index 91830e9..0fd4bce 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -129,8 +129,11 @@ async fn simple() { let (daemon_exit, flush1, flush2) = tokio::join!(handle, handle2, handle3); assert!(daemon_exit.is_ok()); + assert!(daemon_exit.unwrap() == 1); assert!(flush1.is_ok()); + assert!(flush1.unwrap().is_ok()); assert!(flush2.is_ok()); + assert!(flush2.unwrap().is_ok()); assert_eq!( // 24 is encountered for the second time, then @@ -160,11 +163,7 @@ async fn simple() { file_with_contents(oneshot_path, ""); let daemon_exit = daemon(config_path.into(), socket_path.into()).await; - assert!(daemon_exit.is_err()); - assert_eq!( - daemon_exit.unwrap_err().to_string(), - "quitting because all streams 
finished" - ); + assert!(daemon_exit == 1); // 36 trigger from DB // 12 trigger from DB diff --git a/tests/start_stop.rs b/tests/start_stop.rs index 09a459e..c555e54 100644 --- a/tests/start_stop.rs +++ b/tests/start_stop.rs @@ -1,7 +1,7 @@ use std::{path::Path, time::Duration}; use assert_cmd::Command; -use assert_fs::{prelude::*, TempDir}; +use assert_fs::{TempDir, prelude::*}; use predicates::prelude::predicate; #[test] @@ -23,7 +23,7 @@ fn start_stop() { "stop 2", "", ]; - tmp_dir.child("log").assert(&output.join("\n")); + tmp_dir.child("log").assert(output.join("\n")); tmp_dir.child("log").write_str("").unwrap(); println!( diff --git a/tests/test-conf/test-cluster.jsonnet b/tests/test-conf/test-cluster.jsonnet index 9ec8f32..eca617f 100644 --- a/tests/test-conf/test-cluster.jsonnet +++ b/tests/test-conf/test-cluster.jsonnet @@ -10,17 +10,15 @@ plugins: { cluster: { - path: "./target/debug/reaction-plugin-cluster", + path: './target/debug/reaction-plugin-cluster', check_root: false, systemd_options: { - DynamicUser: ["false"], - }, - options: { - clusters: { + DynamicUser: ['false'], + options: { org1: { listen_port: 9000, bootstrap_nodes: { - "public_key": ["127.0.0.1:9001"], + public_key: ['127.0.0.1:9001'], }, }, }, @@ -30,36 +28,36 @@ streams: { s0: { - cmd: ["bash", "-c", "for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2"], + cmd: ['bash', '-c', 'for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2'], filters: { f0: { - regex: ["^$"], + regex: ['^$'], actions: { a0: { - type: "virtual", + type: 'virtual', options: { - send: "a0 ", - to: "s1", - } + send: 'a0 ', + to: 's1', + }, }, b0: { - type: "virtual", + type: 'virtual', options: { - send: "b0 ", - to: "s1", + send: 'b0 ', + to: 's1', }, - after: "600ms", + after: '600ms', }, }, }, }, }, s1: { - type: "cluster", + type: 'cluster', options: {}, filters: { f1: { - regex: ["^$"], + regex: ['^$'], actions: { a1: { cmd: ['sh', '-c', 'echo >>./log'], diff --git 
a/tests/test-conf/test-virtual.jsonnet b/tests/test-conf/test-virtual.jsonnet index e68a4fa..6848858 100644 --- a/tests/test-conf/test-virtual.jsonnet +++ b/tests/test-conf/test-virtual.jsonnet @@ -4,52 +4,52 @@ regex: @"[0-9]+", }, all: { - regex: @".*" - } + regex: @".*", + }, }, plugins: { virtual: { - path: "./target/debug/reaction-plugin-virtual", + path: './target/debug/reaction-plugin-virtual', check_root: false, systemd_options: { - DynamicUser: ["false"], - } - } + DynamicUser: ['false'], + }, + }, }, streams: { s0: { - cmd: ["bash", "-c", "for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2"], + cmd: ['bash', '-c', 'for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2'], filters: { f0: { - regex: ["^$"], + regex: ['^$'], actions: { a0: { - type: "virtual", + type: 'virtual', options: { - send: "a0 ", - to: "s1", - } + send: 'a0 ', + to: 's1', + }, }, b0: { - type: "virtual", + type: 'virtual', options: { - send: "b0 ", - to: "s1", + send: 'b0 ', + to: 's1', }, - after: "600ms", + after: '600ms', }, }, }, }, }, s1: { - type: "virtual", + type: 'virtual', options: {}, filters: { f1: { - regex: ["^$"], + regex: ['^$'], actions: { a1: { cmd: ['sh', '-c', 'echo >>./log'], From 310d3dbe99dbfd3a13151b462ead4dfaa0b104a3 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 30 Oct 2025 12:00:00 +0100 Subject: [PATCH 133/241] Fix plugin build, one secret key per cluster, more work on cluster init --- Cargo.toml | 7 +- plugins/reaction-plugin-cluster/Cargo.toml | 6 +- .../reaction-plugin-cluster/src/cluster.rs | 36 +++++++++++ plugins/reaction-plugin-cluster/src/main.rs | 44 ++++++++++--- .../reaction-plugin-cluster/src/secret_key.rs | 26 ++++---- src/concepts/plugin.rs | 4 +- tests/test-conf/cluster-a.jsonnet | 64 ------------------- tests/test-conf/test-cluster.jsonnet | 17 +++-- 8 files changed, 101 insertions(+), 103 deletions(-) create mode 100644 plugins/reaction-plugin-cluster/src/cluster.rs delete mode 100644 tests/test-conf/cluster-a.jsonnet diff --git 
a/Cargo.toml b/Cargo.toml index 9a9cff7..1a0aaeb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ tracing = "0.1.40" tracing-subscriber = "0.3.18" # Reaction plugin system remoc = { workspace = true } -reaction-plugin = { path = "plugins/reaction-plugin" } +reaction-plugin = { workspace = true } [build-dependencies] clap = { version = "4.5.4", features = ["derive"] } @@ -82,7 +82,4 @@ remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" tokio = { version = "1.40.0" } - -[[bin]] -name = "reaction-plugin-virtual" -path = "plugins/reaction-plugin-virtual/src/main.rs" +reaction-plugin = { path = "plugins/reaction-plugin" } diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 902804d..648c67a 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -4,13 +4,13 @@ version = "0.1.0" edition = "2024" [dependencies] -reaction-plugin.path = "../reaction-plugin" +reaction-plugin.workspace = true -tokio.workspace = true -tokio.features = ["rt-multi-thread"] remoc.workspace = true serde.workspace = true serde_json.workspace = true +tokio.workspace = true +tokio.features = ["rt-multi-thread"] data-encoding = "2.9.0" iroh = "0.94.0" diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs new file mode 100644 index 0000000..4b3744a --- /dev/null +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -0,0 +1,36 @@ +use std::net::{SocketAddrV4, SocketAddrV6}; + +use iroh::Endpoint; +use reaction_plugin::RemoteResult; + +use crate::Plugin; + +const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; + +impl Plugin { + pub async fn endpoint_init(&mut self) -> RemoteResult<()> { + // while let Some((stream_name, stream)) = self.streams.pop_first() { + for (stream_name, stream) in &self.streams { + let mut builder = Endpoint::builder() + 
.secret_key(stream.secret_key.clone()) + .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect()) + .relay_mode(iroh::RelayMode::Disabled) + .clear_discovery(); + + if let Some(ip) = stream.bind_ipv4 { + builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port)); + } + if let Some(ip) = stream.bind_ipv6 { + builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0)); + } + + let endpoint = builder.bind().await.map_err(|err| { + format!("Could not create socket address for cluster {stream_name}: {err}") + })?; + self.endpoints.insert(stream_name.clone(), endpoint); + } + // We have no use of those parameters anymore + self.streams = Default::default(); + Ok(()) + } +} diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 3a804c5..a67cdd6 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -1,9 +1,9 @@ use std::{ collections::{BTreeMap, BTreeSet}, - net::SocketAddr, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, }; -use iroh::PublicKey; +use iroh::{Endpoint, PublicKey, SecretKey}; use reaction_plugin::{ ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, main_loop, }; @@ -12,6 +12,7 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::fs; +mod cluster; mod secret_key; use secret_key::secret_key; @@ -27,11 +28,22 @@ async fn main() { struct Plugin { streams: BTreeMap, actions: Vec, + endpoints: BTreeMap, } /// Stream options as defined by the user #[derive(Serialize, Deserialize)] struct StreamOptions { + /// The UDP port to open + listen_port: u16, + /// The IPv4 to bind to. Defaults to 0.0.0.0. + /// Set to `null` to use IPv6 only. + #[serde(default = "ipv4_unspecified")] + bind_ipv4: Option, + /// The IPv6 to bind to. Defaults to 0.0.0.0. + /// Set to `null` to use IPv6 only. 
+ #[serde(default = "ipv6_unspecified")] + bind_ipv6: Option, /// The secret that permits to join the cluster. shared_secret: Option, /// The secret that permits to join the cluster, as a file. @@ -41,9 +53,20 @@ struct StreamOptions { nodes: Vec, } +fn ipv4_unspecified() -> Option { + Some(Ipv4Addr::UNSPECIFIED) +} +fn ipv6_unspecified() -> Option { + Some(Ipv6Addr::UNSPECIFIED) +} + /// Stream information before start struct StreamInit { + listen_port: u16, + bind_ipv4: Option, + bind_ipv6: Option, shared_secret: String, + secret_key: SecretKey, nodes: Vec, tx: mpsc::Sender, } @@ -130,10 +153,20 @@ impl PluginInfo for Plugin { }); } + let secret_key = secret_key(&stream_name).await?; + eprintln!( + "INFO public key of this node for cluster {stream_name}: {}", + key_bytes_to_b64(secret_key.public().as_bytes()) + ); + let (tx, rx) = mpsc::channel(1); let stream = StreamInit { + listen_port: options.listen_port, + bind_ipv4: options.bind_ipv4, + bind_ipv6: options.bind_ipv6, shared_secret, + secret_key, nodes: init_nodes, tx, }; @@ -180,12 +213,7 @@ impl PluginInfo for Plugin { } async fn finish_setup(&mut self) -> RemoteResult<()> { - let secret_key = secret_key().await?; - eprintln!( - "public key of this node: {}", - key_bytes_to_b64(secret_key.public().as_bytes()) - ); - todo!() + self.endpoint_init().await } async fn close(self) -> RemoteResult<()> { diff --git a/plugins/reaction-plugin-cluster/src/secret_key.rs b/plugins/reaction-plugin-cluster/src/secret_key.rs index ca7e67a..5545b86 100644 --- a/plugins/reaction-plugin-cluster/src/secret_key.rs +++ b/plugins/reaction-plugin-cluster/src/secret_key.rs @@ -7,20 +7,22 @@ use tokio::{ io::AsyncWriteExt, }; -const SECRET_KEY_PATH: &str = "./secret_key.txt"; +fn secret_key_path(cluster_name: &str) -> String { + format!("./secret_key_{cluster_name}.txt") +} -pub async fn secret_key() -> Result { - if let Some(key) = get_secret_key().await? 
{ +pub async fn secret_key(cluster_name: &str) -> Result { + if let Some(key) = get_secret_key(cluster_name).await? { Ok(key) } else { let key = SecretKey::generate(&mut rand::rng()); - set_secret_key(&key).await?; + set_secret_key(cluster_name, &key).await?; Ok(key) } } -async fn get_secret_key() -> Result, String> { - let key = match fs::read_to_string(SECRET_KEY_PATH).await { +async fn get_secret_key(cluster_name: &str) -> Result, String> { + let key = match fs::read_to_string(secret_key_path(cluster_name)).await { Ok(key) => Ok(key), Err(err) => match err.kind() { io::ErrorKind::NotFound => return Ok(None), @@ -30,24 +32,26 @@ async fn get_secret_key() -> Result, String> { let bytes = match key_b64_to_bytes(&key) { Ok(key) => Ok(key), Err(err) => Err(format!( - "invalid secret key read from file: {err}. Please remove the `{SECRET_KEY_PATH}` file from plugin directory." + "invalid secret key read from file: {err}. Please remove the `{}` file from plugin directory.", + secret_key_path(cluster_name), )), }?; Ok(Some(SecretKey::from_bytes(&bytes))) } -async fn set_secret_key(key: &SecretKey) -> Result<(), String> { +async fn set_secret_key(cluster_name: &str, key: &SecretKey) -> Result<(), String> { let secret_key = key_bytes_to_b64(&key.to_bytes()); + let secret_key_path = secret_key_path(cluster_name); File::options() .mode(0o600) .write(true) .create(true) - .open(SECRET_KEY_PATH) + .open(&secret_key_path) .await - .map_err(|err| format!("can't open `{SECRET_KEY_PATH}` in plugin directory: {err}"))? + .map_err(|err| format!("can't open `{secret_key_path}` in plugin directory: {err}"))? 
.write_all(secret_key.as_bytes()) .await - .map_err(|err| format!("can't write to `{SECRET_KEY_PATH}` in plugin directory: {err}")) + .map_err(|err| format!("can't write to `{secret_key_path}` in plugin directory: {err}")) } pub fn key_b64_to_bytes(key: &str) -> Result<[u8; 32], DecodeError> { diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 62d81a4..8bcde2c 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -45,11 +45,9 @@ impl Plugin { // Only when testing, make relative paths absolute #[cfg(debug_assertions)] if !self.path.starts_with("/") { - use std::env::current_dir; - self.path = format!( "{}/{}", - current_dir() + std::env::current_dir() .map_err(|err| format!("error on working directory: {err}"))? .to_string_lossy(), self.path diff --git a/tests/test-conf/cluster-a.jsonnet b/tests/test-conf/cluster-a.jsonnet deleted file mode 100644 index ed36d95..0000000 --- a/tests/test-conf/cluster-a.jsonnet +++ /dev/null @@ -1,64 +0,0 @@ -{ - patterns: { - num: { - regex: @"[0-9]+", - }, - all: { - regex: @".*" - } - }, - - plugins: { - cluster: { - path: "./target/debug/reaction-plugin-cluster", - check_root: false, - systemd_options: { - DynamicUser: ["false"], - } - } - }, - - streams: { - s0: { - cmd: ["bash", "-c", "for i in $(seq 4); do echo $i; sleep 0.1; done; sleep 1.2"], - filters: { - f0: { - regex: ["^$"], - actions: { - a0: { - type: "virtual", - options: { - send: "a0 ", - to: "s1", - } - }, - b0: { - type: "cluster", - options: { - send: "b0 ", - to: "s1", - }, - after: "600ms", - }, - }, - }, - }, - }, - s1: { - type: "cluster", - options: { - - }, - filters: { - f1: { - regex: ["^$"], - actions: { - a1: { - cmd: ['sh', '-c', 'echo >>./log'], - }, - }, - }, - }, - }, - }, -} diff --git a/tests/test-conf/test-cluster.jsonnet b/tests/test-conf/test-cluster.jsonnet index eca617f..e73d89f 100644 --- a/tests/test-conf/test-cluster.jsonnet +++ b/tests/test-conf/test-cluster.jsonnet @@ -14,14 +14,6 @@ check_root: false, 
systemd_options: { DynamicUser: ['false'], - options: { - org1: { - listen_port: 9000, - bootstrap_nodes: { - public_key: ['127.0.0.1:9001'], - }, - }, - }, }, }, }, @@ -54,7 +46,14 @@ }, s1: { type: 'cluster', - options: {}, + options: { + listen_port: 9000, + shared_secret: '', + nodes: { + publickey: '', + addresses: ['127.0.0.1:9001'], + }, + }, filters: { f1: { regex: ['^$'], From ebf906ea51d1925df955a75f7b0d4ed1f1d398a7 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 31 Oct 2025 12:00:00 +0100 Subject: [PATCH 134/241] Better doc and errors --- plugins/reaction-plugin/src/lib.rs | 106 ++++++++++++++++----------- src/daemon/plugin/mod.rs | 39 ++++++++-- tests/test-conf/test-cluster.jsonnet | 4 +- 3 files changed, 97 insertions(+), 52 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index ccc9355..32d3ca5 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -1,10 +1,67 @@ //! This crate defines the API between reaction's core and plugins. //! +//! Plugins must be written in Rust. +//! +//! This documentation assumes the reader has some knowledge of Rust. +//! However, if you find that something is unclear, don't hesitate to +//! [ask for help](https://framagit.org/ppom/reaction/#help), even if you're new to Rust. +//! //! To implement a plugin, one has to provide an implementation of [`PluginInfo`], that provides //! the entrypoint for a plugin. -//! It permits to define 0 to n (stream, filter, action) custom types. +//! It permits to define `0` to `n` custom stream and action types. +//! +//! ## Naming & calling conventions +//! +//! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`. +//! It will be invoked with one positional argument "serve". +//! ``` +//! reaction-plugin-$NAME serve +//! ``` +//! This can be useful if you want to provide CLI functionnality to your users, +//! so you can distinguish between a human user and reaction. +//! 
+//! It will be executed in its own directory, in which it should have write access. +//! The directory is `$reaction_state_directory/plugin_data/$NAME`. +//! reaction's [state_directory](https://reaction.ppom.me/reference.html#state_directory) +//! defaults to its working directory. +//! +//! ## Communication +//! +//! Communication between the plugin and reaction is based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait +//! calls over a single transport channel. +//! The channels is made of stdin and stdout, so don't use them for something else. +//! +//! [`remoc`] build upon [`tokio`], so you'll need to use tokio too. +//! +//! ### Errors +//! +//! Errors can be printed to stderr. +//! They'll be captured line by line and re-printed by reaction, with the plugin name prepended. +//! +//! A line can start with `DEBUG `, `INFO `, `WARN `, `ERROR `. +//! If the starts with none of the above, the line is assumed to be an error. +//! +//! Example: +//! Those lines: +//! ```log +//! WARN This is an official warning from the plugin +//! Freeeee errrooooorrr +//! ``` +//! Will become: +//! ```log +//! WARN plugin test: This is an official warning from the plugin +//! ERROR plugin test: Freeeee errrooooorrr +//! ``` +//! +//! ## Starting template +//! +//! ```bash +//! cargo new reaction-plugin-$NAME +//! cd reaction-plugin-$NAME +//! cargo add reaction-plugin tokio +//! vim src/main.rs +//! ``` //! -//! Minimal example: //! `src/main.rs` //! ```rust //! use reaction_plugin::PluginInfo; @@ -20,50 +77,13 @@ //! //! impl PluginInfo for Plugin { //! // ... +//! // Your IDE should propose to implement missing members of the `Plugin` trait +//! // ... //! } //! ``` //! -//! ## Naming & calling conventions -//! -//! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`. -//! It will be invoked with one positional argument "serve". -//! ``` -//! reaction-plugin-$NAME serve -//! ``` -//! 
This can be useful if you want to provide CLI functionnality to your users. -//! -//! It will be run in its own directory, in which it should have write access. -//! -//! ## Communication -//! -//! Communication between the plugin and reaction is based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait -//! calls over a single transport channel. -//! The channels used are stdin and stdout, so you can't use them for something else. -//! -//! ### Errors -//! -//! Errors can be printed to stderr. -//! They'll be captured line by line and re-printed by reaction, with the plugin name prepended. -//! -//! A line can start with `DEBUG `, `INFO `, `WARN `, `ERROR `. -//! If the starts with none of the above, the line is assumed to be an error. -//! -//! Examples: -//! ```log -//! WARN This is an official warning from the plugin -//! # will become: -//! WARN plugin test: This is an official warning from the plugin -//! -//! Freeeee errrooooorrr -//! # will become: -//! ERROR plugin test: Freeeee errrooooorrr -//! ``` -//! -//! ## Starting template -//! -//! Core plugins can be found here: -//! The "virtual" plugin is the simplest and can serve as a template. -//! You'll have to adjust dependencies versions in `Cargo.toml`. +//! Core plugins can be found here: . +//! The "virtual" plugin is the simplest and can serve as a good complete example. 
use std::{collections::BTreeSet, error::Error, fmt::Display, process::exit}; diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index a332c7a..340e2c1 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -12,7 +12,7 @@ use tokio::{ process::{Child, ChildStderr}, time::sleep, }; -use tracing::error; +use tracing::{error, info}; use crate::{ concepts::{Config, Plugin}, @@ -225,9 +225,15 @@ impl Plugins { stream_type: String, config: Value, ) -> Result { - let plugin_name = self.streams.get(&stream_type).ok_or(format!( - "No plugin provided the stream type '{stream_type}'" - ))?; + let plugin_name = match self.streams.get(&stream_type) { + Some(name) => name, + None => { + display_plugin_exposed_types(&self.streams, "stream"); + return Err(format!( + "No plugin provided the stream type '{stream_type}'" + )); + } + }; let plugin = self.plugins.get_mut(plugin_name).unwrap(); @@ -246,9 +252,15 @@ impl Plugins { config: Value, patterns: Vec, ) -> Result { - let plugin_name = self.actions.get(&action_type).ok_or(format!( - "No plugin provided the action type '{action_type}'" - ))?; + let plugin_name = match self.actions.get(&action_type) { + Some(name) => name, + None => { + display_plugin_exposed_types(&self.actions, "action"); + return Err(format!( + "No plugin provided the action type '{action_type}'" + )); + } + }; let plugin = self.plugins.get_mut(plugin_name).unwrap(); @@ -289,3 +301,16 @@ impl Plugins { } } } + +fn display_plugin_exposed_types(type_to_plugin: &BTreeMap, name: &str) { + let mut plugin_to_types: BTreeMap<&str, Vec<&str>> = BTreeMap::new(); + for (type_, plugin) in type_to_plugin { + plugin_to_types.entry(plugin).or_default().push(type_); + } + for (plugin, types) in plugin_to_types { + info!( + "Plugin {plugin} exposes those {name} types: '{}'", + types.join("', '") + ); + } +} diff --git a/tests/test-conf/test-cluster.jsonnet b/tests/test-conf/test-cluster.jsonnet index e73d89f..a635c73 100644 --- 
a/tests/test-conf/test-cluster.jsonnet +++ b/tests/test-conf/test-cluster.jsonnet @@ -26,14 +26,14 @@ regex: ['^$'], actions: { a0: { - type: 'virtual', + type: 'cluster_send', options: { send: 'a0 ', to: 's1', }, }, b0: { - type: 'virtual', + type: 'cluster_send', options: { send: 'b0 ', to: 's1', From cd2d337850641075cfe4a6e94b62869cee92d835 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 31 Oct 2025 12:00:00 +0100 Subject: [PATCH 135/241] Fixed communication error: do not use serde_json::Value So maybe serde_json's Value can't be serialized with postbag. Recreated my own Value that can be converted from and to serde_json's. removed one useless tokio::spawn. --- plugins/reaction-plugin-cluster/src/main.rs | 11 ++- plugins/reaction-plugin-virtual/src/main.rs | 2 +- plugins/reaction-plugin/src/lib.rs | 82 ++++++++++++++++++--- src/daemon/filter/mod.rs | 6 +- src/daemon/plugin/mod.rs | 4 +- src/daemon/stream.rs | 4 +- 6 files changed, 88 insertions(+), 21 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index a67cdd6..72cddb4 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -5,11 +5,10 @@ use std::{ use iroh::{Endpoint, PublicKey, SecretKey}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, main_loop, + ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, main_loop, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; -use serde_json::Value; use tokio::fs; mod cluster; @@ -122,8 +121,8 @@ impl PluginInfo for Plugin { return Err("This plugin can't handle other stream types than cluster".into()); } - let options: StreamOptions = - serde_json::from_value(config).map_err(|err| format!("invalid options: {err}"))?; + let options: StreamOptions = serde_json::from_value(config.into()) + .map_err(|err| format!("invalid options: {err}"))?; let shared_secret = 
if let Some(shared_secret) = options.shared_secret { shared_secret @@ -194,8 +193,8 @@ impl PluginInfo for Plugin { return Err("This plugin can't handle other action types than cluster".into()); } - let options: ActionOptions = - serde_json::from_value(config).map_err(|err| format!("invalid options: {err}"))?; + let options: ActionOptions = serde_json::from_value(config.into()) + .map_err(|err| format!("invalid options: {err}"))?; let (tx, rx) = mpsc::channel(1); diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index fe30d33..57b8610 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -152,7 +152,7 @@ impl VirtualActionInit { config: Value, patterns: Vec, ) -> Result<(Self, mpsc::Sender), String> { - let options: ActionOptions = serde_json::from_value(config).map_err(|err| { + let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") })?; diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 32d3ca5..1f2eecc 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -85,14 +85,19 @@ //! Core plugins can be found here: . //! The "virtual" plugin is the simplest and can serve as a good complete example. -use std::{collections::BTreeSet, error::Error, fmt::Display, process::exit}; +use std::{ + collections::{BTreeMap, BTreeSet}, + error::Error, + fmt::Display, + process::exit, +}; use remoc::{ Connect, rch, rtc::{self, Server}, }; use serde::{Deserialize, Serialize}; -pub use serde_json::Value; +use serde_json::{Number, Value as JValue}; use tokio::io::{stdin, stdout}; /// This is the only trait that **must** be implemented by a plugin. @@ -102,7 +107,8 @@ pub trait PluginInfo { /// Return the manifest of the plugin. 
async fn manifest(&mut self) -> Result; - /// Return one stream of a given type if it exists + /// Return one stream of a given type. + /// Errors if the type does not exist or if config is invalid. async fn stream_impl( &mut self, stream_name: String, @@ -110,7 +116,8 @@ pub trait PluginInfo { config: Value, ) -> RemoteResult; - /// Return one instance of a given type. + /// Return one action of a given type. + /// Errors if the type does not exist or if config is invalid. async fn action_impl( &mut self, stream_name: String, @@ -187,6 +194,54 @@ impl Hello { } } +/// A clone of [`serde_json::Value`] +/// Implements From & Into [`serde_json::Value`] +#[derive(Serialize, Deserialize, Clone)] +pub enum Value { + Null, + Bool(bool), + Integer(i64), + Float(f64), + String(String), + Array(Vec), + Object(BTreeMap), +} + +impl From for Value { + fn from(value: serde_json::Value) -> Self { + match value { + JValue::Null => Value::Null, + JValue::Bool(b) => Value::Bool(b), + JValue::Number(number) => { + if let Some(number) = number.as_i64() { + Value::Integer(number) + } else if let Some(number) = number.as_f64() { + Value::Float(number) + } else { + Value::Null + } + } + JValue::String(s) => Value::String(s.into()), + JValue::Array(v) => Value::Array(v.into_iter().map(|e| e.into()).collect()), + JValue::Object(m) => Value::Object(m.into_iter().map(|(k, v)| (k, v.into())).collect()), + } + } +} + +impl Into for Value { + fn into(self) -> JValue { + match self { + Value::Null => JValue::Null, + Value::Bool(v) => JValue::Bool(v), + Value::Integer(v) => JValue::Number(v.into()), + Value::Float(v) => JValue::Number(Number::from_f64(v).unwrap()), + Value::String(v) => JValue::String(v), + Value::Array(v) => JValue::Array(v.into_iter().map(|e| e.into()).collect()), + Value::Object(m) => JValue::Object(m.into_iter().map(|(k, v)| (k, v.into())).collect()), + } + } +} + #[derive(Serialize, Deserialize)] pub struct StreamImpl { pub stream: rch::mpsc::Receiver, @@ -223,7 +278,17 @@ 
pub struct Exec { pub result: rch::oneshot::Sender>, } -// TODO write main function here? +/// The main loop for a plugin. +/// +/// Your main function should only create a struct that implements [`PluginInfo`] +/// and then call [`main_loop`]: +/// ```rust +/// #[tokio::main] +/// async fn main() { +/// let plugin = MyPlugin::default(); +/// reaction_plugin::main_loop(plugin).await; +/// } +/// ``` pub async fn main_loop(plugin_info: T) { let (conn, mut tx, _rx): ( _, @@ -235,7 +300,7 @@ pub async fn main_loop(plugin_info: T) { let (server, client) = PluginInfoServer::new(plugin_info, 1); - let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), tokio::spawn(conn)); + let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), conn); let mut exit_code = 0; if let Err(err) = res1 { eprintln!("ERROR could not send plugin info to reaction: {err}"); @@ -246,10 +311,7 @@ pub async fn main_loop(plugin_info: T) { exit_code = 2; } if let Err(err) = res3 { - eprintln!("ERROR could not setup connection with reaction: {err}"); - exit_code = 3; - } else if let Ok(Err(err)) = res3 { - eprintln!("ERROR could not setup connection with reaction: {err}"); + eprintln!("ERROR connection error with reaction: {err}"); exit_code = 3; } exit(exit_code); diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index a71bcd5..2b4e01e 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -8,7 +8,7 @@ use std::{collections::BTreeMap, process::Stdio, sync::Arc}; use reaction_plugin::ActionImpl; use regex::Regex; use tokio::sync::{Mutex, MutexGuard, Semaphore}; -use tracing::{error, info}; +use tracing::{debug, error, info}; use crate::{ concepts::{Action, Duplicate, Filter, Match, Pattern, Time}, @@ -81,6 +81,10 @@ impl FilterManager { ) .await?, ); + debug!( + "successfully intialized action {}.{}.{}", + action.stream_name, action.filter_name, action.name + ); } let this = Self { filter, diff --git a/src/daemon/plugin/mod.rs 
b/src/daemon/plugin/mod.rs index 340e2c1..710e7dc 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -238,7 +238,7 @@ impl Plugins { let plugin = self.plugins.get_mut(plugin_name).unwrap(); plugin - .stream_impl(stream_name.clone(), stream_type, config) + .stream_impl(stream_name.clone(), stream_type, config.into()) .await .map_err(|err| format!("plugin error while initializing stream {stream_name}: {err}")) } @@ -270,7 +270,7 @@ impl Plugins { filter_name.clone(), action_name.clone(), action_type, - config, + config.into(), patterns, ) .await diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index c15fd3d..e80a8f1 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -11,7 +11,7 @@ use tokio::{ io::{AsyncBufReadExt, BufReader}, process::{Child, ChildStderr, ChildStdout, Command}, }; -use tracing::{error, info}; +use tracing::{debug, error, info}; use crate::{ concepts::{Filter, Stream}, @@ -86,6 +86,8 @@ impl StreamManager { None }; + debug!("successfully initialized stream {}", stream.name); + Ok(StreamManager { compiled_regex_set: RegexSet::new(all_regexes.keys()).map_err(|err| err.to_string())?, regex_index_to_filter_manager: all_regexes.into_values().collect(), From db622eec53a7884c0538526a6ae37b506af55f9a Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 2 Nov 2025 12:00:00 +0100 Subject: [PATCH 136/241] show plugin stream exit error only when not quitting --- src/daemon/shutdown.rs | 5 +++++ src/daemon/stream.rs | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/daemon/shutdown.rs b/src/daemon/shutdown.rs index a311ad1..8ef4e43 100644 --- a/src/daemon/shutdown.rs +++ b/src/daemon/shutdown.rs @@ -84,6 +84,11 @@ impl ShutdownToken { self.shutdown_notifyer.cancelled() } + /// Returns true if the shutdown request happened + pub fn is_shutdown(&self) -> bool { + self.shutdown_notifyer.is_cancelled() + } + /// Ask for all tasks to quit pub fn ask_shutdown(&self) { self.shutdown_notifyer.cancel(); diff --git 
a/src/daemon/stream.rs b/src/daemon/stream.rs index e80a8f1..8834311 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -147,7 +147,9 @@ impl StreamManager { } } Ok(None) => { - error!("stream {} has exited", self.stream.name); + if !self.shutdown.is_shutdown() { + error!("stream {} has exited", self.stream.name); + } return; } } From 983eff13eb90519180a034230c2ccc0eafca80e6 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 3 Nov 2025 12:00:00 +0100 Subject: [PATCH 137/241] cluster initialization - Actions are connected to Cluster, - Separate task to (re)initialize connections --- .../reaction-plugin-cluster/src/cluster.rs | 123 ++++++++++++---- .../reaction-plugin-cluster/src/connection.rs | 136 ++++++++++++++++++ plugins/reaction-plugin-cluster/src/main.rs | 83 +++++++---- 3 files changed, 291 insertions(+), 51 deletions(-) create mode 100644 plugins/reaction-plugin-cluster/src/connection.rs diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 4b3744a..f58372b 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -1,36 +1,107 @@ -use std::net::{SocketAddrV4, SocketAddrV6}; +use std::{ + collections::BTreeMap, + net::{SocketAddrV4, SocketAddrV6}, + sync::Arc, +}; -use iroh::Endpoint; -use reaction_plugin::RemoteResult; +use iroh::{Endpoint, EndpointAddr, EndpointId, endpoint::Connection}; +use reaction_plugin::Exec; +use tokio::sync::{mpsc, oneshot}; -use crate::Plugin; +use crate::{ActionInit, StreamInit, connection::ConnectionInitializer}; -const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; +pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; -impl Plugin { - pub async fn endpoint_init(&mut self) -> RemoteResult<()> { - // while let Some((stream_name, stream)) = self.streams.pop_first() { - for (stream_name, stream) in &self.streams { - let mut builder = Endpoint::builder() - 
.secret_key(stream.secret_key.clone()) - .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect()) - .relay_mode(iroh::RelayMode::Disabled) - .clear_discovery(); +type ShutdownNotification = oneshot::Receiver>; - if let Some(ip) = stream.bind_ipv4 { - builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port)); - } - if let Some(ip) = stream.bind_ipv6 { - builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0)); - } +pub struct Cluster { + endpoint: Arc, + stream: StreamInit, + actions: Vec, + shutdown: ShutdownNotification, + connections: BTreeMap, + endpoint_addr_tx: mpsc::Sender, + connection_rx: mpsc::Receiver, +} - let endpoint = builder.bind().await.map_err(|err| { - format!("Could not create socket address for cluster {stream_name}: {err}") - })?; - self.endpoints.insert(stream_name.clone(), endpoint); +impl Cluster { + pub async fn new( + stream: StreamInit, + actions: Vec, + shutdown: ShutdownNotification, + ) -> Result<(), String> { + let mut builder = Endpoint::builder() + .secret_key(stream.secret_key.clone()) + .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect()) + .relay_mode(iroh::RelayMode::Disabled) + .clear_discovery(); + + if let Some(ip) = stream.bind_ipv4 { + builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port)); } - // We have no use of those parameters anymore - self.streams = Default::default(); + if let Some(ip) = stream.bind_ipv6 { + builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0)); + } + + let endpoint = builder.bind().await.map_err(|err| { + format!( + "Could not create socket address for cluster {}: {err}", + stream.name + ) + })?; + let endpoint = Arc::new(endpoint); + + let (endpoint_addr_tx, connection_rx) = + ConnectionInitializer::new(endpoint.clone(), stream.name.clone(), stream.nodes.len()); + + for node in stream.nodes.values() { + endpoint_addr_tx.send(node.clone()).await.unwrap(); + } + + let this = Self { + // No connection for now + 
connections: Default::default(), + // Values passed as-is + stream, + actions, + endpoint, + shutdown, + endpoint_addr_tx, + connection_rx, + }; + tokio::spawn(async move { this.task().await }); + Ok(()) } + + async fn task(mut self) { + let action_rx = self.spawn_actions(); + + // Ok donc là il faut : + // - Que je réessaie plus tard les connections qui ont raté + // - Que j'accepte des nouvelles connections + // - Que j'ai une queue par noeud + // - Que chaque élément de la queue puisse timeout + // - Que j'envoie les messages de mes actions dans toutes les queues + // + // - Que j'écoute les messages de mes pairs et que je les renvoie à mon stream + // + // Et que je gère l'authentification en début de connection + } + + fn spawn_actions(&mut self) -> mpsc::Receiver<(Exec, bool)> { + let (tx, rx) = mpsc::channel(1); + while let Some(mut action) = self.actions.pop() { + let tx = tx.clone(); + tokio::spawn(async move { + while let Ok(Some(exec)) = action.rx.recv().await { + if let Err(err) = tx.send((exec, action.self_)).await { + eprintln!("ERROR while queueing action in cluster: {err}"); + break; + } + } + }); + } + rx + } } diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs new file mode 100644 index 0000000..5cb646e --- /dev/null +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -0,0 +1,136 @@ +use std::collections::BTreeMap; +use std::sync::Arc; +use std::time::Duration; + +use iroh::Endpoint; +use iroh::{EndpointAddr, endpoint::Connection}; +use tokio::{ + sync::mpsc, + time::{Instant, sleep, sleep_until}, +}; + +use crate::cluster::ALPN; + +const START_TIMEOUT: Duration = Duration::from_secs(5); +const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour +const TIMEOUT_FACTOR: f64 = 1.5; + +pub struct ConnectionInitializer { + endpoint: Arc, + cluster_name: String, + retry_connections: BTreeMap, + endpoint_addr_rx: mpsc::Receiver, + connection_tx: mpsc::Sender, +} + +impl 
ConnectionInitializer { + pub fn new( + endpoint: Arc, + cluster_name: String, + cluster_size: usize, + ) -> (mpsc::Sender, mpsc::Receiver) { + let (tx1, rx1) = mpsc::channel(cluster_size); + let (tx2, rx2) = mpsc::channel(cluster_size); + + tokio::spawn(async move { + Self { + endpoint, + cluster_name, + retry_connections: Default::default(), + endpoint_addr_rx: rx1, + connection_tx: tx2, + } + .task() + .await + }); + + (tx1, rx2) + } + + async fn task(&mut self) { + let mut tick = sleep(Duration::default()); + loop { + // Uncomment this line and comment the select for faster development in this function + // let option = Some(self.endpoint_addr_rx.recv().await); + let option = tokio::select! { + endpoint_addr = self.endpoint_addr_rx.recv() => Some(endpoint_addr), + _ = tick => None, + }; + if let Some(option) = option { + if let Some(endpoint_addr) = option { + match self.try_connect(endpoint_addr).await { + Ok(connection) => { + if let Err(_) = self.connection_tx.send(connection).await { + // This means the main cluster loop has quit, so let's quit + break; + } + } + Err(endpoint_addr) => { + self.insert_address(endpoint_addr, START_TIMEOUT); + } + } + } else { + break; + } + } else { + if self + .retry_connections + .keys() + .next() + .is_some_and(|time| time < &Instant::now()) + { + let (_, (endpoint_addr, delta)) = self.retry_connections.pop_first().unwrap(); + match self.try_connect(endpoint_addr).await { + Ok(connection) => { + if let Err(_) = self.connection_tx.send(connection).await { + // This means the main cluster loop has quit, so let's quit + break; + } + } + Err(endpoint_addr) => { + // Multiply timeout by TIMEOUT_FACTOR + let delta = Duration::from_millis( + ((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64, + ); + // Cap to MAX_TIMEOUT + let delta = if delta > MAX_TIMEOUT { + MAX_TIMEOUT + } else { + delta + }; + self.insert_address(endpoint_addr, delta); + } + } + } + } + // Tick at next deadline + tick = 
sleep_until(*self.retry_connections.keys().next().unwrap()); + } + } + + fn insert_address(&mut self, endpoint_addr: EndpointAddr, delta: Duration) { + if !delta.is_zero() { + eprintln!( + "INFO cluster {}: retry connecting to node {} in {:?}", + self.cluster_name, endpoint_addr.id, delta + ); + } + let now = Instant::now(); + // Schedule this address for later + self.retry_connections + .insert(now + delta, (endpoint_addr, delta)); + } + + async fn try_connect(&self, addr: EndpointAddr) -> Result { + match self.endpoint.connect(addr.clone(), ALPN[0]).await { + Ok(connection) => Ok(connection), + Err(err) => { + eprintln!( + "ERROR cluster {}: connecting to node {}: {err}", + self.cluster_name, addr.id + ); + Err(addr) + } + } + } +} diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 72cddb4..e40f17e 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -3,19 +3,21 @@ use std::{ net::{Ipv4Addr, Ipv6Addr, SocketAddr}, }; -use iroh::{Endpoint, PublicKey, SecretKey}; +use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; use reaction_plugin::{ ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, main_loop, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; -use tokio::fs; +use tokio::{fs, sync::oneshot}; mod cluster; +mod connection; mod secret_key; -use secret_key::secret_key; -use crate::secret_key::{key_b64_to_bytes, key_bytes_to_b64}; +use secret_key::{key_b64_to_bytes, key_bytes_to_b64, secret_key}; + +use crate::cluster::Cluster; #[tokio::main] async fn main() { @@ -26,8 +28,8 @@ async fn main() { #[derive(Default)] struct Plugin { streams: BTreeMap, - actions: Vec, - endpoints: BTreeMap, + actions: BTreeMap>, + cluster_shutdown: Vec>>, } /// Stream options as defined by the user @@ -61,12 +63,13 @@ fn ipv6_unspecified() -> Option { /// Stream information before start struct StreamInit { + name: 
String, listen_port: u16, bind_ipv4: Option, bind_ipv6: Option, shared_secret: String, secret_key: SecretKey, - nodes: Vec, + nodes: BTreeMap, tx: mpsc::Sender, } @@ -76,12 +79,6 @@ struct NodeOption { addresses: Vec, } -#[derive(Serialize, Deserialize)] -struct NodeInit { - public_key: PublicKey, - addresses: Vec, -} - #[derive(Serialize, Deserialize)] struct ActionOptions { /// The line to send to the corresponding cluster, example: "ban \" @@ -95,8 +92,8 @@ struct ActionOptions { #[derive(Serialize, Deserialize)] struct ActionInit { + name: String, send: String, - to: String, self_: bool, patterns: Vec, rx: mpsc::Receiver, @@ -138,7 +135,7 @@ impl PluginInfo for Plugin { return Err("missing shared secret: either shared_secret or shared_secret_file must be provided".into()); }; - let mut init_nodes = Vec::default(); + let mut nodes = BTreeMap::default(); for node in options.nodes.into_iter() { let bytes = key_b64_to_bytes(&node.public_key) .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; @@ -146,10 +143,17 @@ impl PluginInfo for Plugin { let public_key = PublicKey::from_bytes(&bytes) .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; - init_nodes.push(NodeInit { + nodes.insert( public_key, - addresses: node.addresses, - }); + EndpointAddr { + id: public_key, + addrs: node + .addresses + .into_iter() + .map(|addr| TransportAddr::Ip(addr)) + .collect(), + }, + ); } let secret_key = secret_key(&stream_name).await?; @@ -161,12 +165,13 @@ impl PluginInfo for Plugin { let (tx, rx) = mpsc::channel(1); let stream = StreamInit { + name: stream_name.clone(), listen_port: options.listen_port, bind_ipv4: options.bind_ipv4, bind_ipv6: options.bind_ipv6, shared_secret, secret_key, - nodes: init_nodes, + nodes, tx, }; @@ -182,9 +187,9 @@ impl PluginInfo for Plugin { async fn action_impl( &mut self, - _stream_name: String, - _filter_name: String, - _action_name: String, + stream_name: String, + filter_name: String, + action_name: 
String, action_type: String, config: Value, patterns: Vec, @@ -199,20 +204,48 @@ impl PluginInfo for Plugin { let (tx, rx) = mpsc::channel(1); let init_action = ActionInit { + name: format!("{}.{}.{}", stream_name, filter_name, action_name), send: options.send, - to: options.to, self_: options.self_, patterns, rx, }; - self.actions.push(init_action); + self.actions + .entry(options.to) + .or_default() + .push(init_action); Ok(ActionImpl { tx }) } async fn finish_setup(&mut self) -> RemoteResult<()> { - self.endpoint_init().await + while let Some((stream_name, stream)) = self.streams.pop_first() { + let (tx, rx) = oneshot::channel(); + self.cluster_shutdown.push(tx); + Cluster::new( + stream, + self.actions.remove(&stream_name).unwrap_or_default(), + rx, + ) + .await?; + } + // Check there is no action left + if !self.actions.is_empty() { + for (to, actions) in &self.actions { + for action in actions { + eprintln!( + "ERROR action '{}' sends 'to' unknown stream '{}'", + action.name, to + ); + } + } + return Err("at least one cluster_send action has unknown 'to'".into()); + } + // Free containers + self.actions = Default::default(); + self.streams = Default::default(); + Ok(()) } async fn close(self) -> RemoteResult<()> { From 3f6e74d096ad400fda6cb94946f73ae91f7566dd Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 6 Nov 2025 12:00:00 +0100 Subject: [PATCH 138/241] Accept remote connections. Prepare work for shared_secret handshake Renamed ConnectionInitializer to EndpointManager. Endpoint isn't shared with Cluster anymore. Moved big `match` in `loop` to own function, mainly to separate it from the select macro and reduce LSP latency. But that's cleaner too. 
--- TODO | 2 +- .../reaction-plugin-cluster/src/cluster.rs | 8 +- .../reaction-plugin-cluster/src/connection.rs | 136 ------------ .../reaction-plugin-cluster/src/endpoint.rs | 193 ++++++++++++++++++ plugins/reaction-plugin-cluster/src/main.rs | 2 +- 5 files changed, 197 insertions(+), 144 deletions(-) delete mode 100644 plugins/reaction-plugin-cluster/src/connection.rs create mode 100644 plugins/reaction-plugin-cluster/src/endpoint.rs diff --git a/TODO b/TODO index cc7658d..9c09bed 100644 --- a/TODO +++ b/TODO @@ -2,4 +2,4 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) plugins: pipe stderr too and wrap errors in logs plugins: provide treedb storage? omg (add an enum that's either remoc::rch::mpsc or tokio::mpsc) -plugins: implement a tracing subscriber over stderr? or more simply, capture lines and reprint them prefixed by the plugin name? +plugin cluster: provide a stream of refused connections? 
diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index f58372b..ddce02b 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -1,21 +1,19 @@ use std::{ collections::BTreeMap, net::{SocketAddrV4, SocketAddrV6}, - sync::Arc, }; use iroh::{Endpoint, EndpointAddr, EndpointId, endpoint::Connection}; use reaction_plugin::Exec; use tokio::sync::{mpsc, oneshot}; -use crate::{ActionInit, StreamInit, connection::ConnectionInitializer}; +use crate::{ActionInit, StreamInit, endpoint::EndpointManager}; pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; type ShutdownNotification = oneshot::Receiver>; pub struct Cluster { - endpoint: Arc, stream: StreamInit, actions: Vec, shutdown: ShutdownNotification, @@ -49,10 +47,9 @@ impl Cluster { stream.name ) })?; - let endpoint = Arc::new(endpoint); let (endpoint_addr_tx, connection_rx) = - ConnectionInitializer::new(endpoint.clone(), stream.name.clone(), stream.nodes.len()); + EndpointManager::new(endpoint, stream.name.clone(), stream.nodes.len()); for node in stream.nodes.values() { endpoint_addr_tx.send(node.clone()).await.unwrap(); @@ -64,7 +61,6 @@ impl Cluster { // Values passed as-is stream, actions, - endpoint, shutdown, endpoint_addr_tx, connection_rx, diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs deleted file mode 100644 index 5cb646e..0000000 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ /dev/null @@ -1,136 +0,0 @@ -use std::collections::BTreeMap; -use std::sync::Arc; -use std::time::Duration; - -use iroh::Endpoint; -use iroh::{EndpointAddr, endpoint::Connection}; -use tokio::{ - sync::mpsc, - time::{Instant, sleep, sleep_until}, -}; - -use crate::cluster::ALPN; - -const START_TIMEOUT: Duration = Duration::from_secs(5); -const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour -const TIMEOUT_FACTOR: 
f64 = 1.5; - -pub struct ConnectionInitializer { - endpoint: Arc, - cluster_name: String, - retry_connections: BTreeMap, - endpoint_addr_rx: mpsc::Receiver, - connection_tx: mpsc::Sender, -} - -impl ConnectionInitializer { - pub fn new( - endpoint: Arc, - cluster_name: String, - cluster_size: usize, - ) -> (mpsc::Sender, mpsc::Receiver) { - let (tx1, rx1) = mpsc::channel(cluster_size); - let (tx2, rx2) = mpsc::channel(cluster_size); - - tokio::spawn(async move { - Self { - endpoint, - cluster_name, - retry_connections: Default::default(), - endpoint_addr_rx: rx1, - connection_tx: tx2, - } - .task() - .await - }); - - (tx1, rx2) - } - - async fn task(&mut self) { - let mut tick = sleep(Duration::default()); - loop { - // Uncomment this line and comment the select for faster development in this function - // let option = Some(self.endpoint_addr_rx.recv().await); - let option = tokio::select! { - endpoint_addr = self.endpoint_addr_rx.recv() => Some(endpoint_addr), - _ = tick => None, - }; - if let Some(option) = option { - if let Some(endpoint_addr) = option { - match self.try_connect(endpoint_addr).await { - Ok(connection) => { - if let Err(_) = self.connection_tx.send(connection).await { - // This means the main cluster loop has quit, so let's quit - break; - } - } - Err(endpoint_addr) => { - self.insert_address(endpoint_addr, START_TIMEOUT); - } - } - } else { - break; - } - } else { - if self - .retry_connections - .keys() - .next() - .is_some_and(|time| time < &Instant::now()) - { - let (_, (endpoint_addr, delta)) = self.retry_connections.pop_first().unwrap(); - match self.try_connect(endpoint_addr).await { - Ok(connection) => { - if let Err(_) = self.connection_tx.send(connection).await { - // This means the main cluster loop has quit, so let's quit - break; - } - } - Err(endpoint_addr) => { - // Multiply timeout by TIMEOUT_FACTOR - let delta = Duration::from_millis( - ((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64, - ); - // Cap to MAX_TIMEOUT - let delta 
= if delta > MAX_TIMEOUT { - MAX_TIMEOUT - } else { - delta - }; - self.insert_address(endpoint_addr, delta); - } - } - } - } - // Tick at next deadline - tick = sleep_until(*self.retry_connections.keys().next().unwrap()); - } - } - - fn insert_address(&mut self, endpoint_addr: EndpointAddr, delta: Duration) { - if !delta.is_zero() { - eprintln!( - "INFO cluster {}: retry connecting to node {} in {:?}", - self.cluster_name, endpoint_addr.id, delta - ); - } - let now = Instant::now(); - // Schedule this address for later - self.retry_connections - .insert(now + delta, (endpoint_addr, delta)); - } - - async fn try_connect(&self, addr: EndpointAddr) -> Result { - match self.endpoint.connect(addr.clone(), ALPN[0]).await { - Ok(connection) => Ok(connection), - Err(err) => { - eprintln!( - "ERROR cluster {}: connecting to node {}: {err}", - self.cluster_name, addr.id - ); - Err(addr) - } - } - } -} diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs new file mode 100644 index 0000000..755dea3 --- /dev/null +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -0,0 +1,193 @@ +use std::collections::BTreeMap; +use std::time::Duration; + +use iroh::Endpoint; +use iroh::endpoint::Incoming; +use iroh::{EndpointAddr, endpoint::Connection}; +use tokio::{ + sync::mpsc, + time::{Instant, sleep, sleep_until}, +}; + +use crate::cluster::ALPN; + +const START_TIMEOUT: Duration = Duration::from_secs(5); +const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour +const TIMEOUT_FACTOR: f64 = 1.5; + +enum Event { + TryConnect(EndpointAddr), + Quit, + Tick, + Incoming(Option), +} + +enum Break { + Yes, + No, +} + +pub struct EndpointManager { + /// The [`iroh::Endpoint`] to manage + endpoint: Endpoint, + /// Cluster's name (for logging) + cluster_name: String, + /// Map of remote Endpoints to try to connect to + retry_connections: BTreeMap, + /// Connection requests from the [`crate::Cluster`] + endpoint_addr_rx: 
mpsc::Receiver, + /// Connection sender to the [`crate::Cluster`] + connection_tx: mpsc::Sender, +} + +impl EndpointManager { + pub fn new( + endpoint: Endpoint, + cluster_name: String, + cluster_size: usize, + ) -> (mpsc::Sender, mpsc::Receiver) { + let (tx1, rx1) = mpsc::channel(cluster_size); + let (tx2, rx2) = mpsc::channel(cluster_size); + + tokio::spawn(async move { + Self { + endpoint, + cluster_name, + retry_connections: Default::default(), + endpoint_addr_rx: rx1, + connection_tx: tx2, + } + .task() + .await + }); + + (tx1, rx2) + } + + async fn task(&mut self) { + let mut tick = sleep(Duration::default()); + + loop { + // Uncomment this line and comment the select! for faster development in this function + // let event = Event::TryConnect(self.endpoint_addr_rx.recv().await); + let event = tokio::select! { + received = self.endpoint_addr_rx.recv() => { + match received { + Some(endpoint_addr) => Event::TryConnect(endpoint_addr), + None => Event::Quit, + } + } + incoming = self.endpoint.accept() => Event::Incoming(incoming), + _ = tick => Event::Tick, + }; + + if let Break::Yes = self.handle_event(event).await { + break; + } + + // Tick at next deadline + tick = sleep_until(*self.retry_connections.keys().next().unwrap()); + } + + self.endpoint.close().await + } + + async fn handle_event(&mut self, event: Event) -> Break { + match event { + Event::Quit => return Break::Yes, + + Event::TryConnect(endpoint_addr) => match self.try_connect(endpoint_addr).await { + Ok(connection) => return self.handshake(connection, true).await, + Err(endpoint_addr) => { + self.insert_address(endpoint_addr, START_TIMEOUT); + } + }, + + Event::Tick => { + if self + .retry_connections + .keys() + .next() + .is_some_and(|time| time < &Instant::now()) + { + let (_, (endpoint_addr, delta)) = self.retry_connections.pop_first().unwrap(); + match self.try_connect(endpoint_addr).await { + Ok(connection) => return self.handshake(connection, true).await, + Err(endpoint_addr) => { + let 
delta = next_delta(delta); + self.insert_address(endpoint_addr, delta); + } + } + } + } + + Event::Incoming(incoming) => { + // FIXME a malicious actor could maybe prevent a node from connecting to + // its cluster by sending lots of invalid slow connection requests? + // We could lower its priority https://docs.rs/tokio/latest/tokio/macro.select.html#fairness + // And/or moving the handshake to another task + if let Some(incoming) = incoming { + let remote_address = incoming.remote_address(); + let remote_address_validated = incoming.remote_address_validated(); + match incoming.await { + Ok(connection) => return self.handshake(connection, false).await, + Err(err) => { + if remote_address_validated { + eprintln!("INFO refused connection from {}: {err}", remote_address) + } else { + eprintln!("INFO refused connection: {err}") + } + } + } + } + } + } + Break::No + } + + fn insert_address(&mut self, endpoint_addr: EndpointAddr, delta: Duration) { + if !delta.is_zero() { + eprintln!( + "INFO cluster {}: retry connecting to node {} in {:?}", + self.cluster_name, endpoint_addr.id, delta + ); + } + let now = Instant::now(); + // Schedule this address for later + self.retry_connections + .insert(now + delta, (endpoint_addr, delta)); + } + + async fn try_connect(&self, addr: EndpointAddr) -> Result { + match self.endpoint.connect(addr.clone(), ALPN[0]).await { + Ok(connection) => Ok(connection), + Err(err) => { + eprintln!( + "ERROR cluster {}: node {}: {err}", + self.cluster_name, addr.id + ); + Err(addr) + } + } + } + + async fn handshake(&mut self, connection: Connection, initiated_by_me: bool) -> Break { + // TODO validate handshake before sending + if let Err(_) = self.connection_tx.send(connection).await { + // This means the main cluster loop has exited, so let's quit + return Break::Yes; + } + Break::No + } +} + +fn next_delta(delta: Duration) -> Duration { + // Multiply timeout by TIMEOUT_FACTOR + let delta = Duration::from_millis(((delta.as_millis() as f64) * 
TIMEOUT_FACTOR) as u64); + // Cap to MAX_TIMEOUT + if delta > MAX_TIMEOUT { + MAX_TIMEOUT + } else { + delta + } +} diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index e40f17e..5bfce63 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use tokio::{fs, sync::oneshot}; mod cluster; -mod connection; +mod endpoint; mod secret_key; use secret_key::{key_b64_to_bytes, key_bytes_to_b64, secret_key}; From 2e7fa016c6a65d53d59b75f6c929bf8ae89a8593 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 11 Nov 2025 12:00:00 +0100 Subject: [PATCH 139/241] Insecure hash-based handshake. I must find something else. --- Cargo.lock | 87 ++++++++++--- plugins/reaction-plugin-cluster/Cargo.toml | 1 + .../reaction-plugin-cluster/src/endpoint.rs | 116 +++++++++++++++++- 3 files changed, 184 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 06394fb..bd62106 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac8202ab55fcbf46ca829833f347a82a2a4ce0596f0304ac322c2d100030cd56" dependencies = [ "bytes", - "crypto-common", + "crypto-common 0.2.0-rc.4", "inout", ] @@ -322,6 +322,15 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + [[package]] name = "block-buffer" version = "0.11.0-rc.5" @@ -427,8 +436,8 @@ version = "0.5.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e12a13eb01ded5d32ee9658d94f553a19e804204f2dc811df69ab4d9e0cb8c7" dependencies = [ - "block-buffer", - "crypto-common", + "block-buffer 0.11.0-rc.5", + "crypto-common 0.2.0-rc.4", "inout", "zeroize", 
] @@ -625,6 +634,16 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + [[package]] name = "crypto-common" version = "0.2.0-rc.4" @@ -676,7 +695,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", - "digest", + "digest 0.11.0-rc.3", "fiat-crypto", "rand_core 0.9.3", "rustc_version", @@ -776,15 +795,25 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer 0.10.4", + "crypto-common 0.1.6", +] + [[package]] name = "digest" version = "0.11.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac89f8a64533a9b0eaa73a68e424db0fb1fd6271c74cc0125336a05f090568d" dependencies = [ - "block-buffer", + "block-buffer 0.11.0-rc.5", "const-oid", - "crypto-common", + "crypto-common 0.2.0-rc.4", ] [[package]] @@ -851,7 +880,7 @@ dependencies = [ "ed25519", "rand_core 0.9.3", "serde", - "sha2", + "sha2 0.11.0-rc.2", "signature", "subtle", "zeroize", @@ -1068,6 +1097,16 @@ dependencies = [ "windows 0.61.1", ] +[[package]] +name = "generic-array" +version = "0.14.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "getrandom" version = "0.2.16" @@ -1375,7 +1414,7 @@ 
dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -1556,7 +1595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.16.0", ] [[package]] @@ -2697,7 +2736,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls", - "socket2 0.5.10", + "socket2 0.6.1", "thiserror 2.0.17", "tokio", "tracing", @@ -2734,9 +2773,9 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.1", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2864,6 +2903,7 @@ dependencies = [ "remoc", "serde", "serde_json", + "sha2 0.10.9", "tokio", ] @@ -3331,7 +3371,7 @@ checksum = "c5e046edf639aa2e7afb285589e5405de2ef7e61d4b0ac1e30256e3eab911af9" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.11.0-rc.3", ] [[package]] @@ -3340,6 +3380,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha2" version = "0.11.0-rc.2" @@ -3348,7 +3399,7 @@ checksum = "d1e3878ab0f98e35b2df35fe53201d088299b41a6bb63e3e34dada2ac4abd924" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.11.0-rc.3", ] [[package]] @@ -3999,7 +4050,7 @@ version = "0.6.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a55be643b40a21558f44806b53ee9319595bc7ca6896372e4e08e5d7d83c9cd6" dependencies = [ - "crypto-common", + "crypto-common 0.2.0-rc.4", "subtle", ] @@ 
-4057,6 +4108,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + [[package]] name = "wait-timeout" version = "0.2.1" diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 648c67a..0322d90 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -15,3 +15,4 @@ tokio.features = ["rt-multi-thread"] data-encoding = "2.9.0" iroh = "0.94.0" rand = "0.9.2" +sha2 = "0.10.9" diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs index 755dea3..52ee798 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -1,9 +1,12 @@ +use std::arch::x86_64::_mm_sha256rnds2_epu32; use std::collections::BTreeMap; use std::time::Duration; use iroh::Endpoint; -use iroh::endpoint::Incoming; +use iroh::endpoint::{ConnectionStats, Incoming}; use iroh::{EndpointAddr, endpoint::Connection}; +use rand::{Rng, rng}; +use sha2::{Digest, Sha256}; use tokio::{ sync::mpsc, time::{Instant, sleep, sleep_until}, @@ -38,12 +41,14 @@ pub struct EndpointManager { endpoint_addr_rx: mpsc::Receiver, /// Connection sender to the [`crate::Cluster`] connection_tx: mpsc::Sender, + shared_secret: String, } impl EndpointManager { pub fn new( endpoint: Endpoint, cluster_name: String, + shared_secret: String, cluster_size: usize, ) -> (mpsc::Sender, mpsc::Receiver) { let (tx1, rx1) = mpsc::channel(cluster_size); @@ -56,6 +61,7 @@ impl EndpointManager { retry_connections: Default::default(), endpoint_addr_rx: rx1, connection_tx: tx2, + shared_secret, } .task() .await @@ -172,10 +178,110 @@ 
impl EndpointManager { } async fn handshake(&mut self, connection: Connection, initiated_by_me: bool) -> Break { - // TODO validate handshake before sending - if let Err(_) = self.connection_tx.send(connection).await { - // This means the main cluster loop has exited, so let's quit - return Break::Yes; + let remote_id = connection + .remote_id() + .map(|id| id.to_string()) + .unwrap_or("".to_string()); + + // Send challenge and check response + let ask_future = { + let connection = connection.clone(); + let shared_secret = self.shared_secret.clone(); + let remote_id = remote_id.clone(); + + async move { + let (mut tx, mut rx) = connection.open_bi().await.map_err(|err| { + format!( + "could not init a QUIC communication channel with node {remote_id}: {err}", + ) + })?; + + let salt: [u8; 32] = rand::rng().random(); + + tx.write_all(&salt) + .await + .map_err(|err| format!("could not send data to node {remote_id}: {err}"))?; + + let mut concat: Vec = Vec::with_capacity(64); + concat.extend_from_slice(shared_secret.as_bytes()); + concat.extend_from_slice(&salt); + let expected_hash = Sha256::digest(concat); + + let mut received_hash = [0u8; 32]; + rx.read_exact(&mut received_hash) + .await + .map_err(|err| format!("could not read data from node {remote_id}: {err}"))?; + + if expected_hash.iter().eq(received_hash.iter()) { + // Send "ok" + // Return Ok(()) + tx.write_all(&[1u8]) + .await + .map_err(|err| format!("could not send data to node {remote_id}: {err}")) + } else { + let _ = tx.write_all(&[0u8]).await; + Err(format!( + "hash mismatch from node {remote_id}. It most probably has a different `secret_key` set." 
+ )) + } + } + }; + + // Receive challenge and send response + let answer_future = { + let connection = connection.clone(); + let shared_secret = self.shared_secret.clone(); + let remote_id = remote_id.clone(); + + async move { + let (mut tx, mut rx) = connection.accept_bi().await.map_err(|err| { + format!( + "could not receive a QUIC communication channel from node {remote_id}: {err}", + ) + })?; + + let mut received_salt = [0u8; 32]; + rx.read_exact(&mut received_salt) + .await + .map_err(|err| format!("could not read data from node {remote_id}: {err}"))?; + + let mut concat: Vec = Vec::with_capacity(64); + concat.extend_from_slice(shared_secret.as_bytes()); + concat.extend_from_slice(&received_salt); + let computed_hash = Sha256::digest(concat); + + tx.write_all(&computed_hash) + .await + .map_err(|err| format!("could not send data to node {remote_id}: {err}"))?; + + let mut response = [0u8; 1]; + rx.read_exact(&mut response) + .await + .map_err(|err| format!("could not read data from node {remote_id}: {err}"))?; + + if response[0] == 1 { + Ok(()) + } else { + Err(format!( + "node {remote_id} announced a hash mismatch. It most probably has a different `secret_key` set." 
+ )) + } + } + }; + + let (ask_result, answer_result) = tokio::join!(ask_future, answer_future); + if let Err(err) = &ask_result { + eprintln!("ERROR {err}"); + } + if let Err(err) = &answer_result { + eprintln!("ERROR {err}"); + } + + if ask_result.is_ok() && answer_result.is_ok() { + if let Err(_) = self.connection_tx.send(connection).await { + // This means the main cluster loop has exited, so let's quit + return Break::Yes; + } } Break::No } From ba9ab4c31994639f957e2f4abef79a3ade4f12b5 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 11 Nov 2025 12:00:00 +0100 Subject: [PATCH 140/241] Remove insecure handshake and just check if we know this public key --- Cargo.lock | 77 +------ plugins/reaction-plugin-cluster/Cargo.toml | 1 - .../reaction-plugin-cluster/src/endpoint.rs | 201 +++++++----------- 3 files changed, 89 insertions(+), 190 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bd62106..5c35f88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac8202ab55fcbf46ca829833f347a82a2a4ce0596f0304ac322c2d100030cd56" dependencies = [ "bytes", - "crypto-common 0.2.0-rc.4", + "crypto-common", "inout", ] @@ -322,15 +322,6 @@ dependencies = [ "constant_time_eq", ] -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - [[package]] name = "block-buffer" version = "0.11.0-rc.5" @@ -436,8 +427,8 @@ version = "0.5.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e12a13eb01ded5d32ee9658d94f553a19e804204f2dc811df69ab4d9e0cb8c7" dependencies = [ - "block-buffer 0.11.0-rc.5", - "crypto-common 0.2.0-rc.4", + "block-buffer", + "crypto-common", "inout", "zeroize", ] @@ -634,16 +625,6 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - [[package]] name = "crypto-common" version = "0.2.0-rc.4" @@ -695,7 +676,7 @@ dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", - "digest 0.11.0-rc.3", + "digest", "fiat-crypto", "rand_core 0.9.3", "rustc_version", @@ -795,25 +776,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common 0.1.6", -] - [[package]] name = "digest" version = "0.11.0-rc.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dac89f8a64533a9b0eaa73a68e424db0fb1fd6271c74cc0125336a05f090568d" dependencies = [ - "block-buffer 0.11.0-rc.5", + "block-buffer", "const-oid", - "crypto-common 0.2.0-rc.4", + "crypto-common", ] [[package]] @@ -880,7 +851,7 @@ dependencies = [ "ed25519", "rand_core 0.9.3", "serde", - "sha2 0.11.0-rc.2", + "sha2", "signature", "subtle", "zeroize", @@ -1097,16 +1068,6 @@ dependencies = [ "windows 0.61.1", ] -[[package]] -name = "generic-array" -version = "0.14.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" -dependencies = [ - "typenum", - "version_check", -] - [[package]] name = "getrandom" version = "0.2.16" @@ -2903,7 +2864,6 @@ dependencies = [ "remoc", "serde", "serde_json", - "sha2 0.10.9", "tokio", ] @@ -3371,7 +3331,7 @@ checksum = 
"c5e046edf639aa2e7afb285589e5405de2ef7e61d4b0ac1e30256e3eab911af9" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.11.0-rc.3", + "digest", ] [[package]] @@ -3380,17 +3340,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" -[[package]] -name = "sha2" -version = "0.10.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", -] - [[package]] name = "sha2" version = "0.11.0-rc.2" @@ -3399,7 +3348,7 @@ checksum = "d1e3878ab0f98e35b2df35fe53201d088299b41a6bb63e3e34dada2ac4abd924" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.11.0-rc.3", + "digest", ] [[package]] @@ -4050,7 +3999,7 @@ version = "0.6.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a55be643b40a21558f44806b53ee9319595bc7ca6896372e4e08e5d7d83c9cd6" dependencies = [ - "crypto-common 0.2.0-rc.4", + "crypto-common", "subtle", ] @@ -4108,12 +4057,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - [[package]] name = "wait-timeout" version = "0.2.1" diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 0322d90..648c67a 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -15,4 +15,3 @@ tokio.features = ["rt-multi-thread"] data-encoding = "2.9.0" iroh = "0.94.0" rand = "0.9.2" -sha2 = "0.10.9" diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs 
b/plugins/reaction-plugin-cluster/src/endpoint.rs index 52ee798..1e51e46 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -1,12 +1,9 @@ -use std::arch::x86_64::_mm_sha256rnds2_epu32; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::time::Duration; -use iroh::Endpoint; -use iroh::endpoint::{ConnectionStats, Incoming}; +use iroh::endpoint::Incoming; +use iroh::{Endpoint, PublicKey}; use iroh::{EndpointAddr, endpoint::Connection}; -use rand::{Rng, rng}; -use sha2::{Digest, Sha256}; use tokio::{ sync::mpsc, time::{Instant, sleep, sleep_until}, @@ -37,6 +34,8 @@ pub struct EndpointManager { cluster_name: String, /// Map of remote Endpoints to try to connect to retry_connections: BTreeMap, + /// Set of PublicKeys we're trying to connect to + all_connections: BTreeMap, /// Connection requests from the [`crate::Cluster`] endpoint_addr_rx: mpsc::Receiver, /// Connection sender to the [`crate::Cluster`] @@ -59,6 +58,7 @@ impl EndpointManager { endpoint, cluster_name, retry_connections: Default::default(), + all_connections: Default::default(), endpoint_addr_rx: rx1, connection_tx: tx2, shared_secret, @@ -92,7 +92,13 @@ impl EndpointManager { } // Tick at next deadline - tick = sleep_until(*self.retry_connections.keys().next().unwrap()); + tick = sleep_until( + self.retry_connections + .keys() + .next() + .map(ToOwned::to_owned) + .unwrap_or_else(|| Instant::now() + MAX_TIMEOUT), + ); } self.endpoint.close().await @@ -103,25 +109,21 @@ impl EndpointManager { Event::Quit => return Break::Yes, Event::TryConnect(endpoint_addr) => match self.try_connect(endpoint_addr).await { - Ok(connection) => return self.handshake(connection, true).await, + Ok(connection) => return self.check_and_send_connection(connection).await, Err(endpoint_addr) => { - self.insert_address(endpoint_addr, START_TIMEOUT); + self.insert_endpoint(endpoint_addr, START_TIMEOUT); } }, Event::Tick => { - if 
self - .retry_connections - .keys() - .next() - .is_some_and(|time| time < &Instant::now()) - { - let (_, (endpoint_addr, delta)) = self.retry_connections.pop_first().unwrap(); + if let Some((endpoint_addr, delta)) = self.pop_next_endpoint() { match self.try_connect(endpoint_addr).await { - Ok(connection) => return self.handshake(connection, true).await, + Ok(connection) => { + return self.check_and_send_connection(connection).await; + } Err(endpoint_addr) => { let delta = next_delta(delta); - self.insert_address(endpoint_addr, delta); + self.insert_endpoint(endpoint_addr, delta); } } } @@ -136,7 +138,9 @@ impl EndpointManager { let remote_address = incoming.remote_address(); let remote_address_validated = incoming.remote_address_validated(); match incoming.await { - Ok(connection) => return self.handshake(connection, false).await, + Ok(connection) => { + return self.check_and_send_connection(connection).await; + } Err(err) => { if remote_address_validated { eprintln!("INFO refused connection from {}: {err}", remote_address) @@ -151,19 +155,37 @@ impl EndpointManager { Break::No } - fn insert_address(&mut self, endpoint_addr: EndpointAddr, delta: Duration) { + /// Schedule an endpoint to try to connect to later + fn insert_endpoint(&mut self, endpoint_addr: EndpointAddr, delta: Duration) { if !delta.is_zero() { eprintln!( "INFO cluster {}: retry connecting to node {} in {:?}", self.cluster_name, endpoint_addr.id, delta ); } - let now = Instant::now(); + let next = Instant::now() + delta; // Schedule this address for later - self.retry_connections - .insert(now + delta, (endpoint_addr, delta)); + self.all_connections.insert(endpoint_addr.id, next); + self.retry_connections.insert(next, (endpoint_addr, delta)); } + /// Returns the next endpoint we should try to connect to + fn pop_next_endpoint(&mut self) -> Option<(EndpointAddr, Duration)> { + if self + .retry_connections + .keys() + .next() + .is_some_and(|time| time < &Instant::now()) + { + let (_, tuple) = 
self.retry_connections.pop_first().unwrap(); + self.all_connections.remove(&tuple.0.id); + Some(tuple) + } else { + None + } + } + + /// Try connecting to a remote endpoint async fn try_connect(&self, addr: EndpointAddr) -> Result { match self.endpoint.connect(addr.clone(), ALPN[0]).await { Ok(connection) => Ok(connection), @@ -177,116 +199,51 @@ impl EndpointManager { } } - async fn handshake(&mut self, connection: Connection, initiated_by_me: bool) -> Break { - let remote_id = connection - .remote_id() - .map(|id| id.to_string()) - .unwrap_or("".to_string()); - - // Send challenge and check response - let ask_future = { - let connection = connection.clone(); - let shared_secret = self.shared_secret.clone(); - let remote_id = remote_id.clone(); - - async move { - let (mut tx, mut rx) = connection.open_bi().await.map_err(|err| { - format!( - "could not init a QUIC communication channel with node {remote_id}: {err}", - ) - })?; - - let salt: [u8; 32] = rand::rng().random(); - - tx.write_all(&salt) - .await - .map_err(|err| format!("could not send data to node {remote_id}: {err}"))?; - - let mut concat: Vec = Vec::with_capacity(64); - concat.extend_from_slice(shared_secret.as_bytes()); - concat.extend_from_slice(&salt); - let expected_hash = Sha256::digest(concat); - - let mut received_hash = [0u8; 32]; - rx.read_exact(&mut received_hash) - .await - .map_err(|err| format!("could not read data from node {remote_id}: {err}"))?; - - if expected_hash.iter().eq(received_hash.iter()) { - // Send "ok" - // Return Ok(()) - tx.write_all(&[1u8]) - .await - .map_err(|err| format!("could not send data to node {remote_id}: {err}")) - } else { - let _ = tx.write_all(&[0u8]).await; - Err(format!( - "hash mismatch from node {remote_id}. It most probably has a different `secret_key` set." 
- )) - } + /// Check that an incoming connection is an endpoint we're trying to connect, + /// and send it to the [`Cluster`] + async fn check_and_send_connection(&mut self, connection: Connection) -> Break { + let remote_id = match connection.remote_id() { + Ok(id) => id, + Err(err) => { + eprintln!( + "ERROR cluster {}: could not retrieve peer's id: {err}", + self.cluster_name + ); + return Break::No; } }; - // Receive challenge and send response - let answer_future = { - let connection = connection.clone(); - let shared_secret = self.shared_secret.clone(); - let remote_id = remote_id.clone(); - - async move { - let (mut tx, mut rx) = connection.accept_bi().await.map_err(|err| { - format!( - "could not receive a QUIC communication channel from node {remote_id}: {err}", - ) - })?; - - let mut received_salt = [0u8; 32]; - rx.read_exact(&mut received_salt) - .await - .map_err(|err| format!("could not read data from node {remote_id}: {err}"))?; - - let mut concat: Vec = Vec::with_capacity(64); - concat.extend_from_slice(shared_secret.as_bytes()); - concat.extend_from_slice(&received_salt); - let computed_hash = Sha256::digest(concat); - - tx.write_all(&computed_hash) - .await - .map_err(|err| format!("could not send data to node {remote_id}: {err}"))?; - - let mut response = [0u8; 1]; - rx.read_exact(&mut response) - .await - .map_err(|err| format!("could not read data from node {remote_id}: {err}"))?; - - if response[0] == 1 { - Ok(()) - } else { - Err(format!( - "node {remote_id} announced a hash mismatch. It most probably has a different `secret_key` set." 
- )) - } + match self.all_connections.remove(&remote_id) { + None => { + eprintln!( + "WARN cluster {}: new peer's id '{remote_id}' is not in our list, refusing incoming connection.", + self.cluster_name + ); + eprintln!( + "INFO cluster {}: {}, {}", + self.cluster_name, + "maybe we're already connected to it, maybe it's not from our cluster", + "maybe it is new and it has not been configured yet on this node" + ); + return Break::No; + } + Some(time) => { + self.retry_connections.remove(&time); } - }; - - let (ask_result, answer_result) = tokio::join!(ask_future, answer_future); - if let Err(err) = &ask_result { - eprintln!("ERROR {err}"); - } - if let Err(err) = &answer_result { - eprintln!("ERROR {err}"); } - if ask_result.is_ok() && answer_result.is_ok() { - if let Err(_) = self.connection_tx.send(connection).await { - // This means the main cluster loop has exited, so let's quit - return Break::Yes; - } + // TODO persist the incoming address, so that we don't forget this address + + if let Err(_) = self.connection_tx.send(connection).await { + // This means the main cluster loop has exited, so let's quit + return Break::Yes; } Break::No } } +/// Compute the next wait Duration. +/// We're multiplying the Duration by [`TIMEOUT_FACTOR`] and cap it to [`MAX_TIMEOUT`]. 
fn next_delta(delta: Duration) -> Duration { // Multiply timeout by TIMEOUT_FACTOR let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); From 923587308431f1bd63275ac94a811c63f152fe7a Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 14 Nov 2025 12:00:00 +0100 Subject: [PATCH 141/241] Expose parse_duration to the plugin It may be better to put it in the reaction-plugin module instead --- Cargo.lock | 1 + plugins/reaction-plugin-cluster/Cargo.toml | 1 + plugins/reaction-plugin-cluster/src/main.rs | 11 ++++++ src/concepts/action.rs | 7 +++- src/concepts/filter.rs | 7 +++- src/concepts/mod.rs | 2 +- src/concepts/parse_duration.rs | 43 ++++++++++++--------- 7 files changed, 49 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c35f88..d4eb582 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2860,6 +2860,7 @@ dependencies = [ "data-encoding", "iroh", "rand 0.9.2", + "reaction", "reaction-plugin", "remoc", "serde", diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 648c67a..2c5d69e 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" edition = "2024" [dependencies] +reaction.path = "../../" reaction-plugin.workspace = true remoc.workspace = true diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 5bfce63..d112ea7 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -1,6 +1,7 @@ use std::{ collections::{BTreeMap, BTreeSet}, net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + time::Duration, }; use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; @@ -15,6 +16,8 @@ mod cluster; mod endpoint; mod secret_key; +use reaction::concepts::parse_duration; + use secret_key::{key_b64_to_bytes, key_bytes_to_b64, secret_key}; use crate::cluster::Cluster; @@ -52,6 +55,8 @@ struct 
StreamOptions { shared_secret_file: Option, /// Other nodes which are part of the cluster. nodes: Vec, + /// Max duration before we drop pending messages to a node we can't connect to. + message_timeout: String, } fn ipv4_unspecified() -> Option { @@ -69,6 +74,7 @@ struct StreamInit { bind_ipv6: Option, shared_secret: String, secret_key: SecretKey, + message_timeout: Duration, nodes: BTreeMap, tx: mpsc::Sender, } @@ -136,6 +142,10 @@ impl PluginInfo for Plugin { }; let mut nodes = BTreeMap::default(); + + let message_timeout = parse_duration(&options.message_timeout) + .map_err(|err| format!("invalid message_timeout: {err}"))?; + for node in options.nodes.into_iter() { let bytes = key_b64_to_bytes(&node.public_key) .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; @@ -171,6 +181,7 @@ impl PluginInfo for Plugin { bind_ipv6: options.bind_ipv6, shared_secret, secret_key, + message_timeout, nodes, tx, }; diff --git a/src/concepts/action.rs b/src/concepts/action.rs index b815d82..fcf5c63 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -108,8 +108,11 @@ impl Action { if let Some(after) = &self.after { self.after_duration = Some( - parse_duration(after) - .map_err(|err| format!("failed to parse after time: {}", err))?, + TimeDelta::from_std( + parse_duration(after) + .map_err(|err| format!("failed to parse after time: {}", err))?, + ) + .map_err(|err| format!("too big after time: {err}"))?, ); self.after = None; } else if self.on_exit { diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 171304f..b6708b9 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -124,8 +124,11 @@ impl Filter { if let Some(retry_period) = &self.retry_period { self.retry_duration = Some( - parse_duration(retry_period) - .map_err(|err| format!("failed to parse retry time: {}", err))?, + TimeDelta::from_std( + parse_duration(retry_period) + .map_err(|err| format!("failed to parse retry period: {}", err))?, + ) + 
.map_err(|err| format!("too big retry period: {err}"))?, ); self.retry_period = None; } diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index 6bd72a8..4b78070 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -11,7 +11,7 @@ use std::fmt::Debug; pub use action::Action; pub use config::{Config, Patterns}; pub use filter::{Duplicate, Filter}; -use parse_duration::parse_duration; +pub use parse_duration::parse_duration; pub use pattern::{Pattern, PatternType}; pub use plugin::Plugin; use serde::{Deserialize, Serialize}; diff --git a/src/concepts/parse_duration.rs b/src/concepts/parse_duration.rs index f42db64..c2b2562 100644 --- a/src/concepts/parse_duration.rs +++ b/src/concepts/parse_duration.rs @@ -1,4 +1,4 @@ -use chrono::TimeDelta; +use std::time::Duration; /// Parses the &str argument as a Duration /// Returns Ok(TimeDelta) if successful, or Err(String). @@ -12,26 +12,26 @@ use chrono::TimeDelta; /// - `m` / `min` / `mins` / `minute` / `minutes` /// - `h` / `hour` / `hours` /// - `d` / `day` / `days` -pub fn parse_duration(d: &str) -> Result { +pub fn parse_duration(d: &str) -> Result { let d_trimmed = d.trim(); let chars = d_trimmed.as_bytes(); let mut value = 0; let mut i = 0; while i < chars.len() && chars[i].is_ascii_digit() { - value = value * 10 + (chars[i] - b'0') as u32; + value = value * 10 + (chars[i] - b'0') as u64; i += 1; } if i == 0 { return Err(format!("duration '{}' doesn't start with digits", d)); } - let ok_as = |func: fn(i64) -> TimeDelta| -> Result<_, String> { Ok(func(value as i64)) }; + let ok_as = |func: fn(u64) -> Duration| -> Result<_, String> { Ok(func(value as u64)) }; match d_trimmed[i..].trim() { - "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(TimeDelta::milliseconds), - "s" | "sec" | "secs" | "second" | "seconds" => ok_as(TimeDelta::seconds), - "m" | "min" | "mins" | "minute" | "minutes" => ok_as(TimeDelta::minutes), - "h" | "hour" | "hours" => ok_as(TimeDelta::hours), - "d" | "day" | "days" => 
ok_as(TimeDelta::days), + "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(Duration::from_millis), + "s" | "sec" | "secs" | "second" | "seconds" => ok_as(Duration::from_secs), + "m" | "min" | "mins" | "minute" | "minutes" => ok_as(|n| Duration::from_secs(n * 60)), + "h" | "hour" | "hours" => ok_as(|n| Duration::from_secs(n * 3600)), + "d" | "day" | "days" => ok_as(|n| Duration::from_secs(n * 3600 * 24)), unit => Err(format!( "unit {} not recognised. must be one of s/sec/seconds, m/min/minutes, h/hours, d/days", unit @@ -42,8 +42,6 @@ pub fn parse_duration(d: &str) -> Result { #[cfg(test)] mod tests { - use chrono::TimeDelta; - use super::*; #[test] @@ -53,13 +51,22 @@ mod tests { #[test] fn parse_duration_test() { - assert_eq!(parse_duration("1s"), Ok(TimeDelta::seconds(1))); - assert_eq!(parse_duration("12s"), Ok(TimeDelta::seconds(12))); - assert_eq!(parse_duration(" 12 secs "), Ok(TimeDelta::seconds(12))); - assert_eq!(parse_duration("2m"), Ok(TimeDelta::minutes(2))); - assert_eq!(parse_duration("6 hours"), Ok(TimeDelta::hours(6))); - assert_eq!(parse_duration("1d"), Ok(TimeDelta::days(1))); - assert_eq!(parse_duration("365d"), Ok(TimeDelta::days(365))); + assert_eq!(parse_duration("1s"), Ok(Duration::from_secs(1))); + assert_eq!(parse_duration("12s"), Ok(Duration::from_secs(12))); + assert_eq!(parse_duration(" 12 secs "), Ok(Duration::from_secs(12))); + assert_eq!(parse_duration("2m"), Ok(Duration::from_secs(2 * 60))); + assert_eq!( + parse_duration("6 hours"), + Ok(Duration::from_secs(6 * 60 * 60)) + ); + assert_eq!( + parse_duration("1d"), + Ok(Duration::from_secs(1 * 24 * 60 * 60)) + ); + assert_eq!( + parse_duration("365d"), + Ok(Duration::from_secs(365 * 24 * 60 * 60)) + ); assert!(parse_duration("d 3").is_err()); assert!(parse_duration("d3").is_err()); From 7e680a3a669b0ecf5d8f4237899b0825659cc315 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 14 Nov 2025 12:00:00 +0100 Subject: [PATCH 142/241] Remove shared_secret option --- 
.../reaction-plugin-cluster/src/cluster.rs | 13 +++------- .../reaction-plugin-cluster/src/endpoint.rs | 3 --- plugins/reaction-plugin-cluster/src/main.rs | 25 ++----------------- 3 files changed, 6 insertions(+), 35 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index ddce02b..dd17465 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -51,10 +51,6 @@ impl Cluster { let (endpoint_addr_tx, connection_rx) = EndpointManager::new(endpoint, stream.name.clone(), stream.nodes.len()); - for node in stream.nodes.values() { - endpoint_addr_tx.send(node.clone()).await.unwrap(); - } - let this = Self { // No connection for now connections: Default::default(), @@ -71,18 +67,17 @@ impl Cluster { } async fn task(mut self) { + // Ask connections for all nodes + for node in self.stream.nodes.values() { + self.endpoint_addr_tx.send(node.clone()).await.unwrap(); + } let action_rx = self.spawn_actions(); // Ok donc là il faut : - // - Que je réessaie plus tard les connections qui ont raté - // - Que j'accepte des nouvelles connections // - Que j'ai une queue par noeud // - Que chaque élément de la queue puisse timeout // - Que j'envoie les messages de mes actions dans toutes les queues - // // - Que j'écoute les messages de mes pairs et que je les renvoie à mon stream - // - // Et que je gère l'authentification en début de connection } fn spawn_actions(&mut self) -> mpsc::Receiver<(Exec, bool)> { diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs index 1e51e46..cd803e3 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -40,14 +40,12 @@ pub struct EndpointManager { endpoint_addr_rx: mpsc::Receiver, /// Connection sender to the [`crate::Cluster`] connection_tx: mpsc::Sender, - shared_secret: String, } impl EndpointManager { 
pub fn new( endpoint: Endpoint, cluster_name: String, - shared_secret: String, cluster_size: usize, ) -> (mpsc::Sender, mpsc::Receiver) { let (tx1, rx1) = mpsc::channel(cluster_size); @@ -61,7 +59,6 @@ impl EndpointManager { all_connections: Default::default(), endpoint_addr_rx: rx1, connection_tx: tx2, - shared_secret, } .task() .await diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index d112ea7..fcfda25 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -10,7 +10,7 @@ use reaction_plugin::{ }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; -use tokio::{fs, sync::oneshot}; +use tokio::sync::oneshot; mod cluster; mod endpoint; @@ -45,14 +45,9 @@ struct StreamOptions { #[serde(default = "ipv4_unspecified")] bind_ipv4: Option, /// The IPv6 to bind to. Defaults to 0.0.0.0. - /// Set to `null` to use IPv6 only. + /// Set to `null` to use IPv4 only. #[serde(default = "ipv6_unspecified")] bind_ipv6: Option, - /// The secret that permits to join the cluster. - shared_secret: Option, - /// The secret that permits to join the cluster, as a file. - /// Beginning and ending whitespace will be trimmed. - shared_secret_file: Option, /// Other nodes which are part of the cluster. nodes: Vec, /// Max duration before we drop pending messages to a node we can't connect to. 
@@ -72,7 +67,6 @@ struct StreamInit { listen_port: u16, bind_ipv4: Option, bind_ipv6: Option, - shared_secret: String, secret_key: SecretKey, message_timeout: Duration, nodes: BTreeMap, @@ -127,20 +121,6 @@ impl PluginInfo for Plugin { let options: StreamOptions = serde_json::from_value(config.into()) .map_err(|err| format!("invalid options: {err}"))?; - let shared_secret = if let Some(shared_secret) = options.shared_secret { - shared_secret - } else if let Some(shared_secret_file) = &options.shared_secret_file { - fs::read_to_string(shared_secret_file) - .await - .map_err(|err| { - format!("can't access shared_secret_file {shared_secret_file}: {err}") - })? - .trim() - .to_owned() - } else { - return Err("missing shared secret: either shared_secret or shared_secret_file must be provided".into()); - }; - let mut nodes = BTreeMap::default(); let message_timeout = parse_duration(&options.message_timeout) @@ -179,7 +159,6 @@ impl PluginInfo for Plugin { listen_port: options.listen_port, bind_ipv4: options.bind_ipv4, bind_ipv6: options.bind_ipv6, - shared_secret, secret_key, message_timeout, nodes, From 40c6202cd43fc14f7418b0320e0b1da6a5711b9f Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 14 Nov 2025 12:00:00 +0100 Subject: [PATCH 143/241] WIP switch to one task per connection --- .../reaction-plugin-cluster/src/cluster.rs | 178 ++++++++++-------- 1 file changed, 99 insertions(+), 79 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index dd17465..c32d21a 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -1,6 +1,8 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, VecDeque}, net::{SocketAddrV4, SocketAddrV6}, + sync::Arc, + time::Instant, }; use iroh::{Endpoint, EndpointAddr, EndpointId, endpoint::Connection}; @@ -13,86 +15,104 @@ pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; type ShutdownNotification 
= oneshot::Receiver>; -pub struct Cluster { +pub async fn bind(stream: &StreamInit) -> Result { + let mut builder = Endpoint::builder() + .secret_key(stream.secret_key.clone()) + .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect()) + .relay_mode(iroh::RelayMode::Disabled) + .clear_discovery(); + + if let Some(ip) = stream.bind_ipv4 { + builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port)); + } + if let Some(ip) = stream.bind_ipv6 { + builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0)); + } + + builder.bind().await.map_err(|err| { + format!( + "Could not create socket address for cluster {}: {err}", + stream.name + ) + }) +} + +pub fn cluster_tasks( + endpoint: Endpoint, stream: StreamInit, actions: Vec, shutdown: ShutdownNotification, - connections: BTreeMap, - endpoint_addr_tx: mpsc::Sender, +) { + let messages_from_actions = spawn_actions(actions, stream.tx.clone()); + + let (endpoint_addr_tx, connection_rx) = + EndpointManager::new(endpoint, stream.name.clone(), stream.nodes.len()); + + // TODO create ConnectionManagers and connect them to EndpointManager +} + +fn spawn_actions( + mut actions: Vec, + own_cluster_tx: remoc::rch::mpsc::Sender, +) -> mpsc::Receiver> { + let (nodes_tx, nodes_rx) = mpsc::channel(1); + while let Some(mut action) = actions.pop() { + let nodes_tx = nodes_tx.clone(); + let own_cluster_tx = own_cluster_tx.clone(); + tokio::spawn(async move { action.serve(nodes_tx, own_cluster_tx).await }); + } + nodes_rx +} + +impl ActionInit { + async fn serve( + &mut self, + nodes_tx: mpsc::Sender>, + own_stream_tx: remoc::rch::mpsc::Sender, + ) { + while let Ok(Some(m)) = self.rx.recv().await { + let line = if m.match_.is_empty() { + self.send.clone() + } else { + (0..(m.match_.len())) + .zip(&self.patterns) + .fold(self.send.clone(), |acc, (i, pattern)| { + acc.replace(pattern, &m.match_[i]) + }) + }; + if self.self_ + && let Err(err) = own_stream_tx.send(line.clone()).await + { + eprintln!("ERROR 
while queueing message to be sent to own cluster stream: {err}"); + } + + let line = Arc::new(line); + if let Err(err) = nodes_tx.send(line).await { + eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}"); + }; + + if let Err(err) = m.result.send(Ok(())) { + eprintln!("ERROR while responding to reaction action: {err}"); + } + } + } +} + +pub struct TimeMessage { + pub message: Arc, + pub timeout: Instant, +} + +pub struct ConnectionManager { + endpoint: EndpointAddr, + // Ask the EndpointManager to connect + ask_connection: mpsc::Sender, + // Our own connection (when we have one) + connection: Option, + // The EndpointManager sending us a connection (whether we asked for it or not) connection_rx: mpsc::Receiver, -} - -impl Cluster { - pub async fn new( - stream: StreamInit, - actions: Vec, - shutdown: ShutdownNotification, - ) -> Result<(), String> { - let mut builder = Endpoint::builder() - .secret_key(stream.secret_key.clone()) - .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect()) - .relay_mode(iroh::RelayMode::Disabled) - .clear_discovery(); - - if let Some(ip) = stream.bind_ipv4 { - builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port)); - } - if let Some(ip) = stream.bind_ipv6 { - builder = builder.bind_addr_v6(SocketAddrV6::new(ip, stream.listen_port, 0, 0)); - } - - let endpoint = builder.bind().await.map_err(|err| { - format!( - "Could not create socket address for cluster {}: {err}", - stream.name - ) - })?; - - let (endpoint_addr_tx, connection_rx) = - EndpointManager::new(endpoint, stream.name.clone(), stream.nodes.len()); - - let this = Self { - // No connection for now - connections: Default::default(), - // Values passed as-is - stream, - actions, - shutdown, - endpoint_addr_tx, - connection_rx, - }; - tokio::spawn(async move { this.task().await }); - - Ok(()) - } - - async fn task(mut self) { - // Ask connections for all nodes - for node in self.stream.nodes.values() { - 
self.endpoint_addr_tx.send(node.clone()).await.unwrap(); - } - let action_rx = self.spawn_actions(); - - // Ok donc là il faut : - // - Que j'ai une queue par noeud - // - Que chaque élément de la queue puisse timeout - // - Que j'envoie les messages de mes actions dans toutes les queues - // - Que j'écoute les messages de mes pairs et que je les renvoie à mon stream - } - - fn spawn_actions(&mut self) -> mpsc::Receiver<(Exec, bool)> { - let (tx, rx) = mpsc::channel(1); - while let Some(mut action) = self.actions.pop() { - let tx = tx.clone(); - tokio::spawn(async move { - while let Ok(Some(exec)) = action.rx.recv().await { - if let Err(err) = tx.send((exec, action.self_)).await { - eprintln!("ERROR while queueing action in cluster: {err}"); - break; - } - } - }); - } - rx - } + // Our queue of messages to send + queue: VecDeque, + // Messages we send from remote nodes to our own stream + own_cluster_tx: remoc::rch::mpsc::Sender, } From a70b45ba2d121cb8fb33822c6a33d227127a26ad Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 15 Nov 2025 12:00:00 +0100 Subject: [PATCH 144/241] Move parse_duration to reaction-plugin and fix dependency tree --- Cargo.lock | 3 +- Cargo.toml | 3 +- plugins/reaction-plugin-cluster/Cargo.toml | 2 +- .../reaction-plugin-cluster/src/cluster.rs | 9 +- .../reaction-plugin-cluster/src/endpoint.rs | 2 +- plugins/reaction-plugin-cluster/src/main.rs | 17 +- plugins/reaction-plugin/Cargo.lock | 319 ------------------ plugins/reaction-plugin/Cargo.toml | 5 +- plugins/reaction-plugin/src/lib.rs | 3 + .../reaction-plugin/src/time.rs | 45 ++- src/concepts/action.rs | 10 +- src/concepts/filter.rs | 10 +- src/concepts/mod.rs | 2 - 13 files changed, 51 insertions(+), 379 deletions(-) delete mode 100644 plugins/reaction-plugin/Cargo.lock rename src/concepts/parse_duration.rs => plugins/reaction-plugin/src/time.rs (54%) diff --git a/Cargo.lock b/Cargo.lock index d4eb582..85db0af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2847,6 +2847,7 @@ dependencies = [ 
name = "reaction-plugin" version = "1.0.0" dependencies = [ + "chrono", "remoc", "serde", "serde_json", @@ -2857,10 +2858,10 @@ dependencies = [ name = "reaction-plugin-cluster" version = "0.1.0" dependencies = [ + "chrono", "data-encoding", "iroh", "rand 0.9.2", - "reaction", "reaction-plugin", "remoc", "serde", diff --git a/Cargo.toml b/Cargo.toml index 1a0aaeb..9fd0298 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ assets = [ [dependencies] # Time types -chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } +chrono = { workspace = true } # CLI parsing clap = { version = "4.5.4", features = ["derive"] } # Unix interfaces @@ -83,3 +83,4 @@ serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" tokio = { version = "1.40.0" } reaction-plugin = { path = "plugins/reaction-plugin" } +chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 2c5d69e..42c3171 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -4,9 +4,9 @@ version = "0.1.0" edition = "2024" [dependencies] -reaction.path = "../../" reaction-plugin.workspace = true +chrono.workspace = true remoc.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index c32d21a..87b283e 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -1,12 +1,11 @@ use std::{ - collections::{BTreeMap, VecDeque}, + collections::VecDeque, net::{SocketAddrV4, SocketAddrV6}, sync::Arc, - time::Instant, }; -use iroh::{Endpoint, EndpointAddr, EndpointId, endpoint::Connection}; -use reaction_plugin::Exec; +use chrono::{DateTime, Local}; +use iroh::{Endpoint, EndpointAddr, endpoint::Connection}; use tokio::sync::{mpsc, oneshot}; use 
crate::{ActionInit, StreamInit, endpoint::EndpointManager}; @@ -100,7 +99,7 @@ impl ActionInit { pub struct TimeMessage { pub message: Arc, - pub timeout: Instant, + pub timeout: DateTime, } pub struct ConnectionManager { diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs index cd803e3..851166c 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeMap; use std::time::Duration; use iroh::endpoint::Incoming; diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index fcfda25..0bd5117 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -1,12 +1,13 @@ use std::{ collections::{BTreeMap, BTreeSet}, net::{Ipv4Addr, Ipv6Addr, SocketAddr}, - time::Duration, }; +use chrono::TimeDelta; use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; use reaction_plugin::{ ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, main_loop, + parse_duration, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -16,11 +17,9 @@ mod cluster; mod endpoint; mod secret_key; -use reaction::concepts::parse_duration; - use secret_key::{key_b64_to_bytes, key_bytes_to_b64, secret_key}; -use crate::cluster::Cluster; +use crate::cluster::{bind, cluster_tasks}; #[tokio::main] async fn main() { @@ -68,7 +67,7 @@ struct StreamInit { bind_ipv4: Option, bind_ipv6: Option, secret_key: SecretKey, - message_timeout: Duration, + message_timeout: TimeDelta, nodes: BTreeMap, tx: mpsc::Sender, } @@ -213,12 +212,14 @@ impl PluginInfo for Plugin { while let Some((stream_name, stream)) = self.streams.pop_first() { let (tx, rx) = oneshot::channel(); self.cluster_shutdown.push(tx); - Cluster::new( + + let endpoint = bind(&stream).await?; + cluster_tasks( + 
endpoint, stream, self.actions.remove(&stream_name).unwrap_or_default(), rx, - ) - .await?; + ); } // Check there is no action left if !self.actions.is_empty() { diff --git a/plugins/reaction-plugin/Cargo.lock b/plugins/reaction-plugin/Cargo.lock deleted file mode 100644 index a4701f3..0000000 --- a/plugins/reaction-plugin/Cargo.lock +++ /dev/null @@ -1,319 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 4 - -[[package]] -name = "cfg-if" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "hashbrown" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" - -[[package]] -name = "indexmap" -version = "2.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "libc" -version = "0.2.176" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" - -[[package]] -name = "libloading" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" -dependencies = [ - "cfg-if", 
- "windows-link", -] - -[[package]] -name = "memchr" -version = "2.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit", -] - -[[package]] -name = "proc-macro2" -version = "1.0.101" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "reaction-plugin" -version = "0.1.0" -dependencies = [ - 
"stabby", -] - -[[package]] -name = "rustc_version" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" -dependencies = [ - "semver", -] - -[[package]] -name = "rustversion" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" - -[[package]] -name = "semver" -version = "1.0.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" - -[[package]] -name = "serde_core" -version = "1.0.226" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.226" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "sha2-const-stable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" - -[[package]] -name = "stabby" -version = "72.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976399a0c48ea769ef7f5dc303bb88240ab8d84008647a6b2303eced3dab3945" -dependencies = [ - "libloading", - "rustversion", - "stabby-abi", -] - -[[package]] -name = "stabby-abi" -version = "72.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b54832a9a1f92a0e55e74a5c0332744426edc515bb3fbad82f10b874a87f0d" -dependencies = [ - "rustc_version", - "rustversion", - "sha2-const-stable", - "stabby-macros", -] - -[[package]] -name = "stabby-macros" -version = "72.1.1" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a768b1e51e4dbfa4fa52ae5c01241c0a41e2938fdffbb84add0c8238092f9091" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "rand", - "syn 1.0.109", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.106" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "toml_datetime" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_edit" -version = "0.23.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" -dependencies = [ - "indexmap", - "toml_datetime", - "toml_parser", - "winnow", -] - -[[package]] -name = "toml_parser" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" -dependencies = [ - "winnow", -] - -[[package]] -name = "unicode-ident" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "windows-link" -version = "0.2.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" - -[[package]] -name = "winnow" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" -dependencies = [ - "memchr", -] - -[[package]] -name = "zerocopy" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index 4a23e2c..ec53c7d 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -4,11 +4,10 @@ version = "1.0.0" edition = "2024" [dependencies] +chrono.workspace = true remoc.workspace = true - serde.workspace = true - serde_json.workspace = true - tokio.workspace = true tokio.features = ["io-std"] + diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 1f2eecc..c03be3c 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -100,6 +100,9 @@ use serde::{Deserialize, Serialize}; use serde_json::{Number, Value as JValue}; use tokio::io::{stdin, stdout}; +mod time; +pub use time::parse_duration; + /// This is the only trait that **must** be implemented by a plugin. /// It provides lists of stream, filter and action types implemented by a dynamic plugin. 
#[rtc::remote] diff --git a/src/concepts/parse_duration.rs b/plugins/reaction-plugin/src/time.rs similarity index 54% rename from src/concepts/parse_duration.rs rename to plugins/reaction-plugin/src/time.rs index c2b2562..9a2694b 100644 --- a/src/concepts/parse_duration.rs +++ b/plugins/reaction-plugin/src/time.rs @@ -1,6 +1,6 @@ -use std::time::Duration; +use chrono::TimeDelta; -/// Parses the &str argument as a Duration +/// Parses the &str argument as a TimeDelta /// Returns Ok(TimeDelta) if successful, or Err(String). /// /// Format is defined as follows: ` ` @@ -12,26 +12,26 @@ use std::time::Duration; /// - `m` / `min` / `mins` / `minute` / `minutes` /// - `h` / `hour` / `hours` /// - `d` / `day` / `days` -pub fn parse_duration(d: &str) -> Result { +pub fn parse_duration(d: &str) -> Result { let d_trimmed = d.trim(); let chars = d_trimmed.as_bytes(); let mut value = 0; let mut i = 0; while i < chars.len() && chars[i].is_ascii_digit() { - value = value * 10 + (chars[i] - b'0') as u64; + value = value * 10 + (chars[i] - b'0') as u32; i += 1; } if i == 0 { return Err(format!("duration '{}' doesn't start with digits", d)); } - let ok_as = |func: fn(u64) -> Duration| -> Result<_, String> { Ok(func(value as u64)) }; + let ok_as = |func: fn(i64) -> TimeDelta| -> Result<_, String> { Ok(func(value as i64)) }; match d_trimmed[i..].trim() { - "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(Duration::from_millis), - "s" | "sec" | "secs" | "second" | "seconds" => ok_as(Duration::from_secs), - "m" | "min" | "mins" | "minute" | "minutes" => ok_as(|n| Duration::from_secs(n * 60)), - "h" | "hour" | "hours" => ok_as(|n| Duration::from_secs(n * 3600)), - "d" | "day" | "days" => ok_as(|n| Duration::from_secs(n * 3600 * 24)), + "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(TimeDelta::milliseconds), + "s" | "sec" | "secs" | "second" | "seconds" => ok_as(TimeDelta::seconds), + "m" | "min" | "mins" | "minute" | "minutes" => ok_as(TimeDelta::minutes), + "h" | 
"hour" | "hours" => ok_as(TimeDelta::hours), + "d" | "day" | "days" => ok_as(TimeDelta::days), unit => Err(format!( "unit {} not recognised. must be one of s/sec/seconds, m/min/minutes, h/hours, d/days", unit @@ -42,6 +42,8 @@ pub fn parse_duration(d: &str) -> Result { #[cfg(test)] mod tests { + use chrono::TimeDelta; + use super::*; #[test] @@ -51,22 +53,13 @@ mod tests { #[test] fn parse_duration_test() { - assert_eq!(parse_duration("1s"), Ok(Duration::from_secs(1))); - assert_eq!(parse_duration("12s"), Ok(Duration::from_secs(12))); - assert_eq!(parse_duration(" 12 secs "), Ok(Duration::from_secs(12))); - assert_eq!(parse_duration("2m"), Ok(Duration::from_secs(2 * 60))); - assert_eq!( - parse_duration("6 hours"), - Ok(Duration::from_secs(6 * 60 * 60)) - ); - assert_eq!( - parse_duration("1d"), - Ok(Duration::from_secs(1 * 24 * 60 * 60)) - ); - assert_eq!( - parse_duration("365d"), - Ok(Duration::from_secs(365 * 24 * 60 * 60)) - ); + assert_eq!(parse_duration("1s"), Ok(TimeDelta::seconds(1))); + assert_eq!(parse_duration("12s"), Ok(TimeDelta::seconds(12))); + assert_eq!(parse_duration(" 12 secs "), Ok(TimeDelta::seconds(12))); + assert_eq!(parse_duration("2m"), Ok(TimeDelta::minutes(2))); + assert_eq!(parse_duration("6 hours"), Ok(TimeDelta::hours(6))); + assert_eq!(parse_duration("1d"), Ok(TimeDelta::days(1))); + assert_eq!(parse_duration("365d"), Ok(TimeDelta::days(365))); assert!(parse_duration("d 3").is_err()); assert!(parse_duration("d3").is_err()); diff --git a/src/concepts/action.rs b/src/concepts/action.rs index fcf5c63..c2bf976 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -2,11 +2,12 @@ use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc}; use chrono::TimeDelta; +use reaction_plugin::parse_duration; use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::process::Command; -use super::{Match, Pattern, PatternType, null_value, parse_duration::*}; +use super::{Match, Pattern, PatternType, null_value}; 
#[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] @@ -108,11 +109,8 @@ impl Action { if let Some(after) = &self.after { self.after_duration = Some( - TimeDelta::from_std( - parse_duration(after) - .map_err(|err| format!("failed to parse after time: {}", err))?, - ) - .map_err(|err| format!("too big after time: {err}"))?, + parse_duration(after) + .map_err(|err| format!("failed to parse after time: {}", err))?, ); self.after = None; } else if self.on_exit { diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index b6708b9..b1a0d7e 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -7,10 +7,11 @@ use std::{ }; use chrono::TimeDelta; +use reaction_plugin::parse_duration; use regex::Regex; use serde::{Deserialize, Serialize}; -use super::{Action, Match, Pattern, PatternType, Patterns, parse_duration}; +use super::{Action, Match, Pattern, PatternType, Patterns}; #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] pub enum Duplicate { @@ -124,11 +125,8 @@ impl Filter { if let Some(retry_period) = &self.retry_period { self.retry_duration = Some( - TimeDelta::from_std( - parse_duration(retry_period) - .map_err(|err| format!("failed to parse retry period: {}", err))?, - ) - .map_err(|err| format!("too big retry period: {err}"))?, + parse_duration(retry_period) + .map_err(|err| format!("failed to parse retry period: {}", err))?, ); self.retry_period = None; } diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index 4b78070..215be2b 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -1,7 +1,6 @@ mod action; mod config; mod filter; -mod parse_duration; mod pattern; mod plugin; mod stream; @@ -11,7 +10,6 @@ use std::fmt::Debug; pub use action::Action; pub use config::{Config, Patterns}; pub use filter::{Duplicate, Filter}; -pub use parse_duration::parse_duration; pub use pattern::{Pattern, PatternType}; pub use plugin::Plugin; use serde::{Deserialize, Serialize}; From 
5782e3eb29d6c7fae43b22818e9ecb6121e6c9ac Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 15 Nov 2025 12:00:00 +0100 Subject: [PATCH 145/241] Fix reaction-plugin doctests --- plugins/reaction-plugin/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index c03be3c..f24a030 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -14,7 +14,7 @@ //! //! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`. //! It will be invoked with one positional argument "serve". -//! ``` +//! ```bash //! reaction-plugin-$NAME serve //! ``` //! This can be useful if you want to provide CLI functionnality to your users, @@ -63,7 +63,7 @@ //! ``` //! //! `src/main.rs` -//! ```rust +//! ```ignore //! use reaction_plugin::PluginInfo; //! //! #[tokio::main] @@ -285,7 +285,7 @@ pub struct Exec { /// /// Your main function should only create a struct that implements [`PluginInfo`] /// and then call [`main_loop`]: -/// ```rust +/// ```ignore /// #[tokio::main] /// async fn main() { /// let plugin = MyPlugin::default(); From bc0271b20984cf15510d22b6ba05524ed711546d Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 15 Nov 2025 12:00:00 +0100 Subject: [PATCH 146/241] Fix test that did not pass when virtual was not previously built This seems a bit hacky though because the test needs to have `cargo` in `$PATH` --- tests/plugin_virtual.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/plugin_virtual.rs b/tests/plugin_virtual.rs index 008624a..19128d8 100644 --- a/tests/plugin_virtual.rs +++ b/tests/plugin_virtual.rs @@ -6,6 +6,11 @@ use predicates::prelude::predicate; #[test] fn plugin_virtual() { + // First build reaction-plugin-virtual + Command::new("cargo") + .args(["build", "-p", "reaction-plugin-virtual"]) + .unwrap(); + let tmp_dir = assert_fs::TempDir::new().unwrap(); tmp_dir .child("config.jsonnet") From 
71d26766f82ad0b43f4ef0c598f32398cf0a233b Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 15 Nov 2025 12:00:00 +0100 Subject: [PATCH 147/241] plugin: Stream plugins now pass time information along their lines This will permit the cluster to accurately receive older-than-immediate information, and it will permit potential log plugins (journald?) to go back in time at startup. --- .../reaction-plugin-cluster/src/cluster.rs | 39 ++++++++++++------- plugins/reaction-plugin-cluster/src/main.rs | 6 +-- plugins/reaction-plugin-virtual/src/main.rs | 8 ++-- plugins/reaction-plugin/src/lib.rs | 5 ++- src/daemon/stream.rs | 13 +++---- 5 files changed, 43 insertions(+), 28 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 87b283e..6e6c7f0 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -4,8 +4,9 @@ use std::{ sync::Arc, }; -use chrono::{DateTime, Local}; +use chrono::{DateTime, Local, Utc}; use iroh::{Endpoint, EndpointAddr, endpoint::Connection}; +use reaction_plugin::Line; use tokio::sync::{mpsc, oneshot}; use crate::{ActionInit, StreamInit, endpoint::EndpointManager}; @@ -14,6 +15,8 @@ pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; type ShutdownNotification = oneshot::Receiver>; +type UtcLine = Arc<(String, DateTime)>; + pub async fn bind(stream: &StreamInit) -> Result { let mut builder = Endpoint::builder() .secret_key(stream.secret_key.clone()) @@ -52,8 +55,8 @@ pub fn cluster_tasks( fn spawn_actions( mut actions: Vec, - own_cluster_tx: remoc::rch::mpsc::Sender, -) -> mpsc::Receiver> { + own_cluster_tx: remoc::rch::mpsc::Sender, +) -> mpsc::Receiver { let (nodes_tx, nodes_rx) = mpsc::channel(1); while let Some(mut action) = actions.pop() { let nodes_tx = nodes_tx.clone(); @@ -66,8 +69,8 @@ fn spawn_actions( impl ActionInit { async fn serve( &mut self, - nodes_tx: mpsc::Sender>, - own_stream_tx: 
remoc::rch::mpsc::Sender, + nodes_tx: mpsc::Sender, + own_stream_tx: remoc::rch::mpsc::Sender, ) { while let Ok(Some(m)) = self.rx.recv().await { let line = if m.match_.is_empty() { @@ -79,13 +82,14 @@ impl ActionInit { acc.replace(pattern, &m.match_[i]) }) }; + let now = Local::now(); if self.self_ - && let Err(err) = own_stream_tx.send(line.clone()).await + && let Err(err) = own_stream_tx.send((line.clone(), now.clone())).await { eprintln!("ERROR while queueing message to be sent to own cluster stream: {err}"); } - let line = Arc::new(line); + let line = Arc::new((line, now.to_utc())); if let Err(err) = nodes_tx.send(line).await { eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}"); }; @@ -97,11 +101,6 @@ impl ActionInit { } } -pub struct TimeMessage { - pub message: Arc, - pub timeout: DateTime, -} - pub struct ConnectionManager { endpoint: EndpointAddr, // Ask the EndpointManager to connect @@ -111,7 +110,21 @@ pub struct ConnectionManager { // The EndpointManager sending us a connection (whether we asked for it or not) connection_rx: mpsc::Receiver, // Our queue of messages to send - queue: VecDeque, + queue: VecDeque, // Messages we send from remote nodes to our own stream own_cluster_tx: remoc::rch::mpsc::Sender, } + +#[cfg(test)] +mod tests { + use chrono::{DateTime, Local}; + + fn different_local_tz_is_ok() { + let date1: DateTime = + serde_json::from_str("2025-11-02T17:47:21.716229569+01:00").unwrap(); + let date2: DateTime = + serde_json::from_str("2025-11-02T18:47:21.716229569+02:00").unwrap(); + + assert_eq!(date1.to_utc(), date2.to_utc()); + } +} diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 0bd5117..9802936 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -6,8 +6,8 @@ use std::{ use chrono::TimeDelta; use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; use reaction_plugin::{ - ActionImpl, 
Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, main_loop, - parse_duration, + ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + main_loop, parse_duration, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -69,7 +69,7 @@ struct StreamInit { secret_key: SecretKey, message_timeout: TimeDelta, nodes: BTreeMap, - tx: mpsc::Sender, + tx: mpsc::Sender, } #[derive(Serialize, Deserialize)] diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 57b8610..8bd735c 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + ActionImpl, Exec, Hello, Line, Local, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -105,11 +105,11 @@ impl PluginInfo for Plugin { #[derive(Clone)] struct VirtualStream { - tx: mpsc::Sender, + tx: mpsc::Sender, } impl VirtualStream { - fn new(config: Value) -> Result<(Self, mpsc::Receiver), String> { + fn new(config: Value) -> Result<(Self, mpsc::Receiver), String> { const CONFIG_ERROR: &'static str = "streams of type virtual take no options"; match config { Value::Null => (), @@ -205,7 +205,7 @@ impl VirtualAction { acc.replace(pattern, &m.match_[i]) }) }; - let result = match self.to.tx.send(line).await { + let result = match self.to.tx.send((line, Local::now())).await { Ok(_) => Ok(()), Err(err) => Err(format!("{err}")), }; diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index f24a030..2bc436e 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -92,6 +92,7 @@ use std::{ process::exit, }; +pub use chrono::{DateTime, Local}; use remoc::{ Connect, rch, 
rtc::{self, Server}, @@ -245,9 +246,11 @@ impl Into for Value { } } +pub type Line = (String, DateTime); + #[derive(Serialize, Deserialize)] pub struct StreamImpl { - pub stream: rch::mpsc::Receiver, + pub stream: rch::mpsc::Receiver, /// Whether this stream works standalone, or if it needs other streams to be fed. /// Defaults to true. /// When false, reaction will exit if it's the last one standing. diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index 8834311..25375f7 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -14,7 +14,7 @@ use tokio::{ use tracing::{debug, error, info}; use crate::{ - concepts::{Filter, Stream}, + concepts::{Filter, Stream, Time}, daemon::{filter::FilterManager, plugin::Plugins, utils::kill_child}, }; @@ -129,8 +129,8 @@ impl StreamManager { loop { match plugin.stream.recv().await { - Ok(Some(line)) => { - self.handle_line(line).await; + Ok(Some((line, time))) => { + self.handle_line(line, time).await; } Err(err) => { if err.is_final() { @@ -208,7 +208,7 @@ impl StreamManager { loop { match lines.next().await { Some(Ok(line)) => { - self.handle_line(line).await; + self.handle_line(line, Local::now()).await; } Some(Err(err)) => { error!( @@ -224,10 +224,9 @@ impl StreamManager { } } - async fn handle_line(&self, line: String) { - let now = Local::now(); + async fn handle_line(&self, line: String, time: Time) { for manager in self.matching_filters(&line) { - manager.handle_line(&line, now).await; + manager.handle_line(&line, time).await; } } From 552b311ac4cb4d6873ed937c45be3aafcd7ce7d3 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 15 Nov 2025 12:00:00 +0100 Subject: [PATCH 148/241] Move shutdown module to reaction-plugin and use in cluster --- Cargo.lock | 1 + Cargo.toml | 5 ++- .../reaction-plugin-cluster/src/cluster.rs | 8 ++-- plugins/reaction-plugin-cluster/src/main.rs | 13 +++--- plugins/reaction-plugin/Cargo.toml | 3 +- plugins/reaction-plugin/src/lib.rs | 4 +- .../reaction-plugin/src}/shutdown.rs | 41 
+++++++++---------- src/concepts/action.rs | 2 +- src/concepts/filter.rs | 2 +- src/daemon/filter/mod.rs | 4 +- src/daemon/filter/tests.rs | 5 ++- src/daemon/mod.rs | 3 +- src/daemon/socket.rs | 3 +- src/daemon/stream.rs | 4 +- src/treedb/mod.rs | 9 ++-- 15 files changed, 50 insertions(+), 57 deletions(-) rename {src/daemon => plugins/reaction-plugin/src}/shutdown.rs (66%) diff --git a/Cargo.lock b/Cargo.lock index 85db0af..e37e77a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2852,6 +2852,7 @@ dependencies = [ "serde", "serde_json", "tokio", + "tokio-util", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9fd0298..bf5eec8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,7 @@ thiserror = "1.0.63" # Async runtime & helpers futures = "0.3.30" tokio = { workspace = true, features = ["full", "tracing"] } -tokio-util = { version = "0.7.12", features = ["codec"] } +tokio-util = { workspace = true, features = ["codec"] } # Async logging tracing = "0.1.40" tracing-subscriber = "0.3.18" @@ -78,9 +78,10 @@ predicates = "3.1.3" members = ["plugins/reaction-plugin", "plugins/reaction-plugin-cluster", "plugins/reaction-plugin-virtual"] [workspace.dependencies] +chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" tokio = { version = "1.40.0" } +tokio-util = { version = "0.7.12" } reaction-plugin = { path = "plugins/reaction-plugin" } -chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 6e6c7f0..45e82cb 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -6,15 +6,13 @@ use std::{ use chrono::{DateTime, Local, Utc}; use iroh::{Endpoint, EndpointAddr, endpoint::Connection}; -use reaction_plugin::Line; -use tokio::sync::{mpsc, oneshot}; +use 
reaction_plugin::{Line, shutdown::ShutdownToken}; +use tokio::sync::mpsc; use crate::{ActionInit, StreamInit, endpoint::EndpointManager}; pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; -type ShutdownNotification = oneshot::Receiver>; - type UtcLine = Arc<(String, DateTime)>; pub async fn bind(stream: &StreamInit) -> Result { @@ -43,7 +41,7 @@ pub fn cluster_tasks( endpoint: Endpoint, stream: StreamInit, actions: Vec, - shutdown: ShutdownNotification, + shutdown: ShutdownToken, ) { let messages_from_actions = spawn_actions(actions, stream.tx.clone()); diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 9802936..5f24875 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -7,7 +7,7 @@ use chrono::TimeDelta; use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; use reaction_plugin::{ ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, - main_loop, parse_duration, + main_loop, shutdown::ShutdownController, time::parse_duration, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -31,7 +31,7 @@ async fn main() { struct Plugin { streams: BTreeMap, actions: BTreeMap>, - cluster_shutdown: Vec>>, + cluster_shutdown: ShutdownController, } /// Stream options as defined by the user @@ -210,15 +210,12 @@ impl PluginInfo for Plugin { async fn finish_setup(&mut self) -> RemoteResult<()> { while let Some((stream_name, stream)) = self.streams.pop_first() { - let (tx, rx) = oneshot::channel(); - self.cluster_shutdown.push(tx); - let endpoint = bind(&stream).await?; cluster_tasks( endpoint, stream, self.actions.remove(&stream_name).unwrap_or_default(), - rx, + self.cluster_shutdown.token(), ); } // Check there is no action left @@ -240,6 +237,8 @@ impl PluginInfo for Plugin { } async fn close(self) -> RemoteResult<()> { - todo!() + self.cluster_shutdown.ask_shutdown(); + 
self.cluster_shutdown.wait_shutdown().await; + Ok(()) } } diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index ec53c7d..1c22585 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -10,4 +10,5 @@ serde.workspace = true serde_json.workspace = true tokio.workspace = true tokio.features = ["io-std"] - +tokio-util.workspace = true +tokio-util.features = ["rt"] diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 2bc436e..1c875a7 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -101,8 +101,8 @@ use serde::{Deserialize, Serialize}; use serde_json::{Number, Value as JValue}; use tokio::io::{stdin, stdout}; -mod time; -pub use time::parse_duration; +pub mod shutdown; +pub mod time; /// This is the only trait that **must** be implemented by a plugin. /// It provides lists of stream, filter and action types implemented by a dynamic plugin. diff --git a/src/daemon/shutdown.rs b/plugins/reaction-plugin/src/shutdown.rs similarity index 66% rename from src/daemon/shutdown.rs rename to plugins/reaction-plugin/src/shutdown.rs index 8ef4e43..6152e27 100644 --- a/src/daemon/shutdown.rs +++ b/plugins/reaction-plugin/src/shutdown.rs @@ -1,45 +1,42 @@ -use tokio::sync::mpsc; -use tokio_util::sync::{CancellationToken, WaitForCancellationFuture}; +use tokio_util::{ + sync::{CancellationToken, WaitForCancellationFuture}, + task::task_tracker::{TaskTracker, TaskTrackerToken}, +}; -// Thanks to this article for inspiration -// https://www.wcygan.io/post/tokio-graceful-shutdown/ -// Now TaskTracker exist, but I don't know what I'd gain for using it instead? -// https://docs.rs/tokio-util/0.7.13/tokio_util/task/task_tracker/struct.TaskTracker.html - -/// Permits to keep track of ongoing tasks and ask them to shutdown. +/// Permits to keep track of ongoing tasks, ask them to shutdown and for all of them to quit. 
+/// Stupid wrapper around [`tokio_util::CancellationToken`] and [`tokio_util::task_tracker::TaskTracker`]. +#[derive(Default)] pub struct ShutdownController { shutdown_notifyer: CancellationToken, - task_tracker: mpsc::Sender<()>, - task_waiter: mpsc::Receiver<()>, + task_tracker: TaskTracker, } impl ShutdownController { #[allow(clippy::new_without_default)] pub fn new() -> Self { - let (task_tracker, task_waiter) = mpsc::channel(1); Self { shutdown_notifyer: CancellationToken::new(), - task_tracker, - task_waiter, + task_tracker: TaskTracker::new(), } } /// Ask for all tasks to quit pub fn ask_shutdown(&self) { self.shutdown_notifyer.cancel(); + self.task_tracker.close(); } /// Wait for all tasks to quit. /// This task may return even without having called [`ShutdownController::ask_shutdown`] /// first, if all tasks quit by themselves. - pub async fn wait_shutdown(mut self) { - drop(self.task_tracker); - self.task_waiter.recv().await; + pub async fn wait_shutdown(self) { + self.task_tracker.close(); + self.task_tracker.wait().await; } /// Returns a new shutdown token, to be held by a task. pub fn token(&self) -> ShutdownToken { - ShutdownToken::new(self.shutdown_notifyer.clone(), self.task_tracker.clone()) + ShutdownToken::new(self.shutdown_notifyer.clone(), self.task_tracker.token()) } /// Returns a [`ShutdownDelegate`], which is able to ask for shutdown, @@ -62,20 +59,20 @@ impl ShutdownDelegate { /// Created by a [`ShutdownController`]. /// Serves two purposes: /// -/// - Wait for a shutdown request to happen. +/// - Wait for a shutdown request to happen with [`Self::wait`] /// - Keep track of the current task. While this token is held, -/// the [`ShutdownController::wait_shutdown`] will block. +/// [`ShutdownController::wait_shutdown`] will block. 
#[derive(Clone)] pub struct ShutdownToken { shutdown_notifyer: CancellationToken, - _task_tracker: mpsc::Sender<()>, + _task_tracker_token: TaskTrackerToken, } impl ShutdownToken { - fn new(shutdown_notifyer: CancellationToken, _task_tracker: mpsc::Sender<()>) -> Self { + fn new(shutdown_notifyer: CancellationToken, _task_tracker_token: TaskTrackerToken) -> Self { Self { shutdown_notifyer, - _task_tracker, + _task_tracker_token, } } diff --git a/src/concepts/action.rs b/src/concepts/action.rs index c2bf976..cbad05c 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -2,7 +2,7 @@ use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc}; use chrono::TimeDelta; -use reaction_plugin::parse_duration; +use reaction_plugin::time::parse_duration; use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::process::Command; diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index b1a0d7e..d3932c6 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -7,7 +7,7 @@ use std::{ }; use chrono::TimeDelta; -use reaction_plugin::parse_duration; +use reaction_plugin::time::parse_duration; use regex::Regex; use serde::{Deserialize, Serialize}; diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 2b4e01e..658f739 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -5,7 +5,7 @@ mod state; use std::{collections::BTreeMap, process::Stdio, sync::Arc}; -use reaction_plugin::ActionImpl; +use reaction_plugin::{ActionImpl, shutdown::ShutdownToken}; use regex::Regex; use tokio::sync::{Mutex, MutexGuard, Semaphore}; use tracing::{debug, error, info}; @@ -19,8 +19,6 @@ use crate::{ use state::State; -use super::shutdown::ShutdownToken; - /// Responsible for handling all runtime logic dedicated to a [`Filter`]. 
/// Notably handles incoming lines from [`super::stream::stream_manager`] /// and orders from the [`super::socket::socket_manager`] diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 0deb880..a44fb24 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -6,17 +6,18 @@ use std::{ }; use chrono::{Local, TimeDelta}; +use reaction_plugin::shutdown::ShutdownController; use serde_json::json; use tempfile::TempPath; use tokio::sync::Semaphore; use super::{ - state::{filter_ordered_times_db_name, filter_triggers_db_name}, FilterManager, React, + state::{filter_ordered_times_db_name, filter_triggers_db_name}, }; use crate::{ concepts::{Action, Duplicate, Filter, Pattern, Patterns, Time}, - daemon::{plugin::Plugins, shutdown::ShutdownController}, + daemon::plugin::Plugins, tests::TempDatabase, }; diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 1a29310..d1dc070 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -10,6 +10,7 @@ use std::{ use chrono::Local; use futures::future::join_all; +use reaction_plugin::shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; use tokio::{ select, signal::unix::{SignalKind, signal}, @@ -21,7 +22,6 @@ use crate::{concepts::Config, treedb::Database}; use filter::FilterManager; pub use filter::React; use plugin::Plugins; -pub use shutdown::{ShutdownController, ShutdownDelegate, ShutdownToken}; use socket::Socket; use stream::StreamManager; @@ -30,7 +30,6 @@ pub use filter::tests; mod filter; mod plugin; -mod shutdown; mod socket; mod stream; mod utils; diff --git a/src/daemon/socket.rs b/src/daemon/socket.rs index 124b6c5..3c677d4 100644 --- a/src/daemon/socket.rs +++ b/src/daemon/socket.rs @@ -6,6 +6,7 @@ use std::{ use chrono::Local; use futures::{SinkExt, StreamExt}; +use reaction_plugin::shutdown::ShutdownToken; use regex::Regex; use tokio::{fs, net::UnixListener}; use tokio_util::{ @@ -19,7 +20,7 @@ use crate::{ protocol::{ClientRequest, ClientStatus, DaemonResponse, 
Order}, }; -use super::{filter::FilterManager, shutdown::ShutdownToken}; +use super::filter::FilterManager; async fn open_socket(path: PathBuf) -> Result { macro_rules! err_str { diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index 25375f7..06d7fdd 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -5,7 +5,7 @@ use std::{ use chrono::Local; use futures::{FutureExt, Stream as AsyncStream, StreamExt, future::join_all}; -use reaction_plugin::StreamImpl; +use reaction_plugin::{StreamImpl, shutdown::ShutdownToken}; use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, @@ -18,8 +18,6 @@ use crate::{ daemon::{filter::FilterManager, plugin::Plugins, utils::kill_child}, }; -use super::shutdown::ShutdownToken; - /// Converts bytes to line string, discarding invalid utf8 sequences and newlines at the end fn to_line(data: &[u8]) -> String { String::from_utf8_lossy(data) diff --git a/src/treedb/mod.rs b/src/treedb/mod.rs index 173b1db..0f58361 100644 --- a/src/treedb/mod.rs +++ b/src/treedb/mod.rs @@ -18,6 +18,7 @@ use std::{ }; use chrono::{Local, TimeDelta}; +use reaction_plugin::shutdown::ShutdownToken; use serde::{Deserialize, Serialize, de::DeserializeOwned}; use serde_json::Value; use tokio::{ @@ -26,10 +27,7 @@ use tokio::{ time::{MissedTickBehavior, interval}, }; -use crate::{ - concepts::{Config, Time}, - daemon::ShutdownToken, -}; +use crate::concepts::{Config, Time}; pub mod helpers; @@ -481,11 +479,12 @@ mod tests { }; use chrono::{Local, TimeDelta}; + use reaction_plugin::shutdown::ShutdownController; use serde_json::Value; use tempfile::{NamedTempFile, TempDir}; use tokio::fs::{File, write}; - use crate::{concepts::Config, daemon::ShutdownController}; + use crate::concepts::Config; use super::{ DB_NAME, Database, DatabaseManager, Entry, KeyType, LoadedDB, Tree, ValueType, helpers::*, From 0635bae5442a6c1783884df4e00c0de98ba51314 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 15 Nov 2025 12:00:00 +0100 Subject: [PATCH 149/241] 
cluster: created ConnectionManager Reorganized code. Moved some functionnality from EndpointManager to ConnectionManager. Still a lot to do there, but few in the rest of the code. --- Cargo.lock | 1 + Cargo.toml | 3 +- plugins/reaction-plugin-cluster/Cargo.toml | 1 + .../reaction-plugin-cluster/src/cluster.rs | 131 ++++++---- .../reaction-plugin-cluster/src/connection.rs | 230 +++++++++++++++++ .../reaction-plugin-cluster/src/endpoint.rs | 235 ++++-------------- plugins/reaction-plugin-cluster/src/main.rs | 14 +- 7 files changed, 378 insertions(+), 237 deletions(-) create mode 100644 plugins/reaction-plugin-cluster/src/connection.rs diff --git a/Cargo.lock b/Cargo.lock index e37e77a..bb17bba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2861,6 +2861,7 @@ version = "0.1.0" dependencies = [ "chrono", "data-encoding", + "futures", "iroh", "rand 0.9.2", "reaction-plugin", diff --git a/Cargo.toml b/Cargo.toml index bf5eec8..822a086 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ jrsonnet-evaluator = "0.4.2" # Error macro thiserror = "1.0.63" # Async runtime & helpers -futures = "0.3.30" +futures = { workspace = true } tokio = { workspace = true, features = ["full", "tracing"] } tokio-util = { workspace = true, features = ["codec"] } # Async logging @@ -79,6 +79,7 @@ members = ["plugins/reaction-plugin", "plugins/reaction-plugin-cluster", "plugin [workspace.dependencies] chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } +futures = "0.3.30" remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.117" diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 42c3171..4ba08a5 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -7,6 +7,7 @@ edition = "2024" reaction-plugin.workspace = true chrono.workspace = true +futures.workspace = true remoc.workspace = true serde.workspace = true 
serde_json.workspace = true diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 45e82cb..7b2ed85 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -1,19 +1,21 @@ use std::{ - collections::VecDeque, + collections::BTreeMap, net::{SocketAddrV4, SocketAddrV6}, sync::Arc, }; use chrono::{DateTime, Local, Utc}; -use iroh::{Endpoint, EndpointAddr, endpoint::Connection}; +use futures::future::join_all; +use iroh::{Endpoint, PublicKey, endpoint::Connection}; use reaction_plugin::{Line, shutdown::ShutdownToken}; -use tokio::sync::mpsc; +use remoc::rch::mpsc as remocMpsc; +use tokio::sync::mpsc as tokioMpsc; -use crate::{ActionInit, StreamInit, endpoint::EndpointManager}; +use crate::{ActionInit, StreamInit, connection::ConnectionManager, endpoint::EndpointManager}; pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; -type UtcLine = Arc<(String, DateTime)>; +pub type UtcLine = Arc<(String, DateTime)>; pub async fn bind(stream: &StreamInit) -> Result { let mut builder = Endpoint::builder() @@ -39,36 +41,81 @@ pub async fn bind(stream: &StreamInit) -> Result { pub fn cluster_tasks( endpoint: Endpoint, - stream: StreamInit, - actions: Vec, + mut stream: StreamInit, + mut actions: Vec, shutdown: ShutdownToken, ) { - let messages_from_actions = spawn_actions(actions, stream.tx.clone()); + let (message_action2connection_txs, mut message_action2connection_rxs): ( + Vec>, + Vec>, + ) = (0..stream.nodes.len()) + .map(|_| tokioMpsc::channel(1)) + .unzip(); - let (endpoint_addr_tx, connection_rx) = - EndpointManager::new(endpoint, stream.name.clone(), stream.nodes.len()); - - // TODO create ConnectionManagers and connect them to EndpointManager -} - -fn spawn_actions( - mut actions: Vec, - own_cluster_tx: remoc::rch::mpsc::Sender, -) -> mpsc::Receiver { - let (nodes_tx, nodes_rx) = mpsc::channel(1); + // Spawn action tasks while let Some(mut 
action) = actions.pop() { - let nodes_tx = nodes_tx.clone(); - let own_cluster_tx = own_cluster_tx.clone(); - tokio::spawn(async move { action.serve(nodes_tx, own_cluster_tx).await }); + let message_action2connection_txs = message_action2connection_txs.clone(); + let own_cluster_tx = stream.tx.clone(); + tokio::spawn(async move { + action + .serve(message_action2connection_txs, own_cluster_tx) + .await + }); + } + + let endpoint = Arc::new(endpoint); + + let (connection_endpoint2connection_txs, mut connection_endpoint2connection_rxs): ( + BTreeMap>, + Vec<(PublicKey, tokioMpsc::Receiver)>, + ) = stream + .nodes + .keys() + .map(|pk| { + let (tx, rx) = tokioMpsc::channel(1); + ((pk.clone(), tx), (pk.clone(), rx)) + }) + .unzip(); + + // Spawn connection accepter + EndpointManager::new( + endpoint.clone(), + stream.name.clone(), + connection_endpoint2connection_txs, + shutdown.clone(), + ); + + // Spawn connection managers + while let Some((pk, connection_endpoint2connection_rx)) = + connection_endpoint2connection_rxs.pop() + { + let cluster_name = stream.name.clone(); + let endpoint_addr = stream.nodes.remove(&pk).unwrap(); + let endpoint = endpoint.clone(); + let message_action2connection_rx = message_action2connection_rxs.pop().unwrap(); + let stream_tx = stream.tx.clone(); + tokio::spawn(async move { + ConnectionManager::new( + cluster_name, + endpoint_addr, + endpoint, + connection_endpoint2connection_rx, + stream.message_timeout, + message_action2connection_rx, + stream_tx, + ) + .task() + .await + }); } - nodes_rx } impl ActionInit { + // Receive messages from its reaction action and dispatch them to all connections and to the reaction stream async fn serve( &mut self, - nodes_tx: mpsc::Sender, - own_stream_tx: remoc::rch::mpsc::Sender, + nodes_tx: Vec>, + own_stream_tx: remocMpsc::Sender, ) { while let Ok(Some(m)) = self.rx.recv().await { let line = if m.match_.is_empty() { @@ -88,9 +135,11 @@ impl ActionInit { } let line = Arc::new((line, now.to_utc())); - 
if let Err(err) = nodes_tx.send(line).await { - eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}"); - }; + for result in join_all(nodes_tx.iter().map(|tx| tx.send(line.clone()))).await { + if let Err(err) = result { + eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}"); + }; + } if let Err(err) = m.result.send(Ok(())) { eprintln!("ERROR while responding to reaction action: {err}"); @@ -99,30 +148,18 @@ impl ActionInit { } } -pub struct ConnectionManager { - endpoint: EndpointAddr, - // Ask the EndpointManager to connect - ask_connection: mpsc::Sender, - // Our own connection (when we have one) - connection: Option, - // The EndpointManager sending us a connection (whether we asked for it or not) - connection_rx: mpsc::Receiver, - // Our queue of messages to send - queue: VecDeque, - // Messages we send from remote nodes to our own stream - own_cluster_tx: remoc::rch::mpsc::Sender, -} - #[cfg(test)] mod tests { use chrono::{DateTime, Local}; + // As long as nodes communicate with UTC datetimes, them having different local timezones is not an issue! 
+ #[test] fn different_local_tz_is_ok() { - let date1: DateTime = - serde_json::from_str("2025-11-02T17:47:21.716229569+01:00").unwrap(); - let date2: DateTime = - serde_json::from_str("2025-11-02T18:47:21.716229569+02:00").unwrap(); + let dates: Vec> = serde_json::from_str( + "[\"2025-11-02T17:47:21.716229569+01:00\",\"2025-11-02T18:47:21.716229569+02:00\"]", + ) + .unwrap(); - assert_eq!(date1.to_utc(), date2.to_utc()); + assert_eq!(dates[0].to_utc(), dates[1].to_utc()); } } diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs new file mode 100644 index 0000000..77066f4 --- /dev/null +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -0,0 +1,230 @@ +use std::{collections::VecDeque, sync::Arc, time::Duration}; + +use chrono::TimeDelta; +use iroh::{Endpoint, EndpointAddr, endpoint::Connection}; +use reaction_plugin::Line; +use remoc::{Connect, rch::base}; +use serde::{Deserialize, Serialize}; +use tokio::{ + sync::mpsc, + time::{Sleep, sleep}, +}; + +use crate::cluster::{ALPN, UtcLine}; + +const START_TIMEOUT: Duration = Duration::from_secs(5); +const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour +const TIMEOUT_FACTOR: f64 = 1.5; + +const PROTOCOL_VERSION: u32 = 1; + +enum Event { + Tick, + LocalMessageReceived(UtcLine), + RemoteMessageReceived(RemoteMessage), + ConnectionReceived(Connection), +} + +struct OwnConnection { + connection: Connection, + tx: base::Sender, + rx: base::Receiver, +} + +pub struct ConnectionManager { + /// Cluster's name (for logging) + cluster_name: String, + + /// The remote node we're communicating with + remote: EndpointAddr, + /// Endpoint + endpoint: Arc, + + /// The EndpointManager sending us a connection (whether we asked for it or not) + connection_rx: mpsc::Receiver, + /// Our own connection (when we have one) + connection: Option, + + /// Delta we'll use next time we'll try to connect to remote + delta: Duration, + /// When this Future 
resolves, we'll retry connecting to remote + tick: Option, + + /// Max duration before we drop pending messages to a node we can't connect to. + message_timeout: TimeDelta, + /// Message we receive from actions + message_rx: mpsc::Receiver, + /// Our queue of messages to send + message_queue: VecDeque, + + /// Messages we send from remote nodes to our own stream + own_cluster_tx: remoc::rch::mpsc::Sender, +} + +impl ConnectionManager { + pub fn new( + cluster_name: String, + remote: EndpointAddr, + endpoint: Arc, + connection_rx: mpsc::Receiver, + message_timeout: TimeDelta, + message_rx: mpsc::Receiver, + own_cluster_tx: remoc::rch::mpsc::Sender, + ) -> Self { + Self { + cluster_name, + remote, + endpoint, + connection: None, + delta: Duration::default(), + tick: None, + connection_rx, + message_timeout, + message_rx, + message_queue: VecDeque::default(), + own_cluster_tx, + } + } + + pub async fn task(mut self) { + self.try_connect().await; + + loop { + // TODO event + let event = Event::Tick; + + self.handle_event(event).await; + } + } + + /// Main loop + async fn handle_event(&mut self, event: Event) { + match event { + Event::Tick => { + // TODO + self.try_connect().await; + } + Event::ConnectionReceived(connection) => { + // TODO + } + Event::LocalMessageReceived(utc_line) => { + // TODO + } + Event::RemoteMessageReceived(remote_message) => { + // TODO + } + } + } + + /// Try connecting to a remote endpoint + /// Returns true if we have a valid connection now + async fn try_connect(&mut self) -> bool { + if self.connection.is_none() { + match self.endpoint.connect(self.remote.clone(), ALPN[0]).await { + Ok(connection) => self.handle_connection(connection).await, + Err(err) => { + self.try_connect_error(err.to_string()); + false + } + } + } else { + true + } + } + + /// Bootstrap a new Connection + /// Returns true if we have a valid connection now + async fn handle_connection(&mut self, connection: Connection) -> bool { + self.delta = Duration::default(); + 
self.tick = None; + + match open_channels(&connection).await { + Ok((tx, rx)) => { + self.connection = Some(OwnConnection { connection, tx, rx }); + true + } + Err(err) => { + self.try_connect_error(err); + false + } + } + } + + /// Update the state and log an error when bootstraping a new Connection + async fn try_connect_error(&mut self, err: String) { + self.delta = next_delta(self.delta); + self.tick = Some(sleep(self.delta)); + eprintln!( + "ERROR cluster {}: node {}: {err}", + self.cluster_name, self.remote.id + ); + eprintln!( + "INFO cluster {}: retry connecting to node {} in {:?}", + self.cluster_name, self.remote.id, self.delta + ); + } +} + +/// Compute the next wait Duration. +/// We're multiplying the Duration by [`TIMEOUT_FACTOR`] and cap it to [`MAX_TIMEOUT`]. +fn next_delta(delta: Duration) -> Duration { + // Multiply timeout by TIMEOUT_FACTOR + let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); + // Cap to MAX_TIMEOUT + if delta > MAX_TIMEOUT { + MAX_TIMEOUT + } else { + delta + } +} + +/// All possible communication messages +/// Set as an enum for forward compatibility +#[derive(Serialize, Deserialize)] +pub enum RemoteMessage { + /// Must be the first message sent over, then should not be sent again + Version(u32), + /// A line to transmit to your stream + Line(UtcLine), + /// Announce the node is closing + Quitting, +} + +/// Open accept one stream and create one stream. +/// This way, there is no need to know if we created or accepted the connection. 
+async fn open_channels( + connection: &Connection, +) -> Result<(base::Sender, base::Receiver), String> { + let output = connection + .open_uni() + .await + .map_err(|err| format!("{err}"))?; + + let input = connection + .accept_uni() + .await + .map_err(|err| format!("{err}"))?; + + let (conn, mut tx, mut rx) = Connect::io_buffered(remoc::Cfg::default(), input, output, 1024) + .await + .map_err(|err| format!("{err}"))?; + + tokio::spawn(conn); + + tx.send(RemoteMessage::Version(PROTOCOL_VERSION)).await; + + match rx.recv().await { + // Good protocol version! + Ok(Some(RemoteMessage::Version(PROTOCOL_VERSION))) => Ok((tx, rx)), + // Errors + Ok(Some(RemoteMessage::Version(other))) => Err(format!( + "incompatible version: {other}. We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version." + )), + Ok(Some(RemoteMessage::Line(_))) => Err(format!( + "incorrect protocol message: remote did not send its protocol version." + )), + Ok(Some(RemoteMessage::Quitting)) => Err("remote unexpectedly quit".into()), + Ok(None) => Err("remote unexpectedly closed its channel".into()), + Err(err) => Err(format!("could not receive message: {err}")), + } +} diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs index 851166c..2fb8a55 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -1,26 +1,12 @@ use std::collections::BTreeMap; -use std::time::Duration; +use std::sync::Arc; -use iroh::endpoint::Incoming; -use iroh::{Endpoint, PublicKey}; -use iroh::{EndpointAddr, endpoint::Connection}; -use tokio::{ - sync::mpsc, - time::{Instant, sleep, sleep_until}, +use iroh::{ + Endpoint, PublicKey, + endpoint::{Connection, Incoming}, }; - -use crate::cluster::ALPN; - -const START_TIMEOUT: Duration = Duration::from_secs(5); -const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour -const TIMEOUT_FACTOR: f64 = 1.5; - -enum Event { - 
TryConnect(EndpointAddr), - Quit, - Tick, - Incoming(Option), -} +use reaction_plugin::shutdown::ShutdownToken; +use tokio::sync::mpsc; enum Break { Yes, @@ -29,176 +15,75 @@ enum Break { pub struct EndpointManager { /// The [`iroh::Endpoint`] to manage - endpoint: Endpoint, + endpoint: Arc, /// Cluster's name (for logging) cluster_name: String, - /// Map of remote Endpoints to try to connect to - retry_connections: BTreeMap, - /// Set of PublicKeys we're trying to connect to - all_connections: BTreeMap, - /// Connection requests from the [`crate::Cluster`] - endpoint_addr_rx: mpsc::Receiver, - /// Connection sender to the [`crate::Cluster`] - connection_tx: mpsc::Sender, + /// Connection sender to the Connection Managers + connections_tx: BTreeMap>, + /// shutdown + shutdown: ShutdownToken, } impl EndpointManager { pub fn new( - endpoint: Endpoint, + endpoint: Arc, cluster_name: String, - cluster_size: usize, - ) -> (mpsc::Sender, mpsc::Receiver) { - let (tx1, rx1) = mpsc::channel(cluster_size); - let (tx2, rx2) = mpsc::channel(cluster_size); - + connections_tx: BTreeMap>, + shutdown: ShutdownToken, + ) { tokio::spawn(async move { Self { endpoint, cluster_name, - retry_connections: Default::default(), - all_connections: Default::default(), - endpoint_addr_rx: rx1, - connection_tx: tx2, + connections_tx, + shutdown, } .task() .await }); - - (tx1, rx2) } async fn task(&mut self) { - let mut tick = sleep(Duration::default()); - loop { // Uncomment this line and comment the select! for faster development in this function // let event = Event::TryConnect(self.endpoint_addr_rx.recv().await); - let event = tokio::select! { - received = self.endpoint_addr_rx.recv() => { - match received { - Some(endpoint_addr) => Event::TryConnect(endpoint_addr), - None => Event::Quit, - } - } - incoming = self.endpoint.accept() => Event::Incoming(incoming), - _ = tick => Event::Tick, + let incoming = tokio::select! 
{ + incoming = self.endpoint.accept() => incoming, + _ = self.shutdown.wait() => None, }; - if let Break::Yes = self.handle_event(event).await { - break; + match incoming { + Some(incoming) => { + if let Break::Yes = self.handle_incoming(incoming).await { + break; + } + } + None => break, } - - // Tick at next deadline - tick = sleep_until( - self.retry_connections - .keys() - .next() - .map(ToOwned::to_owned) - .unwrap_or_else(|| Instant::now() + MAX_TIMEOUT), - ); } self.endpoint.close().await } - async fn handle_event(&mut self, event: Event) -> Break { - match event { - Event::Quit => return Break::Yes, - - Event::TryConnect(endpoint_addr) => match self.try_connect(endpoint_addr).await { - Ok(connection) => return self.check_and_send_connection(connection).await, - Err(endpoint_addr) => { - self.insert_endpoint(endpoint_addr, START_TIMEOUT); - } - }, - - Event::Tick => { - if let Some((endpoint_addr, delta)) = self.pop_next_endpoint() { - match self.try_connect(endpoint_addr).await { - Ok(connection) => { - return self.check_and_send_connection(connection).await; - } - Err(endpoint_addr) => { - let delta = next_delta(delta); - self.insert_endpoint(endpoint_addr, delta); - } - } - } - } - - Event::Incoming(incoming) => { - // FIXME a malicious actor could maybe prevent a node from connecting to - // its cluster by sending lots of invalid slow connection requests? 
- // We could lower its priority https://docs.rs/tokio/latest/tokio/macro.select.html#fairness - // And/or moving the handshake to another task - if let Some(incoming) = incoming { - let remote_address = incoming.remote_address(); - let remote_address_validated = incoming.remote_address_validated(); - match incoming.await { - Ok(connection) => { - return self.check_and_send_connection(connection).await; - } - Err(err) => { - if remote_address_validated { - eprintln!("INFO refused connection from {}: {err}", remote_address) - } else { - eprintln!("INFO refused connection: {err}") - } - } - } - } - } - } - Break::No - } - - /// Schedule an endpoint to try to connect to later - fn insert_endpoint(&mut self, endpoint_addr: EndpointAddr, delta: Duration) { - if !delta.is_zero() { - eprintln!( - "INFO cluster {}: retry connecting to node {} in {:?}", - self.cluster_name, endpoint_addr.id, delta - ); - } - let next = Instant::now() + delta; - // Schedule this address for later - self.all_connections.insert(endpoint_addr.id, next); - self.retry_connections.insert(next, (endpoint_addr, delta)); - } - - /// Returns the next endpoint we should try to connect to - fn pop_next_endpoint(&mut self) -> Option<(EndpointAddr, Duration)> { - if self - .retry_connections - .keys() - .next() - .is_some_and(|time| time < &Instant::now()) - { - let (_, tuple) = self.retry_connections.pop_first().unwrap(); - self.all_connections.remove(&tuple.0.id); - Some(tuple) - } else { - None - } - } - - /// Try connecting to a remote endpoint - async fn try_connect(&self, addr: EndpointAddr) -> Result { - match self.endpoint.connect(addr.clone(), ALPN[0]).await { - Ok(connection) => Ok(connection), + async fn handle_incoming(&mut self, incoming: Incoming) -> Break { + // FIXME a malicious actor could maybe prevent a node from connecting to + // its cluster by sending lots of invalid slow connection requests? 
+ // We could lower its priority https://docs.rs/tokio/latest/tokio/macro.select.html#fairness + // And/or moving the handshake to another task + let remote_address = incoming.remote_address(); + let remote_address_validated = incoming.remote_address_validated(); + let connection = match incoming.await { + Ok(connection) => connection, Err(err) => { - eprintln!( - "ERROR cluster {}: node {}: {err}", - self.cluster_name, addr.id - ); - Err(addr) + if remote_address_validated { + eprintln!("INFO refused connection from {}: {err}", remote_address) + } else { + eprintln!("INFO refused connection: {err}") + } + return Break::No; } - } - } + }; - /// Check that an incoming connection is an endpoint we're trying to connect, - /// and send it to the [`Cluster`] - async fn check_and_send_connection(&mut self, connection: Connection) -> Break { let remote_id = match connection.remote_id() { Ok(id) => id, Err(err) => { @@ -210,44 +95,30 @@ impl EndpointManager { } }; - match self.all_connections.remove(&remote_id) { + match self.connections_tx.get(&remote_id) { None => { eprintln!( - "WARN cluster {}: new peer's id '{remote_id}' is not in our list, refusing incoming connection.", - self.cluster_name + "WARN cluster {}: incoming connection from node '{remote_id}', ip: {} is not in our list, refusing incoming connection.", + self.cluster_name, remote_address ); eprintln!( "INFO cluster {}: {}, {}", self.cluster_name, - "maybe we're already connected to it, maybe it's not from our cluster", - "maybe it is new and it has not been configured yet on this node" + "maybe it's not from our cluster,", + "maybe this node's configuration has not yet been updated to add this new node." 
); return Break::No; } - Some(time) => { - self.retry_connections.remove(&time); + Some(tx) => { + if let Err(_) = tx.send(connection).await { + // This means the main cluster loop has exited, so let's quit + return Break::Yes; + } } } // TODO persist the incoming address, so that we don't forget this address - if let Err(_) = self.connection_tx.send(connection).await { - // This means the main cluster loop has exited, so let's quit - return Break::Yes; - } Break::No } } - -/// Compute the next wait Duration. -/// We're multiplying the Duration by [`TIMEOUT_FACTOR`] and cap it to [`MAX_TIMEOUT`]. -fn next_delta(delta: Duration) -> Duration { - // Multiply timeout by TIMEOUT_FACTOR - let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); - // Cap to MAX_TIMEOUT - if delta > MAX_TIMEOUT { - MAX_TIMEOUT - } else { - delta - } -} diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 5f24875..8fd46a3 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -11,9 +11,9 @@ use reaction_plugin::{ }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; -use tokio::sync::oneshot; mod cluster; +mod connection; mod endpoint; mod secret_key; @@ -60,6 +60,12 @@ fn ipv6_unspecified() -> Option { Some(Ipv6Addr::UNSPECIFIED) } +#[derive(Serialize, Deserialize)] +struct NodeOption { + public_key: String, + addresses: Vec, +} + /// Stream information before start struct StreamInit { name: String, @@ -72,12 +78,6 @@ struct StreamInit { tx: mpsc::Sender, } -#[derive(Serialize, Deserialize)] -struct NodeOption { - public_key: String, - addresses: Vec, -} - #[derive(Serialize, Deserialize)] struct ActionOptions { /// The line to send to the corresponding cluster, example: "ban \" From 2216edfba0a8a00ebb72efa5703611213b558ae0 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 20 Nov 2025 12:00:00 +0100 Subject: [PATCH 150/241] shutdown: 
permit ShutdownController to be cloned When multiple tasks can ask to quit --- plugins/reaction-plugin/src/shutdown.rs | 9 +++++++-- src/daemon/mod.rs | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/plugins/reaction-plugin/src/shutdown.rs b/plugins/reaction-plugin/src/shutdown.rs index 6152e27..864d5c0 100644 --- a/plugins/reaction-plugin/src/shutdown.rs +++ b/plugins/reaction-plugin/src/shutdown.rs @@ -5,7 +5,7 @@ use tokio_util::{ /// Permits to keep track of ongoing tasks, ask them to shutdown and for all of them to quit. /// Stupid wrapper around [`tokio_util::CancellationToken`] and [`tokio_util::task_tracker::TaskTracker`]. -#[derive(Default)] +#[derive(Default, Clone)] pub struct ShutdownController { shutdown_notifyer: CancellationToken, task_tracker: TaskTracker, @@ -29,7 +29,7 @@ impl ShutdownController { /// Wait for all tasks to quit. /// This task may return even without having called [`ShutdownController::ask_shutdown`] /// first, if all tasks quit by themselves. - pub async fn wait_shutdown(self) { + pub async fn wait_all_task_shutdown(self) { self.task_tracker.close(); self.task_tracker.wait().await; } @@ -44,6 +44,11 @@ impl ShutdownController { pub fn delegate(&self) -> ShutdownDelegate { ShutdownDelegate(self.shutdown_notifyer.clone()) } + + /// Returns a future that will resolve only when a shutdown request happened. + pub fn wait(&self) -> WaitForCancellationFuture<'_> { + self.shutdown_notifyer.cancelled() + } } /// Permits to ask for shutdown, without counting as a task that needs to be awaited. 
diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index d1dc070..895e0b4 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -82,7 +82,7 @@ pub async fn daemon(config_path: PathBuf, socket: PathBuf) -> i32 { shutdown.ask_shutdown(); debug!("Waiting for all tasks to quit..."); - shutdown.wait_shutdown().await; + shutdown.wait_all_task_shutdown().await; let mut stop_ok = true; if config_started { From 43fdd3a877377c978762f8fbda634d2839b6b6e7 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 20 Nov 2025 12:00:00 +0100 Subject: [PATCH 151/241] cluster: finish first draft finish ConnectionManager main loop handle local & remote messages, maintain local queue --- TODO | 1 - .../reaction-plugin-cluster/src/cluster.rs | 6 +- .../reaction-plugin-cluster/src/connection.rs | 340 ++++++++++++++---- .../reaction-plugin-cluster/src/endpoint.rs | 12 +- plugins/reaction-plugin-cluster/src/main.rs | 4 +- 5 files changed, 289 insertions(+), 74 deletions(-) diff --git a/TODO b/TODO index 9c09bed..557559e 100644 --- a/TODO +++ b/TODO @@ -2,4 +2,3 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) plugins: pipe stderr too and wrap errors in logs plugins: provide treedb storage? omg (add an enum that's either remoc::rch::mpsc or tokio::mpsc) -plugin cluster: provide a stream of refused connections? 
diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 7b2ed85..f29c47b 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -7,7 +7,7 @@ use std::{ use chrono::{DateTime, Local, Utc}; use futures::future::join_all; use iroh::{Endpoint, PublicKey, endpoint::Connection}; -use reaction_plugin::{Line, shutdown::ShutdownToken}; +use reaction_plugin::{Line, shutdown::ShutdownController}; use remoc::rch::mpsc as remocMpsc; use tokio::sync::mpsc as tokioMpsc; @@ -43,7 +43,7 @@ pub fn cluster_tasks( endpoint: Endpoint, mut stream: StreamInit, mut actions: Vec, - shutdown: ShutdownToken, + shutdown: ShutdownController, ) { let (message_action2connection_txs, mut message_action2connection_rxs): ( Vec>, @@ -94,6 +94,7 @@ pub fn cluster_tasks( let endpoint = endpoint.clone(); let message_action2connection_rx = message_action2connection_rxs.pop().unwrap(); let stream_tx = stream.tx.clone(); + let shutdown = shutdown.clone(); tokio::spawn(async move { ConnectionManager::new( cluster_name, @@ -103,6 +104,7 @@ pub fn cluster_tasks( stream.message_timeout, message_action2connection_rx, stream_tx, + shutdown, ) .task() .await diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index 77066f4..02d792a 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -1,14 +1,18 @@ -use std::{collections::VecDeque, sync::Arc, time::Duration}; +use std::{ + collections::VecDeque, + sync::Arc, + time::{Duration, Instant}, +}; -use chrono::TimeDelta; -use iroh::{Endpoint, EndpointAddr, endpoint::Connection}; -use reaction_plugin::Line; +use chrono::{DateTime, Local, TimeDelta, Utc}; +use iroh::{ + Endpoint, EndpointAddr, + endpoint::{Connection, VarInt}, +}; +use reaction_plugin::{Line, shutdown::ShutdownController}; use remoc::{Connect, rch::base}; 
use serde::{Deserialize, Serialize}; -use tokio::{ - sync::mpsc, - time::{Sleep, sleep}, -}; +use tokio::{sync::mpsc, time::sleep}; use crate::cluster::{ALPN, UtcLine}; @@ -20,9 +24,14 @@ const PROTOCOL_VERSION: u32 = 1; enum Event { Tick, - LocalMessageReceived(UtcLine), - RemoteMessageReceived(RemoteMessage), - ConnectionReceived(Connection), + LocalMessageReceived(Option), + RemoteMessageReceived(Result, remoc::rch::base::RecvError>), + ConnectionReceived(Option), +} + +enum Insert { + Front, + Back, } struct OwnConnection { @@ -31,6 +40,9 @@ struct OwnConnection { rx: base::Receiver, } +/// Handle a remote node. +/// Manage reception and sending of messages to this node. +/// Retry failed connections. pub struct ConnectionManager { /// Cluster's name (for logging) cluster_name: String, @@ -46,9 +58,9 @@ pub struct ConnectionManager { connection: Option, /// Delta we'll use next time we'll try to connect to remote - delta: Duration, - /// When this Future resolves, we'll retry connecting to remote - tick: Option, + delta: Option, + /// Next instant we'll try to connect + next_try_connect: Option, /// Max duration before we drop pending messages to a node we can't connect to. 
message_timeout: TimeDelta, @@ -59,6 +71,9 @@ pub struct ConnectionManager { /// Messages we send from remote nodes to our own stream own_cluster_tx: remoc::rch::mpsc::Sender, + + /// shutdown + shutdown: ShutdownController, } impl ConnectionManager { @@ -70,48 +85,77 @@ impl ConnectionManager { message_timeout: TimeDelta, message_rx: mpsc::Receiver, own_cluster_tx: remoc::rch::mpsc::Sender, + shutdown: ShutdownController, ) -> Self { Self { cluster_name, remote, endpoint, connection: None, - delta: Duration::default(), - tick: None, + delta: None, + next_try_connect: None, connection_rx, message_timeout, message_rx, message_queue: VecDeque::default(), own_cluster_tx, - } - } - - pub async fn task(mut self) { - self.try_connect().await; - - loop { - // TODO event - let event = Event::Tick; - - self.handle_event(event).await; + shutdown, } } /// Main loop + pub async fn task(mut self) { + self.try_connect().await; + + loop { + let tick = sleep(if self.connection.is_none() { + self.delta.unwrap_or(START_TIMEOUT) + } else { + // Still tick when we have a connection + Duration::from_secs(60) + }); + tokio::pin!(tick); + + let have_connection = self.connection.is_some(); + let maybe_conn_rx = self.connection.as_mut().map(|conn| conn.rx.recv()); + + let event = tokio::select! 
{ + // Tick when we don't have a connection + _ = tick, if !have_connection => Some(Event::Tick), + // Receive remote message when we have a connection + msg = maybe_conn_rx.unwrap(), if have_connection => Some(Event::RemoteMessageReceived(msg)), + // Receive a connection from EndpointManager + conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)), + // Receive a message from local Actions + msg = self.message_rx.recv() => Some(Event::LocalMessageReceived(msg)), + // Quitting + _ = self.shutdown.wait() => None, + }; + + match event { + Some(event) => { + self.handle_event(event).await; + self.send_queue_messages().await; + self.drop_timeout_messages().await; + } + None => break, + } + } + } + async fn handle_event(&mut self, event: Event) { match event { Event::Tick => { - // TODO - self.try_connect().await; + self.handle_tick().await; } Event::ConnectionReceived(connection) => { - // TODO + self.handle_connection(connection).await; } Event::LocalMessageReceived(utc_line) => { - // TODO + self.handle_local_message(utc_line, Insert::Back).await; } - Event::RemoteMessageReceived(remote_message) => { - // TODO + Event::RemoteMessageReceived(message) => { + self.handle_remote_message(message).await; } } } @@ -121,7 +165,7 @@ impl ConnectionManager { async fn try_connect(&mut self) -> bool { if self.connection.is_none() { match self.endpoint.connect(self.remote.clone(), ALPN[0]).await { - Ok(connection) => self.handle_connection(connection).await, + Ok(connection) => self.handle_connection(Some(connection)).await, Err(err) => { self.try_connect_error(err.to_string()); false @@ -132,30 +176,88 @@ impl ConnectionManager { } } + async fn handle_tick(&mut self) { + if self + .next_try_connect + .is_some_and(|next| next > Instant::now()) + { + self.try_connect().await; + } + } + + async fn send_queue_messages(&mut self) { + while self.connection.is_some() + && let Some(message) = self.message_queue.pop_front() + { + 
self.handle_local_message(Some(message), Insert::Front) + .await; + } + } + + async fn drop_timeout_messages(&mut self) { + let now = Utc::now(); + let mut count = 0; + while self + .message_queue + .front() + .is_some_and(|element| element.1 + self.message_timeout < now) + { + self.message_queue.pop_front(); + count += 1; + } + if count > 0 { + eprintln!( + "DEBUG cluster {}: node {}: dropping {count} messages that reached timeout", + self.cluster_name, self.remote.id, + ) + } + } + /// Bootstrap a new Connection /// Returns true if we have a valid connection now - async fn handle_connection(&mut self, connection: Connection) -> bool { - self.delta = Duration::default(); - self.tick = None; - - match open_channels(&connection).await { - Ok((tx, rx)) => { - self.connection = Some(OwnConnection { connection, tx, rx }); - true - } - Err(err) => { - self.try_connect_error(err); + async fn handle_connection(&mut self, connection: Option) -> bool { + match connection { + None => { + eprintln!( + "DEBUG cluster {}: ConnectionManager {}: quitting because EndpointManager has quit", + self.cluster_name, self.remote.id + ); + self.quit(); false } + Some(connection) => { + if self.connection.is_none() { + self.delta = None; + self.next_try_connect = None; + + match open_channels(&connection).await { + Ok((tx, rx)) => { + self.connection = Some(OwnConnection { connection, tx, rx }); + true + } + Err(err) => { + self.try_connect_error(err); + false + } + } + } else { + eprintln!( + "WARN cluster {}: ignoring incoming connection from {}, as we already have a valid connection with it", + self.cluster_name, self.remote.id + ); + true + } + } } } /// Update the state and log an error when bootstraping a new Connection - async fn try_connect_error(&mut self, err: String) { - self.delta = next_delta(self.delta); - self.tick = Some(sleep(self.delta)); + fn try_connect_error(&mut self, err: String) { + let delta = next_delta(self.delta); + self.next_try_connect = Some(Instant::now() + 
delta); + self.delta = Some(delta); eprintln!( - "ERROR cluster {}: node {}: {err}", + "ERROR cluster {}: trying to connect to node {}: {err}", self.cluster_name, self.remote.id ); eprintln!( @@ -163,18 +265,127 @@ impl ConnectionManager { self.cluster_name, self.remote.id, self.delta ); } + + async fn handle_remote_message( + &mut self, + message: Result, remoc::rch::base::RecvError>, + ) { + match message { + Err(err) => { + eprintln!( + "WARN cluster {}: error receiving message from node {}: {err}", + self.cluster_name, self.remote.id + ); + self.close_connection(1, b"error receiving from your stream") + .await; + } + Ok(None) => { + eprintln!( + "WARN cluster {}: node {} closed its stream", + self.cluster_name, self.remote.id + ); + self.close_connection(1, b"you closed your stream").await; + } + Ok(Some(RemoteMessage::Version(_))) => { + eprintln!( + "WARN cluster {}: node {} sent invalid message, ignoring", + self.cluster_name, self.remote.id + ); + } + Ok(Some(RemoteMessage::Quitting)) => { + eprintln!( + "INFO cluster {}: node {} is quitting, bye bye", + self.cluster_name, self.remote.id + ); + self.close_connection(0, b"you said you'll quit so I quit") + .await; + } + Ok(Some(RemoteMessage::Line(line))) => { + let local_time = line.1.with_timezone(&Local); + if let Err(err) = self.own_cluster_tx.send((line.0.clone(), local_time)).await { + eprintln!( + "ERROR cluster {}: could not send message to reaction stream: {err}", + self.cluster_name + ); + eprintln!( + "INFO cluster {}: line that can't be sent: {}", + self.cluster_name, line.0 + ); + self.quit(); + } + } + } + } + + async fn handle_local_message(&mut self, message: Option, insert: Insert) { + match message { + None => { + eprintln!( + "INFO cluster {}: no action remaining, quitting", + self.cluster_name + ); + self.quit(); + } + Some(message) => match &mut self.connection { + Some(connection) => { + if let Err(err) = connection + .tx + .send(RemoteMessage::Line(( + message.0.clone(), + 
message.1.with_timezone(&Utc), + ))) + .await + { + eprintln!( + "INFO cluster {}: connection with node {} failed: {err}", + self.cluster_name, self.remote.id + ); + self.message_queue.push_back(message); + self.close_connection( + 0, + b"could not send a message to your channel so I quit", + ) + .await; + } + } + None => { + if let Insert::Front = insert { + self.message_queue.push_front(message); + } else { + self.message_queue.push_back(message); + } + } + }, + } + } + + async fn close_connection(&mut self, code: u32, reason: &[u8]) { + if let Some(mut connection) = self.connection.take() { + connection.rx.close().await; + connection.connection.close(VarInt::from_u32(code), reason); + } + } + + fn quit(&mut self) { + self.shutdown.ask_shutdown(); + } } /// Compute the next wait Duration. /// We're multiplying the Duration by [`TIMEOUT_FACTOR`] and cap it to [`MAX_TIMEOUT`]. -fn next_delta(delta: Duration) -> Duration { - // Multiply timeout by TIMEOUT_FACTOR - let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); - // Cap to MAX_TIMEOUT - if delta > MAX_TIMEOUT { - MAX_TIMEOUT - } else { - delta +fn next_delta(delta: Option) -> Duration { + match delta { + None => START_TIMEOUT, + Some(delta) => { + // Multiply timeout by TIMEOUT_FACTOR + let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); + // Cap to MAX_TIMEOUT + if delta > MAX_TIMEOUT { + MAX_TIMEOUT + } else { + delta + } + } } } @@ -185,7 +396,7 @@ pub enum RemoteMessage { /// Must be the first message sent over, then should not be sent again Version(u32), /// A line to transmit to your stream - Line(UtcLine), + Line((String, DateTime)), /// Announce the node is closing Quitting, } @@ -195,23 +406,22 @@ pub enum RemoteMessage { async fn open_channels( connection: &Connection, ) -> Result<(base::Sender, base::Receiver), String> { - let output = connection - .open_uni() - .await - .map_err(|err| format!("{err}"))?; + let output = 
connection.open_uni().await.map_err(|err| err.to_string())?; let input = connection .accept_uni() .await - .map_err(|err| format!("{err}"))?; + .map_err(|err| err.to_string())?; let (conn, mut tx, mut rx) = Connect::io_buffered(remoc::Cfg::default(), input, output, 1024) .await - .map_err(|err| format!("{err}"))?; + .map_err(|err| err.to_string())?; tokio::spawn(conn); - tx.send(RemoteMessage::Version(PROTOCOL_VERSION)).await; + tx.send(RemoteMessage::Version(PROTOCOL_VERSION)) + .await + .map_err(|err| err.to_string())?; match rx.recv().await { // Good protocol version! diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs index 2fb8a55..77a7035 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -5,7 +5,7 @@ use iroh::{ Endpoint, PublicKey, endpoint::{Connection, Incoming}, }; -use reaction_plugin::shutdown::ShutdownToken; +use reaction_plugin::shutdown::ShutdownController; use tokio::sync::mpsc; enum Break { @@ -21,7 +21,7 @@ pub struct EndpointManager { /// Connection sender to the Connection Managers connections_tx: BTreeMap>, /// shutdown - shutdown: ShutdownToken, + shutdown: ShutdownController, } impl EndpointManager { @@ -29,7 +29,7 @@ impl EndpointManager { endpoint: Arc, cluster_name: String, connections_tx: BTreeMap>, - shutdown: ShutdownToken, + shutdown: ShutdownController, ) { tokio::spawn(async move { Self { @@ -111,7 +111,11 @@ impl EndpointManager { } Some(tx) => { if let Err(_) = tx.send(connection).await { - // This means the main cluster loop has exited, so let's quit + eprintln!( + "DEBUG cluster {}: EndpointManager: quitting because ConnectionManager has quit", + self.cluster_name, + ); + self.shutdown.ask_shutdown(); return Break::Yes; } } diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index 8fd46a3..b27e335 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs 
+++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -215,7 +215,7 @@ impl PluginInfo for Plugin { endpoint, stream, self.actions.remove(&stream_name).unwrap_or_default(), - self.cluster_shutdown.token(), + self.cluster_shutdown.clone(), ); } // Check there is no action left @@ -238,7 +238,7 @@ impl PluginInfo for Plugin { async fn close(self) -> RemoteResult<()> { self.cluster_shutdown.ask_shutdown(); - self.cluster_shutdown.wait_shutdown().await; + self.cluster_shutdown.wait_all_task_shutdown().await; Ok(()) } } From a5d31f6c1af46b265fb1c17da67a7baca804bcd0 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 22 Nov 2025 12:00:00 +0100 Subject: [PATCH 152/241] cluster: First round of fixes and tests after first run Still not working! --- Cargo.lock | 1 + Cargo.toml | 3 +- plugins/reaction-plugin-cluster/Cargo.toml | 5 +- .../reaction-plugin-cluster/src/connection.rs | 52 ++++++-- plugins/reaction-plugin-cluster/src/main.rs | 4 +- .../reaction-plugin-cluster/src/secret_key.rs | 124 ++++++++++++++++-- tests/plugin_cluster.rs | 82 ++++++++++++ tests/test-conf/test-cluster.jsonnet | 24 ++-- 8 files changed, 249 insertions(+), 46 deletions(-) create mode 100644 tests/plugin_cluster.rs diff --git a/Cargo.lock b/Cargo.lock index bb17bba..6722383 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2859,6 +2859,7 @@ dependencies = [ name = "reaction-plugin-cluster" version = "0.1.0" dependencies = [ + "assert_fs", "chrono", "data-encoding", "futures", diff --git a/Cargo.toml b/Cargo.toml index 822a086..51c2a45 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ tracing = "0.1.40" [dev-dependencies] rand = "0.8.5" tempfile = "3.12.0" -assert_fs = "1.1.3" +assert_fs.workspace = true assert_cmd = "2.0.17" predicates = "3.1.3" @@ -78,6 +78,7 @@ predicates = "3.1.3" members = ["plugins/reaction-plugin", "plugins/reaction-plugin-cluster", "plugins/reaction-plugin-virtual"] [workspace.dependencies] +assert_fs = "1.1.3" chrono = { version = "0.4.38", features = ["std", "clock", 
"serde"] } futures = "0.3.30" remoc = { version = "0.18.3" } diff --git a/plugins/reaction-plugin-cluster/Cargo.toml b/plugins/reaction-plugin-cluster/Cargo.toml index 4ba08a5..fe78995 100644 --- a/plugins/reaction-plugin-cluster/Cargo.toml +++ b/plugins/reaction-plugin-cluster/Cargo.toml @@ -15,5 +15,8 @@ tokio.workspace = true tokio.features = ["rt-multi-thread"] data-encoding = "2.9.0" -iroh = "0.94.0" +iroh = { version = "0.94.0", default-features = false } rand = "0.9.2" + +[dev-dependencies] +assert_fs.workspace = true diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index 02d792a..bbfb1ed 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -5,6 +5,7 @@ use std::{ }; use chrono::{DateTime, Local, TimeDelta, Utc}; +use futures::FutureExt; use iroh::{ Endpoint, EndpointAddr, endpoint::{Connection, VarInt}, @@ -14,7 +15,10 @@ use remoc::{Connect, rch::base}; use serde::{Deserialize, Serialize}; use tokio::{sync::mpsc, time::sleep}; -use crate::cluster::{ALPN, UtcLine}; +use crate::{ + cluster::{ALPN, UtcLine}, + secret_key::key_bytes_to_b64, +}; const START_TIMEOUT: Duration = Duration::from_secs(5); const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour @@ -117,13 +121,20 @@ impl ConnectionManager { tokio::pin!(tick); let have_connection = self.connection.is_some(); - let maybe_conn_rx = self.connection.as_mut().map(|conn| conn.rx.recv()); + let maybe_conn_rx = self + .connection + .as_mut() + .map(|conn| conn.rx.recv().boxed()) + // This Future will never be polled because of the if in select! + // It still needs to be present because the branch will be evaluated + // so we can't unwrap + .unwrap_or(false_recv().boxed()); let event = tokio::select! 
{ // Tick when we don't have a connection _ = tick, if !have_connection => Some(Event::Tick), // Receive remote message when we have a connection - msg = maybe_conn_rx.unwrap(), if have_connection => Some(Event::RemoteMessageReceived(msg)), + msg = maybe_conn_rx, if have_connection => Some(Event::RemoteMessageReceived(msg)), // Receive a connection from EndpointManager conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)), // Receive a message from local Actions @@ -208,7 +219,8 @@ impl ConnectionManager { if count > 0 { eprintln!( "DEBUG cluster {}: node {}: dropping {count} messages that reached timeout", - self.cluster_name, self.remote.id, + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()), ) } } @@ -220,7 +232,8 @@ impl ConnectionManager { None => { eprintln!( "DEBUG cluster {}: ConnectionManager {}: quitting because EndpointManager has quit", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); self.quit(); false @@ -243,7 +256,8 @@ impl ConnectionManager { } else { eprintln!( "WARN cluster {}: ignoring incoming connection from {}, as we already have a valid connection with it", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); true } @@ -258,11 +272,14 @@ impl ConnectionManager { self.delta = Some(delta); eprintln!( "ERROR cluster {}: trying to connect to node {}: {err}", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); eprintln!( "INFO cluster {}: retry connecting to node {} in {:?}", - self.cluster_name, self.remote.id, self.delta + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()), + self.delta ); } @@ -274,7 +291,8 @@ impl ConnectionManager { Err(err) => { eprintln!( "WARN cluster {}: error receiving message from node {}: {err}", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); 
self.close_connection(1, b"error receiving from your stream") .await; @@ -282,20 +300,23 @@ impl ConnectionManager { Ok(None) => { eprintln!( "WARN cluster {}: node {} closed its stream", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); self.close_connection(1, b"you closed your stream").await; } Ok(Some(RemoteMessage::Version(_))) => { eprintln!( "WARN cluster {}: node {} sent invalid message, ignoring", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); } Ok(Some(RemoteMessage::Quitting)) => { eprintln!( "INFO cluster {}: node {} is quitting, bye bye", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); self.close_connection(0, b"you said you'll quit so I quit") .await; @@ -338,7 +359,8 @@ impl ConnectionManager { { eprintln!( "INFO cluster {}: connection with node {} failed: {err}", - self.cluster_name, self.remote.id + self.cluster_name, + key_bytes_to_b64(self.remote.id.as_bytes()) ); self.message_queue.push_back(message); self.close_connection( @@ -438,3 +460,7 @@ async fn open_channels( Err(err) => Err(format!("could not receive message: {err}")), } } + +async fn false_recv() -> Result, remoc::rch::base::RecvError> { + Ok(None) +} diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index b27e335..b8e8198 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -145,7 +145,7 @@ impl PluginInfo for Plugin { ); } - let secret_key = secret_key(&stream_name).await?; + let secret_key = secret_key(".", &stream_name).await?; eprintln!( "INFO public key of this node for cluster {stream_name}: {}", key_bytes_to_b64(secret_key.public().as_bytes()) @@ -183,7 +183,7 @@ impl PluginInfo for Plugin { config: Value, patterns: Vec, ) -> RemoteResult { - if &action_type != "cluster" { + if &action_type != 
"cluster_send" { return Err("This plugin can't handle other action types than cluster".into()); } diff --git a/plugins/reaction-plugin-cluster/src/secret_key.rs b/plugins/reaction-plugin-cluster/src/secret_key.rs index 5545b86..6397611 100644 --- a/plugins/reaction-plugin-cluster/src/secret_key.rs +++ b/plugins/reaction-plugin-cluster/src/secret_key.rs @@ -7,22 +7,23 @@ use tokio::{ io::AsyncWriteExt, }; -fn secret_key_path(cluster_name: &str) -> String { - format!("./secret_key_{cluster_name}.txt") +fn secret_key_path(dir: &str, cluster_name: &str) -> String { + format!("{dir}/secret_key_{cluster_name}.txt") } -pub async fn secret_key(cluster_name: &str) -> Result { - if let Some(key) = get_secret_key(cluster_name).await? { +pub async fn secret_key(dir: &str, cluster_name: &str) -> Result { + let path = secret_key_path(dir, cluster_name); + if let Some(key) = get_secret_key(&path).await? { Ok(key) } else { let key = SecretKey::generate(&mut rand::rng()); - set_secret_key(cluster_name, &key).await?; + set_secret_key(&path, &key).await?; Ok(key) } } -async fn get_secret_key(cluster_name: &str) -> Result, String> { - let key = match fs::read_to_string(secret_key_path(cluster_name)).await { +async fn get_secret_key(path: &str) -> Result, String> { + let key = match fs::read_to_string(path).await { Ok(key) => Ok(key), Err(err) => match err.kind() { io::ErrorKind::NotFound => return Ok(None), @@ -32,26 +33,24 @@ async fn get_secret_key(cluster_name: &str) -> Result, String> let bytes = match key_b64_to_bytes(&key) { Ok(key) => Ok(key), Err(err) => Err(format!( - "invalid secret key read from file: {err}. Please remove the `{}` file from plugin directory.", - secret_key_path(cluster_name), + "invalid secret key read from file: {err}. 
Please remove the `{path}` file from plugin directory.", )), }?; Ok(Some(SecretKey::from_bytes(&bytes))) } -async fn set_secret_key(cluster_name: &str, key: &SecretKey) -> Result<(), String> { +async fn set_secret_key(path: &str, key: &SecretKey) -> Result<(), String> { let secret_key = key_bytes_to_b64(&key.to_bytes()); - let secret_key_path = secret_key_path(cluster_name); File::options() .mode(0o600) .write(true) .create(true) - .open(&secret_key_path) + .open(path) .await - .map_err(|err| format!("can't open `{secret_key_path}` in plugin directory: {err}"))? + .map_err(|err| format!("can't open `{path}` in plugin directory: {err}"))? .write_all(secret_key.as_bytes()) .await - .map_err(|err| format!("can't write to `{secret_key_path}` in plugin directory: {err}")) + .map_err(|err| format!("can't write to `{path}` in plugin directory: {err}")) } pub fn key_b64_to_bytes(key: &str) -> Result<[u8; 32], DecodeError> { @@ -72,3 +71,100 @@ pub fn key_b64_to_bytes(key: &str) -> Result<[u8; 32], DecodeError> { pub fn key_bytes_to_b64(key: &[u8; 32]) -> String { data_encoding::BASE64URL.encode(key) } + +#[cfg(test)] +mod tests { + use assert_fs::{ + TempDir, + prelude::{FileWriteStr, PathChild}, + }; + use iroh::{PublicKey, SecretKey}; + use tokio::fs::read_to_string; + + use crate::secret_key::{ + get_secret_key, key_b64_to_bytes, key_bytes_to_b64, secret_key_path, set_secret_key, + }; + + #[test] + fn secret_key_encode_decode() { + for (secret_key, public_key) in [ + ( + "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=", + "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY=", + ), + ( + "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=", + "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc=", + ), + ] { + assert_eq!( + secret_key, + &key_bytes_to_b64(&key_b64_to_bytes(secret_key).unwrap()) + ); + assert_eq!( + public_key, + &key_bytes_to_b64(&key_b64_to_bytes(public_key).unwrap()) + ); + + let secret_key_parsed = SecretKey::from_bytes(&key_b64_to_bytes(secret_key).unwrap()); + let 
public_key_parsed = + PublicKey::from_bytes(&key_b64_to_bytes(public_key).unwrap()).unwrap(); + + assert_eq!(secret_key_parsed.public(), public_key_parsed); + } + } + + #[tokio::test] + async fn secret_key_get() { + let tmp_dir = TempDir::new().unwrap(); + let tmp_dir_str = tmp_dir.to_str().unwrap(); + for (secret_key, cluster_name) in [ + ("g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw=", "my_cluster"), + ("5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY=", "name"), + ] { + tmp_dir + .child(&format!("secret_key_{cluster_name}.txt")) + .write_str(secret_key) + .unwrap(); + + let secret_key_parsed = SecretKey::from_bytes(&key_b64_to_bytes(secret_key).unwrap()); + + let path = secret_key_path(tmp_dir_str, cluster_name); + let secret_key_from_file = get_secret_key(&path).await.unwrap(); + + assert_eq!( + secret_key_parsed.to_bytes(), + secret_key_from_file.unwrap().to_bytes() + ) + } + + assert_eq!( + Ok(None), + get_secret_key(&format!("{tmp_dir_str}/non_existent")) + .await + // Can't compare secret keys so we map to bytes + // even if we don't want one + .map(|opt| opt.map(|pk| pk.to_bytes())) + ); + // Will fail if we're root, but who runs this as root?? 
+ assert!( + get_secret_key(&format!("/root/non_existent")) + .await + .is_err() + ); + } + + #[tokio::test] + async fn secret_key_set() { + let tmp_dir = TempDir::new().unwrap(); + let tmp_dir_str = tmp_dir.to_str().unwrap(); + + let path = format!("{tmp_dir_str}/secret"); + let key = SecretKey::generate(&mut rand::rng()); + + assert!(set_secret_key(&path, &key).await.is_ok()); + let read_file = read_to_string(&path).await; + assert!(read_file.is_ok()); + assert_eq!(read_file.unwrap(), key_bytes_to_b64(&key.to_bytes())); + } +} diff --git a/tests/plugin_cluster.rs b/tests/plugin_cluster.rs new file mode 100644 index 0000000..08fc1a1 --- /dev/null +++ b/tests/plugin_cluster.rs @@ -0,0 +1,82 @@ +use std::{path::Path, thread::sleep, time::Duration}; + +use assert_cmd::Command; +use assert_fs::prelude::*; +use predicates::prelude::predicate; +use tokio::{fs::read_to_string, runtime::Handle}; + +// require UDP ports 9876 & 9877 to be free on 127.0.0.1 + +#[tokio::test] +async fn plugin_cluster() { + // First build reaction-plugin-cluster + Command::new("cargo") + .args(["build", "-p", "reaction-plugin-cluster"]) + .unwrap(); + + let config = read_to_string("tests/test-conf/test-cluster.jsonnet") + .await + .unwrap(); + + let secret_key_a = "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw="; + let public_key_a = "HhVh7ghqpXM9375HZ82OOeB504HBSS25wgug-1vUggY="; + let secret_key_b = "5EgRjwIpqd60IXWCGg5dFTtxkI-0fS1PlhoIhUjh1eY="; + let public_key_b = "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc="; + + let config_a = config + .replace("PUBLIC_KEY", public_key_b) + .replace("NODE", "A") + .replace("1234", "9876") + .replace("4321", "9877"); + let config_b = config + .replace("PUBLIC_KEY", public_key_a) + .replace("NODE", "B") + .replace("1234", "9877") + .replace("4321", "9876"); + + let output_a = vec![ + "a0 1", "a0 2", "a0 3", "a0 4", "b0 1", "b0 2", "b0 3", "b0 4", "", + ]; + let output_b = vec![ + "a0 1", "a0 2", "a0 3", "a0 4", "b0 1", "b0 2", "b0 3", "b0 4", "", + ]; + + 
let runtime = Handle::current(); + let a_handle = runtime.spawn_blocking(|| launch_node(config_a, secret_key_a, output_a)); + let b_handle = runtime.spawn_blocking(|| launch_node(config_b, secret_key_b, output_b)); + + let (a_res, b_res) = tokio::join!(a_handle, b_handle); + a_res.unwrap(); + b_res.unwrap(); +} + +fn launch_node(config: String, my_secret: &'static str, expected_output: Vec<&'static str>) { + let tmp_dir = assert_fs::TempDir::new().unwrap(); + + // Write node config + tmp_dir.child("config.jsonnet").write_str(&config).unwrap(); + tmp_dir + .child("plugin_data/cluster/secret_key_s1") + .write_str(my_secret) + .unwrap(); + + // Copy cluster plugin + tmp_dir + .child("./target/debug/reaction-plugin-cluster") + .write_file(Path::new("./target/debug/reaction-plugin-cluster")) + .unwrap(); + + sleep(Duration::from_secs(10)); + Command::cargo_bin("reaction") + .unwrap() + .args(["start", "--socket", "./s", "--config", "./config.jsonnet"]) + .current_dir(tmp_dir.path()) + .timeout(Duration::from_secs(5)) + // Expected exit 1: all stream exited + .assert() + .code(predicate::eq(1)); + + // Expected output + tmp_dir.child("log").assert(expected_output.join("\n")); + tmp_dir.child("log").write_str("").unwrap(); +} diff --git a/tests/test-conf/test-cluster.jsonnet b/tests/test-conf/test-cluster.jsonnet index a635c73..b4cb403 100644 --- a/tests/test-conf/test-cluster.jsonnet +++ b/tests/test-conf/test-cluster.jsonnet @@ -28,18 +28,10 @@ a0: { type: 'cluster_send', options: { - send: 'a0 ', + send: 'NODE a0 ', to: 's1', }, }, - b0: { - type: 'cluster_send', - options: { - send: 'b0 ', - to: 's1', - }, - after: '600ms', - }, }, }, }, @@ -47,12 +39,14 @@ s1: { type: 'cluster', options: { - listen_port: 9000, - shared_secret: '', - nodes: { - publickey: '', - addresses: ['127.0.0.1:9001'], - }, + listen_port: 1234, + bind_ipv4: '127.0.0.1', + bind_ipv6: null, + message_timeout: '30s', + nodes: [{ + public_key: 'PUBLIC_KEY', + addresses: ['127.0.0.1:4321'], + }], }, 
filters: { f1: { From ff5200b0a0d1c4417f4e6eb0f94b445b839ed892 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 25 Nov 2025 12:00:00 +0100 Subject: [PATCH 153/241] cluster: add a lot of DEBUG msgs, Show trait to ease logging --- .../reaction-plugin-cluster/src/cluster.rs | 5 ++ .../reaction-plugin-cluster/src/connection.rs | 76 +++++++++++++++---- .../reaction-plugin-cluster/src/endpoint.rs | 14 +++- .../src/{secret_key.rs => key.rs} | 24 +++++- plugins/reaction-plugin-cluster/src/main.rs | 13 ++-- plugins/reaction-plugin-virtual/src/main.rs | 5 +- 6 files changed, 109 insertions(+), 28 deletions(-) rename plugins/reaction-plugin-cluster/src/{secret_key.rs => key.rs} (91%) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index f29c47b..5a4df4b 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -45,6 +45,8 @@ pub fn cluster_tasks( mut actions: Vec, shutdown: ShutdownController, ) { + eprintln!("DEBUG cluster tasks starts running"); + let (message_action2connection_txs, mut message_action2connection_rxs): ( Vec>, Vec>, @@ -110,6 +112,8 @@ pub fn cluster_tasks( .await }); } + + eprintln!("DEBUG cluster tasks finished running"); } impl ActionInit { @@ -120,6 +124,7 @@ impl ActionInit { own_stream_tx: remocMpsc::Sender, ) { while let Ok(Some(m)) = self.rx.recv().await { + eprintln!("DEBUG action: received a message to send to connections"); let line = if m.match_.is_empty() { self.send.clone() } else { diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index bbfb1ed..e5709b1 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -17,7 +17,7 @@ use tokio::{sync::mpsc, time::sleep}; use crate::{ cluster::{ALPN, UtcLine}, - secret_key::key_bytes_to_b64, + key::Show, }; const START_TIMEOUT: Duration = Duration::from_secs(5); 
@@ -112,6 +112,7 @@ impl ConnectionManager { self.try_connect().await; loop { + eprintln!("DEBUG connection: NEW LOOP!"); let tick = sleep(if self.connection.is_none() { self.delta.unwrap_or(START_TIMEOUT) } else { @@ -131,16 +132,17 @@ impl ConnectionManager { .unwrap_or(false_recv().boxed()); let event = tokio::select! { + biased; + // Quitting + _ = self.shutdown.wait() => None, + // Receive a connection from EndpointManager + conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)), // Tick when we don't have a connection _ = tick, if !have_connection => Some(Event::Tick), // Receive remote message when we have a connection msg = maybe_conn_rx, if have_connection => Some(Event::RemoteMessageReceived(msg)), - // Receive a connection from EndpointManager - conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)), // Receive a message from local Actions msg = self.message_rx.recv() => Some(Event::LocalMessageReceived(msg)), - // Quitting - _ = self.shutdown.wait() => None, }; match event { @@ -160,12 +162,27 @@ impl ConnectionManager { self.handle_tick().await; } Event::ConnectionReceived(connection) => { + eprintln!( + "DEBUG cluster {}: node {}: received a connection", + self.cluster_name, + self.remote.id.show(), + ); self.handle_connection(connection).await; } Event::LocalMessageReceived(utc_line) => { + eprintln!( + "DEBUG cluster {}: node {}: received a local message", + self.cluster_name, + self.remote.id.show(), + ); self.handle_local_message(utc_line, Insert::Back).await; } Event::RemoteMessageReceived(message) => { + eprintln!( + "DEBUG cluster {}: node {}: received a remote message", + self.cluster_name, + self.remote.id.show(), + ); self.handle_remote_message(message).await; } } @@ -175,8 +192,21 @@ impl ConnectionManager { /// Returns true if we have a valid connection now async fn try_connect(&mut self) -> bool { if self.connection.is_none() { + eprintln!( + "DEBUG cluster {}: node {}: trying to connect...", + 
self.cluster_name, + self.remote.id.show(), + ); match self.endpoint.connect(self.remote.clone(), ALPN[0]).await { - Ok(connection) => self.handle_connection(Some(connection)).await, + Ok(connection) => { + eprintln!( + "DEBUG cluster {}: node {}: created connection", + self.cluster_name, + self.remote.id.show(), + ); + + self.handle_connection(Some(connection)).await + } Err(err) => { self.try_connect_error(err.to_string()); false @@ -220,7 +250,7 @@ impl ConnectionManager { eprintln!( "DEBUG cluster {}: node {}: dropping {count} messages that reached timeout", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()), + self.remote.id.show(), ) } } @@ -233,7 +263,7 @@ impl ConnectionManager { eprintln!( "DEBUG cluster {}: ConnectionManager {}: quitting because EndpointManager has quit", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); self.quit(); false @@ -257,7 +287,7 @@ impl ConnectionManager { eprintln!( "WARN cluster {}: ignoring incoming connection from {}, as we already have a valid connection with it", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); true } @@ -273,12 +303,12 @@ impl ConnectionManager { eprintln!( "ERROR cluster {}: trying to connect to node {}: {err}", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); eprintln!( "INFO cluster {}: retry connecting to node {} in {:?}", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()), + self.remote.id.show(), self.delta ); } @@ -292,7 +322,7 @@ impl ConnectionManager { eprintln!( "WARN cluster {}: error receiving message from node {}: {err}", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); self.close_connection(1, b"error receiving from your stream") .await; @@ -301,7 +331,7 @@ impl ConnectionManager { eprintln!( "WARN cluster {}: node {} closed its stream", self.cluster_name, - 
key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); self.close_connection(1, b"you closed your stream").await; } @@ -309,14 +339,14 @@ impl ConnectionManager { eprintln!( "WARN cluster {}: node {} sent invalid message, ignoring", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); } Ok(Some(RemoteMessage::Quitting)) => { eprintln!( "INFO cluster {}: node {} is quitting, bye bye", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); self.close_connection(0, b"you said you'll quit so I quit") .await; @@ -333,6 +363,13 @@ impl ConnectionManager { self.cluster_name, line.0 ); self.quit(); + } else { + eprintln!( + "DEBUG cluster {}: node {}: sent a remote message to local stream: {}", + self.cluster_name, + self.remote.id.show(), + line.0 + ); } } } @@ -360,7 +397,7 @@ impl ConnectionManager { eprintln!( "INFO cluster {}: connection with node {} failed: {err}", self.cluster_name, - key_bytes_to_b64(self.remote.id.as_bytes()) + self.remote.id.show() ); self.message_queue.push_back(message); self.close_connection( @@ -368,6 +405,13 @@ impl ConnectionManager { b"could not send a message to your channel so I quit", ) .await; + } else { + eprintln!( + "DEBUG cluster {}: node {}: sent a local message to remote: {}", + self.cluster_name, + self.remote.id.show(), + message.0 + ); } } None => { diff --git a/plugins/reaction-plugin-cluster/src/endpoint.rs b/plugins/reaction-plugin-cluster/src/endpoint.rs index 77a7035..4101ba3 100644 --- a/plugins/reaction-plugin-cluster/src/endpoint.rs +++ b/plugins/reaction-plugin-cluster/src/endpoint.rs @@ -8,6 +8,8 @@ use iroh::{ use reaction_plugin::shutdown::ShutdownController; use tokio::sync::mpsc; +use crate::key::Show; + enum Break { Yes, No, @@ -66,10 +68,13 @@ impl EndpointManager { } async fn handle_incoming(&mut self, incoming: Incoming) -> Break { + eprintln!( + "DEBUG cluster {}: EndpointManager: receiving connection", + 
self.cluster_name, + ); // FIXME a malicious actor could maybe prevent a node from connecting to // its cluster by sending lots of invalid slow connection requests? - // We could lower its priority https://docs.rs/tokio/latest/tokio/macro.select.html#fairness - // And/or moving the handshake to another task + // This function could be moved to a new 'oneshot' task instead let remote_address = incoming.remote_address(); let remote_address_validated = incoming.remote_address_validated(); let connection = match incoming.await { @@ -118,6 +123,11 @@ impl EndpointManager { self.shutdown.ask_shutdown(); return Break::Yes; } + eprintln!( + "DEBUG cluster {}: EndpointManager: receiving connection from {}", + self.cluster_name, + remote_id.show(), + ); } } diff --git a/plugins/reaction-plugin-cluster/src/secret_key.rs b/plugins/reaction-plugin-cluster/src/key.rs similarity index 91% rename from plugins/reaction-plugin-cluster/src/secret_key.rs rename to plugins/reaction-plugin-cluster/src/key.rs index 6397611..db0d277 100644 --- a/plugins/reaction-plugin-cluster/src/secret_key.rs +++ b/plugins/reaction-plugin-cluster/src/key.rs @@ -1,7 +1,7 @@ use std::io; use data_encoding::DecodeError; -use iroh::SecretKey; +use iroh::{PublicKey, SecretKey}; use tokio::{ fs::{self, File}, io::AsyncWriteExt, @@ -40,7 +40,7 @@ async fn get_secret_key(path: &str) -> Result, String> { } async fn set_secret_key(path: &str, key: &SecretKey) -> Result<(), String> { - let secret_key = key_bytes_to_b64(&key.to_bytes()); + let secret_key = key.show(); File::options() .mode(0o600) .write(true) @@ -72,6 +72,24 @@ pub fn key_bytes_to_b64(key: &[u8; 32]) -> String { data_encoding::BASE64URL.encode(key) } +/// Implemented by PublicKey & SecretKey to display keys as base64 instead of hexadecimal. 
+/// Similar to Display/ToString +pub trait Show { + fn show(&self) -> String; +} + +impl Show for PublicKey { + fn show(&self) -> String { + key_bytes_to_b64(self.as_bytes()) + } +} + +impl Show for SecretKey { + fn show(&self) -> String { + key_bytes_to_b64(&self.to_bytes()) + } +} + #[cfg(test)] mod tests { use assert_fs::{ @@ -81,7 +99,7 @@ mod tests { use iroh::{PublicKey, SecretKey}; use tokio::fs::read_to_string; - use crate::secret_key::{ + use crate::key::{ get_secret_key, key_b64_to_bytes, key_bytes_to_b64, secret_key_path, set_secret_key, }; diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index b8e8198..cd8875a 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -15,11 +15,12 @@ use serde::{Deserialize, Serialize}; mod cluster; mod connection; mod endpoint; -mod secret_key; +mod key; -use secret_key::{key_b64_to_bytes, key_bytes_to_b64, secret_key}; - -use crate::cluster::{bind, cluster_tasks}; +use crate::{ + cluster::{bind, cluster_tasks}, + key::{Show, key_b64_to_bytes, secret_key}, +}; #[tokio::main] async fn main() { @@ -148,7 +149,7 @@ impl PluginInfo for Plugin { let secret_key = secret_key(".", &stream_name).await?; eprintln!( "INFO public key of this node for cluster {stream_name}: {}", - key_bytes_to_b64(secret_key.public().as_bytes()) + secret_key.public().show() ); let (tx, rx) = mpsc::channel(1); @@ -233,6 +234,8 @@ impl PluginInfo for Plugin { // Free containers self.actions = Default::default(); self.streams = Default::default(); + eprintln!("DEBUG finished setup."); + Ok(()) } diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 8bd735c..eafb0f9 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -92,8 +92,9 @@ impl PluginInfo for Plugin { } } } - self.streams = BTreeMap::new(); - self.actions_init = Vec::new(); + 
// Free containers + self.streams = Default::default(); + self.actions_init = Default::default(); Ok(()) } From 3ed2ebd4884986db7a47c3888828bc9f809d1abf Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 26 Nov 2025 12:00:00 +0100 Subject: [PATCH 154/241] =?UTF-8?q?Two=20nodes=20succeeded=20to=20exchange?= =?UTF-8?q?=20messages=20=F0=9F=8E=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Separated try_connect to another task, to prevent interblocking Send a byte to the new stream so that the other can see the stream and accept it. --- .../reaction-plugin-cluster/src/cluster.rs | 25 +- .../reaction-plugin-cluster/src/connection.rs | 294 +++++++++--------- 2 files changed, 168 insertions(+), 151 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 5a4df4b..9d76f25 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -2,11 +2,15 @@ use std::{ collections::BTreeMap, net::{SocketAddrV4, SocketAddrV6}, sync::Arc, + time::Duration, }; use chrono::{DateTime, Local, Utc}; use futures::future::join_all; -use iroh::{Endpoint, PublicKey, endpoint::Connection}; +use iroh::{ + Endpoint, PublicKey, + endpoint::{Connection, TransportConfig, VarInt}, +}; use reaction_plugin::{Line, shutdown::ShutdownController}; use remoc::rch::mpsc as remocMpsc; use tokio::sync::mpsc as tokioMpsc; @@ -18,11 +22,17 @@ pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; pub type UtcLine = Arc<(String, DateTime)>; pub async fn bind(stream: &StreamInit) -> Result { + let mut transport = TransportConfig::default(); + transport + .max_idle_timeout(Some(VarInt::from_u32(1).into())) + .keep_alive_interval(Some(Duration::from_millis(400))); + let mut builder = Endpoint::builder() .secret_key(stream.secret_key.clone()) .alpns(ALPN.iter().map(|slice| slice.to_vec()).collect()) .relay_mode(iroh::RelayMode::Disabled) - 
.clear_discovery(); + .clear_discovery() + .transport_config(transport); if let Some(ip) = stream.bind_ipv4 { builder = builder.bind_addr_v4(SocketAddrV4::new(ip, stream.listen_port)); @@ -69,13 +79,17 @@ pub fn cluster_tasks( let (connection_endpoint2connection_txs, mut connection_endpoint2connection_rxs): ( BTreeMap>, - Vec<(PublicKey, tokioMpsc::Receiver)>, + Vec<( + PublicKey, + tokioMpsc::Sender, + tokioMpsc::Receiver, + )>, ) = stream .nodes .keys() .map(|pk| { let (tx, rx) = tokioMpsc::channel(1); - ((pk.clone(), tx), (pk.clone(), rx)) + ((pk.clone(), tx.clone()), (pk.clone(), tx, rx)) }) .unzip(); @@ -88,7 +102,7 @@ pub fn cluster_tasks( ); // Spawn connection managers - while let Some((pk, connection_endpoint2connection_rx)) = + while let Some((pk, connection_endpoint2connection_tx, connection_endpoint2connection_rx)) = connection_endpoint2connection_rxs.pop() { let cluster_name = stream.name.clone(); @@ -102,6 +116,7 @@ pub fn cluster_tasks( cluster_name, endpoint_addr, endpoint, + connection_endpoint2connection_tx, connection_endpoint2connection_rx, stream.message_timeout, message_action2connection_rx, diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index e5709b1..13ad2c0 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -1,8 +1,4 @@ -use std::{ - collections::VecDeque, - sync::Arc, - time::{Duration, Instant}, -}; +use std::{collections::VecDeque, sync::Arc}; use chrono::{DateTime, Local, TimeDelta, Utc}; use futures::FutureExt; @@ -13,21 +9,20 @@ use iroh::{ use reaction_plugin::{Line, shutdown::ShutdownController}; use remoc::{Connect, rch::base}; use serde::{Deserialize, Serialize}; -use tokio::{sync::mpsc, time::sleep}; +use tokio::{io::AsyncWriteExt, sync::mpsc}; use crate::{ cluster::{ALPN, UtcLine}, key::Show, }; -const START_TIMEOUT: Duration = Duration::from_secs(5); -const MAX_TIMEOUT: Duration = 
Duration::from_secs(60 * 60); // 1 hour -const TIMEOUT_FACTOR: f64 = 1.5; +// const START_TIMEOUT: Duration = Duration::from_secs(5); +// const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60); // 1 hour +// const TIMEOUT_FACTOR: f64 = 1.5; const PROTOCOL_VERSION: u32 = 1; enum Event { - Tick, LocalMessageReceived(Option), RemoteMessageReceived(Result, remoc::rch::base::RecvError>), ConnectionReceived(Option), @@ -50,22 +45,17 @@ struct OwnConnection { pub struct ConnectionManager { /// Cluster's name (for logging) cluster_name: String, + /// The remote node we're communicating with (for logging) + node_id: String, - /// The remote node we're communicating with - remote: EndpointAddr, - /// Endpoint - endpoint: Arc, - + /// Ask for a connection + ask_connection: mpsc::Sender>, + cancel_ask_connection: Option>, /// The EndpointManager sending us a connection (whether we asked for it or not) connection_rx: mpsc::Receiver, /// Our own connection (when we have one) connection: Option, - /// Delta we'll use next time we'll try to connect to remote - delta: Option, - /// Next instant we'll try to connect - next_try_connect: Option, - /// Max duration before we drop pending messages to a node we can't connect to. 
message_timeout: TimeDelta, /// Message we receive from actions @@ -85,19 +75,28 @@ impl ConnectionManager { cluster_name: String, remote: EndpointAddr, endpoint: Arc, + connection_tx: mpsc::Sender, connection_rx: mpsc::Receiver, message_timeout: TimeDelta, message_rx: mpsc::Receiver, own_cluster_tx: remoc::rch::mpsc::Sender, shutdown: ShutdownController, ) -> Self { + let node_id = remote.id.show(); + let (ask_connection, order_start) = mpsc::channel(1); + try_connect( + cluster_name.clone(), + remote.clone(), + endpoint.clone(), + connection_tx, + order_start, + ); Self { cluster_name, - remote, - endpoint, + node_id, connection: None, - delta: None, - next_try_connect: None, + ask_connection, + cancel_ask_connection: None, connection_rx, message_timeout, message_rx, @@ -109,17 +108,9 @@ impl ConnectionManager { /// Main loop pub async fn task(mut self) { - self.try_connect().await; - + self.ask_connection().await; loop { eprintln!("DEBUG connection: NEW LOOP!"); - let tick = sleep(if self.connection.is_none() { - self.delta.unwrap_or(START_TIMEOUT) - } else { - // Still tick when we have a connection - Duration::from_secs(60) - }); - tokio::pin!(tick); let have_connection = self.connection.is_some(); let maybe_conn_rx = self @@ -137,8 +128,6 @@ impl ConnectionManager { _ = self.shutdown.wait() => None, // Receive a connection from EndpointManager conn = self.connection_rx.recv() => Some(Event::ConnectionReceived(conn)), - // Tick when we don't have a connection - _ = tick, if !have_connection => Some(Event::Tick), // Receive remote message when we have a connection msg = maybe_conn_rx, if have_connection => Some(Event::RemoteMessageReceived(msg)), // Receive a message from local Actions @@ -158,74 +147,30 @@ impl ConnectionManager { async fn handle_event(&mut self, event: Event) { match event { - Event::Tick => { - self.handle_tick().await; - } Event::ConnectionReceived(connection) => { eprintln!( "DEBUG cluster {}: node {}: received a connection", - 
self.cluster_name, - self.remote.id.show(), + self.cluster_name, self.node_id, ); self.handle_connection(connection).await; } Event::LocalMessageReceived(utc_line) => { eprintln!( "DEBUG cluster {}: node {}: received a local message", - self.cluster_name, - self.remote.id.show(), + self.cluster_name, self.node_id, ); self.handle_local_message(utc_line, Insert::Back).await; } Event::RemoteMessageReceived(message) => { eprintln!( "DEBUG cluster {}: node {}: received a remote message", - self.cluster_name, - self.remote.id.show(), + self.cluster_name, self.node_id, ); self.handle_remote_message(message).await; } } } - /// Try connecting to a remote endpoint - /// Returns true if we have a valid connection now - async fn try_connect(&mut self) -> bool { - if self.connection.is_none() { - eprintln!( - "DEBUG cluster {}: node {}: trying to connect...", - self.cluster_name, - self.remote.id.show(), - ); - match self.endpoint.connect(self.remote.clone(), ALPN[0]).await { - Ok(connection) => { - eprintln!( - "DEBUG cluster {}: node {}: created connection", - self.cluster_name, - self.remote.id.show(), - ); - - self.handle_connection(Some(connection)).await - } - Err(err) => { - self.try_connect_error(err.to_string()); - false - } - } - } else { - true - } - } - - async fn handle_tick(&mut self) { - if self - .next_try_connect - .is_some_and(|next| next > Instant::now()) - { - self.try_connect().await; - } - } - async fn send_queue_messages(&mut self) { while self.connection.is_some() && let Some(message) = self.message_queue.pop_front() @@ -249,8 +194,7 @@ impl ConnectionManager { if count > 0 { eprintln!( "DEBUG cluster {}: node {}: dropping {count} messages that reached timeout", - self.cluster_name, - self.remote.id.show(), + self.cluster_name, self.node_id, ) } } @@ -262,32 +206,33 @@ impl ConnectionManager { None => { eprintln!( "DEBUG cluster {}: ConnectionManager {}: quitting because EndpointManager has quit", - self.cluster_name, - self.remote.id.show() + 
self.cluster_name, self.node_id, ); self.quit(); false } Some(connection) => { + if let Some(cancel) = self.cancel_ask_connection.take() { + let _ = cancel.send(()).await; + } if self.connection.is_none() { - self.delta = None; - self.next_try_connect = None; - match open_channels(&connection).await { Ok((tx, rx)) => { self.connection = Some(OwnConnection { connection, tx, rx }); true } Err(err) => { - self.try_connect_error(err); + eprintln!( + "ERROR cluster {}: trying to initialize connection to node {}: {err}", + self.cluster_name, self.node_id, + ); false } } } else { eprintln!( - "WARN cluster {}: ignoring incoming connection from {}, as we already have a valid connection with it", - self.cluster_name, - self.remote.id.show() + "WARN cluster {}: node {}: ignoring incoming connection, as we already have a valid connection with it", + self.cluster_name, self.node_id, ); true } @@ -295,24 +240,6 @@ impl ConnectionManager { } } - /// Update the state and log an error when bootstraping a new Connection - fn try_connect_error(&mut self, err: String) { - let delta = next_delta(self.delta); - self.next_try_connect = Some(Instant::now() + delta); - self.delta = Some(delta); - eprintln!( - "ERROR cluster {}: trying to connect to node {}: {err}", - self.cluster_name, - self.remote.id.show() - ); - eprintln!( - "INFO cluster {}: retry connecting to node {} in {:?}", - self.cluster_name, - self.remote.id.show(), - self.delta - ); - } - async fn handle_remote_message( &mut self, message: Result, remoc::rch::base::RecvError>, @@ -320,9 +247,8 @@ impl ConnectionManager { match message { Err(err) => { eprintln!( - "WARN cluster {}: error receiving message from node {}: {err}", - self.cluster_name, - self.remote.id.show() + "WARN cluster {}: node {}: error receiving remote message: {err}", + self.cluster_name, self.node_id, ); self.close_connection(1, b"error receiving from your stream") .await; @@ -330,23 +256,20 @@ impl ConnectionManager { Ok(None) => { eprintln!( "WARN 
cluster {}: node {} closed its stream", - self.cluster_name, - self.remote.id.show() + self.cluster_name, self.node_id, ); self.close_connection(1, b"you closed your stream").await; } Ok(Some(RemoteMessage::Version(_))) => { eprintln!( "WARN cluster {}: node {} sent invalid message, ignoring", - self.cluster_name, - self.remote.id.show() + self.cluster_name, self.node_id, ); } Ok(Some(RemoteMessage::Quitting)) => { eprintln!( "INFO cluster {}: node {} is quitting, bye bye", - self.cluster_name, - self.remote.id.show() + self.cluster_name, self.node_id, ); self.close_connection(0, b"you said you'll quit so I quit") .await; @@ -366,9 +289,7 @@ impl ConnectionManager { } else { eprintln!( "DEBUG cluster {}: node {}: sent a remote message to local stream: {}", - self.cluster_name, - self.remote.id.show(), - line.0 + self.cluster_name, self.node_id, line.0 ); } } @@ -396,8 +317,7 @@ impl ConnectionManager { { eprintln!( "INFO cluster {}: connection with node {} failed: {err}", - self.cluster_name, - self.remote.id.show() + self.cluster_name, self.node_id, ); self.message_queue.push_back(message); self.close_connection( @@ -408,13 +328,15 @@ impl ConnectionManager { } else { eprintln!( "DEBUG cluster {}: node {}: sent a local message to remote: {}", - self.cluster_name, - self.remote.id.show(), - message.0 + self.cluster_name, self.node_id, message.0 ); } } None => { + eprintln!( + "DEBUG cluster {}: node {}: no connection, saving local message to send later: {}", + self.cluster_name, self.node_id, message.0 + ); if let Insert::Front = insert { self.message_queue.push_front(message); } else { @@ -430,6 +352,19 @@ impl ConnectionManager { connection.rx.close().await; connection.connection.close(VarInt::from_u32(code), reason); } + self.ask_connection().await; + } + + async fn ask_connection(&mut self) { + let (tx, rx) = mpsc::channel(1); + self.cancel_ask_connection = Some(tx); + if let Err(err) = self.ask_connection.send(rx).await { + eprintln!( + "ERROR cluster {}: node 
{}: quitting because our connection initiater quitted: {}", + self.cluster_name, self.node_id, err + ); + self.quit(); + } } fn quit(&mut self) { @@ -439,21 +374,21 @@ impl ConnectionManager { /// Compute the next wait Duration. /// We're multiplying the Duration by [`TIMEOUT_FACTOR`] and cap it to [`MAX_TIMEOUT`]. -fn next_delta(delta: Option) -> Duration { - match delta { - None => START_TIMEOUT, - Some(delta) => { - // Multiply timeout by TIMEOUT_FACTOR - let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); - // Cap to MAX_TIMEOUT - if delta > MAX_TIMEOUT { - MAX_TIMEOUT - } else { - delta - } - } - } -} +// fn next_delta(delta: Option) -> Duration { +// match delta { +// None => START_TIMEOUT, +// Some(delta) => { +// // Multiply timeout by TIMEOUT_FACTOR +// let delta = Duration::from_millis(((delta.as_millis() as f64) * TIMEOUT_FACTOR) as u64); +// // Cap to MAX_TIMEOUT +// if delta > MAX_TIMEOUT { +// MAX_TIMEOUT +// } else { +// delta +// } +// } +// } +// } /// All possible communication messages /// Set as an enum for forward compatibility @@ -472,26 +407,45 @@ pub enum RemoteMessage { async fn open_channels( connection: &Connection, ) -> Result<(base::Sender, base::Receiver), String> { - let output = connection.open_uni().await.map_err(|err| err.to_string())?; + eprintln!("DEBUG opening uni channel"); + let mut output = connection.open_uni().await.map_err(|err| err.to_string())?; - let input = connection + eprintln!("DEBUG sending 1 byte in uni channel"); + let res = output.write(&[0; 1]).await.map_err(|err| err.to_string())?; + eprintln!("DEBUG sent {res} byte in uni channel"); + output.flush().await.map_err(|err| err.to_string())?; + + eprintln!("DEBUG accepting uni channel"); + let mut input = connection .accept_uni() .await .map_err(|err| err.to_string())?; + eprintln!("DEBUG reading 1 byte from uni channel"); + input + .read(&mut [0; 1]) + .await + .map_err(|err| err.to_string())?; + + eprintln!("DEBUG creating 
remoc channels"); let (conn, mut tx, mut rx) = Connect::io_buffered(remoc::Cfg::default(), input, output, 1024) .await .map_err(|err| err.to_string())?; tokio::spawn(conn); + eprintln!("DEBUG sending version"); tx.send(RemoteMessage::Version(PROTOCOL_VERSION)) .await .map_err(|err| err.to_string())?; + eprintln!("DEBUG receiving version"); match rx.recv().await { // Good protocol version! - Ok(Some(RemoteMessage::Version(PROTOCOL_VERSION))) => Ok((tx, rx)), + Ok(Some(RemoteMessage::Version(PROTOCOL_VERSION))) => { + eprintln!("DEBUG version handshake complete!"); + Ok((tx, rx)) + } // Errors Ok(Some(RemoteMessage::Version(other))) => Err(format!( "incompatible version: {other}. We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version." @@ -508,3 +462,51 @@ async fn open_channels( async fn false_recv() -> Result, remoc::rch::base::RecvError> { Ok(None) } + +fn try_connect( + cluster_name: String, + remote: EndpointAddr, + endpoint: Arc, + connection_tx: mpsc::Sender, + mut order_start: mpsc::Receiver>, +) { + tokio::spawn(async move { + let node_id = remote.id.show(); + // Each time we receive and order + while let Some(mut order_stop) = order_start.recv().await { + // Until we have a connection or we're requested to stop + let mut keep_trying = true; + while keep_trying { + eprintln!("DEBUG cluster {cluster_name}: node {node_id}: trying to connect..."); + let connect = tokio::select! 
{ + conn = endpoint.connect(remote.clone(), ALPN[0]) => Some(conn), + _ = order_stop.recv() => None, + }; + if let Some(connect) = connect { + match connect { + Ok(connection) => { + eprintln!( + "DEBUG cluster {cluster_name}: node {node_id}: created connection" + ); + if let Err(err) = connection_tx.send(connection).await { + eprintln!( + "DEBUG cluster {cluster_name}: node {node_id}: quitting because ConnectionManager has quit: {err}" + ); + order_start.close(); + } + keep_trying = false; + } + Err(err) => { + eprintln!( + "WARN cluster {cluster_name}: node {node_id}: while trying to connect: {err}" + ); + } + } + } else { + // received stop order + keep_trying = false; + } + } + } + }); +} From da5c3afefbe0b669d618b71f0de455f52a44186d Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 27 Nov 2025 12:00:00 +0100 Subject: [PATCH 155/241] Provide a correct implementation of user-configured match line parsing --- plugins/reaction-plugin/src/lib.rs | 14 +- plugins/reaction-plugin/src/line.rs | 231 ++++++++++++++++++++++++++++ 2 files changed, 244 insertions(+), 1 deletion(-) create mode 100644 plugins/reaction-plugin/src/line.rs diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 1c875a7..32912a6 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -53,6 +53,17 @@ //! ERROR plugin test: Freeeee errrooooorrr //! ``` //! +//! ## Helpers +//! +//! Those helpers permits to easily maintain similar configuration interfaces accross plugins: +//! +//! - [`line::PatternLine`], to permit users to use templated lines (ie. "\ bad password"). +//! - [`time::parse_duration`] to parse durations (ie. "6h", "3 days"). +//! +//! Those helpers solve common issues for reaction plugins: +//! +//! - The [`shutdown`] module provides structures to ease the quitting process when having multiple tokio tasks. +//! //! ## Starting template //! //! 
#[derive(Debug, PartialEq, Eq)]
enum SendItem {
    /// Index into the match vector supplied to [`PatternLine::line`].
    Index(usize),
    /// A literal fragment of the template.
    Str(String),
}

impl SendItem {
    /// Lower bound of the rendered size of this item, used to presize output.
    fn min_size(&self) -> usize {
        match self {
            Self::Index(_) => 0,
            Self::Str(s) => s.len(),
        }
    }
}

/// Helper struct that permits to transform a template line with `<pattern>`
/// placeholders into an instantiated line from a match.
///
/// Useful when you permit the user to reconstruct lines from an action, like in
/// reaction's native actions and in the virtual plugin:
/// ```yaml
/// actions:
///   native:
///     cmd: ["iptables", "...", "<ip>"]
///
///   virtual:
///     type: virtual
///     options:
///       send: "<ip>: bad password on user <user>"
///       to: "my_virtual_stream"
/// ```
///
/// Usage example:
/// ```
/// # use reaction_plugin::line::PatternLine;
/// #
/// let template = "<ip>: bad password on user <user>".to_string();
/// let patterns = vec!["ip".to_string(), "user".to_string()];
/// let pattern_line = PatternLine::new(template, patterns);
///
/// assert_eq!(
///     pattern_line.line(vec!["1.2.3.4".to_string(), "root".to_string()]),
///     "1.2.3.4: bad password on user root".to_string(),
/// );
/// ```
///
/// You can find full examples in those plugins:
/// `reaction-plugin-virtual`,
/// `reaction-plugin-cluster`.
#[derive(Debug)]
pub struct PatternLine {
    /// Alternating literal fragments and match indices, in output order.
    line: Vec<SendItem>,
    /// Sum of the literal fragment lengths: minimum size of any rendered line.
    min_size: usize,
}

impl PatternLine {
    /// Construct a [`PatternLine`] from a template line and the list of patterns
    /// of the underlying [Filter](https://reaction.ppom.me/reference.html#filter).
    ///
    /// This list of patterns comes from [`PluginInfo::action_impl`].
    ///
    /// NOTE(review): only the FIRST occurrence of each `<pattern>` in the
    /// template is substituted — confirm whether repeated placeholders are
    /// meant to be supported.
    pub fn new(template: String, patterns: Vec<String>) -> Self {
        let line = Self::_from(patterns, Vec::from([SendItem::Str(template)]));
        Self {
            min_size: line.iter().map(SendItem::min_size).sum(),
            line,
        }
    }

    /// Recursively split literal fragments on the last remaining pattern.
    /// Pops patterns from the end so that `patterns.len()` *after* the pop is
    /// exactly the popped pattern's original index in the list.
    fn _from(mut patterns: Vec<String>, acc: Vec<SendItem>) -> Vec<SendItem> {
        match patterns.pop() {
            None => acc,
            Some(pattern) => {
                let enclosed_pattern = format!("<{pattern}>");
                let acc = acc
                    .into_iter()
                    .flat_map(|item| match &item {
                        // Already-resolved indices are never split again.
                        SendItem::Index(_) => vec![item],
                        SendItem::Str(str) => match str.find(&enclosed_pattern) {
                            Some(i) => {
                                let pattern_index = patterns.len();
                                let mut ret = vec![];

                                let (left, mid) = str.split_at(i);
                                if !left.is_empty() {
                                    ret.push(SendItem::Str(left.into()))
                                }

                                ret.push(SendItem::Index(pattern_index));

                                // Keep whatever follows the placeholder.
                                if mid.len() > enclosed_pattern.len() {
                                    let (_, right) = mid.split_at(enclosed_pattern.len());
                                    ret.push(SendItem::Str(right.into()))
                                }

                                ret
                            }
                            None => vec![item],
                        },
                    })
                    .collect();
                Self::_from(patterns, acc)
            }
        }
    }

    /// Render the template against one match: placeholders are replaced by the
    /// match element at the same index; out-of-range indices render as `""`.
    pub fn line(&self, match_: Vec<String>) -> String {
        let mut res = String::with_capacity(self.min_size);
        for item in &self.line {
            match item {
                SendItem::Index(i) => {
                    if let Some(element) = match_.get(*i) {
                        res.push_str(element);
                    }
                }
                SendItem::Str(str) => res.push_str(str),
            }
        }
        res
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn line_0_pattern() {
        let msg = "my message".to_string();
        let line = PatternLine::new(msg.clone(), vec![]);
        assert_eq!(line.line, vec![SendItem::Str(msg.clone())]);
        assert_eq!(line.min_size, msg.len());
        assert_eq!(line.line(vec![]), msg);
    }

    #[test]
    fn line_1_pattern() {
        let patterns = vec!["a".to_string(), "b".to_string()];
        let pl = PatternLine::new("x <b> y".to_string(), patterns);
        assert_eq!(
            pl.line,
            vec![
                SendItem::Str("x ".into()),
                SendItem::Index(1),
                SendItem::Str(" y".into()),
            ]
        );
        assert_eq!(pl.line(vec!["A".into(), "B".into()]), "x B y");
        // Out-of-range indices render as the empty string.
        assert_eq!(pl.line(vec![]), "x  y");
    }

    #[test]
    fn line_2_pattern() {
        let pl = PatternLine::new("<a> ; <b>".into(), vec!["a".into(), "b".into()]);
        // Match values are inserted literally, even if they look like patterns.
        for a in ["a", "b", "ab", "<a>", ""] {
            for b in ["a", "b", "ab", "<b>", ""] {
                assert_eq!(
                    pl.line(vec![a.to_string(), b.to_string()]),
                    format!("{a} ; {b}"),
                );
            }
        }
    }
}
e22429f92e391d27e942b62a35da222e22c4d5dc Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 27 Nov 2025 12:00:00 +0100 Subject: [PATCH 156/241] Add time to Exec messages, so that plugin actions don't have to calc this --- plugins/reaction-plugin/src/lib.rs | 1 + src/daemon/filter/mod.rs | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 32912a6..395db9d 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -294,6 +294,7 @@ pub struct ActionImpl { pub struct Exec { pub match_: Vec, pub result: rch::oneshot::Sender>, + pub time: DateTime, } /// The main loop for a plugin. diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 658f739..0223af5 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -247,7 +247,7 @@ impl FilterManager { // Execute the action early if let Order::Flush = order { - self.exec_now(action, m.clone()); + self.exec_now(action, m.clone(), t); } } } @@ -291,7 +291,7 @@ impl FilterManager { if exec_time <= now { if state.decrement_trigger(&m, t, false).await { - self.exec_now(action, m); + self.exec_now(action, m, t); } } else { let this = self.clone(); @@ -313,7 +313,7 @@ impl FilterManager { #[allow(clippy::unwrap_used)] // propagating panics is ok let mut state = this.state.lock().await; if state.decrement_trigger(&m, t, exiting).await { - exec_now(&this.exec_limit, this.shutdown, action, action_impl, m); + exec_now(&this.exec_limit, this.shutdown, action, action_impl, m, t); } } }); @@ -373,7 +373,7 @@ impl FilterManager { } } - fn exec_now(&self, action: &'static Action, m: Match) { + fn exec_now(&self, action: &'static Action, m: Match, t: Time) { let action_impl = self.action_plugins.get(&action.name).cloned(); exec_now( &self.exec_limit, @@ -381,6 +381,7 @@ impl FilterManager { action, action_impl, m, + t, ) } } @@ -391,6 +392,7 @@ fn exec_now( action: &'static Action, 
action_impl: Option, m: Match, + t: Time, ) { let exec_limit = exec_limit.clone(); tokio::spawn(async move { @@ -412,6 +414,7 @@ fn exec_now( .send(reaction_plugin::Exec { match_: m, result: response_tx, + time: t, }) .await { From 81fa49aa5c5b3d683b350f67e900557fe4f16994 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 27 Nov 2025 12:00:00 +0100 Subject: [PATCH 157/241] Add tests to virtual and use reaction-plugin's PatternLine Those tests permitted to find the bug that led me to create PatternLine Also add a serde option to deny extra keys in virtual action's config --- plugins/reaction-plugin-virtual/src/main.rs | 35 +-- plugins/reaction-plugin-virtual/src/tests.rs | 274 +++++++++++++++++++ 2 files changed, 287 insertions(+), 22 deletions(-) create mode 100644 plugins/reaction-plugin-virtual/src/tests.rs diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index eafb0f9..760ed54 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,11 +1,15 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Line, Local, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + line::PatternLine, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; +#[cfg(test)] +mod tests; + #[tokio::main] async fn main() { let plugin = Plugin::default(); @@ -128,6 +132,7 @@ impl VirtualStream { } #[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields)] struct ActionOptions { /// The line to send to the corresponding virtual stream, example: "ban \" send: String, @@ -157,11 +162,6 @@ impl VirtualActionInit { format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") })?; - let patterns = patterns - .into_iter() - .map(|pattern| format!("<{pattern}>")) - .collect(); - let (tx, rx) = mpsc::channel(1); Ok(( Self 
{ @@ -180,37 +180,28 @@ impl VirtualActionInit { struct VirtualAction { rx: mpsc::Receiver, - patterns: Vec, - send: String, + send: PatternLine, to: VirtualStream, } impl VirtualAction { fn from(action_init: VirtualActionInit, to: VirtualStream) -> VirtualAction { + let send = PatternLine::new(action_init.send, action_init.patterns); VirtualAction { rx: action_init.rx, - patterns: action_init.patterns, - send: action_init.send, + send, to, } } async fn serve(&mut self) { - while let Ok(Some(m)) = self.rx.recv().await { - let line = if m.match_.is_empty() { - self.send.clone() - } else { - (0..(m.match_.len())) - .zip(&self.patterns) - .fold(self.send.clone(), |acc, (i, pattern)| { - acc.replace(pattern, &m.match_[i]) - }) - }; - let result = match self.to.tx.send((line, Local::now())).await { + while let Ok(Some(exec)) = self.rx.recv().await { + let line = self.send.line(exec.match_); + let result = match self.to.tx.send((line, exec.time)).await { Ok(_) => Ok(()), Err(err) => Err(format!("{err}")), }; - m.result.send(result).unwrap(); + exec.result.send(result).unwrap(); } } } diff --git a/plugins/reaction-plugin-virtual/src/tests.rs b/plugins/reaction-plugin-virtual/src/tests.rs new file mode 100644 index 0000000..baea0de --- /dev/null +++ b/plugins/reaction-plugin-virtual/src/tests.rs @@ -0,0 +1,274 @@ +use reaction_plugin::{Exec, Local, PluginInfo, Value}; +use remoc::rch::oneshot; +use serde_json::json; + +use crate::Plugin; + +#[tokio::test] +async fn conf_stream() { + // Invalid type + assert!( + Plugin::default() + .stream_impl("stream".into(), "virtu".into(), Value::Null) + .await + .is_err() + ); + + assert!( + Plugin::default() + .stream_impl("stream".into(), "virtual".into(), Value::Null) + .await + .is_ok() + ); + eprintln!( + "err: {:?}", + Plugin::default() + .stream_impl("stream".into(), "virtual".into(), json!({}).into()) + .await + ); + assert!( + Plugin::default() + .stream_impl("stream".into(), "virtual".into(), json!({}).into()) + .await + 
.is_ok() + ); + + // Invalid conf: must be empty + assert!( + Plugin::default() + .stream_impl( + "stream".into(), + "virtual".into(), + json!({"key": "value" }).into() + ) + .await + .is_err() + ); +} + +#[tokio::test] +async fn conf_action() { + let valid_conf = json!({ "send": "message", "to": "stream" }); + + let missing_send_conf = json!({ "to": "stream" }); + let missing_to_conf = json!({ "send": "stream" }); + let extra_attr_conf = json!({ "send": "message", "send2": "message", "to": "stream" }); + + let patterns = Vec::default(); + + // Invalid type + assert!( + Plugin::default() + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtu".into(), + Value::Null, + patterns.clone() + ) + .await + .is_err() + ); + assert!( + Plugin::default() + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + valid_conf.into(), + patterns.clone() + ) + .await + .is_ok() + ); + + for conf in [missing_send_conf, missing_to_conf, extra_attr_conf] { + assert!( + Plugin::default() + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + conf.clone().into(), + patterns.clone() + ) + .await + .is_err(), + "conf: {:?}", + conf + ); + } +} + +#[tokio::test] +async fn conf_send() { + // Valid to: option + let mut plugin = Plugin::default(); + plugin + .stream_impl("stream".into(), "virtual".into(), Value::Null) + .await + .unwrap(); + plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + json!({ "send": "message", "to": "stream" }).into(), + Vec::default(), + ) + .await + .unwrap(); + assert!(plugin.finish_setup().await.is_ok()); + + // Invalid to: option + let mut plugin = Plugin::default(); + plugin + .stream_impl("stream".into(), "virtual".into(), Value::Null) + .await + .unwrap(); + plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + json!({ "send": "message", "to": "stream1" 
}).into(), + Vec::default(), + ) + .await + .unwrap(); + assert!(plugin.finish_setup().await.is_err()); +} + +// #[tokio::test] +// async fn conf_empty_stream() { +// let mut plugin = Plugin::default(); +// plugin +// .stream_impl("stream".into(), "virtual".into(), Value::Null) +// .await +// .unwrap(); +// assert!(plugin.finish_setup().await.is_err()); +// } + +#[tokio::test] +async fn run_simple() { + let mut plugin = Plugin::default(); + let mut stream = plugin + .stream_impl("stream".into(), "virtual".into(), Value::Null) + .await + .unwrap(); + assert!(!stream.standalone); + + let action = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + json!({ "send": "message ", "to": "stream" }).into(), + vec!["test".into()], + ) + .await + .unwrap(); + assert!(plugin.finish_setup().await.is_ok()); + + for m in ["test1", "test2", "test3", " a a a aa a a"] { + let (tx, rx) = oneshot::channel(); + let time = Local::now(); + assert!( + action + .tx + .send(Exec { + match_: vec![m.into()], + result: tx, + time, + }) + .await + .is_ok() + ); + assert_eq!( + stream.stream.recv().await.unwrap().unwrap(), + (format!("message {m}"), time), + ); + assert!(rx.await.is_ok()); + } +} + +#[tokio::test] +async fn run_two_actions() { + let mut plugin = Plugin::default(); + let mut stream = plugin + .stream_impl("stream".into(), "virtual".into(), Value::Null) + .await + .unwrap(); + assert!(!stream.standalone); + + let action1 = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + json!({ "send": "send ", "to": "stream" }).into(), + vec!["a".into(), "b".into()], + ) + .await + .unwrap(); + + let action2 = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + json!({ "send": " send", "to": "stream" }).into(), + vec!["a".into(), "b".into()], + ) + .await + .unwrap(); + + assert!(plugin.finish_setup().await.is_ok()); + + let time = Local::now(); 
+ + let (tx, rx1) = oneshot::channel(); + assert!( + action1 + .tx + .send(Exec { + match_: vec!["aa".into(), "bb".into()], + result: tx, + time, + }) + .await + .is_ok(), + ); + assert_eq!( + stream.stream.recv().await.unwrap().unwrap(), + ("send aa".into(), time), + ); + + let (tx, rx2) = oneshot::channel(); + assert!( + action2 + .tx + .send(Exec { + match_: vec!["aa".into(), "bb".into()], + result: tx, + time, + }) + .await + .is_ok(), + ); + assert_eq!( + stream.stream.recv().await.unwrap().unwrap(), + ("bb send".into(), time), + ); + + assert!(rx1.await.is_ok()); + assert!(rx2.await.is_ok()); +} From 83ac520d27f4368fe6a2f4734143529735781a0c Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 28 Nov 2025 12:00:00 +0100 Subject: [PATCH 158/241] Connections have ids, to fix simultaneous connections races --- .../reaction-plugin-cluster/src/cluster.rs | 5 +- .../reaction-plugin-cluster/src/connection.rs | 114 ++++++++++++------ 2 files changed, 78 insertions(+), 41 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index 9d76f25..ee31fb8 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -22,10 +22,11 @@ pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; pub type UtcLine = Arc<(String, DateTime)>; pub async fn bind(stream: &StreamInit) -> Result { + // FIXME higher timeouts and keep alive let mut transport = TransportConfig::default(); transport - .max_idle_timeout(Some(VarInt::from_u32(1).into())) - .keep_alive_interval(Some(Duration::from_millis(400))); + .max_idle_timeout(Some(VarInt::from_u32(2).into())) + .keep_alive_interval(Some(Duration::from_millis(200))); let mut builder = Endpoint::builder() .secret_key(stream.secret_key.clone()) diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index 13ad2c0..dc2a04c 100644 --- 
a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -1,4 +1,4 @@ -use std::{collections::VecDeque, sync::Arc}; +use std::{cmp::max, collections::VecDeque, sync::Arc}; use chrono::{DateTime, Local, TimeDelta, Utc}; use futures::FutureExt; @@ -6,6 +6,7 @@ use iroh::{ Endpoint, EndpointAddr, endpoint::{Connection, VarInt}, }; +use rand::random_range; use reaction_plugin::{Line, shutdown::ShutdownController}; use remoc::{Connect, rch::base}; use serde::{Deserialize, Serialize}; @@ -55,6 +56,8 @@ pub struct ConnectionManager { connection_rx: mpsc::Receiver, /// Our own connection (when we have one) connection: Option, + /// Last connexion ID, used to have a determinist way to choose between conflicting connections + last_connexion_id: u64, /// Max duration before we drop pending messages to a node we can't connect to. message_timeout: TimeDelta, @@ -98,6 +101,7 @@ impl ConnectionManager { ask_connection, cancel_ask_connection: None, connection_rx, + last_connexion_id: 0, message_timeout, message_rx, message_queue: VecDeque::default(), @@ -148,24 +152,12 @@ impl ConnectionManager { async fn handle_event(&mut self, event: Event) { match event { Event::ConnectionReceived(connection) => { - eprintln!( - "DEBUG cluster {}: node {}: received a connection", - self.cluster_name, self.node_id, - ); self.handle_connection(connection).await; } Event::LocalMessageReceived(utc_line) => { - eprintln!( - "DEBUG cluster {}: node {}: received a local message", - self.cluster_name, self.node_id, - ); self.handle_local_message(utc_line, Insert::Back).await; } Event::RemoteMessageReceived(message) => { - eprintln!( - "DEBUG cluster {}: node {}: received a remote message", - self.cluster_name, self.node_id, - ); self.handle_remote_message(message).await; } } @@ -201,7 +193,7 @@ impl ConnectionManager { /// Bootstrap a new Connection /// Returns true if we have a valid connection now - async fn handle_connection(&mut self, 
connection: Option) -> bool { + async fn handle_connection(&mut self, connection: Option) { match connection { None => { eprintln!( @@ -209,32 +201,38 @@ impl ConnectionManager { self.cluster_name, self.node_id, ); self.quit(); - false } Some(connection) => { if let Some(cancel) = self.cancel_ask_connection.take() { let _ = cancel.send(()).await; } - if self.connection.is_none() { - match open_channels(&connection).await { - Ok((tx, rx)) => { + match open_channels(&connection, self.last_connexion_id).await { + Ok((mut tx, rx, new_id)) => { + if self.connection.is_none() || self.last_connexion_id < new_id { + if let Some(mut conn) = self.connection.take() { + // FIXME should we do this in a separate task? + let _ = conn.tx.send(RemoteMessage::Quitting).await; + } self.connection = Some(OwnConnection { connection, tx, rx }); - true - } - Err(err) => { + self.last_connexion_id = new_id; + } else { eprintln!( - "ERROR cluster {}: trying to initialize connection to node {}: {err}", + "WARN cluster {}: node {}: ignoring incoming connection, as we already have a valid connection with it and our connection id is greater", self.cluster_name, self.node_id, ); - false + // FIXME should we do this in a separate task? 
+ let _ = tx.send(RemoteMessage::Quitting).await; + } + } + Err(err) => { + eprintln!( + "ERROR cluster {}: trying to initialize connection to node {}: {err}", + self.cluster_name, self.node_id, + ); + if self.connection.is_none() { + self.ask_connection().await; } } - } else { - eprintln!( - "WARN cluster {}: node {}: ignoring incoming connection, as we already have a valid connection with it", - self.cluster_name, self.node_id, - ); - true } } } @@ -247,8 +245,8 @@ impl ConnectionManager { match message { Err(err) => { eprintln!( - "WARN cluster {}: node {}: error receiving remote message: {err}", - self.cluster_name, self.node_id, + "WARN cluster {}: node {}: connection {}: error receiving remote message: {err}", + self.cluster_name, self.node_id, self.last_connexion_id ); self.close_connection(1, b"error receiving from your stream") .await; @@ -260,7 +258,7 @@ impl ConnectionManager { ); self.close_connection(1, b"you closed your stream").await; } - Ok(Some(RemoteMessage::Version(_))) => { + Ok(Some(RemoteMessage::Handshake(_, _))) => { eprintln!( "WARN cluster {}: node {} sent invalid message, ignoring", self.cluster_name, self.node_id, @@ -297,6 +295,10 @@ impl ConnectionManager { } async fn handle_local_message(&mut self, message: Option, insert: Insert) { + eprintln!( + "DEBUG cluster {}: node {}: received a local message", + self.cluster_name, self.node_id, + ); match message { None => { eprintln!( @@ -356,6 +358,7 @@ impl ConnectionManager { } async fn ask_connection(&mut self) { + // if self.node_id.starts_with('H') { let (tx, rx) = mpsc::channel(1); self.cancel_ask_connection = Some(tx); if let Err(err) = self.ask_connection.send(rx).await { @@ -365,6 +368,7 @@ impl ConnectionManager { ); self.quit(); } + // } } fn quit(&mut self) { @@ -395,7 +399,22 @@ impl ConnectionManager { #[derive(Serialize, Deserialize)] pub enum RemoteMessage { /// Must be the first message sent over, then should not be sent again - Version(u32), + /// + /// u32 is a version 
number. + /// The version must be the same, or compatibility is broken. + /// + /// u64 is a connection id. + /// Each node sends a number between n and n + 1_000_000. + /// Both nodes keep the highest of both numbers. + /// n starts at 0. + /// When recreating a new connection, n starts at _last connection number_ + 1. + /// This leaves room for a minimum of 18_446_744_073_709 connections between two nodes + /// For each "shared uptime", which I hope is sufficient :p + /// Will be reset as soon as there is an instant where both nodes are down + /// + /// This protocols permits to determine on both nodes which connection to drop + /// when we have one connection and receive another. + Handshake(u32, u64), /// A line to transmit to your stream Line((String, DateTime)), /// Announce the node is closing @@ -406,7 +425,15 @@ pub enum RemoteMessage { /// This way, there is no need to know if we created or accepted the connection. async fn open_channels( connection: &Connection, -) -> Result<(base::Sender, base::Receiver), String> { + last_connexion_id: u64, +) -> Result< + ( + base::Sender, + base::Receiver, + u64, + ), + String, +> { eprintln!("DEBUG opening uni channel"); let mut output = connection.open_uni().await.map_err(|err| err.to_string())?; @@ -434,20 +461,29 @@ async fn open_channels( tokio::spawn(conn); + let our_id = random_range(last_connexion_id + 1..last_connexion_id + 1_000_000); + eprintln!("DEBUG sending version"); - tx.send(RemoteMessage::Version(PROTOCOL_VERSION)) + tx.send(RemoteMessage::Handshake(PROTOCOL_VERSION, our_id)) .await .map_err(|err| err.to_string())?; eprintln!("DEBUG receiving version"); match rx.recv().await { // Good protocol version! - Ok(Some(RemoteMessage::Version(PROTOCOL_VERSION))) => { - eprintln!("DEBUG version handshake complete!"); - Ok((tx, rx)) + Ok(Some(RemoteMessage::Handshake(PROTOCOL_VERSION, their_id))) => { + // FIXME Do we need to test this? 
If so, this function should return their_id even when error in order to retry better next time + // if their_id < last_connexion_id + // ERROR + // else + let chosen_id = max(our_id, their_id); + eprintln!( + "DEBUG version handshake complete: last id: {last_connexion_id}, our id: {our_id}, their id: {their_id}: chosen id: {chosen_id}" + ); + Ok((tx, rx, chosen_id)) } // Errors - Ok(Some(RemoteMessage::Version(other))) => Err(format!( + Ok(Some(RemoteMessage::Handshake(other, _))) => Err(format!( "incompatible version: {other}. We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version." )), Ok(Some(RemoteMessage::Line(_))) => Err(format!( From b667b1a3735469631662a0405ff34979c84578b9 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 28 Nov 2025 12:00:00 +0100 Subject: [PATCH 159/241] Get rid of remoc for peer communications I couldn't understand why all communications timed out as soon as all messages are sent with a remoc RecvError::ChMux "multiplexer terminated". So I'm getting rid of remoc (for now at least) and sending/receiving raw data over the stream. 
For now it panics, after the handshake complete, which is already good after only one test O:D --- .../reaction-plugin-cluster/src/connection.rs | 253 ++++++++---------- 1 file changed, 118 insertions(+), 135 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index dc2a04c..44f4b46 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -1,16 +1,17 @@ -use std::{cmp::max, collections::VecDeque, sync::Arc}; +use std::{cmp::max, collections::VecDeque, io::Error as IoError, sync::Arc}; -use chrono::{DateTime, Local, TimeDelta, Utc}; +use chrono::{DateTime, Local, TimeDelta, TimeZone, Utc}; use futures::FutureExt; use iroh::{ Endpoint, EndpointAddr, - endpoint::{Connection, VarInt}, + endpoint::{Connection, RecvStream, SendStream, VarInt}, }; use rand::random_range; use reaction_plugin::{Line, shutdown::ShutdownController}; -use remoc::{Connect, rch::base}; -use serde::{Deserialize, Serialize}; -use tokio::{io::AsyncWriteExt, sync::mpsc}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter}, + sync::mpsc, +}; use crate::{ cluster::{ALPN, UtcLine}, @@ -23,9 +24,12 @@ use crate::{ const PROTOCOL_VERSION: u32 = 1; +type RemoteLine = (String, DateTime); +type MaybeRemoteLine = Result, IoError>; + enum Event { LocalMessageReceived(Option), - RemoteMessageReceived(Result, remoc::rch::base::RecvError>), + RemoteMessageReceived(MaybeRemoteLine), ConnectionReceived(Option), } @@ -36,8 +40,73 @@ enum Insert { struct OwnConnection { connection: Connection, - tx: base::Sender, - rx: base::Receiver, + + line_tx: BufWriter, + line_rx: BufReader, + + next_time: Option>, + next_len: Option, + next_line: Option>, +} + +impl OwnConnection { + fn new( + connection: Connection, + line_tx: BufWriter, + line_rx: BufReader, + ) -> Self { + Self { + connection, + line_tx, + line_rx, + next_time: None, + next_len: None, + next_line: 
None, + } + } + + /// Send a line to peer + async fn send_line(&mut self, line: RemoteLine) -> Result<(), std::io::Error> { + self.line_tx.write_i64(line.1.timestamp_micros()).await?; + self.line_tx.write_u32(line.0.len() as u32).await?; + self.line_tx.write_all(line.0.as_bytes()).await?; + self.line_tx.flush().await?; + Ok(()) + } + + /// Cancel-safe function that returns next line from peer + /// Returns None if we don't have all data yet. + async fn recv_line(&mut self) -> Result)>, std::io::Error> { + if self.next_time.is_none() { + self.next_time = Some(Utc.timestamp_nanos(self.line_rx.read_i64().await?)); + } + // Ok we have next_time.is_some() + if self.next_len.is_none() { + self.next_len = Some(self.line_rx.read_u32().await? as usize); + } + // Ok we have next_len.is_some() + + if self.next_line.is_none() { + self.next_line = Some(Vec::with_capacity(self.next_len.unwrap())); + } + let actual_len = self.next_line.as_ref().unwrap().len(); + self.line_rx + .read(&mut self.next_line.as_mut().unwrap()[actual_len..self.next_len.unwrap()]) + .await?; + // Ok we have next_line.is_some(), but don't know yet + + if self.next_line.as_ref().unwrap().len() == self.next_len.unwrap() { + // Ok we have a full line + self.next_len.take(); + let line = String::try_from(self.next_line.take().unwrap()).map_err(|err| { + std::io::Error::new(std::io::ErrorKind::InvalidData, err.to_string()) + })?; + Ok(Some((line, self.next_time.take().unwrap()))) + } else { + // Ok we don't have a full line, will be next time! + Ok(None) + } + } } /// Handle a remote node. @@ -120,7 +189,7 @@ impl ConnectionManager { let maybe_conn_rx = self .connection .as_mut() - .map(|conn| conn.rx.recv().boxed()) + .map(|conn| conn.recv_line().boxed()) // This Future will never be polled because of the if in select! 
// It still needs to be present because the branch will be evaluated // so we can't unwrap @@ -206,22 +275,16 @@ impl ConnectionManager { if let Some(cancel) = self.cancel_ask_connection.take() { let _ = cancel.send(()).await; } - match open_channels(&connection, self.last_connexion_id).await { - Ok((mut tx, rx, new_id)) => { + match open_channels(connection, self.last_connexion_id).await { + Ok((own_connection, new_id)) => { if self.connection.is_none() || self.last_connexion_id < new_id { - if let Some(mut conn) = self.connection.take() { - // FIXME should we do this in a separate task? - let _ = conn.tx.send(RemoteMessage::Quitting).await; - } - self.connection = Some(OwnConnection { connection, tx, rx }); + self.connection = Some(own_connection); self.last_connexion_id = new_id; } else { eprintln!( "WARN cluster {}: node {}: ignoring incoming connection, as we already have a valid connection with it and our connection id is greater", self.cluster_name, self.node_id, ); - // FIXME should we do this in a separate task? 
- let _ = tx.send(RemoteMessage::Quitting).await; } } Err(err) => { @@ -238,10 +301,7 @@ impl ConnectionManager { } } - async fn handle_remote_message( - &mut self, - message: Result, remoc::rch::base::RecvError>, - ) { + async fn handle_remote_message(&mut self, message: MaybeRemoteLine) { match message { Err(err) => { eprintln!( @@ -258,21 +318,7 @@ impl ConnectionManager { ); self.close_connection(1, b"you closed your stream").await; } - Ok(Some(RemoteMessage::Handshake(_, _))) => { - eprintln!( - "WARN cluster {}: node {} sent invalid message, ignoring", - self.cluster_name, self.node_id, - ); - } - Ok(Some(RemoteMessage::Quitting)) => { - eprintln!( - "INFO cluster {}: node {} is quitting, bye bye", - self.cluster_name, self.node_id, - ); - self.close_connection(0, b"you said you'll quit so I quit") - .await; - } - Ok(Some(RemoteMessage::Line(line))) => { + Ok(Some(line)) => { let local_time = line.1.with_timezone(&Local); if let Err(err) = self.own_cluster_tx.send((line.0.clone(), local_time)).await { eprintln!( @@ -310,11 +356,7 @@ impl ConnectionManager { Some(message) => match &mut self.connection { Some(connection) => { if let Err(err) = connection - .tx - .send(RemoteMessage::Line(( - message.0.clone(), - message.1.with_timezone(&Utc), - ))) + .send_line((message.0.clone(), message.1.with_timezone(&Utc))) .await { eprintln!( @@ -350,8 +392,7 @@ impl ConnectionManager { } async fn close_connection(&mut self, code: u32, reason: &[u8]) { - if let Some(mut connection) = self.connection.take() { - connection.rx.close().await; + if let Some(connection) = self.connection.take() { connection.connection.close(VarInt::from_u32(code), reason); } self.ask_connection().await; @@ -394,108 +435,50 @@ impl ConnectionManager { // } // } -/// All possible communication messages -/// Set as an enum for forward compatibility -#[derive(Serialize, Deserialize)] -pub enum RemoteMessage { - /// Must be the first message sent over, then should not be sent again - /// - /// u32 
is a version number. - /// The version must be the same, or compatibility is broken. - /// - /// u64 is a connection id. - /// Each node sends a number between n and n + 1_000_000. - /// Both nodes keep the highest of both numbers. - /// n starts at 0. - /// When recreating a new connection, n starts at _last connection number_ + 1. - /// This leaves room for a minimum of 18_446_744_073_709 connections between two nodes - /// For each "shared uptime", which I hope is sufficient :p - /// Will be reset as soon as there is an instant where both nodes are down - /// - /// This protocols permits to determine on both nodes which connection to drop - /// when we have one connection and receive another. - Handshake(u32, u64), - /// A line to transmit to your stream - Line((String, DateTime)), - /// Announce the node is closing - Quitting, -} - /// Open accept one stream and create one stream. /// This way, there is no need to know if we created or accepted the connection. async fn open_channels( - connection: &Connection, + connection: Connection, last_connexion_id: u64, -) -> Result< - ( - base::Sender, - base::Receiver, - u64, - ), - String, -> { +) -> Result<(OwnConnection, u64), IoError> { eprintln!("DEBUG opening uni channel"); - let mut output = connection.open_uni().await.map_err(|err| err.to_string())?; - - eprintln!("DEBUG sending 1 byte in uni channel"); - let res = output.write(&[0; 1]).await.map_err(|err| err.to_string())?; - eprintln!("DEBUG sent {res} byte in uni channel"); - output.flush().await.map_err(|err| err.to_string())?; - - eprintln!("DEBUG accepting uni channel"); - let mut input = connection - .accept_uni() - .await - .map_err(|err| err.to_string())?; - - eprintln!("DEBUG reading 1 byte from uni channel"); - input - .read(&mut [0; 1]) - .await - .map_err(|err| err.to_string())?; - - eprintln!("DEBUG creating remoc channels"); - let (conn, mut tx, mut rx) = Connect::io_buffered(remoc::Cfg::default(), input, output, 1024) - .await - .map_err(|err| 
err.to_string())?; - - tokio::spawn(conn); + let mut output = BufWriter::new(connection.open_uni().await?); let our_id = random_range(last_connexion_id + 1..last_connexion_id + 1_000_000); - eprintln!("DEBUG sending version"); - tx.send(RemoteMessage::Handshake(PROTOCOL_VERSION, our_id)) - .await - .map_err(|err| err.to_string())?; + eprintln!("DEBUG sending handshake in uni channel"); + output.write_u32(PROTOCOL_VERSION).await?; + output.write_u64(our_id).await?; + output.flush().await?; - eprintln!("DEBUG receiving version"); - match rx.recv().await { - // Good protocol version! - Ok(Some(RemoteMessage::Handshake(PROTOCOL_VERSION, their_id))) => { - // FIXME Do we need to test this? If so, this function should return their_id even when error in order to retry better next time - // if their_id < last_connexion_id - // ERROR - // else - let chosen_id = max(our_id, their_id); - eprintln!( - "DEBUG version handshake complete: last id: {last_connexion_id}, our id: {our_id}, their id: {their_id}: chosen id: {chosen_id}" - ); - Ok((tx, rx, chosen_id)) - } - // Errors - Ok(Some(RemoteMessage::Handshake(other, _))) => Err(format!( - "incompatible version: {other}. We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version." - )), - Ok(Some(RemoteMessage::Line(_))) => Err(format!( - "incorrect protocol message: remote did not send its protocol version." - )), - Ok(Some(RemoteMessage::Quitting)) => Err("remote unexpectedly quit".into()), - Ok(None) => Err("remote unexpectedly closed its channel".into()), - Err(err) => Err(format!("could not receive message: {err}")), + eprintln!("DEBUG accepting uni channel"); + let mut input = BufReader::new(connection.accept_uni().await?); + + eprintln!("DEBUG reading handshake from uni channel"); + let their_version = input.read_u32().await?; + + if their_version != PROTOCOL_VERSION { + return Err(IoError::new( + std::io::ErrorKind::InvalidData, + format!( + "incompatible version: {their_version}. 
We use {PROTOCOL_VERSION}. Consider upgrading the node with the older version." + ), + )); } + + let their_id = input.read_u64().await?; + // FIXME Do we need to test this? If so, this function should return their_id even when error in order to retry better next time + // if their_id < last_connexion_id + // ERROR + // else + let chosen_id = max(our_id, their_id); + eprintln!( + "DEBUG version handshake complete: last id: {last_connexion_id}, our id: {our_id}, their id: {their_id}: chosen id: {chosen_id}" + ); + Ok((OwnConnection::new(connection, output, input), chosen_id)) } -async fn false_recv() -> Result, remoc::rch::base::RecvError> { +async fn false_recv() -> MaybeRemoteLine { Ok(None) } From 1c423c5258aa1310ab086cd8ce51721d8a7c3666 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 5 Dec 2025 12:00:00 +0100 Subject: [PATCH 160/241] Fix panic caused by previous commit Connection still close as soon as they idle :/ --- .../reaction-plugin-cluster/src/connection.rs | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index 44f4b46..0c1ea7e 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -76,7 +76,7 @@ impl OwnConnection { /// Cancel-safe function that returns next line from peer /// Returns None if we don't have all data yet. - async fn recv_line(&mut self) -> Result)>, std::io::Error> { + async fn recv_line(&mut self) -> Result, std::io::Error> { if self.next_time.is_none() { self.next_time = Some(Utc.timestamp_nanos(self.line_rx.read_i64().await?)); } @@ -85,17 +85,28 @@ impl OwnConnection { self.next_len = Some(self.line_rx.read_u32().await? 
as usize); } // Ok we have next_len.is_some() + let next_len = self.next_len.clone().unwrap(); if self.next_line.is_none() { - self.next_line = Some(Vec::with_capacity(self.next_len.unwrap())); + self.next_line = Some(Vec::with_capacity(next_len)); } - let actual_len = self.next_line.as_ref().unwrap().len(); - self.line_rx - .read(&mut self.next_line.as_mut().unwrap()[actual_len..self.next_len.unwrap()]) - .await?; - // Ok we have next_line.is_some(), but don't know yet + // Ok we have next_line.is_some() + let next_line = self.next_line.as_mut().unwrap(); - if self.next_line.as_ref().unwrap().len() == self.next_len.unwrap() { + let actual_len = next_line.len(); + // Resize to wanted length + next_line.resize(next_len, 0); + + // Read bytes + let bytes_read = self + .line_rx + .read(&mut next_line[actual_len..next_len]) + .await?; + // Truncate possibly unread bytes + next_line.truncate(actual_len + bytes_read); + + // Let's test if we read all bytes + if next_line.len() == next_len { // Ok we have a full line self.next_len.take(); let line = String::try_from(self.next_line.take().unwrap()).map_err(|err| { From 79d85c1df1ae83fc9616530297faeb841751e032 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 6 Dec 2025 12:00:00 +0100 Subject: [PATCH 161/241] Reduce usage of chrono TODO: handle migrations --- Cargo.toml | 2 +- TODO | 1 + .../reaction-plugin-cluster/src/cluster.rs | 10 +- .../reaction-plugin-cluster/src/connection.rs | 32 +++-- plugins/reaction-plugin-cluster/src/main.rs | 4 +- plugins/reaction-plugin-virtual/src/tests.rs | 8 +- plugins/reaction-plugin/src/lib.rs | 6 +- plugins/reaction-plugin/src/time.rs | 36 +++-- src/concepts/action.rs | 6 +- src/concepts/filter.rs | 22 ++-- src/concepts/mod.rs | 117 ++++++++++++++++- src/concepts/pattern/ip/mod.rs | 13 +- src/daemon/filter/mod.rs | 24 ++-- src/daemon/filter/state.rs | 68 +++++----- src/daemon/filter/tests.rs | 82 ++++++------ src/daemon/mod.rs | 8 +- src/daemon/socket.rs | 8 +- src/daemon/stream.rs | 10 +- 
src/treedb/helpers.rs | 124 +++++++++--------- src/treedb/mod.rs | 61 +++++---- src/treedb/raw.rs | 88 +++++++------ 21 files changed, 439 insertions(+), 291 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 51c2a45..682e1e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ chrono = { version = "0.4.38", features = ["std", "clock", "serde"] } futures = "0.3.30" remoc = { version = "0.18.3" } serde = { version = "1.0.203", features = ["derive"] } -serde_json = "1.0.117" +serde_json = { version = "1.0.117", features = ["arbitrary_precision"] } tokio = { version = "1.40.0" } tokio-util = { version = "0.7.12" } reaction-plugin = { path = "plugins/reaction-plugin" } diff --git a/TODO b/TODO index 557559e..0cbaa1c 100644 --- a/TODO +++ b/TODO @@ -1,4 +1,5 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) +conf: merge filters plugins: pipe stderr too and wrap errors in logs plugins: provide treedb storage? 
omg (add an enum that's either remoc::rch::mpsc or tokio::mpsc) diff --git a/plugins/reaction-plugin-cluster/src/cluster.rs b/plugins/reaction-plugin-cluster/src/cluster.rs index ee31fb8..5992d18 100644 --- a/plugins/reaction-plugin-cluster/src/cluster.rs +++ b/plugins/reaction-plugin-cluster/src/cluster.rs @@ -2,10 +2,9 @@ use std::{ collections::BTreeMap, net::{SocketAddrV4, SocketAddrV6}, sync::Arc, - time::Duration, + time::{Duration, SystemTime, UNIX_EPOCH}, }; -use chrono::{DateTime, Local, Utc}; use futures::future::join_all; use iroh::{ Endpoint, PublicKey, @@ -19,7 +18,7 @@ use crate::{ActionInit, StreamInit, connection::ConnectionManager, endpoint::End pub const ALPN: [&[u8]; 1] = ["reaction_cluster_1".as_bytes()]; -pub type UtcLine = Arc<(String, DateTime)>; +pub type UtcLine = Arc<(String, Duration)>; pub async fn bind(stream: &StreamInit) -> Result { // FIXME higher timeouts and keep alive @@ -150,14 +149,15 @@ impl ActionInit { acc.replace(pattern, &m.match_[i]) }) }; - let now = Local::now(); + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); if self.self_ && let Err(err) = own_stream_tx.send((line.clone(), now.clone())).await { eprintln!("ERROR while queueing message to be sent to own cluster stream: {err}"); } - let line = Arc::new((line, now.to_utc())); + // TODO to_utc + let line = Arc::new((line, now)); for result in join_all(nodes_tx.iter().map(|tx| tx.send(line.clone()))).await { if let Err(err) = result { eprintln!("ERROR while queueing message to be sent to cluster nodes: {err}"); diff --git a/plugins/reaction-plugin-cluster/src/connection.rs b/plugins/reaction-plugin-cluster/src/connection.rs index 0c1ea7e..51c7697 100644 --- a/plugins/reaction-plugin-cluster/src/connection.rs +++ b/plugins/reaction-plugin-cluster/src/connection.rs @@ -1,6 +1,11 @@ -use std::{cmp::max, collections::VecDeque, io::Error as IoError, sync::Arc}; +use std::{ + cmp::max, + collections::VecDeque, + io::Error as IoError, + sync::Arc, + 
time::{Duration, SystemTime, UNIX_EPOCH}, +}; -use chrono::{DateTime, Local, TimeDelta, TimeZone, Utc}; use futures::FutureExt; use iroh::{ Endpoint, EndpointAddr, @@ -24,7 +29,7 @@ use crate::{ const PROTOCOL_VERSION: u32 = 1; -type RemoteLine = (String, DateTime); +type RemoteLine = (String, Duration); type MaybeRemoteLine = Result, IoError>; enum Event { @@ -44,7 +49,7 @@ struct OwnConnection { line_tx: BufWriter, line_rx: BufReader, - next_time: Option>, + next_time: Option, next_len: Option, next_line: Option>, } @@ -67,7 +72,7 @@ impl OwnConnection { /// Send a line to peer async fn send_line(&mut self, line: RemoteLine) -> Result<(), std::io::Error> { - self.line_tx.write_i64(line.1.timestamp_micros()).await?; + self.line_tx.write_u64(line.1.as_micros() as u64).await?; self.line_tx.write_u32(line.0.len() as u32).await?; self.line_tx.write_all(line.0.as_bytes()).await?; self.line_tx.flush().await?; @@ -78,7 +83,7 @@ impl OwnConnection { /// Returns None if we don't have all data yet. async fn recv_line(&mut self) -> Result, std::io::Error> { if self.next_time.is_none() { - self.next_time = Some(Utc.timestamp_nanos(self.line_rx.read_i64().await?)); + self.next_time = Some(Duration::from_micros(self.line_rx.read_u64().await?)); } // Ok we have next_time.is_some() if self.next_len.is_none() { @@ -140,7 +145,7 @@ pub struct ConnectionManager { last_connexion_id: u64, /// Max duration before we drop pending messages to a node we can't connect to. 
- message_timeout: TimeDelta, + message_timeout: Duration, /// Message we receive from actions message_rx: mpsc::Receiver, /// Our queue of messages to send @@ -160,7 +165,7 @@ impl ConnectionManager { endpoint: Arc, connection_tx: mpsc::Sender, connection_rx: mpsc::Receiver, - message_timeout: TimeDelta, + message_timeout: Duration, message_rx: mpsc::Receiver, own_cluster_tx: remoc::rch::mpsc::Sender, shutdown: ShutdownController, @@ -253,7 +258,7 @@ impl ConnectionManager { } async fn drop_timeout_messages(&mut self) { - let now = Utc::now(); + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let mut count = 0; while self .message_queue @@ -330,7 +335,8 @@ impl ConnectionManager { self.close_connection(1, b"you closed your stream").await; } Ok(Some(line)) => { - let local_time = line.1.with_timezone(&Local); + // TODO from_utc + let local_time = line.1; if let Err(err) = self.own_cluster_tx.send((line.0.clone(), local_time)).await { eprintln!( "ERROR cluster {}: could not send message to reaction stream: {err}", @@ -366,10 +372,8 @@ impl ConnectionManager { } Some(message) => match &mut self.connection { Some(connection) => { - if let Err(err) = connection - .send_line((message.0.clone(), message.1.with_timezone(&Utc))) - .await - { + // TODO to_utc + if let Err(err) = connection.send_line((message.0.clone(), message.1)).await { eprintln!( "INFO cluster {}: connection with node {} failed: {err}", self.cluster_name, self.node_id, diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index cd8875a..ed69cc5 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -1,9 +1,9 @@ use std::{ collections::{BTreeMap, BTreeSet}, net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + time::Duration, }; -use chrono::TimeDelta; use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; use reaction_plugin::{ ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, 
RemoteResult, StreamImpl, Value, @@ -74,7 +74,7 @@ struct StreamInit { bind_ipv4: Option, bind_ipv6: Option, secret_key: SecretKey, - message_timeout: TimeDelta, + message_timeout: Duration, nodes: BTreeMap, tx: mpsc::Sender, } diff --git a/plugins/reaction-plugin-virtual/src/tests.rs b/plugins/reaction-plugin-virtual/src/tests.rs index baea0de..9b3dec6 100644 --- a/plugins/reaction-plugin-virtual/src/tests.rs +++ b/plugins/reaction-plugin-virtual/src/tests.rs @@ -1,4 +1,6 @@ -use reaction_plugin::{Exec, Local, PluginInfo, Value}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use reaction_plugin::{Exec, PluginInfo, Value}; use remoc::rch::oneshot; use serde_json::json; @@ -178,7 +180,7 @@ async fn run_simple() { for m in ["test1", "test2", "test3", " a a a aa a a"] { let (tx, rx) = oneshot::channel(); - let time = Local::now(); + let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); assert!( action .tx @@ -233,7 +235,7 @@ async fn run_two_actions() { assert!(plugin.finish_setup().await.is_ok()); - let time = Local::now(); + let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); let (tx, rx1) = oneshot::channel(); assert!( diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 395db9d..73da253 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -101,9 +101,9 @@ use std::{ error::Error, fmt::Display, process::exit, + time::Duration, }; -pub use chrono::{DateTime, Local}; use remoc::{ Connect, rch, rtc::{self, Server}, @@ -258,7 +258,7 @@ impl Into for Value { } } -pub type Line = (String, DateTime); +pub type Line = (String, Duration); #[derive(Debug, Serialize, Deserialize)] pub struct StreamImpl { @@ -294,7 +294,7 @@ pub struct ActionImpl { pub struct Exec { pub match_: Vec, pub result: rch::oneshot::Sender>, - pub time: DateTime, + pub time: Duration, } /// The main loop for a plugin. 
diff --git a/plugins/reaction-plugin/src/time.rs b/plugins/reaction-plugin/src/time.rs index 9a2694b..d682429 100644 --- a/plugins/reaction-plugin/src/time.rs +++ b/plugins/reaction-plugin/src/time.rs @@ -1,7 +1,7 @@ -use chrono::TimeDelta; +use std::time::Duration; -/// Parses the &str argument as a TimeDelta -/// Returns Ok(TimeDelta) if successful, or Err(String). +/// Parses the &str argument as a Duration +/// Returns Ok(Duration) if successful, or Err(String). /// /// Format is defined as follows: ` ` /// - whitespace between the integer and unit is optional @@ -12,7 +12,7 @@ use chrono::TimeDelta; /// - `m` / `min` / `mins` / `minute` / `minutes` /// - `h` / `hour` / `hours` /// - `d` / `day` / `days` -pub fn parse_duration(d: &str) -> Result { +pub fn parse_duration(d: &str) -> Result { let d_trimmed = d.trim(); let chars = d_trimmed.as_bytes(); let mut value = 0; @@ -24,14 +24,14 @@ pub fn parse_duration(d: &str) -> Result { if i == 0 { return Err(format!("duration '{}' doesn't start with digits", d)); } - let ok_as = |func: fn(i64) -> TimeDelta| -> Result<_, String> { Ok(func(value as i64)) }; + let ok_as = |func: fn(u64) -> Duration| -> Result<_, String> { Ok(func(value as u64)) }; match d_trimmed[i..].trim() { - "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(TimeDelta::milliseconds), - "s" | "sec" | "secs" | "second" | "seconds" => ok_as(TimeDelta::seconds), - "m" | "min" | "mins" | "minute" | "minutes" => ok_as(TimeDelta::minutes), - "h" | "hour" | "hours" => ok_as(TimeDelta::hours), - "d" | "day" | "days" => ok_as(TimeDelta::days), + "ms" | "millis" | "millisecond" | "milliseconds" => ok_as(Duration::from_millis), + "s" | "sec" | "secs" | "second" | "seconds" => ok_as(Duration::from_secs), + "m" | "min" | "mins" | "minute" | "minutes" => ok_as(Duration::from_mins), + "h" | "hour" | "hours" => ok_as(Duration::from_hours), + "d" | "day" | "days" => ok_as(|d: u64| Duration::from_hours(d * 24)), unit => Err(format!( "unit {} not recognised. 
must be one of s/sec/seconds, m/min/minutes, h/hours, d/days", unit @@ -42,8 +42,6 @@ pub fn parse_duration(d: &str) -> Result { #[cfg(test)] mod tests { - use chrono::TimeDelta; - use super::*; #[test] @@ -53,13 +51,13 @@ mod tests { #[test] fn parse_duration_test() { - assert_eq!(parse_duration("1s"), Ok(TimeDelta::seconds(1))); - assert_eq!(parse_duration("12s"), Ok(TimeDelta::seconds(12))); - assert_eq!(parse_duration(" 12 secs "), Ok(TimeDelta::seconds(12))); - assert_eq!(parse_duration("2m"), Ok(TimeDelta::minutes(2))); - assert_eq!(parse_duration("6 hours"), Ok(TimeDelta::hours(6))); - assert_eq!(parse_duration("1d"), Ok(TimeDelta::days(1))); - assert_eq!(parse_duration("365d"), Ok(TimeDelta::days(365))); + assert_eq!(parse_duration("1s"), Ok(Duration::from_secs(1))); + assert_eq!(parse_duration("12s"), Ok(Duration::from_secs(12))); + assert_eq!(parse_duration(" 12 secs "), Ok(Duration::from_secs(12))); + assert_eq!(parse_duration("2m"), Ok(Duration::from_mins(2))); + assert_eq!(parse_duration("6 hours"), Ok(Duration::from_hours(6))); + assert_eq!(parse_duration("1d"), Ok(Duration::from_hours(1 * 24))); + assert_eq!(parse_duration("365d"), Ok(Duration::from_hours(365 * 24))); assert!(parse_duration("d 3").is_err()); assert!(parse_duration("d3").is_err()); diff --git a/src/concepts/action.rs b/src/concepts/action.rs index cbad05c..4d5f765 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -1,6 +1,4 @@ -use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc}; - -use chrono::TimeDelta; +use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc, time::Duration}; use reaction_plugin::time::parse_duration; use serde::{Deserialize, Serialize}; @@ -19,7 +17,7 @@ pub struct Action { #[serde(skip_serializing_if = "Option::is_none")] pub after: Option, #[serde(skip)] - pub after_duration: Option, + pub after_duration: Option, #[serde( rename = "onexit", diff --git a/src/concepts/filter.rs b/src/concepts/filter.rs index 
d3932c6..52fc41f 100644 --- a/src/concepts/filter.rs +++ b/src/concepts/filter.rs @@ -4,9 +4,9 @@ use std::{ fmt::Display, hash::Hash, sync::Arc, + time::Duration, }; -use chrono::TimeDelta; use reaction_plugin::time::parse_duration; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -30,7 +30,7 @@ pub enum Duplicate { #[serde(deny_unknown_fields)] pub struct Filter { #[serde(skip)] - pub longuest_action_duration: TimeDelta, + pub longuest_action_duration: Duration, #[serde(skip)] pub has_ip: bool, @@ -47,7 +47,7 @@ pub struct Filter { #[serde(rename = "retryperiod", skip_serializing_if = "Option::is_none")] pub retry_period: Option, #[serde(skip)] - pub retry_duration: Option, + pub retry_duration: Option, #[serde(default)] pub duplicate: Duplicate, @@ -187,10 +187,12 @@ impl Filter { .any(|action| action.ipv4only || action.ipv6only); self.longuest_action_duration = - self.actions.values().fold(TimeDelta::seconds(0), |acc, v| { - v.after_duration - .map_or(acc, |v| if v > acc { v } else { acc }) - }); + self.actions + .values() + .fold(Duration::from_secs(0), |acc, v| { + v.after_duration + .map_or(acc, |v| if v > acc { v } else { acc }) + }); Ok(()) } @@ -480,14 +482,14 @@ pub mod tests { let name = "name".to_string(); let empty_patterns = Patterns::new(); let minute_str = "1m".to_string(); - let minute = TimeDelta::seconds(60); - let two_minutes = TimeDelta::seconds(60 * 2); + let minute = Duration::from_secs(60); + let two_minutes = Duration::from_secs(60 * 2); let two_minutes_str = "2m".to_string(); // duration 0 filter = ok_filter(); filter.setup(&name, &name, &empty_patterns).unwrap(); - assert_eq!(filter.longuest_action_duration, TimeDelta::default()); + assert_eq!(filter.longuest_action_duration, Duration::default()); let minute_action = ok_action_with_after(minute_str.clone(), &minute_str); diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index 215be2b..761f9f1 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -5,7 +5,11 @@ mod 
pattern; mod plugin; mod stream; -use std::fmt::Debug; +use std::{ + fmt::{self, Debug}, + ops::{Add, Deref, Sub}, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; pub use action::Action; pub use config::{Config, Patterns}; @@ -16,9 +20,116 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; pub use stream::Stream; -use chrono::{DateTime, Local}; +/// [`std::time::Duration`] since [`std::time::UNIX_EPOCH`] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct Time(Duration); +impl Deref for Time { + type Target = Duration; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl From for Time { + fn from(value: Duration) -> Self { + Time(value) + } +} +impl Into for Time { + fn into(self) -> Duration { + self.0 + } +} +impl Add for Time { + type Output = Time; + fn add(self, rhs: Duration) -> Self::Output { + Time(self.0 + rhs) + } +} +impl Add", "to": "stream" }).into(), + vec!["a".into(), "b".into()], + ) + .await + .unwrap(); + + let action2 = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "virtual".into(), + json!({ "send": " send", "to": "stream" }).into(), + vec!["a".into(), "b".into()], + ) + .await + .unwrap(); + + assert!(plugin.finish_setup().await.is_ok()); + + let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + + assert!( + action1 + .tx + .send(Exec { + match_: vec!["aa".into(), "bb".into()], + time, + }) + .await + .is_ok(), + ); + assert_eq!( + stream.stream.recv().await.unwrap().unwrap(), + ("send aa".into(), time), + ); + + assert!( + action2 + .tx + .send(Exec { + match_: vec!["aa".into(), "bb".into()], + time, + }) + .await + .is_ok(), + ); + assert_eq!( + stream.stream.recv().await.unwrap().unwrap(), + ("bb send".into(), time), + ); +} diff --git a/shell.nix b/shell.nix new file mode 100644 index 0000000..e761b07 --- /dev/null +++ b/shell.nix @@ -0,0 +1,12 @@ +with import {}; +pkgs.mkShell { + name = "libipset"; + buildInputs = [ + ipset + clang + ]; + src = 
null; + shellHook = '' + export LIBCLANG_PATH="$(clang -print-file-name=libclang.so)" + ''; +} diff --git a/tests/test-conf/test-ipset.jsonnet b/tests/test-conf/test-ipset.jsonnet new file mode 100644 index 0000000..e749324 --- /dev/null +++ b/tests/test-conf/test-ipset.jsonnet @@ -0,0 +1,73 @@ +{ + patterns: { + ip: { + type: 'ip', + }, + all: { + regex: @".*", + }, + }, + + plugins: { + cluster: { + path: './target/debug/reaction-plugin-ipset', + check_root: false, + systemd_options: { + DynamicUser: ['false'], + }, + }, + }, + + streams: { + s0: { + cmd: ['bash', '-c', 'sleep 1; for i in $(seq 4); do echo 192.0.2.$i; sleep 0.1; done'], + filters: { + f0: { + regex: ['^$'], + actions: { + a0: { + type: 'ipset', + options: { + set: 'reactiontest', + pattern: 'ip', + version: 46, + chains: ['INPUT', 'FORWARD'], + }, + }, + b0: { + type: 'cluster_send', + options: { + send: 'NODE b0 ', + to: 's1', + }, + after: '1s', + }, + }, + }, + }, + }, + s1: { + type: 'cluster', + options: { + listen_port: 1234, + bind_ipv4: '127.0.0.1', + bind_ipv6: null, + message_timeout: '30s', + nodes: [{ + public_key: 'PUBLIC_KEY', + addresses: ['127.0.0.1:4321'], + }], + }, + filters: { + f1: { + regex: ['^$'], + actions: { + a1: { + cmd: ['sh', '-c', 'echo >>./log'], + }, + }, + }, + }, + }, + }, +} From 3ccd471b456f92f58a96234d2bb89483a954d4b9 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 28 Jan 2026 12:00:00 +0100 Subject: [PATCH 185/241] ipset: so much ~~waow~~ code --- plugins/reaction-plugin-ipset/src/action.rs | 258 ++++++++++++++++++++ plugins/reaction-plugin-ipset/src/ipset.rs | 194 +++++++++++---- plugins/reaction-plugin-ipset/src/main.rs | 148 ++++------- shell.nix | 1 + 4 files changed, 452 insertions(+), 149 deletions(-) create mode 100644 plugins/reaction-plugin-ipset/src/action.rs diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs new file mode 100644 index 0000000..f4277f4 --- /dev/null +++ 
b/plugins/reaction-plugin-ipset/src/action.rs @@ -0,0 +1,258 @@ +use std::u32; + +use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration}; +use remoc::rch::mpsc as remocMpsc; +use serde::{Deserialize, Serialize, de::Deserializer, de::Error}; + +use crate::ipset::{IpSet, Order, SetChain, SetOptions, Version}; +pub enum IpVersion { + V4, + V6, + V46, +} +impl<'de> Deserialize<'de> for IpVersion { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + match Option::::deserialize(deserializer)? { + None => Ok(IpVersion::V46), + Some(version) => match version { + 4 => Ok(IpVersion::V4), + 6 => Ok(IpVersion::V6), + 46 => Ok(IpVersion::V46), + _ => Err(D::Error::custom("version must be one of 4, 6 or 46")), + }, + } + } +} +impl Serialize for IpVersion { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_u8(match self { + IpVersion::V4 => 4, + IpVersion::V6 => 6, + IpVersion::V46 => 46, + }) + } +} + +// FIXME block configs that have different set options for the same name +// treat default values as none? + +#[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ActionOptions { + /// The set that should be used by this action + set: String, + /// The pattern name of the IP. + /// Defaults to "ip" + #[serde(default = "serde_ip")] + pub pattern: String, + /// The IP type. + /// Defaults to `46`. + /// If `4`: creates an IPv4 set with this name + /// If `6`: creates an IPv6 set with this name + /// If `46`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' + version: IpVersion, + /// Chains where the IP set should be inserted. 
+ /// Defaults to `["INPUT", "FORWARD"]` + #[serde(default = "serde_chains")] + chains: Vec, + // Optional timeout, letting linux/netfilter handle set removal instead of reaction + // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action + #[serde(skip_serializing_if = "Option::is_none")] + timeout: Option, + // Target that iptables should use when the IP is encountered. + // Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain + #[serde(default = "serde_drop")] + target: String, + // TODO add `add`//`remove` option +} + +fn serde_ip() -> String { + "ip".into() +} +fn serde_drop() -> String { + "DROP".into() +} +fn serde_chains() -> Vec { + vec!["INPUT".into(), "FORWARD".into()] +} + +pub struct Action { + ipset: IpSet, + rx: remocMpsc::Receiver, + shutdown: ShutdownToken, + ipv4_set: Option, + ipv6_set: Option, + // index of pattern ip in match vec + ip_index: usize, + chains: Vec, + timeout: Option, + target: String, +} + +impl Action { + pub fn new( + ipset: IpSet, + shutdown: ShutdownToken, + ip_index: usize, + rx: remocMpsc::Receiver, + options: ActionOptions, + ) -> Result { + Ok(Action { + ipset, + rx, + shutdown, + ip_index, + target: options.target, + chains: options.chains, + timeout: if let Some(timeout) = options.timeout { + let duration = parse_duration(&timeout) + .map_err(|err| format!("failed to parse timeout: {}", err))? 
+ .as_secs(); + if duration > u32::MAX as u64 { + return Err(format!( + "timeout is limited to {} seconds (approx {} days)", + u32::MAX, + 49_000 + )); + } + Some(duration as u32) + } else { + None + }, + ipv4_set: match options.version { + IpVersion::V4 => Some(options.set.clone()), + IpVersion::V6 => None, + IpVersion::V46 => Some(format!("{}v4", options.set)), + }, + ipv6_set: match options.version { + IpVersion::V4 => None, + IpVersion::V6 => Some(options.set), + IpVersion::V46 => Some(format!("{}v6", options.set)), + }, + }) + } + + pub async fn init(&mut self) -> Result<(), String> { + for (set, version) in [ + (&self.ipv4_set, Version::IPv4), + (&self.ipv6_set, Version::IPv6), + ] { + if let Some(set) = set { + println!("INFO creating {version} set {set}"); + // create set + self.ipset + .order(Order::CreateSet(SetOptions { + name: set.clone(), + version, + timeout: self.timeout, + })) + .await?; + // insert set in chains + for chain in &self.chains { + println!("INFO inserting {version} set {set} in chain {chain}"); + self.ipset + .order(Order::InsertSet(SetChain { + set: set.clone(), + chain: chain.clone(), + target: self.target.clone(), + })) + .await?; + } + } + } + Ok(()) + } + + pub async fn destroy(&mut self) { + for (set, version) in [ + (&self.ipv4_set, Version::IPv4), + (&self.ipv6_set, Version::IPv6), + ] { + if let Some(set) = set { + for chain in &self.chains { + println!("INFO removing {version} set {set} from chain {chain}"); + if let Err(err) = self + .ipset + .order(Order::RemoveSet(SetChain { + set: set.clone(), + chain: chain.clone(), + target: self.target.clone(), + })) + .await + { + println!( + "ERROR while removing {version} set {set} from chain {chain}: {err}" + ); + } + } + println!("INFO destroying {version} set {set}"); + if let Err(err) = self.ipset.order(Order::DestroySet(set.clone())).await { + println!("ERROR while destroying {version} set {set}: {err}"); + } + } + } + } + + pub async fn serve(mut self) { + loop { + let event = 
tokio::select! { + exec = self.rx.recv() => Some(exec), + _ = self.shutdown.wait() => None, + }; + match event { + // shutdown asked + None => break, + // channel closed + Some(Ok(None)) => break, + // error from channel + Some(Err(err)) => { + println!("ERROR {err}"); + break; + } + // ok + Some(Ok(Some(exec))) => { + if let Err(err) = self.handle_exec(exec).await { + println!("ERROR {err}"); + break; + } + } + } + } + self.shutdown.ask_shutdown(); + self.destroy().await; + } + + async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> { + // safeguard against Vec::remove's panic + if exec.match_.len() <= self.ip_index { + return Err(format!( + "match received from reaction is smaller than expected. looking for index {} but size is {}. this is a bug!", + self.ip_index, + exec.match_.len() + )); + } + let ip = exec.match_.remove(self.ip_index); + // select set + let set = match (&self.ipv4_set, &self.ipv6_set) { + (None, None) => return Err(format!("action is neither IPv4 nor IPv6, this is a bug!")), + (None, Some(set)) => set, + (Some(set), None) => set, + (Some(set4), Some(set6)) => { + if ip.contains(':') { + set6 + } else { + set4 + } + } + }; + // add ip to set + self.ipset.order(Order::Insert(set.clone(), ip)).await?; + Ok(()) + } +} diff --git a/plugins/reaction-plugin-ipset/src/ipset.rs b/plugins/reaction-plugin-ipset/src/ipset.rs index 6f841fb..263addb 100644 --- a/plugins/reaction-plugin-ipset/src/ipset.rs +++ b/plugins/reaction-plugin-ipset/src/ipset.rs @@ -1,27 +1,35 @@ -use std::{collections::BTreeMap, process::Command, thread}; +use std::{collections::BTreeMap, fmt::Display, net::Ipv4Addr, process::Command, thread}; use ipset::{ Session, - types::{Error, HashNet}, + types::{HashNet, NetDataType, Parse}, }; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; -#[derive(PartialEq, Eq)] +#[derive(PartialEq, Eq, Copy, Clone)] pub enum Version { IPv4, IPv6, } +impl Display for Version { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { + f.write_str(match self { + Version::IPv4 => "IPv4", + Version::IPv6 => "IPv6", + }) + } +} pub struct SetOptions { - name: String, - version: Version, - timeout: Option, + pub name: String, + pub version: Version, + pub timeout: Option, } pub struct SetChain { - set: String, - chain: String, - action: String, + pub set: String, + pub chain: String, + pub target: String, } pub enum Order { @@ -29,14 +37,63 @@ pub enum Order { DestroySet(String), InsertSet(SetChain), RemoveSet(SetChain), + Insert(String, String), + Remove(String, String), } -pub fn ipset_thread() -> Result, String> { - let (tx, rx) = mpsc::channel(1); - thread::spawn(move || IPsetManager::default().serve(rx)); - Ok(tx) +#[derive(Clone)] +pub struct IpSet { + tx: mpsc::Sender, } +impl Default for IpSet { + fn default() -> Self { + let (tx, rx) = mpsc::channel(1); + thread::spawn(move || IPsetManager::default().serve(rx)); + Self { tx } + } +} + +impl IpSet { + pub async fn order(&mut self, order: Order) -> Result<(), IpSetError> { + let (tx, rx) = oneshot::channel(); + self.tx + .send((order, tx)) + .await + .map_err(|err| IpSetError::Thread(format!("ipset thread has quit: {err}")))?; + rx.await + .map_err(|err| IpSetError::Thread(format!("ipset thread didn't respond: {err}")))? 
+ .map_err(IpSetError::IpSet) + } +} + +pub enum IpSetError { + Thread(String), + IpSet(String), +} +impl Display for IpSetError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + IpSetError::Thread(err) => err, + IpSetError::IpSet(err) => err, + } + ) + } +} +impl From for String { + fn from(value: IpSetError) -> Self { + match value { + IpSetError::Thread(err) => err, + IpSetError::IpSet(err) => err, + } + } +} + +pub type OrderType = (Order, oneshot::Sender>); + struct Set { session: Session, version: Version, @@ -48,12 +105,13 @@ struct IPsetManager { } impl IPsetManager { - fn serve(&mut self, mut rx: mpsc::Receiver) { + fn serve(mut self, mut rx: mpsc::Receiver) { loop { match rx.blocking_recv() { None => break, - Some(order) => { + Some((order, response)) => { let result = self.handle_order(order); + let _ = response.send(result); } } } @@ -88,35 +146,83 @@ impl IPsetManager { .map_err(|err| format!("Could not destroy set {set}: {err}"))?; } } - Order::InsertSet(SetChain { set, chain, action }) => { - let child = Command::new("iptables") - .args([ - "-w", - "-I", - &chain, - "-m", - "set", - "--match-set", - &set, - "src", - "-j", - &action, - ]) - .spawn() - .map_err(|err| { - format!("Could not insert ipset {set} in chain {chain}: {err}") - })?; - match child.wait() { - Ok(exit) => { - if !exit.success() { - return Err(format!("Could not insert ipset")); - } - } - Err(_) => todo!(), - }; - } - Order::RemoveSet(options) => {} + + Order::InsertSet(options) => insert_remove_set(options, true)?, + Order::RemoveSet(options) => insert_remove_set(options, false)?, + + Order::Insert(set, ip) => self.insert_remove_ip(set, ip, true)?, + Order::Remove(set, ip) => self.insert_remove_ip(set, ip, false)?, }; Ok(()) } + + fn insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), String> { + let session = self + .sessions + .get_mut(&set) + .ok_or(format!("No set managed by us with 
this name: {set}"))?; + + let mut net_data = NetDataType::new(Ipv4Addr::LOCALHOST, 0); + net_data + .parse(&ip) + .map_err(|err| format!("`{ip}` is not recognized as an IP: {err}"))?; + + if insert { + session.session.add(net_data, &[]) + } else { + session.session.del(net_data) + } + .map_err(|err| format!("Could not add `{ip}` to set {set}: {err}"))?; + + Ok(()) + } + + fn insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), String> { + let SetChain { + set, + chain, + target: action, + } = options; + + let command = match self + .sessions + .get(&set) + .ok_or(format!("No set managed by us with this name: {set}"))? + .version + { + Version::IPv4 => "iptables", + Version::IPv6 => "ip6tables", + }; + + let mut child = Command::new(command) + .args([ + "-w", + if insert { "-I" } else { "-D" }, + &chain, + "-m", + "set", + "--match-set", + &set, + "src", + "-j", + &action, + ]) + .spawn() + .map_err(|err| format!("Could not insert ipset {set} in chain {chain}: {err}"))?; + + let exit = child + .wait() + .map_err(|err| format!("Could not insert ipset: {err}"))?; + + if exit.success() { + Ok(()) + } else { + Err(format!( + "Could not insert ipset: exit code {}", + exit.code() + .map(|c| c.to_string()) + .unwrap_or_else(|| "".to_string()) + )) + } + } } diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 47b4e3d..74e494e 100644 --- a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -1,16 +1,20 @@ use std::collections::BTreeSet; use reaction_plugin::{ - ActionImpl, Exec, Hello, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, + ActionImpl, Hello, Manifest, PluginInfo, RemoteError, RemoteResult, StreamImpl, Value, + shutdown::ShutdownController, }; -use remoc::{rch::mpsc, rtc}; -use serde::{Deserialize, Serialize, de::Deserializer, de::Error}; +use remoc::rtc; -use crate::ipset::ipset_thread; +use crate::{ + action::{Action, ActionOptions}, + 
ipset::IpSet, +}; #[cfg(test)] mod tests; +mod action; mod ipset; #[tokio::main] @@ -21,8 +25,9 @@ async fn main() { #[derive(Default)] struct Plugin { - // ipset: Arc>, + ipset: IpSet, actions: Vec, + shutdown: ShutdownController, } impl PluginInfo for Plugin { @@ -56,7 +61,7 @@ impl PluginInfo for Plugin { return Err("This plugin can't handle other action types than ipset".into()); } - let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { + let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") })?; @@ -67,121 +72,54 @@ impl PluginInfo for Plugin { .next() .ok_or_else(|| { format!( - "No pattern with name {} in filter {stream_name}.{filter_name}", + "No pattern with name {} in filter {stream_name}.{filter_name}. Try setting the option `pattern` to your pattern name of type 'ip'", options.pattern ) })? .0; let (tx, rx) = remoc::rch::mpsc::channel(1); - self.actions.push(Action { - chains: options.chains, - ipv4_set: match options.version { - IpVersion::V4 => Some(options.set.clone()), - IpVersion::V6 => None, - IpVersion::V46 => Some(format!("{}v4", options.set)), - }, - ipv6_set: match options.version { - IpVersion::V4 => None, - IpVersion::V6 => Some(options.set), - IpVersion::V46 => Some(format!("{}v6", options.set)), - }, + self.actions.push(Action::new( + self.ipset.clone(), + self.shutdown.token(), ip_index, rx, - }); + options, + )?); Ok(ActionImpl { tx }) } async fn finish_setup(&mut self) -> RemoteResult<()> { - ipset_thread()?; + // Init all sets + let mut first_error = None; + for (i, action) in self.actions.iter_mut().enumerate() { + // Retain if error + if let Err(err) = action.init().await { + first_error = Some((i, RemoteError::Plugin(err))); + break; + } + } + // Destroy initialized sets if error + if let Some((i, err)) = first_error { + for action in self.actions.iter_mut().take(i + 1) { + let _ = 
action.destroy().await; + } + return Err(err); + } - todo!(); + // Launch all actions + while let Some(action) = self.actions.pop() { + tokio::spawn(async move { action.serve() }); + } + + self.actions = Default::default(); + Ok(()) } async fn close(self) -> RemoteResult<()> { - todo!(); - } -} - -enum IpVersion { - V4, - V6, - V46, -} -impl<'de> Deserialize<'de> for IpVersion { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - match Option::::deserialize(deserializer)? { - None => Ok(IpVersion::V46), - Some(version) => match version { - 4 => Ok(IpVersion::V4), - 6 => Ok(IpVersion::V6), - 46 => Ok(IpVersion::V46), - _ => Err(D::Error::custom("version must be one of 4, 6 or 46")), - }, - } - } -} -impl Serialize for IpVersion { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_u8(match self { - IpVersion::V4 => 4, - IpVersion::V6 => 6, - IpVersion::V46 => 46, - }) - } -} - -#[derive(Serialize, Deserialize)] -#[serde(deny_unknown_fields)] -struct ActionOptions { - /// The set that should be used by this action - set: String, - /// The pattern name of the IP. - /// Defaults to "ip" - #[serde(default = "serde_ip")] - pattern: String, - /// The IP type. - /// Defaults to `46`. - /// If `4`: creates an IPv4 set with this name - /// If `6`: creates an IPv6 set with this name - /// If `46`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' - version: IpVersion, - /// Chains where the IP set should be inserted. 
- /// Defaults to `["INPUT", "FORWARD"]` - #[serde(default = "serde_chains")] - chains: Vec, - // Optional timeout, letting linux/netfilter handle set removal instead of reaction - // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action -} - -fn serde_ip() -> String { - "ip".into() -} -fn serde_chains() -> Vec { - vec!["INPUT".into(), "FORWARD".into()] -} - -struct Action { - ipv4_set: Option, - ipv6_set: Option, - // index of pattern ip in match vec - ip_index: usize, - chains: Vec, - rx: mpsc::Receiver, -} - -impl Action { - async fn serve(&mut self) { - // while let Ok(Some(exec)) = self.rx.recv().await { - // let line = self.send.line(exec.match_); - // self.to.tx.send((line, exec.time)).await.unwrap(); - // } + self.shutdown.ask_shutdown(); + self.shutdown.wait_all_task_shutdown().await; + Ok(()) } } diff --git a/shell.nix b/shell.nix index e761b07..27dac77 100644 --- a/shell.nix +++ b/shell.nix @@ -1,3 +1,4 @@ +# This shell.nix for NixOS users is only needed when building reaction-plugin-ipset with import {}; pkgs.mkShell { name = "libipset"; From d6b6e9096bbc638f002e98406f46df2df2d0029b Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 29 Jan 2026 12:00:00 +0100 Subject: [PATCH 186/241] ipset: Add the add/del option, journal orders & deduplicate them --- plugins/reaction-plugin-ipset/src/action.rs | 91 +++++++++++++-------- plugins/reaction-plugin-ipset/src/ipset.rs | 65 +++++++++++---- tests/test-conf/test-ipset.jsonnet | 43 +++------- 3 files changed, 117 insertions(+), 82 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index f4277f4..e3b63b1 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -2,41 +2,56 @@ use std::u32; use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration}; use remoc::rch::mpsc as remocMpsc; -use serde::{Deserialize, Serialize, de::Deserializer, 
de::Error}; +use serde::{Deserialize, Serialize}; use crate::ipset::{IpSet, Order, SetChain, SetOptions, Version}; + +#[derive(Default, Serialize, Deserialize)] pub enum IpVersion { + #[serde(alias = "4")] V4, + #[serde(alias = "6")] V6, + #[serde(alias = "46")] + #[default] V46, } -impl<'de> Deserialize<'de> for IpVersion { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - match Option::::deserialize(deserializer)? { - None => Ok(IpVersion::V46), - Some(version) => match version { - 4 => Ok(IpVersion::V4), - 6 => Ok(IpVersion::V6), - 46 => Ok(IpVersion::V46), - _ => Err(D::Error::custom("version must be one of 4, 6 or 46")), - }, - } - } -} -impl Serialize for IpVersion { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_u8(match self { - IpVersion::V4 => 4, - IpVersion::V6 => 6, - IpVersion::V46 => 46, - }) - } +// impl<'de> Deserialize<'de> for IpVersion { +// fn deserialize(deserializer: D) -> Result +// where +// D: Deserializer<'de>, +// { +// match Option::::deserialize(deserializer)? 
{ +// None => Ok(IpVersion::V46), +// Some(version) => match version { +// 4 => Ok(IpVersion::V4), +// 6 => Ok(IpVersion::V6), +// 46 => Ok(IpVersion::V46), +// _ => Err(D::Error::custom("version must be one of 4, 6 or 46")), +// }, +// } +// } +// } +// impl Serialize for IpVersion { +// fn serialize(&self, serializer: S) -> Result +// where +// S: serde::Serializer, +// { +// serializer.serialize_u8(match self { +// IpVersion::V4 => 4, +// IpVersion::V6 => 6, +// IpVersion::V46 => 46, +// }) +// } +// } + +#[derive(Default, Serialize, Deserialize)] +pub enum AddDel { + #[default] + #[serde(alias = "add")] + Add, + #[serde(alias = "del")] + Del, } // FIXME block configs that have different set options for the same name @@ -56,6 +71,7 @@ pub struct ActionOptions { /// If `4`: creates an IPv4 set with this name /// If `6`: creates an IPv6 set with this name /// If `46`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' + #[serde(default)] version: IpVersion, /// Chains where the IP set should be inserted. 
/// Defaults to `["INPUT", "FORWARD"]` @@ -69,7 +85,9 @@ pub struct ActionOptions { // Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain #[serde(default = "serde_drop")] target: String, - // TODO add `add`//`remove` option + // Whether the action is to "add" or "del" the ip from the set + #[serde(default)] + action: AddDel, } fn serde_ip() -> String { @@ -93,6 +111,7 @@ pub struct Action { chains: Vec, timeout: Option, target: String, + action: AddDel, } impl Action { @@ -108,6 +127,7 @@ impl Action { rx, shutdown, ip_index, + action: options.action, target: options.target, chains: options.chains, timeout: if let Some(timeout) = options.timeout { @@ -144,7 +164,6 @@ impl Action { (&self.ipv6_set, Version::IPv6), ] { if let Some(set) = set { - println!("INFO creating {version} set {set}"); // create set self.ipset .order(Order::CreateSet(SetOptions { @@ -155,7 +174,6 @@ impl Action { .await?; // insert set in chains for chain in &self.chains { - println!("INFO inserting {version} set {set} in chain {chain}"); self.ipset .order(Order::InsertSet(SetChain { set: set.clone(), @@ -176,7 +194,6 @@ impl Action { ] { if let Some(set) = set { for chain in &self.chains { - println!("INFO removing {version} set {set} from chain {chain}"); if let Err(err) = self .ipset .order(Order::RemoveSet(SetChain { @@ -191,7 +208,6 @@ impl Action { ); } } - println!("INFO destroying {version} set {set}"); if let Err(err) = self.ipset.order(Order::DestroySet(set.clone())).await { println!("ERROR while destroying {version} set {set}: {err}"); } @@ -251,8 +267,13 @@ impl Action { } } }; - // add ip to set - self.ipset.order(Order::Insert(set.clone(), ip)).await?; + // add/remove ip to set + self.ipset + .order(match self.action { + AddDel::Add => Order::Add(set.clone(), ip), + AddDel::Del => Order::Del(set.clone(), ip), + }) + .await?; Ok(()) } } diff --git a/plugins/reaction-plugin-ipset/src/ipset.rs b/plugins/reaction-plugin-ipset/src/ipset.rs index 263addb..3c4ded9 
100644 --- a/plugins/reaction-plugin-ipset/src/ipset.rs +++ b/plugins/reaction-plugin-ipset/src/ipset.rs @@ -1,4 +1,10 @@ -use std::{collections::BTreeMap, fmt::Display, net::Ipv4Addr, process::Command, thread}; +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt::Display, + net::Ipv4Addr, + process::Command, + thread, +}; use ipset::{ Session, @@ -6,7 +12,7 @@ use ipset::{ }; use tokio::sync::{mpsc, oneshot}; -#[derive(PartialEq, Eq, Copy, Clone)] +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] pub enum Version { IPv4, IPv6, @@ -20,25 +26,28 @@ impl Display for Version { } } +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)] pub struct SetOptions { pub name: String, pub version: Version, pub timeout: Option, } +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)] pub struct SetChain { pub set: String, pub chain: String, pub target: String, } +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)] pub enum Order { CreateSet(SetOptions), DestroySet(String), InsertSet(SetChain), RemoveSet(SetChain), - Insert(String, String), - Remove(String, String), + Add(String, String), + Del(String, String), } #[derive(Clone)] @@ -101,7 +110,14 @@ struct Set { #[derive(Default)] struct IPsetManager { + // IPset sessions sessions: BTreeMap, + // All set-wise commands already run, to ignore duplicates. + // Duplicates are the natural cause of multiple actions + // (eg. add action and del action) manipulating the same sets. + // + // It's fine because no order should be run twice during the runtime of the plugin. + journal: BTreeSet, } impl IPsetManager { @@ -118,12 +134,24 @@ impl IPsetManager { } fn handle_order(&mut self, order: Order) -> Result<(), String> { + // We only journal set-wise orders + // We skip the order if already run. 
+ match order { + Order::Add(_, _) | Order::Del(_, _) => (), + _ => { + if !self.journal.insert(order.clone()) { + return Ok(()); + } + } + }; + match order { Order::CreateSet(SetOptions { name, version, timeout, }) => { + println!("INFO creating {version} set {name}"); let mut session: Session = Session::new(name.clone()); session .create(|builder| { @@ -140,6 +168,7 @@ impl IPsetManager { } Order::DestroySet(set) => { if let Some(mut session) = self.sessions.remove(&set) { + println!("INFO destroying {} set {set}", session.version); session .session .destroy() @@ -147,20 +176,19 @@ impl IPsetManager { } } - Order::InsertSet(options) => insert_remove_set(options, true)?, - Order::RemoveSet(options) => insert_remove_set(options, false)?, + Order::InsertSet(options) => self.insert_remove_set(options, true)?, + Order::RemoveSet(options) => self.insert_remove_set(options, false)?, - Order::Insert(set, ip) => self.insert_remove_ip(set, ip, true)?, - Order::Remove(set, ip) => self.insert_remove_ip(set, ip, false)?, + Order::Add(set, ip) => self.insert_remove_ip(set, ip, true)?, + Order::Del(set, ip) => self.insert_remove_ip(set, ip, false)?, }; Ok(()) } fn insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), String> { - let session = self - .sessions - .get_mut(&set) - .ok_or(format!("No set managed by us with this name: {set}"))?; + let session = self.sessions.get_mut(&set).ok_or(format!( + "No set handled by us with this name: {set}. This likely is a bug." + ))?; let mut net_data = NetDataType::new(Ipv4Addr::LOCALHOST, 0); net_data @@ -184,12 +212,19 @@ impl IPsetManager { target: action, } = options; - let command = match self + let version = self .sessions .get(&set) .ok_or(format!("No set managed by us with this name: {set}"))? 
- .version - { + .version; + + if insert { + println!("INFO inserting {version} set {set} in chain {chain}"); + } else { + println!("INFO removing {version} set {set} from chain {chain}"); + } + + let command = match version { Version::IPv4 => "iptables", Version::IPv6 => "ip6tables", }; diff --git a/tests/test-conf/test-ipset.jsonnet b/tests/test-conf/test-ipset.jsonnet index e749324..d8769aa 100644 --- a/tests/test-conf/test-ipset.jsonnet +++ b/tests/test-conf/test-ipset.jsonnet @@ -29,41 +29,20 @@ type: 'ipset', options: { set: 'reactiontest', - pattern: 'ip', - version: 46, - chains: ['INPUT', 'FORWARD'], + // pattern: 'ip', + // version: 46, + // chains: ['INPUT', 'FORWARD'], + // target: 'DROP', + // action: 'add', }, }, - b0: { - type: 'cluster_send', - options: { - send: 'NODE b0 ', - to: 's1', - }, - after: '1s', - }, - }, - }, - }, - }, - s1: { - type: 'cluster', - options: { - listen_port: 1234, - bind_ipv4: '127.0.0.1', - bind_ipv6: null, - message_timeout: '30s', - nodes: [{ - public_key: 'PUBLIC_KEY', - addresses: ['127.0.0.1:4321'], - }], - }, - filters: { - f1: { - regex: ['^$'], - actions: { a1: { - cmd: ['sh', '-c', 'echo >>./log'], + after: '10s', + type: 'ipset', + options: { + set: 'reactiontest', + action: 'del', + }, }, }, }, From 87a25cf04c7fb3d915899f2d7178782304e5a5d7 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 31 Jan 2026 12:00:00 +0100 Subject: [PATCH 187/241] Extract ipset options from action options so that it's globally merged Actions don't manage sets anymore. Set options are merged at each new action, then Sets are managed by themselves. 
--- plugins/reaction-plugin-ipset/src/action.rs | 307 ++++++++++++-------- plugins/reaction-plugin-ipset/src/ipset.rs | 31 +- plugins/reaction-plugin-ipset/src/main.rs | 59 ++-- tests/test-conf/test-ipset.jsonnet | 2 +- 4 files changed, 231 insertions(+), 168 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index e3b63b1..32f2a0a 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -1,12 +1,12 @@ -use std::u32; +use std::{fmt::Debug, u32}; use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration}; use remoc::rch::mpsc as remocMpsc; use serde::{Deserialize, Serialize}; -use crate::ipset::{IpSet, Order, SetChain, SetOptions, Version}; +use crate::ipset::{CreateSet, IpSet, Order, SetChain, Version}; -#[derive(Default, Serialize, Deserialize)] +#[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] pub enum IpVersion { #[serde(alias = "4")] V4, @@ -16,34 +16,19 @@ pub enum IpVersion { #[default] V46, } -// impl<'de> Deserialize<'de> for IpVersion { -// fn deserialize(deserializer: D) -> Result -// where -// D: Deserializer<'de>, -// { -// match Option::::deserialize(deserializer)? 
{ -// None => Ok(IpVersion::V46), -// Some(version) => match version { -// 4 => Ok(IpVersion::V4), -// 6 => Ok(IpVersion::V6), -// 46 => Ok(IpVersion::V46), -// _ => Err(D::Error::custom("version must be one of 4, 6 or 46")), -// }, -// } -// } -// } -// impl Serialize for IpVersion { -// fn serialize(&self, serializer: S) -> Result -// where -// S: serde::Serializer, -// { -// serializer.serialize_u8(match self { -// IpVersion::V4 => 4, -// IpVersion::V6 => 6, -// IpVersion::V46 => 46, -// }) -// } -// } +impl Debug for IpVersion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + IpVersion::V4 => "4", + IpVersion::V6 => "6", + IpVersion::V46 => "46", + } + ) + } +} #[derive(Default, Serialize, Deserialize)] pub enum AddDel { @@ -57,116 +42,148 @@ pub enum AddDel { // FIXME block configs that have different set options for the same name // treat default values as none? +/// User-facing action options #[derive(Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct ActionOptions { /// The set that should be used by this action - set: String, + pub set: String, /// The pattern name of the IP. /// Defaults to "ip" #[serde(default = "serde_ip")] pub pattern: String, - /// The IP type. - /// Defaults to `46`. - /// If `4`: creates an IPv4 set with this name - /// If `6`: creates an IPv6 set with this name - /// If `46`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' - #[serde(default)] - version: IpVersion, - /// Chains where the IP set should be inserted. 
- /// Defaults to `["INPUT", "FORWARD"]` - #[serde(default = "serde_chains")] - chains: Vec, - // Optional timeout, letting linux/netfilter handle set removal instead of reaction - // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action - #[serde(skip_serializing_if = "Option::is_none")] - timeout: Option, - // Target that iptables should use when the IP is encountered. - // Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain - #[serde(default = "serde_drop")] - target: String, + #[serde(skip)] + ip_index: usize, // Whether the action is to "add" or "del" the ip from the set #[serde(default)] action: AddDel, + + #[serde(flatten)] + pub set_options: SetOptions, } fn serde_ip() -> String { "ip".into() } -fn serde_drop() -> String { - "DROP".into() -} -fn serde_chains() -> Vec { - vec!["INPUT".into(), "FORWARD".into()] + +impl ActionOptions { + pub fn set_ip_index(&mut self, patterns: Vec) -> Result<(), ()> { + self.ip_index = patterns + .into_iter() + .enumerate() + .filter(|(_, name)| name == &self.pattern) + .next() + .ok_or(())? + .0; + Ok(()) + } } -pub struct Action { - ipset: IpSet, - rx: remocMpsc::Receiver, - shutdown: ShutdownToken, - ipv4_set: Option, - ipv6_set: Option, - // index of pattern ip in match vec - ip_index: usize, +/// Merged set options +#[derive(Default, Deserialize, Serialize)] +pub struct SetOptions { + /// The IP type. + /// Defaults to `46`. + /// If `4`: creates an IPv4 set with this name + /// If `6`: creates an IPv6 set with this name + /// If `46`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' + /// *Merged set-wise*. + #[serde(default)] + version: Option, + /// Chains where the IP set should be inserted. + /// Defaults to `["INPUT", "FORWARD"]` + /// *Merged set-wise*. 
+ #[serde(default)] + chains: Option>, + // Optional timeout, letting linux/netfilter handle set removal instead of reaction + // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action + /// *Merged set-wise*. + #[serde(skip_serializing_if = "Option::is_none")] + timeout: Option, + #[serde(skip)] + timeout_u32: Option, + // Target that iptables should use when the IP is encountered. + // Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain + /// *Merged set-wise*. + #[serde(default)] + target: Option, +} + +impl SetOptions { + pub fn merge(&mut self, options: &SetOptions) -> Result<(), String> { + // merge two Option and fail if there is conflict + fn inner_merge( + a: &mut Option, + b: &Option, + name: &str, + ) -> Result<(), String> { + match (&a, &b) { + (Some(aa), Some(bb)) => { + if aa != bb { + return Err(format!( + "Conflicting options for {name}: `{aa:?}` and `{bb:?}`" + )); + } + } + (None, Some(_)) => { + *a = b.clone(); + } + _ => (), + }; + Ok(()) + } + + inner_merge(&mut self.version, &options.version, "version")?; + inner_merge(&mut self.timeout, &options.timeout, "timeout")?; + inner_merge(&mut self.chains, &options.chains, "chains")?; + inner_merge(&mut self.target, &options.target, "target")?; + + if let Some(timeout) = &self.timeout { + let duration = parse_duration(timeout) + .map_err(|err| format!("failed to parse timeout: {}", err))? 
+ .as_secs(); + if duration > u32::MAX as u64 { + return Err(format!( + "timeout is limited to {} seconds (approx {} days)", + u32::MAX, + 49_000 + )); + } + self.timeout_u32 = Some(duration as u32); + } + + Ok(()) + } +} + +pub struct Set { + sets: SetNames, chains: Vec, timeout: Option, target: String, - action: AddDel, } -impl Action { - pub fn new( - ipset: IpSet, - shutdown: ShutdownToken, - ip_index: usize, - rx: remocMpsc::Receiver, - options: ActionOptions, - ) -> Result { - Ok(Action { - ipset, - rx, - shutdown, - ip_index, - action: options.action, - target: options.target, - chains: options.chains, - timeout: if let Some(timeout) = options.timeout { - let duration = parse_duration(&timeout) - .map_err(|err| format!("failed to parse timeout: {}", err))? - .as_secs(); - if duration > u32::MAX as u64 { - return Err(format!( - "timeout is limited to {} seconds (approx {} days)", - u32::MAX, - 49_000 - )); - } - Some(duration as u32) - } else { - None - }, - ipv4_set: match options.version { - IpVersion::V4 => Some(options.set.clone()), - IpVersion::V6 => None, - IpVersion::V46 => Some(format!("{}v4", options.set)), - }, - ipv6_set: match options.version { - IpVersion::V4 => None, - IpVersion::V6 => Some(options.set), - IpVersion::V46 => Some(format!("{}v6", options.set)), - }, - }) +impl Set { + pub fn from(name: String, options: SetOptions) -> Self { + Self { + sets: SetNames::new(name, options.version), + timeout: options.timeout_u32, + target: options.target.unwrap_or("DROP".into()), + chains: options + .chains + .unwrap_or(vec!["INPUT".into(), "FORWARD".into()]), + } } - pub async fn init(&mut self) -> Result<(), String> { + pub async fn init(&self, ipset: &mut IpSet) -> Result<(), String> { for (set, version) in [ - (&self.ipv4_set, Version::IPv4), - (&self.ipv6_set, Version::IPv6), + (&self.sets.ipv4, Version::IPv4), + (&self.sets.ipv6, Version::IPv6), ] { if let Some(set) = set { // create set - self.ipset - .order(Order::CreateSet(SetOptions { + 
ipset + .order(Order::CreateSet(CreateSet { name: set.clone(), version, timeout: self.timeout, @@ -174,7 +191,7 @@ impl Action { .await?; // insert set in chains for chain in &self.chains { - self.ipset + ipset .order(Order::InsertSet(SetChain { set: set.clone(), chain: chain.clone(), @@ -187,15 +204,14 @@ impl Action { Ok(()) } - pub async fn destroy(&mut self) { + pub async fn destroy(&self, ipset: &mut IpSet) { for (set, version) in [ - (&self.ipv4_set, Version::IPv4), - (&self.ipv6_set, Version::IPv6), + (&self.sets.ipv4, Version::IPv4), + (&self.sets.ipv6, Version::IPv6), ] { if let Some(set) = set { for chain in &self.chains { - if let Err(err) = self - .ipset + if let Err(err) = ipset .order(Order::RemoveSet(SetChain { set: set.clone(), chain: chain.clone(), @@ -208,12 +224,62 @@ impl Action { ); } } - if let Err(err) = self.ipset.order(Order::DestroySet(set.clone())).await { + if let Err(err) = ipset.order(Order::DestroySet(set.clone())).await { println!("ERROR while destroying {version} set {set}: {err}"); } } } } +} + +pub struct SetNames { + pub ipv4: Option, + pub ipv6: Option, +} + +impl SetNames { + pub fn new(name: String, version: Option) -> Self { + Self { + ipv4: match version { + Some(IpVersion::V4) => Some(name.clone()), + Some(IpVersion::V6) => None, + None | Some(IpVersion::V46) => Some(format!("{}v4", name)), + }, + ipv6: match version { + Some(IpVersion::V4) => None, + Some(IpVersion::V6) => Some(name), + None | Some(IpVersion::V46) => Some(format!("{}v6", name)), + }, + } + } +} + +pub struct Action { + ipset: IpSet, + rx: remocMpsc::Receiver, + shutdown: ShutdownToken, + sets: SetNames, + // index of pattern ip in match vec + ip_index: usize, + action: AddDel, +} + +impl Action { + pub fn new( + ipset: IpSet, + shutdown: ShutdownToken, + rx: remocMpsc::Receiver, + options: ActionOptions, + ) -> Result { + Ok(Action { + ipset, + rx, + shutdown, + sets: SetNames::new(options.set, options.set_options.version), + ip_index: options.ip_index, + 
action: options.action, + }) + } pub async fn serve(mut self) { loop { @@ -241,7 +307,6 @@ impl Action { } } self.shutdown.ask_shutdown(); - self.destroy().await; } async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> { @@ -255,7 +320,7 @@ impl Action { } let ip = exec.match_.remove(self.ip_index); // select set - let set = match (&self.ipv4_set, &self.ipv6_set) { + let set = match (&self.sets.ipv4, &self.sets.ipv6) { (None, None) => return Err(format!("action is neither IPv4 nor IPv6, this is a bug!")), (None, Some(set)) => set, (Some(set), None) => set, diff --git a/plugins/reaction-plugin-ipset/src/ipset.rs b/plugins/reaction-plugin-ipset/src/ipset.rs index 3c4ded9..46a2c97 100644 --- a/plugins/reaction-plugin-ipset/src/ipset.rs +++ b/plugins/reaction-plugin-ipset/src/ipset.rs @@ -1,10 +1,4 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt::Display, - net::Ipv4Addr, - process::Command, - thread, -}; +use std::{collections::BTreeMap, fmt::Display, net::Ipv4Addr, process::Command, thread}; use ipset::{ Session, @@ -27,7 +21,7 @@ impl Display for Version { } #[derive(PartialEq, Eq, PartialOrd, Ord, Clone)] -pub struct SetOptions { +pub struct CreateSet { pub name: String, pub version: Version, pub timeout: Option, @@ -42,7 +36,7 @@ pub struct SetChain { #[derive(PartialEq, Eq, PartialOrd, Ord, Clone)] pub enum Order { - CreateSet(SetOptions), + CreateSet(CreateSet), DestroySet(String), InsertSet(SetChain), RemoveSet(SetChain), @@ -112,12 +106,6 @@ struct Set { struct IPsetManager { // IPset sessions sessions: BTreeMap, - // All set-wise commands already run, to ignore duplicates. - // Duplicates are the natural cause of multiple actions - // (eg. add action and del action) manipulating the same sets. - // - // It's fine because no order should be run twice during the runtime of the plugin. 
- journal: BTreeSet, } impl IPsetManager { @@ -134,19 +122,8 @@ impl IPsetManager { } fn handle_order(&mut self, order: Order) -> Result<(), String> { - // We only journal set-wise orders - // We skip the order if already run. match order { - Order::Add(_, _) | Order::Del(_, _) => (), - _ => { - if !self.journal.insert(order.clone()) { - return Ok(()); - } - } - }; - - match order { - Order::CreateSet(SetOptions { + Order::CreateSet(CreateSet { name, version, timeout, diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 74e494e..7be98b0 100644 --- a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -1,13 +1,13 @@ -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ ActionImpl, Hello, Manifest, PluginInfo, RemoteError, RemoteResult, StreamImpl, Value, - shutdown::ShutdownController, + shutdown::{ShutdownController, ShutdownToken}, }; use remoc::rtc; use crate::{ - action::{Action, ActionOptions}, + action::{Action, ActionOptions, Set, SetOptions}, ipset::IpSet, }; @@ -26,6 +26,8 @@ async fn main() { #[derive(Default)] struct Plugin { ipset: IpSet, + set_options: BTreeMap, + sets: Vec, actions: Vec, shutdown: ShutdownController, } @@ -61,28 +63,28 @@ impl PluginInfo for Plugin { return Err("This plugin can't handle other action types than ipset".into()); } - let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { + let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") })?; - let ip_index = patterns - .into_iter() - .enumerate() - .filter(|(_, name)| name == &options.pattern) - .next() - .ok_or_else(|| { + options.set_ip_index(patterns).map_err(|_| format!( "No pattern with name {} in filter {stream_name}.{filter_name}. 
Try setting the option `pattern` to your pattern name of type 'ip'", - options.pattern + &options.pattern ) - })? - .0; + )?; + + // Merge option + self.set_options + .entry(options.set.clone()) + .or_default() + .merge(&options.set_options) + .map_err(|err| format!("ipset {}: {err}", options.set))?; let (tx, rx) = remoc::rch::mpsc::channel(1); self.actions.push(Action::new( self.ipset.clone(), self.shutdown.token(), - ip_index, rx, options, )?); @@ -92,28 +94,40 @@ impl PluginInfo for Plugin { async fn finish_setup(&mut self) -> RemoteResult<()> { // Init all sets + while let Some((name, options)) = self.set_options.pop_first() { + self.sets.push(Set::from(name, options)); + } + self.set_options = Default::default(); + let mut first_error = None; - for (i, action) in self.actions.iter_mut().enumerate() { + for (i, set) in self.sets.iter().enumerate() { // Retain if error - if let Err(err) = action.init().await { + if let Err(err) = set.init(&mut self.ipset).await { first_error = Some((i, RemoteError::Plugin(err))); break; } } // Destroy initialized sets if error if let Some((i, err)) = first_error { - for action in self.actions.iter_mut().take(i + 1) { - let _ = action.destroy().await; + for set in self.sets.iter().take(i + 1) { + let _ = set.destroy(&mut self.ipset).await; } return Err(err); } + // Launch a task that will destroy the sets on shutdown + tokio::spawn(destroy_sets_at_shutdown( + self.ipset.clone(), + std::mem::take(&mut self.sets), + self.shutdown.token(), + )); + // Launch all actions while let Some(action) = self.actions.pop() { tokio::spawn(async move { action.serve() }); } - self.actions = Default::default(); + Ok(()) } @@ -123,3 +137,10 @@ impl PluginInfo for Plugin { Ok(()) } } + +async fn destroy_sets_at_shutdown(mut ipset: IpSet, sets: Vec, shutdown: ShutdownToken) { + shutdown.wait().await; + for set in sets { + set.destroy(&mut ipset).await; + } +} diff --git a/tests/test-conf/test-ipset.jsonnet b/tests/test-conf/test-ipset.jsonnet index 
d8769aa..25e316d 100644 --- a/tests/test-conf/test-ipset.jsonnet +++ b/tests/test-conf/test-ipset.jsonnet @@ -9,7 +9,7 @@ }, plugins: { - cluster: { + ipset: { path: './target/debug/reaction-plugin-ipset', check_root: false, systemd_options: { From 41b8a661d25019ac99c8d013ce8044e09bf5de13 Mon Sep 17 00:00:00 2001 From: ppom Date: Sat, 31 Jan 2026 12:00:00 +0100 Subject: [PATCH 188/241] Print on stderr instead of stdout ...stdout is already taken by remoc ;) --- plugins/reaction-plugin-ipset/src/action.rs | 9 +++++---- plugins/reaction-plugin-ipset/src/ipset.rs | 8 ++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index 32f2a0a..610c8cf 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -219,13 +219,13 @@ impl Set { })) .await { - println!( + eprintln!( "ERROR while removing {version} set {set} from chain {chain}: {err}" ); } } if let Err(err) = ipset.order(Order::DestroySet(set.clone())).await { - println!("ERROR while destroying {version} set {set}: {err}"); + eprintln!("ERROR while destroying {version} set {set}: {err}"); } } } @@ -294,18 +294,19 @@ impl Action { Some(Ok(None)) => break, // error from channel Some(Err(err)) => { - println!("ERROR {err}"); + eprintln!("ERROR {err}"); break; } // ok Some(Ok(Some(exec))) => { if let Err(err) = self.handle_exec(exec).await { - println!("ERROR {err}"); + eprintln!("ERROR {err}"); break; } } } } + eprintln!("DEBUG Asking for shutdown"); self.shutdown.ask_shutdown(); } diff --git a/plugins/reaction-plugin-ipset/src/ipset.rs b/plugins/reaction-plugin-ipset/src/ipset.rs index 46a2c97..7b7ba88 100644 --- a/plugins/reaction-plugin-ipset/src/ipset.rs +++ b/plugins/reaction-plugin-ipset/src/ipset.rs @@ -128,7 +128,7 @@ impl IPsetManager { version, timeout, }) => { - println!("INFO creating {version} set {name}"); + eprintln!("INFO creating {version} set 
{name}"); let mut session: Session = Session::new(name.clone()); session .create(|builder| { @@ -145,7 +145,7 @@ impl IPsetManager { } Order::DestroySet(set) => { if let Some(mut session) = self.sessions.remove(&set) { - println!("INFO destroying {} set {set}", session.version); + eprintln!("INFO destroying {} set {set}", session.version); session .session .destroy() @@ -196,9 +196,9 @@ impl IPsetManager { .version; if insert { - println!("INFO inserting {version} set {set} in chain {chain}"); + eprintln!("INFO inserting {version} set {set} in chain {chain}"); } else { - println!("INFO removing {version} set {set} from chain {chain}"); + eprintln!("INFO removing {version} set {set} from chain {chain}"); } let command = match version { From 915e30801558bd650401b446d04d8823cc0b64ce Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 3 Feb 2026 12:00:00 +0100 Subject: [PATCH 189/241] Better plugin process management following stderr: task doesn't use shutdown anymore. It will simply follow stderr until the end of reaction, which at worst is a negligible memory leak if reaction continues running. I tried closing stderr on the plugin side with a raw syscall of the file descriptor, but reaction side doesn't see that stderr is closed. So I can't rely on that. Quitting when shutdown.wait() returns is too early, because that's also what makes reaction asking for the plugin to close(), and it can print important logs during its shutdown. The task ignoring all the shutdown part is dead simple and is most likely correct everytime. updated the wording of plugin-related errors. also replaced futures::select! { future, sleep() } with more concise and macro-less tokio::timeout. 
--- TODO | 1 - src/daemon/plugin/mod.rs | 92 ++++++++++++++++++++++++++++------------ src/daemon/utils.rs | 33 ++++++++------ 3 files changed, 86 insertions(+), 40 deletions(-) diff --git a/TODO b/TODO index a6aa816..2b6071f 100644 --- a/TODO +++ b/TODO @@ -1,4 +1,3 @@ Test what happens when a Filter's pattern Set changes (I think it's shitty) DB: add tests on stress testing (lines should always be in order) conf: merge filters -plugins: pipe stderr too and wrap errors in logs. fix errors? diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index aeaf1a9..b9e7b88 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -1,16 +1,18 @@ use std::{ collections::{BTreeMap, BTreeSet}, + io, ops::{Deref, DerefMut}, + process::ExitStatus, time::Duration, }; -use futures::{FutureExt, StreamExt, future::join_all}; +use futures::{StreamExt, future::join_all}; use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; use remoc::Connect; use serde_json::Value; use tokio::{ process::{Child, ChildStderr}, - time::sleep, + time::timeout, }; use tracing::{error, info}; @@ -53,8 +55,8 @@ impl PluginManager { { let stderr = child.stderr.take().unwrap(); - let shutdown = shutdown.clone(); - tokio::spawn(async move { handle_stderr(stderr, plugin.name.clone(), shutdown).await }); + // let shutdown = shutdown.clone(); + tokio::spawn(async move { handle_stderr(stderr, plugin.name.clone()).await }); } let stdin = child.stdin.take().unwrap(); @@ -114,36 +116,61 @@ impl PluginManager { const PLUGIN_STOP_GRACE_TIME: u64 = 15; // wait either for the child process to exit on its own or for the shutdown signal - futures::select! { - _ = self.child.wait().fuse() => { - error!("plugin {} exited: its command returned.", self.plugin.name); + tokio::select! { + status = self.child.wait() => { + self.print_exit(status); return; } - _ = self.shutdown.wait().fuse() => {} + _ = self.shutdown.wait() => {} } - futures::select! 
{ - _ = self.plugin_info.close().fuse() => { - return; - }, - _ = sleep(Duration::from_secs(PLUGIN_STOP_GRACE_TIME)).fuse() => { - error!("plugin {} did not respond to close request in time, killing", self.plugin.name) - }, + match timeout( + Duration::from_secs(PLUGIN_STOP_GRACE_TIME), + self.plugin_info.close(), + ) + .await + { + Ok(Ok(())) => (), + Ok(Err(err)) => { + error!("plugin {}: {err}", self.plugin.name); + } + // got timeout + Err(_) => { + error!( + "plugin {} did not respond to close request in time, killing", + self.plugin.name + ); + kill_child(self.child, format!("plugin {}", self.plugin.name), 5).await; + } } + } - kill_child(self.child, format!("plugin {}", self.plugin.name), 5).await; + fn print_exit(&self, status: io::Result) { + match status { + Ok(status) => match status.code() { + Some(code) => { + error!( + "plugin {}: process exited. exit code: {}", + self.plugin.name, code + ); + } + None => { + error!("plugin {}: process exited.", self.plugin.name); + } + }, + Err(err) => { + error!("plugin {}: process exited. {err}", self.plugin.name); + } + } } } -async fn handle_stderr(stderr: ChildStderr, plugin_name: String, shutdown: ShutdownToken) { +async fn handle_stderr(stderr: ChildStderr, plugin_name: String) { + // read lines until shutdown let lines = reader_to_stream(stderr); tokio::pin!(lines); loop { - let event = tokio::select! { - line = lines.next() => line, - _ = shutdown.wait() => None, - }; - match event { + match lines.next().await { Some(Ok(line)) => { // sad: I can't factorize this because the tracing::event! macro // requires its log level to be a constant. 
@@ -161,7 +188,7 @@ async fn handle_stderr(stderr: ChildStderr, plugin_name: String, shutdown: Shutd } } Some(Err(err)) => { - error!("while trying to read plugin {plugin_name} stderr: {err}"); + tracing::error!("while trying to read plugin {plugin_name} stderr: {err}"); break; } None => break, @@ -240,7 +267,12 @@ impl Plugins { plugin .stream_impl(stream_name.clone(), stream_type, config.into()) .await - .map_err(|err| format!("plugin error while initializing stream {stream_name}: {err}")) + .map_err(|err| { + format!( + "plugin error while initializing stream {stream_name}: {}", + err.to_string().replace('\n', " ") + ) + }) } pub async fn init_action_impl( @@ -274,7 +306,9 @@ impl Plugins { patterns, ) .await - .map_err(|err| format!("plugin error while initializing action {stream_name}.{filter_name}.{action_name}: {err}")) + .map_err(|err| format!("plugin error while initializing action {stream_name}.{filter_name}.{action_name}: {}", + err.to_string().replace('\n', " ") + )) } pub async fn finish_setup(&mut self) -> Result<(), String> { @@ -289,7 +323,13 @@ impl Plugins { .into_iter() .zip(self.plugins.values()) .try_for_each(|(result, plugin_manager)| { - result.map_err(|err| format!("plugin {} error: {err}", plugin_manager.plugin.name)) + result.map_err(|err| { + format!( + "plugin {}: {}", + plugin_manager.plugin.name, + err.to_string().replace('\n', " ") + ) + }) }) } diff --git a/src/daemon/utils.rs b/src/daemon/utils.rs index 0ef3986..4ec94ae 100644 --- a/src/daemon/utils.rs +++ b/src/daemon/utils.rs @@ -1,7 +1,6 @@ use std::time::Duration; -use futures::FutureExt; -use tokio::{process::Child, time::sleep}; +use tokio::{process::Child, time::timeout}; use tracing::{error, warn}; pub async fn kill_child(mut child: Child, context: String, grace_time_sec: u64) { @@ -15,14 +14,11 @@ pub async fn kill_child(mut child: Child, context: String, grace_time_sec: u64) // but we still need to reclaim it with Child::wait let _ = nix::sys::signal::kill(pid, 
nix::sys::signal::SIGTERM); - futures::select! { - _ = child.wait().fuse() => { - return; - }, - _ = sleep(Duration::from_secs(grace_time_sec)).fuse() => {}, + if let Ok(_) = timeout(Duration::from_secs(grace_time_sec), child.wait()).await { + return; } } else { - warn!("could not get PID of child process for {}", context); + warn!("could not get PID of child process for {context}"); // still try to use tokio API to kill and reclaim the child process } @@ -33,12 +29,23 @@ pub async fn kill_child(mut child: Child, context: String, grace_time_sec: u64) // as before, the only expected error is that the child process already terminated // but we still need to reclaim it if that's the case. + warn!("process for {context} didn't exit {grace_time_sec}s after SIGTERM, sending SIGKILL"); let _ = child.start_kill(); - futures::select! { - _ = child.wait().fuse() => {} - _ = sleep(Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC)).fuse() => { - error!("child process of {} did not terminate", context); - } + match timeout( + Duration::from_secs(STREAM_PROCESS_KILL_WAIT_TIMEOUT_SEC), + child.wait(), + ) + .await + { + Ok(_) => {} + Err(_) => match child.id() { + Some(id) => { + error!("child process of {context} did not terminate. 
PID: {id}"); + } + None => { + error!("child process of {context} did not terminate"); + } + }, } } From 47947d18db14d937d2b8032eba8df07940f2823b Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 4 Feb 2026 12:00:00 +0100 Subject: [PATCH 190/241] ipset: Fix dumb bug due to future not awaited The edge case is so dumb, cargo is supposed to tell me about this >< Just learnt that Python never warns about this btw: https://trio.readthedocs.io/en/v0.9.0/tutorial.html#warning-don-t-forget-that-await --- plugins/reaction-plugin-ipset/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 7be98b0..7ae9d04 100644 --- a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -124,7 +124,7 @@ impl PluginInfo for Plugin { // Launch all actions while let Some(action) = self.actions.pop() { - tokio::spawn(async move { action.serve() }); + tokio::spawn(async move { action.serve().await }); } self.actions = Default::default(); From a83c93ac9db26e675bbdb9fdf8ad9bc29d5195e5 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 5 Feb 2026 12:00:00 +0100 Subject: [PATCH 191/241] ipset: do not shutdown plugin when one action errors --- plugins/reaction-plugin-ipset/src/action.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index 610c8cf..bdb8153 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -306,8 +306,8 @@ impl Action { } } } - eprintln!("DEBUG Asking for shutdown"); - self.shutdown.ask_shutdown(); + // eprintln!("DEBUG Asking for shutdown"); + // self.shutdown.ask_shutdown(); } async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> { From 79ec6d279f7af6715fccad23b88b8981f954eab8 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Feb 2026 12:00:00 +0100 
Subject: [PATCH 192/241] ipset: Manual e2e test does pass --- tests/test-conf/test-ipset.jsonnet | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test-conf/test-ipset.jsonnet b/tests/test-conf/test-ipset.jsonnet index 25e316d..12ce0c8 100644 --- a/tests/test-conf/test-ipset.jsonnet +++ b/tests/test-conf/test-ipset.jsonnet @@ -20,7 +20,7 @@ streams: { s0: { - cmd: ['bash', '-c', 'sleep 1; for i in $(seq 4); do echo 192.0.2.$i; sleep 0.1; done'], + cmd: ['bash', '-c', 'sleep 1; for i in $(seq 4); do echo 192.0.2.$i; echo 2001:db8:$i:a31b::$i; sleep 0.1; done; sleep 3'], filters: { f0: { regex: ['^$'], @@ -37,7 +37,7 @@ }, }, a1: { - after: '10s', + after: '2s', type: 'ipset', options: { set: 'reactiontest', From 516e6956aa43315b4fc507b7051541c73 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Feb 2026 12:00:00 +0100 Subject: [PATCH 193/241] fix double-printing of square brackets in plugin logs --- src/daemon/filter/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index 332b650..a18b401 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -410,7 +410,7 @@ fn exec_now( match action_impl { Some(action_impl) => { info!( - "{action}: run {} [{:?}]", + "{action}: run {} {:?}", action.action_type.clone().unwrap_or_default(), &m, ); From 885e6b7ef79fa4404444d32af74aff71cdcc23cb Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Feb 2026 12:00:00 +0100 Subject: [PATCH 194/241] ipset: re-arrange spacing in logs --- plugins/reaction-plugin-ipset/src/ipset.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/ipset.rs b/plugins/reaction-plugin-ipset/src/ipset.rs index 7b7ba88..b2fcb78 100644 --- a/plugins/reaction-plugin-ipset/src/ipset.rs +++ b/plugins/reaction-plugin-ipset/src/ipset.rs @@ -128,7 +128,7 @@ version, timeout, }) => { - eprintln!("INFO creating {version} set 
{name}"); + eprintln!("INFO creating {version} set {name}"); let mut session: Session = Session::new(name.clone()); session .create(|builder| { @@ -198,7 +198,7 @@ impl IPsetManager { if insert { eprintln!("INFO inserting {version} set {set} in chain {chain}"); } else { - eprintln!("INFO removing {version} set {set} from chain {chain}"); + eprintln!("INFO removing {version} set {set} from chain {chain}"); } let command = match version { From c39fdecef3354db6fcd2df3edc0998eb26a89e98 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 6 Feb 2026 12:00:00 +0100 Subject: [PATCH 195/241] ipset: add tests for configuration --- plugins/reaction-plugin-ipset/src/action.rs | 87 +++- plugins/reaction-plugin-ipset/src/main.rs | 2 + plugins/reaction-plugin-ipset/src/tests.rs | 420 +++++++++----------- 3 files changed, 273 insertions(+), 236 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index bdb8153..96fd352 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -8,11 +8,11 @@ use crate::ipset::{CreateSet, IpSet, Order, SetChain, Version}; #[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] pub enum IpVersion { - #[serde(alias = "4")] + #[serde(rename = "4")] V4, - #[serde(alias = "6")] + #[serde(rename = "6")] V6, - #[serde(alias = "46")] + #[serde(rename = "46")] #[default] V46, } @@ -80,7 +80,7 @@ impl ActionOptions { } /// Merged set options -#[derive(Default, Deserialize, Serialize)] +#[derive(Default, Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] pub struct SetOptions { /// The IP type. /// Defaults to `46`. @@ -97,6 +97,7 @@ pub struct SetOptions { chains: Option>, // Optional timeout, letting linux/netfilter handle set removal instead of reaction // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action + // Same syntax as after and retryperiod in reaction. /// *Merged set-wise*. 
#[serde(skip_serializing_if = "Option::is_none")] timeout: Option, @@ -343,3 +344,81 @@ impl Action { Ok(()) } } + +#[cfg(test)] +mod tests { + use crate::action::{IpVersion, SetOptions}; + + #[tokio::test] + async fn set_options_merge() { + let s1 = SetOptions { + version: None, + chains: None, + timeout: None, + timeout_u32: None, + target: None, + }; + let s2 = SetOptions { + version: Some(IpVersion::V4), + chains: Some(vec!["INPUT".into()]), + timeout: Some("3h".into()), + timeout_u32: Some(3 * 3600), + target: Some("DROP".into()), + }; + assert_ne!(s1, s2); + assert_eq!(s1, SetOptions::default()); + + { + // s2 can be merged in s1 + let mut s1 = s1.clone(); + assert!(s1.merge(&s2).is_ok()); + assert_eq!(s1, s2); + } + + { + // s1 can be merged in s2 + let mut s2 = s2.clone(); + assert!(s2.merge(&s1).is_ok()); + } + + { + // s1 can be merged in itself + let mut s3 = s1.clone(); + assert!(s3.merge(&s1).is_ok()); + assert_eq!(s1, s3); + } + + { + // s2 can be merged in itself + let mut s3 = s2.clone(); + assert!(s3.merge(&s2).is_ok()); + assert_eq!(s2, s3); + } + + for s3 in [ + SetOptions { + version: Some(IpVersion::V6), + ..Default::default() + }, + SetOptions { + chains: Some(vec!["damn".into()]), + ..Default::default() + }, + SetOptions { + timeout: Some("30min".into()), + ..Default::default() + }, + SetOptions { + target: Some("log-refuse".into()), + ..Default::default() + }, + ] { + // none with some is ok + assert!(s3.clone().merge(&s1).is_ok(), "s3: {s3:?}"); + assert!(s1.clone().merge(&s3).is_ok(), "s3: {s3:?}"); + // different some is ko + assert!(s3.clone().merge(&s2).is_err(), "s3: {s3:?}"); + assert!(s2.clone().merge(&s3).is_err(), "s3: {s3:?}"); + } + } +} diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 7ae9d04..9982b94 100644 --- a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -138,6 +138,8 @@ impl PluginInfo for Plugin { } } +impl Plugin {} + 
async fn destroy_sets_at_shutdown(mut ipset: IpSet, sets: Vec, shutdown: ShutdownToken) { shutdown.wait().await; for set in sets { diff --git a/plugins/reaction-plugin-ipset/src/tests.rs b/plugins/reaction-plugin-ipset/src/tests.rs index b08f416..397df39 100644 --- a/plugins/reaction-plugin-ipset/src/tests.rs +++ b/plugins/reaction-plugin-ipset/src/tests.rs @@ -1,268 +1,224 @@ -use std::time::{SystemTime, UNIX_EPOCH}; - -use reaction_plugin::{Exec, PluginInfo, Value}; +use reaction_plugin::{PluginInfo, Value}; use serde_json::json; use crate::Plugin; #[tokio::test] async fn conf_stream() { - // Invalid type + // No stream is supported by ipset assert!( Plugin::default() - .stream_impl("stream".into(), "virtu".into(), Value::Null) - .await - .is_err() - ); - - assert!( - Plugin::default() - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .is_ok() - ); - eprintln!( - "err: {:?}", - Plugin::default() - .stream_impl("stream".into(), "virtual".into(), json!({}).into()) - .await - ); - assert!( - Plugin::default() - .stream_impl("stream".into(), "virtual".into(), json!({}).into()) - .await - .is_ok() - ); - - // Invalid conf: must be empty - assert!( - Plugin::default() - .stream_impl( - "stream".into(), - "virtual".into(), - json!({"key": "value" }).into() - ) + .stream_impl("stream".into(), "ipset".into(), Value::Null) .await .is_err() ); } #[tokio::test] -async fn conf_action() { - let valid_conf = json!({ "send": "message", "to": "stream" }); +async fn conf_action_standalone() { + let p = vec!["name".into(), "ip".into(), "ip2".into()]; + let p_noip = vec!["name".into(), "ip2".into()]; - let missing_send_conf = json!({ "to": "stream" }); - let missing_to_conf = json!({ "send": "stream" }); - let extra_attr_conf = json!({ "send": "message", "send2": "message", "to": "stream" }); - - let patterns = Vec::default(); - - // Invalid type - assert!( - Plugin::default() + for (is_ok, conf, patterns) in [ + // minimal set + (true, json!({ "set": "test" 
}), &p), + // missing set key + (false, json!({}), &p), + (false, json!({ "version": 4 }), &p), + // unknown key + (false, json!({ "set": "test", "unknown": "yes" }), &p), + (false, json!({ "set": "test", "ip_index": 1 }), &p), + (false, json!({ "set": "test", "timeout_u32": 1 }), &p), + // pattern // + (true, json!({ "set": "test" }), &p), + (true, json!({ "set": "test", "pattern": "ip" }), &p), + (true, json!({ "set": "test", "pattern": "ip2" }), &p), + (true, json!({ "set": "test", "pattern": "ip2" }), &p_noip), + // unknown pattern "ip" + (false, json!({ "set": "test" }), &p_noip), + (false, json!({ "set": "test", "pattern": "ip" }), &p_noip), + // unknown pattern + (false, json!({ "set": "test", "pattern": "unknown" }), &p), + (false, json!({ "set": "test", "pattern": "uwu" }), &p_noip), + // bad type + (false, json!({ "set": "test", "pattern": 0 }), &p_noip), + (false, json!({ "set": "test", "pattern": true }), &p_noip), + // action // + (true, json!({ "set": "test", "action": "add" }), &p), + (true, json!({ "set": "test", "action": "del" }), &p), + // unknown action + (false, json!({ "set": "test", "action": "create" }), &p), + (false, json!({ "set": "test", "action": "insert" }), &p), + (false, json!({ "set": "test", "action": "delete" }), &p), + (false, json!({ "set": "test", "action": "destroy" }), &p), + // bad type + (false, json!({ "set": "test", "action": true }), &p), + (false, json!({ "set": "test", "action": 1 }), &p), + // ip version // + // ok + (true, json!({ "set": "test", "version": "4" }), &p), + (true, json!({ "set": "test", "version": "6" }), &p), + (true, json!({ "set": "test", "version": "46" }), &p), + // unknown version + (false, json!({ "set": "test", "version": 4 }), &p), + (false, json!({ "set": "test", "version": 6 }), &p), + (false, json!({ "set": "test", "version": 46 }), &p), + (false, json!({ "set": "test", "version": "5" }), &p), + // bad type + (false, json!({ "set": "test", "version": true }), &p), + // chains // + // 
everything is fine really + (true, json!({ "set": "test", "chains": [] }), &p), + (true, json!({ "set": "test", "chains": ["INPUT"] }), &p), + (true, json!({ "set": "test", "chains": ["FORWARD"] }), &p), + ( + true, + json!({ "set": "test", "chains": ["custom_chain"] }), + &p, + ), + ( + true, + json!({ "set": "test", "chains": ["INPUT", "FORWARD"] }), + &p, + ), + ( + true, + json!({ + "set": "test", + "chains": ["INPUT", "FORWARD", "my_iptables_chain"] + }), + &p, + ), + // timeout // + (true, json!({ "set": "test", "timeout": "1m" }), &p), + (true, json!({ "set": "test", "timeout": "3 days" }), &p), + // bad + (false, json!({ "set": "test", "timeout": "3 dayz"}), &p), + (false, json!({ "set": "test", "timeout": 12 }), &p), + // target // + // anything is fine too + (true, json!({ "set": "test", "target": "DROP" }), &p), + (true, json!({ "set": "test", "target": "ACCEPT" }), &p), + (true, json!({ "set": "test", "target": "RETURN" }), &p), + (true, json!({ "set": "test", "target": "custom_chain" }), &p), + // bad + (false, json!({ "set": "test", "target": 11 }), &p), + (false, json!({ "set": "test", "target": ["DROP"] }), &p), + ] { + let res = Plugin::default() .action_impl( "stream".into(), "filter".into(), "action".into(), - "virtu".into(), - Value::Null, - patterns.clone() + "ipset".into(), + conf.clone().into(), + patterns.clone(), ) - .await - .is_err() - ); - assert!( - Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - valid_conf.into(), - patterns.clone() - ) - .await - .is_ok() - ); + .await; - for conf in [missing_send_conf, missing_to_conf, extra_attr_conf] { assert!( - Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - conf.clone().into(), - patterns.clone() - ) - .await - .is_err(), - "conf: {:?}", - conf + res.is_ok() == is_ok, + "conf: {:?}, must be ok: {is_ok}, result: {:?}", + conf, + // empty Result::Ok because ActionImpl 
is not Debug + res.map(|_| ()) ); } } +// TODO #[tokio::test] -async fn conf_send() { - // Valid to: option +async fn conf_action_merge() { let mut plugin = Plugin::default(); - plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - plugin + + // First set is ok + let res = plugin .action_impl( "stream".into(), "filter".into(), "action".into(), - "virtual".into(), - json!({ "send": "message", "to": "stream" }).into(), - Vec::default(), - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_ok()); - - // Invalid to: option - let mut plugin = Plugin::default(); - plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "message", "to": "stream1" }).into(), - Vec::default(), - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_err()); -} - -// Let's allow empty streams for now. -// I guess it can be useful to have manual only actions. 
-// -// #[tokio::test] -// async fn conf_empty_stream() { -// let mut plugin = Plugin::default(); -// plugin -// .stream_impl("stream".into(), "virtual".into(), Value::Null) -// .await -// .unwrap(); -// assert!(plugin.finish_setup().await.is_err()); -// } - -#[tokio::test] -async fn run_simple() { - let mut plugin = Plugin::default(); - let mut stream = plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - assert!(!stream.standalone); - - let action = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "message ", "to": "stream" }).into(), - vec!["test".into()], - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_ok()); - - for m in ["test1", "test2", "test3", " a a a aa a a"] { - let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - assert!( - action - .tx - .send(Exec { - match_: vec![m.into()], - time, - }) - .await - .is_ok() - ); - assert_eq!( - stream.stream.recv().await.unwrap().unwrap(), - (format!("message {m}"), time), - ); - } -} - -#[tokio::test] -async fn run_two_actions() { - let mut plugin = Plugin::default(); - let mut stream = plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - assert!(!stream.standalone); - - let action1 = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "send ", "to": "stream" }).into(), - vec!["a".into(), "b".into()], - ) - .await - .unwrap(); - - let action2 = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": " send", "to": "stream" }).into(), - vec!["a".into(), "b".into()], - ) - .await - .unwrap(); - - assert!(plugin.finish_setup().await.is_ok()); - - let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - - assert!( - action1 - .tx - .send(Exec { - match_: vec!["aa".into(), "bb".into()], - time, + 
"ipset".into(), + json!({ + "set": "test", + "target": "DROP", + "chains": ["INPUT"], + "action": "add", }) - .await - .is_ok(), - ); - assert_eq!( - stream.stream.recv().await.unwrap().unwrap(), - ("send aa".into(), time), - ); + .into(), + vec!["ip".into()], + ) + .await; + assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); - assert!( - action2 - .tx - .send(Exec { - match_: vec!["aa".into(), "bb".into()], - time, + // Another set without conflict is ok + let res = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "ipset".into(), + json!({ + "set": "test", + "target": "DROP", + "version": "46", + "action": "add", }) - .await - .is_ok(), - ); - assert_eq!( - stream.stream.recv().await.unwrap().unwrap(), - ("bb send".into(), time), - ); + .into(), + vec!["ip".into()], + ) + .await; + assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + + // Another set without conflict is ok + let res = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "ipset".into(), + json!({ + "set": "test", + "action": "del", + }) + .into(), + vec!["ip".into()], + ) + .await; + assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + + // Unrelated set is ok + let res = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action2".into(), + "ipset".into(), + json!({ + "set": "test1", + "target": "target1", + "version": "6", + }) + .into(), + vec!["ip".into()], + ) + .await; + assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + + // Another set with conflict is not ok + let res = plugin + .action_impl( + "stream".into(), + "filter".into(), + "action".into(), + "ipset".into(), + json!({ + "set": "test", + "target": "target2", + "action": "del", + }) + .into(), + vec!["ip".into()], + ) + .await; + assert!(res.is_err(), "res: {:?}", res.map(|_| ())); } From 7cd4a4305de63b06e3fd8a13de865c4a0a996d7f Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 196/241] fix: merge plugins in 
configuration --- src/concepts/config.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/concepts/config.rs b/src/concepts/config.rs index ebac704..3080a74 100644 --- a/src/concepts/config.rs +++ b/src/concepts/config.rs @@ -49,6 +49,20 @@ fn dot() -> String { impl Config { fn merge(&mut self, mut other: Config) -> Result<(), String> { + for (key, plugin) in other.plugins.into_iter() { + match self.plugins.entry(key) { + Entry::Vacant(e) => { + e.insert(plugin); + } + Entry::Occupied(e) => { + return Err(format!( + "plugin {} is already defined. plugin definitions can't be spread accross multiple files.", + e.key() + )); + } + } + } + for (key, pattern) in other.patterns.into_iter() { match self.patterns.entry(key) { Entry::Vacant(e) => { From 6914f19fb880911fca15b1f245b2b3e2a974a99e Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 197/241] fix assert_cmd::cargo_bin deprecation warning --- tests/conf_load.rs | 6 +++--- tests/end_to_end.rs | 16 ++++++++-------- tests/persistence.rs | 6 +++--- tests/plugin_cluster.rs | 5 ++--- tests/plugin_virtual.rs | 5 ++--- tests/start_stop.rs | 5 ++--- 6 files changed, 20 insertions(+), 23 deletions(-) diff --git a/tests/conf_load.rs b/tests/conf_load.rs index 048f6ca..4968621 100644 --- a/tests/conf_load.rs +++ b/tests/conf_load.rs @@ -1,10 +1,10 @@ use std::error::Error; -use assert_cmd::Command; +use assert_cmd::cargo::cargo_bin_cmd; #[test] fn load_conf_directory() -> Result<(), Box> { - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args([ "test-config", "--verbose", @@ -70,7 +70,7 @@ streams: fn example_configs_are_equal() { let outputs = ["config/example.yml", "config/example.jsonnet"] .map(|config_path| { - let mut cmd = Command::cargo_bin("reaction").unwrap(); + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["test-config", "--config", config_path]); cmd.assert().success().get_output().stdout.clone() }) diff 
--git a/tests/end_to_end.rs b/tests/end_to_end.rs index 89e52e0..cfe09d6 100644 --- a/tests/end_to_end.rs +++ b/tests/end_to_end.rs @@ -1,6 +1,6 @@ use std::{error::Error, path::Path, process::Stdio, thread::sleep, time::Duration}; -use assert_cmd::Command; +use assert_cmd::cargo::cargo_bin_cmd; use assert_fs::prelude::*; use nix::sys::signal; use predicates::prelude::predicate; @@ -14,7 +14,7 @@ fn actions_delayed_and_on_exit() -> Result<(), Box> { .child("config.jsonnet") .write_file(Path::new("tests/test-conf/test-after.jsonnet"))?; - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); cmd.current_dir(tmp_dir.path()); cmd.timeout(Duration::from_secs(5)); @@ -46,7 +46,7 @@ fn kill_stream_on_exit() -> Result<(), Box> { .child("config.jsonnet") .write_file(Path::new("tests/test-conf/test-shutdown.jsonnet"))?; - let cmd = Command::cargo_bin("reaction")?; + let cmd = cargo_bin_cmd!("reaction"); let mut cmd = std::process::Command::new(cmd.get_program()); cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); cmd.current_dir(tmp_dir.path()); @@ -103,7 +103,7 @@ fn non_utf8_is_stripped() -> Result<(), Box> { .child("config.jsonnet") .write_file(Path::new("tests/test-conf/test-binary-input.jsonnet"))?; - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); cmd.current_dir(tmp_dir.path()); cmd.timeout(std::time::Duration::from_secs(1)); @@ -124,7 +124,7 @@ fn capture_streams_stderr() -> Result<(), Box> { .child("config.jsonnet") .write_file(Path::new("tests/test-conf/test-stream-stderr.jsonnet"))?; - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); cmd.current_dir(tmp_dir.path()); cmd.timeout(std::time::Duration::from_secs(1)); 
@@ -149,7 +149,7 @@ fn manualy_trigger_filter() -> Result<(), Box> { .write_file(Path::new("tests/test-conf/test-trigger.jsonnet"))?; // start daemon - let cmd = Command::cargo_bin("reaction")?; + let cmd = cargo_bin_cmd!("reaction"); let program = cmd.get_program(); let mut cmd = std::process::Command::new(program); cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); @@ -181,7 +181,7 @@ fn manualy_trigger_filter() -> Result<(), Box> { let socket_path = socket.path().to_str().unwrap(); // trigger event manually - let mut cmd_trigger = Command::cargo_bin("reaction")?; + let mut cmd_trigger = cargo_bin_cmd!("reaction"); cmd_trigger.current_dir(tmp_dir.path()); cmd_trigger.args(["trigger", "--socket", socket_path, "s1.f1", "num=95"]); cmd_trigger.timeout(Duration::from_secs(1)); @@ -221,7 +221,7 @@ fn filter_regex_match_eol() -> Result<(), Box> { .child("config.jsonnet") .write_file(Path::new("tests/test-conf/test-eol-match.jsonnet"))?; - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["start", "--socket", "./s", "--config", "config.jsonnet"]); cmd.current_dir(tmp_dir.path()); cmd.timeout(std::time::Duration::from_secs(1)); diff --git a/tests/persistence.rs b/tests/persistence.rs index 3cbfd74..d68db33 100644 --- a/tests/persistence.rs +++ b/tests/persistence.rs @@ -1,6 +1,6 @@ use std::{error::Error, path::Path, time::Duration}; -use assert_cmd::Command; +use assert_cmd::cargo::cargo_bin_cmd; use assert_fs::prelude::*; use predicates::prelude::predicate; @@ -13,7 +13,7 @@ fn resume_action() -> Result<(), Box> { .write_file(Path::new("tests/test-conf/test-resume-action.jsonnet"))?; // first run - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); cmd.current_dir(tmp_dir.path()); cmd.timeout(Duration::from_secs(5)); @@ -25,7 +25,7 @@ fn resume_action() -> Result<(), Box> { 
tmp_dir.child("log").assert(&expected); // second run, expect to resume action - let mut cmd = Command::cargo_bin("reaction")?; + let mut cmd = cargo_bin_cmd!("reaction"); cmd.args(["start", "--socket", "./s", "--config", "./config.jsonnet"]); cmd.current_dir(tmp_dir.path()); cmd.timeout(Duration::from_secs(5)); diff --git a/tests/plugin_cluster.rs b/tests/plugin_cluster.rs index 3f43107..cf863fb 100644 --- a/tests/plugin_cluster.rs +++ b/tests/plugin_cluster.rs @@ -1,6 +1,6 @@ use std::{fs::read_to_string, path::Path, thread, time::Duration}; -use assert_cmd::Command; +use assert_cmd::{Command, cargo::cargo_bin_cmd}; use assert_fs::prelude::*; const SECRET_KEY_A: &str = "g7U1LPq2cgGSyk6CH_v1QpoXowSFKVQ8IcFljd_ZKGw="; @@ -100,8 +100,7 @@ fn launch_node(config: String, my_secret: &'static str, expected_output: Vec<&'s .write_file(Path::new("./target/debug/reaction-plugin-cluster")) .unwrap(); - let output = Command::cargo_bin("reaction") - .unwrap() + let output = cargo_bin_cmd!("reaction") .args([ "start", "--socket", diff --git a/tests/plugin_virtual.rs b/tests/plugin_virtual.rs index 19128d8..89f37da 100644 --- a/tests/plugin_virtual.rs +++ b/tests/plugin_virtual.rs @@ -1,6 +1,6 @@ use std::{path::Path, time::Duration}; -use assert_cmd::Command; +use assert_cmd::{Command, cargo::cargo_bin_cmd}; use assert_fs::prelude::*; use predicates::prelude::predicate; @@ -23,8 +23,7 @@ fn plugin_virtual() { .write_file(Path::new("./target/debug/reaction-plugin-virtual")) .unwrap(); - Command::cargo_bin("reaction") - .unwrap() + cargo_bin_cmd!("reaction") .args(["start", "--socket", "./s", "--config", "./config.jsonnet"]) .current_dir(tmp_dir.path()) .timeout(Duration::from_secs(5)) diff --git a/tests/start_stop.rs b/tests/start_stop.rs index c555e54..949b6b8 100644 --- a/tests/start_stop.rs +++ b/tests/start_stop.rs @@ -1,6 +1,6 @@ use std::{path::Path, time::Duration}; -use assert_cmd::Command; +use assert_cmd::cargo::cargo_bin_cmd; use assert_fs::{TempDir, prelude::*}; use 
predicates::prelude::predicate; @@ -80,8 +80,7 @@ fn run_reaction(tmp_dir: &TempDir) { .write_file(Path::new("tests/start_stop.jsonnet")) .unwrap(); - Command::cargo_bin("reaction") - .unwrap() + cargo_bin_cmd!("reaction") .args(["start", "--socket", "./s", "--config", "./config.jsonnet"]) .current_dir(tmp_dir.path()) .timeout(Duration::from_secs(5)) From 5ce773c8e5c1b6d577e87714824464a825f49b8f Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 198/241] cluster: ignore integration tests for now --- tests/plugin_cluster.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/plugin_cluster.rs b/tests/plugin_cluster.rs index cf863fb..2fc99c2 100644 --- a/tests/plugin_cluster.rs +++ b/tests/plugin_cluster.rs @@ -12,6 +12,7 @@ const PUBLIC_KEY_B: &str = "LPSQ9pS7m_5vvNC-fhoBNeL2-eS2Fd6aO4ImSnXp3lc="; // require UDP ports 9876-9879 to be free on 127.0.0.1 #[test] +#[ignore = "failing for now"] fn plugin_cluster_same_startup() { // First build reaction-plugin-cluster Command::new("cargo") @@ -46,6 +47,7 @@ fn plugin_cluster_same_startup() { } #[test] +#[ignore = "failing for now"] fn plugin_cluster_different_startup() { // First build reaction-plugin-cluster Command::new("cargo") From 41bc3525f876179f5ef74f09a870d237c86a26b3 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 199/241] Fix time-based test sometimes failing by increasing sleep --- src/daemon/filter/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/daemon/filter/tests.rs b/src/daemon/filter/tests.rs index 1b4f912..5196e42 100644 --- a/src/daemon/filter/tests.rs +++ b/src/daemon/filter/tests.rs @@ -266,7 +266,7 @@ async fn three_matches_then_action_then_delayed_action() { ); // Now the second action executes - tokio::time::sleep(Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(140)).await; // Check second action assert!( bed.manager.state.lock().await.triggers.is_empty(), 
From 34e2a8f29463c897640ff274677fde2a2f2ce541 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 200/241] plugin: simpler crate version retrieval --- plugins/reaction-plugin/src/lib.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 87f3443..a01957d 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -187,10 +187,9 @@ pub struct Hello { impl Hello { pub fn new() -> Hello { - let mut version = env!("CARGO_PKG_VERSION").split("."); Hello { - version_major: version.next().unwrap().parse().unwrap(), - version_minor: version.next().unwrap().parse().unwrap(), + version_major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(), + version_minor: env!("CARGO_PKG_VERSION_MINOR").parse().unwrap(), } } From 62933b55e412d08bce598d82a63b0cdb263214a8 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 201/241] Start plugins after start commands Because stop commands run after plugins' shutdown, so it seems better that commands embrace ({ plugins }). Fix outdated comment about aborting on startup. 
--- src/daemon/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 8db460f..17f90f3 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -152,20 +152,20 @@ async fn daemon_start( (state, stream_managers) }; - // Finish plugin setup - plugins.finish_setup().await?; - plugins.manager(); - // Open socket and run task let socket = Socket::open(socket).await?; socket.manager(config, state, shutdown.clone()); - // reaction won't abort on startup anymore, we can run start commands + // all core systems started, we can run start commands *config_started = true; if !config.start() { return Err("a start command failed, exiting.".into()); } + // Finish plugin setup + plugins.finish_setup().await?; + plugins.manager(); + // Start Stream managers let stream_task_handles = stream_managers.into_iter().filter_map(|stream_manager| { let standalone = stream_manager.is_standalone(); From 12fc90535ab239abec96fee78d79d11286aebf30 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 202/241] Change plugin interface: oneshot load_config and start Instead of multiple stream_impl / action_impl and one finish_setup. This made plugin implementations awkward: they often got some conf and couldn't determine if it was valid or not. Now they get all the conf in one function and don't have to keep partial state from one call to another. This has the other important benefit that configuration loading is separated from startup. This will make plugin lifecycle management less clunky. --- plugins/reaction-plugin/src/lib.rs | 50 +++++++++++++++++------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index a01957d..2ce95dc 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -123,31 +123,20 @@ pub trait PluginInfo { /// Return the manifest of the plugin. 
async fn manifest(&mut self) -> Result; - /// Return one stream of a given type. - /// Errors if the type does not exist or if config is invalid. - async fn stream_impl( + /// Load all plugin stream and action configurations, + /// Errors if config is invalid. + /// + /// The plugin should not start running mutable commands here: + /// It should be ok to quit without cleanup for now. + async fn load_config( &mut self, - stream_name: String, - stream_type: String, - config: Value, - ) -> RemoteResult; + streams: Vec, + actions: Vec, + ) -> RemoteResult<(Vec, Vec)>; - /// Return one action of a given type. - /// Errors if the type does not exist or if config is invalid. - async fn action_impl( - &mut self, - stream_name: String, - filter_name: String, - action_name: String, - action_type: String, - config: Value, - patterns: Vec, - ) -> RemoteResult; - - /// Notify the plugin that setup is finished, permitting a last occasion to report an error - /// (For example if a stream wants a companion action but it hasn't been initialized) + /// Notify the plugin that setup is finished, permitting a last occasion to report an error that'll make reaction exit. /// All initialization (opening remote connections, starting streams, etc) should happen here. - async fn finish_setup(&mut self) -> RemoteResult<()>; + async fn start(&mut self) -> RemoteResult<()>; /// Notify the plugin that reaction is quitting and that the plugin should quit too. /// A few seconds later, the plugin will receive SIGTERM. 
@@ -155,6 +144,23 @@ pub trait PluginInfo { async fn close(mut self) -> RemoteResult<()>; } +#[derive(Serialize, Deserialize, Clone)] +pub struct StreamConfig { + pub stream_name: String, + pub stream_type: String, + pub config: Value, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct ActionConfig { + pub stream_name: String, + pub filter_name: String, + pub action_name: String, + pub action_type: String, + pub config: Value, + pub patterns: Vec, +} + #[derive(Serialize, Deserialize)] pub struct Manifest { // Protocol version. available as the [`hello!`] macro. From 57d6da537730c7ea30eab994ba3bfc8b505e54e3 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 203/241] virtual: adapt to plugin interface change --- plugins/reaction-plugin-virtual/src/main.rs | 206 ++++++------- plugins/reaction-plugin-virtual/src/tests.rs | 288 +++++++++++-------- 2 files changed, 262 insertions(+), 232 deletions(-) diff --git a/plugins/reaction-plugin-virtual/src/main.rs b/plugins/reaction-plugin-virtual/src/main.rs index 4372725..aa20f26 100644 --- a/plugins/reaction-plugin-virtual/src/main.rs +++ b/plugins/reaction-plugin-virtual/src/main.rs @@ -1,8 +1,8 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, - line::PatternLine, + ActionConfig, ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamConfig, + StreamImpl, Value, line::PatternLine, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -17,10 +17,7 @@ async fn main() { } #[derive(Default)] -struct Plugin { - streams: BTreeMap, - actions_init: Vec, -} +struct Plugin {} impl PluginInfo for Plugin { async fn manifest(&mut self) -> Result { @@ -31,75 +28,69 @@ impl PluginInfo for Plugin { }) } - async fn stream_impl( + async fn load_config( &mut self, - stream_name: String, - stream_type: String, - config: Value, - ) -> RemoteResult { - if 
stream_type != "virtual" { - return Err("This plugin can't handle other stream types than virtual".into()); - } + streams: Vec, + actions: Vec, + ) -> RemoteResult<(Vec, Vec)> { + let mut ret_streams = Vec::with_capacity(streams.len()); + let mut ret_actions = Vec::with_capacity(actions.len()); - let (virtual_stream, receiver) = VirtualStream::new(config)?; + let mut local_streams = BTreeMap::new(); - if let Some(_) = self.streams.insert(stream_name, virtual_stream) { - return Err("this virtual stream has already been initialized".into()); - } - - Ok(StreamImpl { - stream: receiver, - standalone: false, - }) - } - - async fn action_impl( - &mut self, - stream_name: String, - filter_name: String, - action_name: String, - action_type: String, - config: Value, - patterns: Vec, - ) -> RemoteResult { - if &action_type != "virtual" { - return Err("This plugin can't handle other action types than virtual".into()); - } - - let (virtual_action_init, tx) = - VirtualActionInit::new(stream_name, filter_name, action_name, config, patterns)?; - - self.actions_init.push(virtual_action_init); - Ok(ActionImpl { tx }) - } - - async fn finish_setup(&mut self) -> RemoteResult<()> { - while let Some(action_init) = self.actions_init.pop() { - match self.streams.get(&action_init.to) { - Some(virtual_stream) => { - let virtual_stream = virtual_stream.clone(); - tokio::spawn(async move { - VirtualAction::from(action_init, virtual_stream) - .serve() - .await - }); - } - None => { - return Err(format!( - "action {}.{}.{}: send \"{}\" matches no stream name", - action_init.stream_name, - action_init.filter_name, - action_init.action_name, - action_init.to - ) - .into()); - } + for StreamConfig { + stream_name, + stream_type, + config, + } in streams + { + if stream_type != "virtual" { + return Err("This plugin can't handle other stream types than virtual".into()); } - } - // Free containers - self.streams = Default::default(); - self.actions_init = Default::default(); + let (virtual_stream, 
receiver) = VirtualStream::new(config)?; + + if let Some(_) = local_streams.insert(stream_name, virtual_stream) { + return Err("this virtual stream has already been initialized".into()); + } + + ret_streams.push(StreamImpl { + stream: receiver, + standalone: false, + }); + } + + for ActionConfig { + stream_name, + filter_name, + action_name, + action_type, + config, + patterns, + } in actions + { + if &action_type != "virtual" { + return Err("This plugin can't handle other action types than virtual".into()); + } + + let (mut virtual_action, tx) = VirtualAction::new( + stream_name, + filter_name, + action_name, + config, + patterns, + &local_streams, + )?; + + tokio::spawn(async move { virtual_action.serve().await }); + + ret_actions.push(ActionImpl { tx }); + } + + Ok((ret_streams, ret_actions)) + } + + async fn start(&mut self) -> RemoteResult<()> { Ok(()) } @@ -140,44 +131,6 @@ struct ActionOptions { to: String, } -struct VirtualActionInit { - stream_name: String, - filter_name: String, - action_name: String, - rx: mpsc::Receiver, - patterns: Vec, - send: String, - to: String, -} - -impl VirtualActionInit { - fn new( - stream_name: String, - filter_name: String, - action_name: String, - config: Value, - patterns: Vec, - ) -> Result<(Self, mpsc::Sender), String> { - let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { - format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") - })?; - - let (tx, rx) = mpsc::channel(1); - Ok(( - Self { - stream_name, - filter_name, - action_name, - rx, - patterns, - send: options.send, - to: options.to, - }, - tx, - )) - } -} - struct VirtualAction { rx: mpsc::Receiver, send: PatternLine, @@ -185,13 +138,36 @@ struct VirtualAction { } impl VirtualAction { - fn from(action_init: VirtualActionInit, to: VirtualStream) -> VirtualAction { - let send = PatternLine::new(action_init.send, action_init.patterns); - VirtualAction { - rx: action_init.rx, - send, - to, - } + fn new( + 
stream_name: String, + filter_name: String, + action_name: String, + config: Value, + patterns: Vec, + streams: &BTreeMap, + ) -> Result<(Self, mpsc::Sender), String> { + let options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { + format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") + })?; + + let send = PatternLine::new(options.send, patterns); + + let stream = streams.get(&options.to).ok_or_else(|| { + format!( + "action {}.{}.{}: send \"{}\" matches no stream name", + stream_name, filter_name, action_name, options.to + ) + })?; + + let (tx, rx) = mpsc::channel(1); + Ok(( + Self { + rx, + send: send, + to: stream.clone(), + }, + tx, + )) } async fn serve(&mut self) { diff --git a/plugins/reaction-plugin-virtual/src/tests.rs b/plugins/reaction-plugin-virtual/src/tests.rs index b08f416..32df952 100644 --- a/plugins/reaction-plugin-virtual/src/tests.rs +++ b/plugins/reaction-plugin-virtual/src/tests.rs @@ -1,6 +1,6 @@ use std::time::{SystemTime, UNIX_EPOCH}; -use reaction_plugin::{Exec, PluginInfo, Value}; +use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig, Value}; use serde_json::json; use crate::Plugin; @@ -10,26 +10,42 @@ async fn conf_stream() { // Invalid type assert!( Plugin::default() - .stream_impl("stream".into(), "virtu".into(), Value::Null) + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtu".into(), + config: Value::Null + }], + vec![] + ) .await .is_err() ); assert!( Plugin::default() - .stream_impl("stream".into(), "virtual".into(), Value::Null) + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: Value::Null + }], + vec![] + ) .await .is_ok() ); - eprintln!( - "err: {:?}", - Plugin::default() - .stream_impl("stream".into(), "virtual".into(), json!({}).into()) - .await - ); + assert!( Plugin::default() - .stream_impl("stream".into(), "virtual".into(), json!({}).into()) + 
.load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: json!({}).into(), + }], + vec![] + ) .await .is_ok() ); @@ -37,10 +53,13 @@ async fn conf_stream() { // Invalid conf: must be empty assert!( Plugin::default() - .stream_impl( - "stream".into(), - "virtual".into(), - json!({"key": "value" }).into() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: json!({"key": "value" }).into(), + }], + vec![] ) .await .is_err() @@ -49,6 +68,12 @@ async fn conf_stream() { #[tokio::test] async fn conf_action() { + let streams = vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: Value::Null, + }]; + let valid_conf = json!({ "send": "message", "to": "stream" }); let missing_send_conf = json!({ "to": "stream" }); @@ -60,26 +85,32 @@ async fn conf_action() { // Invalid type assert!( Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtu".into(), - Value::Null, - patterns.clone() + .load_config( + streams.clone(), + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtu".into(), + config: Value::Null, + patterns: patterns.clone(), + }] ) .await .is_err() ); assert!( Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - valid_conf.into(), - patterns.clone() + .load_config( + streams.clone(), + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: valid_conf.into(), + patterns: patterns.clone() + }] ) .await .is_ok() @@ -88,13 +119,16 @@ async fn conf_action() { for conf in [missing_send_conf, missing_to_conf, extra_attr_conf] { assert!( Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - 
"virtual".into(), - conf.clone().into(), - patterns.clone() + .load_config( + streams.clone(), + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: conf.clone().into(), + patterns: patterns.clone() + }] ) .await .is_err(), @@ -107,42 +141,48 @@ async fn conf_action() { #[tokio::test] async fn conf_send() { // Valid to: option - let mut plugin = Plugin::default(); - plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "message", "to": "stream" }).into(), - Vec::default(), - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_ok()); + assert!( + Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: Value::Null, + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: json!({ "send": "message", "to": "stream" }).into(), + patterns: vec![], + }] + ) + .await + .is_ok(), + ); // Invalid to: option - let mut plugin = Plugin::default(); - plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "message", "to": "stream1" }).into(), - Vec::default(), - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_err()); + assert!( + Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: Value::Null, + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: json!({ "send": 
"message", "to": "stream1" }).into(), + patterns: vec![], + }] + ) + .await + .is_err(), + ); } // Let's allow empty streams for now. @@ -150,35 +190,46 @@ async fn conf_send() { // // #[tokio::test] // async fn conf_empty_stream() { -// let mut plugin = Plugin::default(); -// plugin -// .stream_impl("stream".into(), "virtual".into(), Value::Null) -// .await -// .unwrap(); -// assert!(plugin.finish_setup().await.is_err()); +// assert!( +// Plugin::default() +// .load_config( +// vec![StreamConfig { +// stream_name: "stream".into(), +// stream_type: "virtual".into(), +// config: Value::Null, +// }], +// vec![], +// ) +// .await +// .is_err(), +// ); // } #[tokio::test] async fn run_simple() { let mut plugin = Plugin::default(); - let mut stream = plugin - .stream_impl("stream".into(), "virtual".into(), Value::Null) - .await - .unwrap(); - assert!(!stream.standalone); - - let action = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "message ", "to": "stream" }).into(), - vec!["test".into()], + let (mut streams, mut actions) = plugin + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: Value::Null, + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: json!({ "send": "message ", "to": "stream" }).into(), + patterns: vec!["test".into()], + }], ) .await .unwrap(); - assert!(plugin.finish_setup().await.is_ok()); + + let mut stream = streams.pop().unwrap(); + let action = actions.pop().unwrap(); + assert!(!stream.standalone); for m in ["test1", "test2", "test3", " a a a aa a a"] { let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -202,37 +253,40 @@ async fn run_simple() { #[tokio::test] async fn run_two_actions() { let mut plugin = Plugin::default(); - let mut stream = plugin - .stream_impl("stream".into(), 
"virtual".into(), Value::Null) + let (mut streams, mut actions) = plugin + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "virtual".into(), + config: Value::Null, + }], + vec![ + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: json!({ "send": "send ", "to": "stream" }).into(), + patterns: vec!["a".into(), "b".into()], + }, + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "virtual".into(), + config: json!({ "send": " send", "to": "stream" }).into(), + patterns: vec!["a".into(), "b".into()], + }, + ], + ) .await .unwrap(); + + let mut stream = streams.pop().unwrap(); assert!(!stream.standalone); - let action1 = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": "send ", "to": "stream" }).into(), - vec!["a".into(), "b".into()], - ) - .await - .unwrap(); - - let action2 = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "virtual".into(), - json!({ "send": " send", "to": "stream" }).into(), - vec!["a".into(), "b".into()], - ) - .await - .unwrap(); - - assert!(plugin.finish_setup().await.is_ok()); + let action2 = actions.pop().unwrap(); + let action1 = actions.pop().unwrap(); let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); From b0dc3c56adfb8b5e1e4ba2aa04f8e20f90cc46ae Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 204/241] ipset: adapt to plugin interface change --- plugins/reaction-plugin-ipset/src/main.rs | 89 +++++----- plugins/reaction-plugin-ipset/src/tests.rs | 197 ++++++++++++--------- 2 files changed, 157 insertions(+), 129 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 9982b94..268eb95 100644 --- 
a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -1,7 +1,8 @@ use std::collections::{BTreeMap, BTreeSet}; use reaction_plugin::{ - ActionImpl, Hello, Manifest, PluginInfo, RemoteError, RemoteResult, StreamImpl, Value, + ActionConfig, ActionImpl, Hello, Manifest, PluginInfo, RemoteError, RemoteResult, StreamConfig, + StreamImpl, shutdown::{ShutdownController, ShutdownToken}, }; use remoc::rtc; @@ -26,7 +27,6 @@ async fn main() { #[derive(Default)] struct Plugin { ipset: IpSet, - set_options: BTreeMap, sets: Vec, actions: Vec, shutdown: ShutdownController, @@ -41,64 +41,69 @@ impl PluginInfo for Plugin { }) } - async fn stream_impl( + async fn load_config( &mut self, - _stream_name: String, - _stream_type: String, - _config: Value, - ) -> RemoteResult { - Err("This plugin can't handle any stream type".into()) - } - - async fn action_impl( - &mut self, - stream_name: String, - filter_name: String, - action_name: String, - action_type: String, - config: Value, - patterns: Vec, - ) -> RemoteResult { - if &action_type != "ipset" { - return Err("This plugin can't handle other action types than ipset".into()); + streams: Vec, + actions: Vec, + ) -> RemoteResult<(Vec, Vec)> { + if !streams.is_empty() { + return Err("This plugin can't handle any stream type".into()); } - let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { + let mut ret_actions = Vec::with_capacity(actions.len()); + let mut set_options: BTreeMap = BTreeMap::new(); + + for ActionConfig { + stream_name, + filter_name, + action_name, + action_type, + config, + patterns, + } in actions + { + if &action_type != "ipset" { + return Err("This plugin can't handle other action types than ipset".into()); + } + + let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") })?; - options.set_ip_index(patterns).map_err(|_| + 
options.set_ip_index(patterns).map_err(|_| format!( "No pattern with name {} in filter {stream_name}.{filter_name}. Try setting the option `pattern` to your pattern name of type 'ip'", &options.pattern ) )?; - // Merge option - self.set_options - .entry(options.set.clone()) - .or_default() - .merge(&options.set_options) - .map_err(|err| format!("ipset {}: {err}", options.set))?; + // Merge option + set_options + .entry(options.set.clone()) + .or_default() + .merge(&options.set_options) + .map_err(|err| format!("ipset {}: {err}", options.set))?; - let (tx, rx) = remoc::rch::mpsc::channel(1); - self.actions.push(Action::new( - self.ipset.clone(), - self.shutdown.token(), - rx, - options, - )?); + let (tx, rx) = remoc::rch::mpsc::channel(1); + self.actions.push(Action::new( + self.ipset.clone(), + self.shutdown.token(), + rx, + options, + )?); - Ok(ActionImpl { tx }) - } + ret_actions.push(ActionImpl { tx }); + } - async fn finish_setup(&mut self) -> RemoteResult<()> { // Init all sets - while let Some((name, options)) = self.set_options.pop_first() { + while let Some((name, options)) = set_options.pop_first() { self.sets.push(Set::from(name, options)); } - self.set_options = Default::default(); + Ok((vec![], ret_actions)) + } + + async fn start(&mut self) -> RemoteResult<()> { let mut first_error = None; for (i, set) in self.sets.iter().enumerate() { // Retain if error @@ -138,8 +143,6 @@ impl PluginInfo for Plugin { } } -impl Plugin {} - async fn destroy_sets_at_shutdown(mut ipset: IpSet, sets: Vec, shutdown: ShutdownToken) { shutdown.wait().await; for set in sets { diff --git a/plugins/reaction-plugin-ipset/src/tests.rs b/plugins/reaction-plugin-ipset/src/tests.rs index 397df39..f1cef9e 100644 --- a/plugins/reaction-plugin-ipset/src/tests.rs +++ b/plugins/reaction-plugin-ipset/src/tests.rs @@ -1,4 +1,4 @@ -use reaction_plugin::{PluginInfo, Value}; +use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig, Value}; use serde_json::json; use crate::Plugin; @@ 
-8,10 +8,20 @@ async fn conf_stream() { // No stream is supported by ipset assert!( Plugin::default() - .stream_impl("stream".into(), "ipset".into(), Value::Null) + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "ipset".into(), + config: Value::Null + }], + vec![] + ) .await .is_err() ); + + // Nothing is ok + assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok()); } #[tokio::test] @@ -106,13 +116,16 @@ async fn conf_action_standalone() { (false, json!({ "set": "test", "target": ["DROP"] }), &p), ] { let res = Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "ipset".into(), - conf.clone().into(), - patterns.clone(), + .load_config( + vec![], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "ipset".into(), + config: conf.clone().into(), + patterns: patterns.clone(), + }], ) .await; @@ -131,93 +144,105 @@ async fn conf_action_standalone() { async fn conf_action_merge() { let mut plugin = Plugin::default(); - // First set is ok - let res = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "ipset".into(), - json!({ - "set": "test", - "target": "DROP", - "chains": ["INPUT"], - "action": "add", - }) - .into(), - vec!["ip".into()], - ) - .await; - assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + let set1 = ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action1".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "target": "DROP", + "chains": ["INPUT"], + "action": "add", + }) + .into(), + patterns: vec!["ip".into()], + }; - // Another set without conflict is ok - let res = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "ipset".into(), - json!({ - "set": "test", - "target": "DROP", - "version": "46", - "action": "add", - }) - .into(), - vec!["ip".into()], - 
) - .await; - assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + let set2 = ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action2".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "target": "DROP", + "version": "46", + "action": "add", + }) + .into(), + patterns: vec!["ip".into()], + }; - // Another set without conflict is ok - let res = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "ipset".into(), - json!({ - "set": "test", - "action": "del", - }) - .into(), - vec!["ip".into()], - ) - .await; - assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + let set3 = ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action2".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "action": "del", + }) + .into(), + patterns: vec!["ip".into()], + }; - // Unrelated set is ok let res = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action2".into(), - "ipset".into(), - json!({ - "set": "test1", - "target": "target1", - "version": "6", - }) - .into(), - vec!["ip".into()], + .load_config( + vec![], + vec![ + // First set + set1.clone(), + // Same set, adding options, no conflict + set2.clone(), + // Same set, no new options, no conflict + set3.clone(), + // Unrelated set, so no conflict + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action3".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test2", + "target": "target1", + "version": "6", + }) + .into(), + patterns: vec!["ip".into()], + }, + ], ) .await; + assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); // Another set with conflict is not ok let res = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "ipset".into(), - json!({ - "set": "test", - "target": "target2", - "action": "del", - }) - .into(), - vec!["ip".into()], + .load_config( 
+ vec![], + vec![ + // First set + set1, + // Same set, adding options, no conflict + set2, + // Same set, no new options, no conflict + set3, + // Another set with conflict + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action3".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "target": "target2", + "action": "del", + }) + .into(), + patterns: vec!["ip".into()], + }, + ], ) .await; assert!(res.is_err(), "res: {:?}", res.map(|_| ())); From ae28cfbb310f1b38207ec57d4e6f11de72b37316 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 205/241] cluster: adapt to plugin interface change --- plugins/reaction-plugin-cluster/src/main.rs | 232 +++++++++--------- .../reaction-plugin-cluster/src/tests/conf.rs | 225 +++++++++-------- .../reaction-plugin-cluster/src/tests/e2e.rs | 85 ++++--- .../src/tests/self_.rs | 49 ++-- 4 files changed, 317 insertions(+), 274 deletions(-) diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index fd52fb1..b931026 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -7,8 +7,8 @@ use std::{ use iroh::{EndpointAddr, PublicKey, SecretKey, TransportAddr}; use reaction_plugin::{ - ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamImpl, Value, - line::PatternLine, main_loop, shutdown::ShutdownController, time::parse_duration, + ActionConfig, ActionImpl, Exec, Hello, Line, Manifest, PluginInfo, RemoteResult, StreamConfig, + StreamImpl, line::PatternLine, main_loop, shutdown::ShutdownController, time::parse_duration, }; use remoc::{rch::mpsc, rtc}; use serde::{Deserialize, Serialize}; @@ -32,8 +32,7 @@ async fn main() { #[derive(Default)] struct Plugin { - streams: BTreeMap, - actions: BTreeMap>, + init: BTreeMap)>, cluster_shutdown: ShutdownController, } @@ -108,118 +107,136 @@ impl PluginInfo for Plugin { 
actions: BTreeSet::from(["cluster_send".into()]), }) } - - async fn stream_impl( + async fn load_config( &mut self, - stream_name: String, - stream_type: String, - config: Value, - ) -> RemoteResult { - if &stream_type != "cluster" { - return Err("This plugin can't handle other stream types than cluster".into()); - } + streams: Vec, + actions: Vec, + ) -> RemoteResult<(Vec, Vec)> { + let mut ret_streams = Vec::with_capacity(streams.len()); + let mut ret_actions = Vec::with_capacity(actions.len()); - let options: StreamOptions = serde_json::from_value(config.into()) - .map_err(|err| format!("invalid options: {err}"))?; + for StreamConfig { + stream_name, + stream_type, + config, + } in streams + { + if &stream_type != "cluster" { + return Err("This plugin can't handle other stream types than cluster".into()); + } - let mut nodes = BTreeMap::default(); + let options: StreamOptions = serde_json::from_value(config.into()) + .map_err(|err| format!("invalid options: {err}"))?; - let message_timeout = parse_duration(&options.message_timeout) - .map_err(|err| format!("invalid message_timeout: {err}"))?; + let mut nodes = BTreeMap::default(); - if options.bind_ipv4.is_none() && options.bind_ipv6.is_none() { - Err( - "At least one of bind_ipv4 and bind_ipv6 must be enabled. Unset at least one of them or set at least one of them to an IP.", - )?; - } + let message_timeout = parse_duration(&options.message_timeout) + .map_err(|err| format!("invalid message_timeout: {err}"))?; - if options.nodes.is_empty() { - Err("At least one remote node has to be configured for a cluster")?; - } + if options.bind_ipv4.is_none() && options.bind_ipv6.is_none() { + Err( + "At least one of bind_ipv4 and bind_ipv6 must be enabled. 
Unset at least one of them or set at least one of them to an IP.", + )?; + } - for node in options.nodes.into_iter() { - let bytes = key::key_b64_to_bytes(&node.public_key) - .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; + if options.nodes.is_empty() { + Err("At least one remote node has to be configured for a cluster")?; + } - let public_key = PublicKey::from_bytes(&bytes) - .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; + for node in options.nodes.into_iter() { + let bytes = key::key_b64_to_bytes(&node.public_key) + .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; - nodes.insert( - public_key, - EndpointAddr { - id: public_key, - addrs: node - .addresses - .into_iter() - .map(|addr| TransportAddr::Ip(addr)) - .collect(), - }, + let public_key = PublicKey::from_bytes(&bytes) + .map_err(|err| format!("invalid public key {}: {err}", node.public_key))?; + + nodes.insert( + public_key, + EndpointAddr { + id: public_key, + addrs: node + .addresses + .into_iter() + .map(|addr| TransportAddr::Ip(addr)) + .collect(), + }, + ); + } + + let secret_key = key::secret_key(".", &stream_name).await?; + eprintln!( + "INFO public key of this node for cluster {stream_name}: {}", + secret_key.public().show() ); + + let (tx, rx) = mpsc::channel(1); + + let stream = StreamInit { + name: stream_name.clone(), + listen_port: options.listen_port, + bind_ipv4: options.bind_ipv4, + bind_ipv6: options.bind_ipv6, + secret_key, + message_timeout, + nodes, + tx, + }; + + if let Some(_) = self.init.insert(stream_name, (stream, vec![])) { + return Err("this virtual stream has already been initialized".into()); + } + + ret_streams.push(StreamImpl { + stream: rx, + standalone: true, + }) } - let secret_key = key::secret_key(".", &stream_name).await?; - eprintln!( - "INFO public key of this node for cluster {stream_name}: {}", - secret_key.public().show() - ); + for ActionConfig { + stream_name, + filter_name, + 
action_name, + action_type, + config, + patterns, + } in actions + { + if &action_type != "cluster_send" { + return Err( + "This plugin can't handle other action types than 'cluster_send'".into(), + ); + } - let (tx, rx) = mpsc::channel(1); + let options: ActionOptions = serde_json::from_value(config.into()) + .map_err(|err| format!("invalid options: {err}"))?; - let stream = StreamInit { - name: stream_name.clone(), - listen_port: options.listen_port, - bind_ipv4: options.bind_ipv4, - bind_ipv6: options.bind_ipv6, - secret_key, - message_timeout, - nodes, - tx, - }; + let (tx, rx) = mpsc::channel(1); - if let Some(_) = self.streams.insert(stream_name, stream) { - return Err("this virtual stream has already been initialized".into()); + let init_action = ActionInit { + name: format!("{}.{}.{}", stream_name, filter_name, action_name), + send: PatternLine::new(options.send, patterns), + self_: options.self_, + rx, + }; + + match self.init.get_mut(&options.to) { + Some((_, actions)) => actions.push(init_action), + None => { + return Err(format!( + "ERROR action '{}' sends 'to' unknown stream '{}'", + init_action.name, options.to + ) + .into()); + } + } + + ret_actions.push(ActionImpl { tx }) } - Ok(StreamImpl { - stream: rx, - standalone: true, - }) + Ok((ret_streams, ret_actions)) } - async fn action_impl( - &mut self, - stream_name: String, - filter_name: String, - action_name: String, - action_type: String, - config: Value, - patterns: Vec, - ) -> RemoteResult { - if &action_type != "cluster_send" { - return Err("This plugin can't handle other action types than 'cluster_send'".into()); - } - - let options: ActionOptions = serde_json::from_value(config.into()) - .map_err(|err| format!("invalid options: {err}"))?; - - let (tx, rx) = mpsc::channel(1); - - let init_action = ActionInit { - name: format!("{}.{}.{}", stream_name, filter_name, action_name), - send: PatternLine::new(options.send, patterns), - self_: options.self_, - rx, - }; - - self.actions - 
.entry(options.to) - .or_default() - .push(init_action); - - Ok(ActionImpl { tx }) - } - - async fn finish_setup(&mut self) -> RemoteResult<()> { + async fn start(&mut self) -> RemoteResult<()> { let mut db = { let path = PathBuf::from("."); let (cancellation_token, task_tracker_token) = self.cluster_shutdown.token().split(); @@ -228,33 +245,20 @@ impl PluginInfo for Plugin { .map_err(|err| format!("Can't open database: {err}"))? }; - while let Some((stream_name, stream)) = self.streams.pop_first() { + while let Some((_, (stream, actions))) = self.init.pop_first() { let endpoint = cluster::bind(&stream).await?; cluster::cluster_tasks( endpoint, stream, - self.actions.remove(&stream_name).unwrap_or_default(), + actions, &mut db, self.cluster_shutdown.clone(), ) .await?; } - // Check there is no action left - if !self.actions.is_empty() { - for (to, actions) in &self.actions { - for action in actions { - eprintln!( - "ERROR action '{}' sends 'to' unknown stream '{}'", - action.name, to - ); - } - } - return Err("at least one cluster_send action has unknown 'to'".into()); - } // Free containers - self.actions = Default::default(); - self.streams = Default::default(); - eprintln!("DEBUG finished setup."); + self.init = Default::default(); + eprintln!("DEBUG started"); Ok(()) } diff --git a/plugins/reaction-plugin-cluster/src/tests/conf.rs b/plugins/reaction-plugin-cluster/src/tests/conf.rs index 68e7d42..2da4974 100644 --- a/plugins/reaction-plugin-cluster/src/tests/conf.rs +++ b/plugins/reaction-plugin-cluster/src/tests/conf.rs @@ -1,12 +1,12 @@ use std::env::set_current_dir; use assert_fs::TempDir; -use reaction_plugin::PluginInfo; +use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig}; use serde_json::json; use crate::{Plugin, tests::insert_secret_key}; -use super::{PUBLIC_KEY_A, TEST_MUTEX, stream_ok, stream_ok_port}; +use super::{PUBLIC_KEY_A, TEST_MUTEX, stream_ok}; #[tokio::test] async fn conf_stream() { @@ -18,7 +18,14 @@ async fn conf_stream() { // 
Invalid type assert!( Plugin::default() - .stream_impl("stream".into(), "clust".into(), stream_ok().into(),) + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "clust".into(), + config: stream_ok().into(), + }], + vec![] + ) .await .is_err() ); @@ -102,7 +109,14 @@ async fn conf_stream() { ] { assert!(is_ok( &Plugin::default() - .stream_impl("stream".into(), "cluster".into(), json.into()) + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: json.into(), + }], + vec![] + ) .await )); } @@ -115,17 +129,24 @@ async fn conf_action() { // Invalid type assert!( Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "cluster_sen".into(), - json!({ - "send": "", - "to": "stream", - }) - .into(), - patterns.clone(), + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: stream_ok().into(), + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "cluster_sen".into(), + config: json!({ + "send": "", + "to": "stream", + }) + .into(), + patterns: patterns.clone(), + }] ) .await .is_err() @@ -134,64 +155,79 @@ async fn conf_action() { for (json, is_ok) in [ ( json!({ - "send": "", - "to": "stream", + "send": "", + "to": "stream", }), - Result::is_ok as fn(&_) -> bool, + true, ), ( json!({ - "send": "", - "to": "stream", - "self": true, + "send": "", + "to": "stream", + "self": true, }), - Result::is_ok, + true, ), ( json!({ - "send": "", - "to": "stream", - "self": false, + "send": "", + "to": "stream", + "self": false, }), - Result::is_ok, + true, ), ( // missing to json!({ - "send": "", + "send": "", }), - Result::is_err, + false, ), ( // missing send json!({ - "to": "stream", + "to": "stream", }), - Result::is_err, + false, ), ( // invalid self json!({ - "send": "", - "to": "stream", - "self": "true", + 
"send": "", + "to": "stream", + "self": "true", }), - Result::is_err, + false, + ), + ( + // missing conf + json!({}), + false, ), - (json!({}), Result::is_err), ] { - assert!(is_ok( - &Plugin::default() - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "cluster_send".into(), - json.into(), - patterns.clone(), - ) - .await - )); + let ret = Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: stream_ok().into(), + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "cluster_send".into(), + config: json.clone().into(), + patterns: patterns.clone(), + }], + ) + .await; + + assert!( + ret.is_ok() == is_ok, + "is_ok: {is_ok}, ret: {:?}, action conf: {json:?}", + ret.map(|_| ()) + ); } } @@ -203,62 +239,55 @@ async fn conf_send() { insert_secret_key().await; // No action is ok - let mut plugin = Plugin::default(); - plugin - .stream_impl( - "stream".into(), - "cluster".into(), - stream_ok_port(2051).into(), + let res = Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: stream_ok().into(), + }], + vec![], ) - .await - .unwrap(); - let res = plugin.finish_setup().await; - eprintln!("{res:?}"); - assert!(res.is_ok()); + .await; + assert!(res.is_ok(), "{:?}", res.map(|_| ())); // An action is ok - let mut plugin = Plugin::default(); - plugin - .stream_impl( - "stream".into(), - "cluster".into(), - stream_ok_port(2052).into(), + let res = Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: stream_ok().into(), + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "cluster_send".into(), + config: json!({ "send": "message", "to": "stream" }).into(), + 
patterns: vec![], + }], ) - .await - .unwrap(); - plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "cluster_send".into(), - json!({ "send": "message", "to": "stream" }).into(), - Vec::default(), - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_ok()); + .await; + assert!(res.is_ok(), "{:?}", res.map(|_| ())); // Invalid to: option - let mut plugin = Plugin::default(); - plugin - .stream_impl( - "stream".into(), - "cluster".into(), - stream_ok_port(2053).into(), + let res = Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: stream_ok().into(), + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "cluster_send".into(), + config: json!({ "send": "message", "to": "stream1" }).into(), + patterns: vec![], + }], ) - .await - .unwrap(); - plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "cluster_send".into(), - json!({ "send": "message", "to": "stream1" }).into(), - Vec::default(), - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_err()); + .await; + assert!(res.is_err(), "{:?}", res.map(|_| ())); } diff --git a/plugins/reaction-plugin-cluster/src/tests/e2e.rs b/plugins/reaction-plugin-cluster/src/tests/e2e.rs index 4c25468..fbf3a28 100644 --- a/plugins/reaction-plugin-cluster/src/tests/e2e.rs +++ b/plugins/reaction-plugin-cluster/src/tests/e2e.rs @@ -1,7 +1,7 @@ use std::{env::set_current_dir, time::Duration}; use assert_fs::TempDir; -use reaction_plugin::{ActionImpl, Exec, PluginInfo, StreamImpl}; +use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig}; use serde_json::json; use tokio::{fs, time::timeout}; use treedb::time::now; @@ -98,11 +98,10 @@ const POOL: [TestNode; 15] = [ ]; async fn stream_action( - plugin: &mut Plugin, name: &str, index: usize, nodes: &[TestNode], -) -> (StreamImpl, 
ActionImpl) { +) -> (StreamConfig, ActionConfig) { let stream_name = format!("stream_{name}"); let this_node = &nodes[index]; let other_nodes: Vec<_> = nodes @@ -120,37 +119,30 @@ async fn stream_action( .await .unwrap(); - let stream = plugin - .stream_impl( - stream_name.clone(), - "cluster".into(), - json!({ + ( + StreamConfig { + stream_name: stream_name.clone(), + stream_type: "cluster".into(), + config: json!({ "message_timeout": "30s", "listen_port": this_node.port, "nodes": other_nodes, }) .into(), - ) - .await - .unwrap(); - - let action = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "cluster_send".into(), - json!({ + }, + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "cluster_send".into(), + config: json!({ "send": format!("from {name}: "), "to": stream_name, }) .into(), - vec!["test".into()], - ) - .await - .unwrap(); - - (stream, action) + patterns: vec!["test".into()], + }, + ) } #[tokio::test] @@ -162,19 +154,33 @@ async fn two_nodes_simultaneous_startup() { let ((mut stream_a, action_a), (mut stream_b, action_b)) = if separate_plugin { let mut plugin_a = Plugin::default(); - let a = stream_action(&mut plugin_a, "a", 0, &POOL[0..2]).await; - plugin_a.finish_setup().await.unwrap(); + let (sa, aa) = stream_action("a", 0, &POOL[0..2]).await; + let (mut streams_a, mut actions_a) = + plugin_a.load_config(vec![sa], vec![aa]).await.unwrap(); + plugin_a.start().await.unwrap(); let mut plugin_b = Plugin::default(); - let b = stream_action(&mut plugin_b, "b", 1, &POOL[0..2]).await; - plugin_b.finish_setup().await.unwrap(); - (a, b) + let (sb, ab) = stream_action("b", 1, &POOL[0..2]).await; + let (mut streams_b, mut actions_b) = + plugin_b.load_config(vec![sb], vec![ab]).await.unwrap(); + plugin_b.start().await.unwrap(); + ( + (streams_a.remove(0), actions_a.remove(0)), + (streams_b.remove(0), actions_b.remove(0)), + ) } else { let mut plugin = 
Plugin::default(); - let a = stream_action(&mut plugin, "a", 0, &POOL[0..2]).await; - let b = stream_action(&mut plugin, "b", 1, &POOL[0..2]).await; - plugin.finish_setup().await.unwrap(); - (a, b) + let a = stream_action("a", 0, &POOL[0..2]).await; + let b = stream_action("b", 1, &POOL[0..2]).await; + let (mut streams, mut actions) = plugin + .load_config(vec![a.0, b.0], vec![a.1, b.1]) + .await + .unwrap(); + plugin.start().await.unwrap(); + ( + (streams.remove(0), actions.remove(0)), + (streams.remove(1), actions.remove(1)), + ) }; for m in ["test1", "test2", "test3"] { @@ -238,7 +244,6 @@ async fn n_nodes_simultaneous_startup() { let mut plugin = Plugin::default(); let name = format!("n{i}"); let (stream, action) = stream_action( - &mut plugin, &name, i, &POOL[0..n] @@ -252,10 +257,14 @@ async fn n_nodes_simultaneous_startup() { .as_slice(), ) .await; - plugin.finish_setup().await.unwrap(); + let (mut stream, mut action) = plugin + .load_config(vec![stream], vec![action]) + .await + .unwrap(); + plugin.start().await.unwrap(); plugins.push(plugin); - streams.push(stream); - actions.push((action, name)); + streams.push(stream.pop().unwrap()); + actions.push((action.pop().unwrap(), name)); } for m in ["test1", "test2", "test3", "test4", "test5"] { diff --git a/plugins/reaction-plugin-cluster/src/tests/self_.rs b/plugins/reaction-plugin-cluster/src/tests/self_.rs index 530e8ee..c06d55b 100644 --- a/plugins/reaction-plugin-cluster/src/tests/self_.rs +++ b/plugins/reaction-plugin-cluster/src/tests/self_.rs @@ -1,7 +1,7 @@ use std::{env::set_current_dir, time::Duration}; use assert_fs::TempDir; -use reaction_plugin::{Exec, PluginInfo}; +use reaction_plugin::{ActionConfig, Exec, PluginInfo, StreamConfig}; use serde_json::json; use tokio::time::timeout; use treedb::time::now; @@ -19,33 +19,34 @@ async fn run_with_self() { for self_ in [true, false] { let mut plugin = Plugin::default(); - let mut stream = plugin - .stream_impl( - "stream".into(), - "cluster".into(), - 
stream_ok_port(2052).into(), + let (mut streams, mut actions) = plugin + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "cluster".into(), + config: stream_ok_port(2052).into(), + }], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "cluster_send".into(), + config: json!({ + "send": "message ", + "to": "stream", + "self": self_, + }) + .into(), + patterns: vec!["test".into()], + }], ) .await .unwrap(); - assert!(stream.standalone); - let action = plugin - .action_impl( - "stream".into(), - "filter".into(), - "action".into(), - "cluster_send".into(), - json!({ - "send": "message ", - "to": "stream", - "self": self_, - }) - .into(), - vec!["test".into()], - ) - .await - .unwrap(); - assert!(plugin.finish_setup().await.is_ok()); + let mut stream = streams.pop().unwrap(); + let action = actions.pop().unwrap(); + assert!(stream.standalone); + assert!(plugin.start().await.is_ok()); for m in ["test1", "test2", "test3", " a a a aa a a"] { let time = now().into(); From 109fb6d86954154a532329e3ff2380bf21e55b4a Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 206/241] Adapt reaction core to plugin interface change --- src/concepts/action.rs | 20 ++++- src/concepts/stream.rs | 11 +++ src/daemon/filter/mod.rs | 23 +----- src/daemon/mod.rs | 2 +- src/daemon/plugin/mod.rs | 171 ++++++++++++++++++++++++--------------- src/daemon/stream.rs | 13 +-- 6 files changed, 146 insertions(+), 94 deletions(-) diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 4d5f765..04a2a4b 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -1,6 +1,6 @@ use std::{cmp::Ordering, collections::BTreeSet, fmt::Display, sync::Arc, time::Duration}; -use reaction_plugin::time::parse_duration; +use reaction_plugin::{ActionConfig, time::parse_duration}; use serde::{Deserialize, Serialize}; use serde_json::Value; use 
tokio::process::Command; @@ -154,6 +154,24 @@ impl Action { cmd.args(&computed_command[1..]); cmd } + + pub fn to_action_config(&self) -> Result { + Ok(ActionConfig { + stream_name: self.stream_name.clone(), + filter_name: self.filter_name.clone(), + action_name: self.name.clone(), + action_type: self + .action_type + .clone() + .ok_or_else(|| format!("action {} doesn't load a plugin. this is a bug!", self))?, + config: self.options.clone().into(), + patterns: self + .patterns + .iter() + .map(|pattern| pattern.name.clone()) + .collect(), + }) + } } impl PartialEq for Action { diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 8830c66..9a2734e 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -1,5 +1,6 @@ use std::{cmp::Ordering, collections::BTreeMap, hash::Hash}; +use reaction_plugin::StreamConfig; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -88,6 +89,16 @@ impl Stream { Ok(()) } + + pub fn to_stream_config(&self) -> Result { + Ok(StreamConfig { + stream_name: self.name.clone(), + stream_type: self.stream_type.clone().ok_or_else(|| { + format!("stream {} doesn't load a plugin. 
this is a bug!", self.name) + })?, + config: self.options.clone().into(), + }) + } } impl PartialEq for Stream { diff --git a/src/daemon/filter/mod.rs b/src/daemon/filter/mod.rs index a18b401..d0e2104 100644 --- a/src/daemon/filter/mod.rs +++ b/src/daemon/filter/mod.rs @@ -9,7 +9,7 @@ use chrono::TimeZone; use reaction_plugin::{ActionImpl, shutdown::ShutdownToken}; use regex::Regex; use tokio::sync::{Mutex, MutexGuard, Semaphore}; -use tracing::{debug, error, info}; +use tracing::{error, info}; use crate::{ concepts::{Action, Duplicate, Filter, Match, Pattern, Time}, @@ -65,24 +65,9 @@ impl FilterManager { for (action_name, action) in filter.actions.iter().filter(|action| action.1.is_plugin()) { action_plugins.insert( action_name, - plugins - .init_action_impl( - action.stream_name.clone(), - action.filter_name.clone(), - action.name.clone(), - action.action_type.clone().unwrap(), - action.options.clone(), - action - .patterns - .iter() - .map(|pattern| pattern.name.clone()) - .collect(), - ) - .await?, - ); - debug!( - "successfully intialized action {}.{}.{}", - action.stream_name, action.filter_name, action.name + plugins.get_action_impl(action.to_string()).ok_or_else(|| { + format!("action {action} doesn't load a plugin. 
this is a bug!") + })?, ); } let this = Self { diff --git a/src/daemon/mod.rs b/src/daemon/mod.rs index 17f90f3..18fb533 100644 --- a/src/daemon/mod.rs +++ b/src/daemon/mod.rs @@ -163,7 +163,7 @@ async fn daemon_start( } // Finish plugin setup - plugins.finish_setup().await?; + plugins.start().await?; plugins.manager(); // Start Stream managers diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index b9e7b88..52ad0e8 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -7,9 +7,10 @@ use std::{ }; use futures::{StreamExt, future::join_all}; -use reaction_plugin::{ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamImpl}; +use reaction_plugin::{ + ActionConfig, ActionImpl, Hello, PluginInfo, PluginInfoClient, StreamConfig, StreamImpl, +}; use remoc::Connect; -use serde_json::Value; use tokio::{ process::{Child, ChildStderr}, time::timeout, @@ -17,7 +18,7 @@ use tokio::{ use tracing::{error, info}; use crate::{ - concepts::{Config, Plugin}, + concepts::{Action, Config, Plugin, Stream}, daemon::{ShutdownToken, stream::reader_to_stream, utils::kill_child}, }; @@ -198,9 +199,18 @@ async fn handle_stderr(stderr: ChildStderr, plugin_name: String) { #[derive(Default)] pub struct Plugins { + /// Loaded plugins plugins: BTreeMap, - streams: BTreeMap, - actions: BTreeMap, + /// stream_type to plugin name + stream_to_plugin: BTreeMap, + /// action_type to plugin name + action_to_plugin: BTreeMap, + /// plugin name to config list + plugin_to_confs: BTreeMap, Vec<&'static Action>)>, + /// stream name to impl + stream_to_impl: BTreeMap, + /// action name to impl + action_to_impl: BTreeMap, } impl Plugins { @@ -214,6 +224,10 @@ impl Plugins { .map_err(|err| format!("plugin {name}: {err}]"))?; } + this.aggregate_plugin_configs(config)?; + + this.load_plugin_configs().await?; + Ok(this) } @@ -227,7 +241,7 @@ impl Plugins { let manager = PluginManager::new(plugin, state_directory, shutdown).await?; for stream in &manager.streams { - if let 
Some(name) = self.streams.insert(stream.clone(), name.clone()) { + if let Some(name) = self.stream_to_plugin.insert(stream.clone(), name.clone()) { return Err(format!( "plugin {name} already exposed a stream with type name '{stream}'", )); @@ -235,7 +249,7 @@ impl Plugins { } for action in &manager.actions { - if let Some(name) = self.actions.insert(action.clone(), name.clone()) { + if let Some(name) = self.action_to_plugin.insert(action.clone(), name.clone()) { return Err(format!( "plugin {name} already exposed a action with type name '{action}'", )); @@ -246,77 +260,95 @@ impl Plugins { Ok(()) } - pub async fn init_stream_impl( - &mut self, - stream_name: String, - stream_type: String, - config: Value, - ) -> Result { - let plugin_name = match self.streams.get(&stream_type) { - Some(name) => name, - None => { - display_plugin_exposed_types(&self.streams, "stream"); - return Err(format!( - "No plugin provided the stream type '{stream_type}'" - )); + fn aggregate_plugin_configs(&mut self, config: &'static Config) -> Result<(), String> { + for stream in config.streams.values() { + if stream.is_plugin() + && let Some(stream_type) = &stream.stream_type + { + let plugin_name = self.stream_to_plugin.get(stream_type).ok_or_else(|| { + display_plugin_exposed_types(&self.stream_to_plugin, "stream", stream_type) + })?; + let (streams, _) = self + .plugin_to_confs + .entry(plugin_name.to_owned()) + .or_default(); + streams.push(stream); } - }; - let plugin = self.plugins.get_mut(plugin_name).unwrap(); + for action in stream + .filters + .values() + .flat_map(|filter| filter.actions.values()) + { + if action.is_plugin() + && let Some(action_type) = &action.action_type + { + let plugin_name = self.action_to_plugin.get(action_type).ok_or_else(|| { + display_plugin_exposed_types(&self.action_to_plugin, "action", action_type) + })?; + let (_, actions) = self + .plugin_to_confs + .entry(plugin_name.to_owned()) + .or_default(); + actions.push(action); + } + } + } + Ok(()) + } - 
plugin - .stream_impl(stream_name.clone(), stream_type, config.into()) - .await - .map_err(|err| { - format!( - "plugin error while initializing stream {stream_name}: {}", - err.to_string().replace('\n', " ") + async fn load_plugin_configs(&mut self) -> Result<(), String> { + let plugin_to_confs = std::mem::take(&mut self.plugin_to_confs); + for (plugin_name, (streams, actions)) in plugin_to_confs { + let plugin = self + .plugins + .get_mut(&plugin_name) + .ok_or_else(|| format!("could not find plugin {plugin_name}. this is a bug!"))?; + + let stream_names: Vec = + streams.iter().map(|stream| stream.name.clone()).collect(); + let action_names: Vec = + actions.iter().map(|action| action.to_string()).collect(); + + let (stream_impls, action_impls) = plugin + .load_config( + streams + .into_iter() + .map(Stream::to_stream_config) + .collect::, String>>()?, + actions + .into_iter() + .map(Action::to_action_config) + .collect::, String>>()?, ) - }) + .await + .map_err(|err| { + format!("plugin {plugin_name} is not happy with your config: {err}") + })?; + + self.stream_to_impl + .extend(stream_names.into_iter().zip(stream_impls)); + self.action_to_impl + .extend(action_names.into_iter().zip(action_impls)); + } + + Ok(()) } - pub async fn init_action_impl( - &mut self, - stream_name: String, - filter_name: String, - action_name: String, - action_type: String, - config: Value, - patterns: Vec, - ) -> Result { - let plugin_name = match self.actions.get(&action_type) { - Some(name) => name, - None => { - display_plugin_exposed_types(&self.actions, "action"); - return Err(format!( - "No plugin provided the action type '{action_type}'" - )); - } - }; - - let plugin = self.plugins.get_mut(plugin_name).unwrap(); - - plugin - .action_impl( - stream_name.clone(), - filter_name.clone(), - action_name.clone(), - action_type, - config.into(), - patterns, - ) - .await - .map_err(|err| format!("plugin error while initializing action {stream_name}.{filter_name}.{action_name}: {}", - 
err.to_string().replace('\n', " ") - )) + pub fn get_stream_impl(&mut self, stream_name: String) -> Option { + self.stream_to_impl.remove(&stream_name) } - pub async fn finish_setup(&mut self) -> Result<(), String> { + pub fn get_action_impl(&mut self, action_fullname: String) -> Option { + self.action_to_impl.remove(&action_fullname) + } + + pub async fn start(&mut self) -> Result<(), String> { // Finish setup of all plugins join_all( self.plugins .values_mut() - .map(|plugin_manager| plugin_manager.finish_setup()), + .map(|plugin_manager| plugin_manager.start()), ) .await // Convert Vec> into Result @@ -342,7 +374,11 @@ impl Plugins { } } -fn display_plugin_exposed_types(type_to_plugin: &BTreeMap, name: &str) { +fn display_plugin_exposed_types( + type_to_plugin: &BTreeMap, + name: &str, + invalid: &str, +) -> String { let mut plugin_to_types: BTreeMap<&str, Vec<&str>> = BTreeMap::new(); for (type_, plugin) in type_to_plugin { plugin_to_types.entry(plugin).or_default().push(type_); @@ -353,4 +389,5 @@ fn display_plugin_exposed_types(type_to_plugin: &BTreeMap, name: types.join("', '") ); } + format!("No plugin provides the {name} type: {invalid}") } diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index 2b881e4..7d50b54 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -72,12 +72,13 @@ impl StreamManager { let stream_plugin = if stream.is_plugin() { Some( plugins - .init_stream_impl( - stream.name.clone(), - stream.stream_type.clone().unwrap(), - stream.options.clone(), - ) - .await?, + .get_stream_impl(stream.name.clone()) + .ok_or_else(|| { + format!( + "stream {} doesn't load a plugin. 
this is a bug!", + stream.name + ) + })?, ) } else { None From cce850fc7157a6b0739f92a5a6acf18686c5bc1b Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 207/241] Add recommandation on ipset or nftables rather than plain iptables --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 1461db7..58cb46b 100644 --- a/README.md +++ b/README.md @@ -136,6 +136,9 @@ local banFor(time) = {
+It is recommended to setup reaction with [`nftables`](https://reaction.ppom.me/actions/nftables.html) +or [`ipset` + `iptables`](https://reaction.ppom.me/actions/ipset.html), which are much more performant +solutions than `iptables` alone. ### Database From b7d997ca5e9a69c8572bb2ec9d27d0eb03b3cb9f Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 9 Feb 2026 12:00:00 +0100 Subject: [PATCH 208/241] Slight change on the "no audit" sentence --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 58cb46b..5aa33fb 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ A daemon that scans program outputs for repeated patterns, and takes action. A common usage is to scan ssh and webserver logs, and to ban hosts that cause multiple authentication errors. -🚧 This program hasn't received external security audit. However, it already works well on my servers 🚧 +🚧 This program hasn't received external security audit yet. However, it already works well on many servers 🚧 ## Rationale From b07b5064e9fa4d35792ed4911249aa4a5cca9d5d Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 11 Feb 2026 12:00:00 +0100 Subject: [PATCH 209/241] Improve reaction-plugin developer documentation --- plugins/reaction-plugin/src/lib.rs | 277 ++++++++++++++++++++---- plugins/reaction-plugin/src/line.rs | 8 +- plugins/reaction-plugin/src/shutdown.rs | 52 ++++- plugins/reaction-plugin/src/time.rs | 6 + 4 files changed, 297 insertions(+), 46 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 2ce95dc..38a91d1 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -1,6 +1,6 @@ //! This crate defines the API between reaction's core and plugins. //! -//! Plugins must be written in Rust. +//! Plugins must be written in Rust, for now. //! //! This documentation assumes the reader has some knowledge of Rust. //! 
However, if you find that something is unclear, don't hesitate to @@ -10,6 +10,14 @@ //! the entrypoint for a plugin. //! It permits to define `0` to `n` custom stream and action types. //! +//! ## Note on reaction-plugin API stability +//! +//! This is the v1 of reaction's plugin interface. +//! It's quite efficient and complete, but it has the big drawback of being Rust-only and [`tokio`]-only. +//! +//! In the future, I'd like to define a language-agnostic interface, which will be a major breaking change in the API. +//! However, I'll try my best to reduce the necessary code changes for plugins that use this v1. +//! //! ## Naming & calling conventions //! //! Your plugin should be named `reaction-plugin-$NAME`, eg. `reaction-plugin-postgresql`. @@ -20,26 +28,38 @@ //! This can be useful if you want to provide CLI functionnality to your users, //! so you can distinguish between a human user and reaction. //! +//! ### State directory +//! //! It will be executed in its own directory, in which it should have write access. //! The directory is `$reaction_state_directory/plugin_data/$NAME`. //! reaction's [state_directory](https://reaction.ppom.me/reference.html#state_directory) -//! defaults to its working directory. +//! defaults to its working directory, which is `/var/lib/reaction` in most setups. +//! +//! So your plugin directory should most often be `/var/lib/reaction/plugin_data/$NAME`, +//! but the plugin shouldn't expect that and use the current working directory instead. //! //! ## Communication //! //! Communication between the plugin and reaction is based on [`remoc`], which permits to multiplex channels and remote objects/functions/trait //! calls over a single transport channel. -//! The channels is made of stdin and stdout, so don't use them for something else. +//! The channels read and write channels are stdin and stdout, so you shouldn't use them for something else. //! -//! [`remoc`] build upon [`tokio`], so you'll need to use tokio too. +//! 
[`remoc`] builds upon [`tokio`], so you'll need to use tokio too. //! //! ### Errors //! -//! Errors can be printed to stderr. +//! Errors during: +//! - config loading in [`PluginInfo::load_config`] +//! - startup in [`PluginInfo::start`] +//! +//! should be returned to reaction by the function's return value, permitting reaction to abort startup. +//! +//! During normal runtime, after the plugin has loaded its config and started, and before reaction is quitting, there is no *rusty* way to send errors to reaction. +//! Then errors can be printed to stderr. //! They'll be captured line by line and re-printed by reaction, with the plugin name prepended. //! //! A line can start with `DEBUG `, `INFO `, `WARN `, `ERROR `. -//! If the starts with none of the above, the line is assumed to be an error. +//! If it starts with none of the above, the line is assumed to be an error. //! //! Example: //! Those lines: @@ -53,27 +73,31 @@ //! ERROR plugin test: Freeeee errrooooorrr //! ``` //! -//! ## Helpers +//! Plugins should not exit when there is an error: reaction quits only when told to do so, +//! or if all its streams exit, and won't retry starting a failing plugin or stream. +//! Please only exit if you're in a 100% failing state. +//! It's considered better to continue operating in a degraded state than exiting. //! -//! Those helpers permits to easily maintain similar configuration interfaces accross plugins: +//! ## Getting started //! -//! - [`line::PatternLine`], to permit users to use templated lines (ie. "\ bad password"). -//! - [`time::parse_duration`] to parse durations (ie. "6h", "3 days"). +//! If you don't have Rust already installed, follow their [*Getting Started* documentation](https://rust-lang.org/learn/get-started/) +//! to get rust build tools and learn about editor support. //! -//! Those helpers solve common issues for reaction plugins: -//! -//! 
- The [`shutdown`] module provides structures to ease the quitting process when having multiple tokio tasks. -//! -//! ## Starting template +//! Then create a new repository with cargo: //! //! ```bash //! cargo new reaction-plugin-$NAME //! cd reaction-plugin-$NAME -//! cargo add reaction-plugin tokio -//! vim src/main.rs //! ``` //! -//! `src/main.rs` +//! Add required dependencies: +//! +//! ```bash +//! cargo add reaction-plugin tokio +//! ``` +//! +//! Replace `src/main.rs` with those contents: +//! //! ```ignore //! use reaction_plugin::PluginInfo; //! @@ -86,15 +110,20 @@ //! #[derive(Default)] //! struct MyPlugin {} //! -//! impl PluginInfo for Plugin { -//! // ... -//! // Your IDE should propose to implement missing members of the `Plugin` trait +//! impl PluginInfo for MyPlugin { //! // ... //! } //! ``` //! +//! Your IDE should now propose to implement missing members of the [`PluginInfo`] trait. +//! Your journey starts! +//! +//! ## Examples +//! //! Core plugins can be found here: . -//! The "virtual" plugin is the simplest and can serve as a good complete example. +//! +//! - The "virtual" plugin is the simplest and can serve as a good complete example that links custom stream types and custom action types. +//! - The "ipset" plugin is a good example of an action-only plugin. use std::{ collections::{BTreeMap, BTreeSet}, @@ -116,18 +145,35 @@ pub mod line; pub mod shutdown; pub mod time; -/// This is the only trait that **must** be implemented by a plugin. +/// The only trait that **must** be implemented by a plugin. /// It provides lists of stream, filter and action types implemented by a dynamic plugin. #[rtc::remote] pub trait PluginInfo { /// Return the manifest of the plugin. + /// This should not be dynamic, and return always the same manifest. 
+ /// + /// Example implementation: + /// ``` + /// Ok(Manifest { + /// hello: Hello::new(), + /// streams: BTreeSet::from(["mystreamtype".into()]), + /// actions: BTreeSet::from(["myactiontype".into()]), + /// }) + /// ``` + /// + /// First function called. async fn manifest(&mut self) -> Result; - /// Load all plugin stream and action configurations, - /// Errors if config is invalid. + /// Load all plugin stream and action configurations. + /// Must error if config is invalid. /// /// The plugin should not start running mutable commands here: /// It should be ok to quit without cleanup for now. + /// + /// Each [`StreamConfig`] from the `streams` arg should result in a corresponding [`StreamImpl`] returned, in the same order. + /// Each [`ActionConfig`] from the `actions` arg should result in a corresponding [`ActionImpl`] returned, in the same order. + /// + /// Function called after [`PluginInfo::manifest`]. async fn load_config( &mut self, streams: Vec, @@ -136,14 +182,62 @@ pub trait PluginInfo { /// Notify the plugin that setup is finished, permitting a last occasion to report an error that'll make reaction exit. /// All initialization (opening remote connections, starting streams, etc) should happen here. + /// + /// Function called after [`PluginInfo::load_config`]. async fn start(&mut self) -> RemoteResult<()>; /// Notify the plugin that reaction is quitting and that the plugin should quit too. /// A few seconds later, the plugin will receive SIGTERM. /// A few seconds later, the plugin will receive SIGKILL. + /// + /// Function called after [`PluginInfo::start`], when reaction is quitting. async fn close(mut self) -> RemoteResult<()>; } +/// The config for one Stream of a type advertised by this plugin. +/// +/// For example this user config: +/// ```jsonnet +/// { +/// streams: { +/// mystream: { +/// type: "mystreamtype", +/// options: { +/// key: "value", +/// num: 3, +/// }, +/// // filters: ... 
+/// }, +/// }, +/// } +/// ``` +/// +/// would result in the following `StreamConfig`: +/// +/// ``` +/// StreamConfig { +/// stream_name: "mystream", +/// stream_type: "mystreamtype", +/// config: Value::Object(BTreeMap::from([ +/// ("key", Value::String("value")), +/// ("num", Value::Integer(3)), +/// ])), +/// } +/// ``` +/// +/// Don't hesitate to take advantage of [`serde_json::from_value`], to deserialize the [`Value`] into a Rust struct: +/// +/// ``` +/// #[derive(Deserialize)] +/// struct MyStreamOptions { +/// key: String, +/// num: i64, +/// } +/// +/// fn validate_config(stream_config: Value) -> Result { +/// serde_json::from_value(stream_config.into()) +/// } +/// ``` #[derive(Serialize, Deserialize, Clone)] pub struct StreamConfig { pub stream_name: String, @@ -151,6 +245,59 @@ pub struct StreamConfig { pub config: Value, } +/// The config for one Stream of a type advertised by this plugin. +/// +/// For example this user config: +/// ```jsonnet +/// { +/// streams: { +/// mystream: { +/// // ... +/// filters: { +/// myfilter: { +/// // ... 
+/// actions: { +/// myaction: { +/// type: "myactiontype", +/// options: { +/// boolean: true, +/// array: ["item"], +/// }, +/// }, +/// }, +/// }, +/// }, +/// }, +/// }, +/// } +/// ``` +/// +/// would result in the following `ActionConfig`: +/// +/// ```rust +/// ActionConfig { +/// action_name: "myaction", +/// action_type: "myactiontype", +/// config: Value::Object(BTreeMap::from([ +/// ("boolean", Value::Boolean(true)), +/// ("array", Value::Array([Value::String("item")])), +/// ])), +/// } +/// ``` +/// +/// Don't hesitate to take advantage of [`serde_json::from_value`], to deserialize the [`Value`] into a Rust struct: +/// +/// ```rust +/// #[derive(Deserialize)] +/// struct MyActionOptions { +/// boolean: bool, +/// array: Vec, +/// } +/// +/// fn validate_config(action_config: Value) -> Result { +/// serde_json::from_value(action_config.into()) +/// } +/// ``` #[derive(Serialize, Deserialize, Clone)] pub struct ActionConfig { pub stream_name: String, @@ -161,11 +308,14 @@ pub struct ActionConfig { pub patterns: Vec, } +/// Mandatory announcement of a plugin's protocol version, stream and action types. #[derive(Serialize, Deserialize)] pub struct Manifest { - // Protocol version. available as the [`hello!`] macro. + // Protocol version. + // Just use the [`Hello::new`] constructor that uses this crate's current version. 
pub hello: Hello, - /// stream types that should be made available to reaction users + /// Stream types that should be made available to reaction users + /// /// ```jsonnet /// { /// streams: { @@ -177,7 +327,26 @@ pub struct Manifest { /// } /// ``` pub streams: BTreeSet, - /// All action types that should be made available to reaction users + /// Action types that should be made available to reaction users + /// + /// ```jsonnet + /// { + /// streams: { + /// mystream: { + /// filters: { + /// myfilter: { + /// actions: { + /// myaction: { + /// type: "myactiontype", + /// # ↑ all those exposed types + /// }, + /// }, + /// }, + /// }, + /// }, + /// }, + /// } + /// ``` pub actions: BTreeSet, } @@ -192,6 +361,8 @@ pub struct Hello { } impl Hello { + /// Constructor that fills a [`Hello`] struct with [`crate`]'s version. + /// You should use this in your plugin [`Manifest`]. pub fn new() -> Hello { Hello { version_major: env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap(), @@ -199,6 +370,9 @@ impl Hello { } } + /// Used by the reaction daemon. Permits to check compatibility between two versions. + /// Major versions must be the same between the daemon and plugin. + /// Minor version of the daemon must be greater than or equal minor version of the plugin. pub fn is_compatible(server: &Hello, plugin: &Hello) -> std::result::Result<(), String> { if server.version_major == plugin.version_major && server.version_minor >= plugin.version_minor @@ -215,8 +389,8 @@ impl Hello { } } -/// A clone of [`serde_json::Value`] -/// Implements From & Into [`serde_json::Value`] +/// A clone of [`serde_json::Value`]. +/// Implements From & Into [`serde_json::Value`]. #[derive(Serialize, Deserialize, Clone)] pub enum Value { Null, @@ -263,14 +437,18 @@ impl Into for Value { } } -pub type Line = (String, Duration); - +/// Represents a Stream handled by a plugin on reaction core's side. 
+/// +/// During [`PluginInfo::load_config`], the plugin should create a [`remoc::rch::mpsc::channel`] of [`Line`]. +/// It will keep the sending side for itself and put the receiving side in a [`StreamImpl`]. +/// +/// The plugin should start sending [`Line`]s in the channel only after [`PluginInfo::start`] has been called by reaction core. #[derive(Debug, Serialize, Deserialize)] pub struct StreamImpl { pub stream: rch::mpsc::Receiver, - /// Whether this stream works standalone, or if it needs other streams to be fed. + /// Whether this stream works standalone, or if it needs other streams or actions to be fed. /// Defaults to true. - /// When false, reaction will exit if it's the last one standing. + /// When `false`, reaction will exit if it's the last one standing. #[serde(default = "_true")] pub standalone: bool, } @@ -279,22 +457,37 @@ fn _true() -> bool { true } +/// Messages passed from the [`StreamImpl`] of a plugin to reaction core +pub type Line = (String, Duration); + +// // Filters +// // For now, plugins can't handle custom filter implementations. // #[derive(Serialize, Deserialize)] // pub struct FilterImpl { // pub stream: rch::lr::Sender, // } - // #[derive(Serialize, Deserialize)] // pub struct Match { // pub match_: String, // pub result: rch::oneshot::Sender, // } +/// Represents an Action handled by a plugin on reaction core's side. +/// +/// During [`PluginInfo::load_config`], the plugin should create a [`remoc::rch::mpsc::channel`] of [`Exec`]. +/// It will keep the receiving side for itself and put the sending side in a [`ActionImpl`]. +/// +/// The plugin will start receiving [`Exec`]s in the channel from reaction only after [`PluginInfo::start`] has been called by reaction core. #[derive(Clone, Serialize, Deserialize)] pub struct ActionImpl { pub tx: rch::mpsc::Sender, } +/// A [trigger](https://reaction.ppom.me/reference.html#trigger) of the Action, sent by reaction core to the plugin. 
+/// +/// The plugin should perform the configured action for each received [`Exec`]. +/// +/// Any error during its execution should be logged to stderr, see [`crate#Errors`] for error handling recommandations. #[derive(Serialize, Deserialize)] pub struct Exec { pub match_: Vec, @@ -303,6 +496,9 @@ pub struct Exec { /// The main loop for a plugin. /// +/// Bootstraps the communication with reaction core on the process' stdin and stdout, +/// then holds the connection and maintains the plugin in a server state. +/// /// Your main function should only create a struct that implements [`PluginInfo`] /// and then call [`main_loop`]: /// ```ignore @@ -344,11 +540,18 @@ pub async fn main_loop(plugin_info: T) { pub type RemoteResult = Result; -/// A Plugin Error -/// It's either a connection error or a free String for plugin-specific errors +/// reaction-plugin's Error type. #[derive(Debug, Serialize, Deserialize)] pub enum RemoteError { + /// A connection error that origins from [`remoc`], the crate used for communication on the plugin's `stdin`/`stdout`. + /// + /// You should not instantiate this type of error yourself. Remoc(rtc::CallError), + /// A free String for application-specific errors. + /// + /// You should only instantiate this type of error yourself, for any error that you encounter at startup and shutdown. + /// + /// Otherwise, any error during the plugin's runtime should be logged to stderr, see [`crate#Errors`] for error handling recommandations. Plugin(String), } diff --git a/plugins/reaction-plugin/src/line.rs b/plugins/reaction-plugin/src/line.rs index bf91a8c..7b315d0 100644 --- a/plugins/reaction-plugin/src/line.rs +++ b/plugins/reaction-plugin/src/line.rs @@ -1,3 +1,9 @@ +//! Helper module that permits to use templated lines (ie. `bad password for `), like in Stream's and Action's `cmd`. +//! +//! Corresponding reaction core settings: +//! - [Stream's `cmd`](https://reaction.ppom.me/reference.html#cmd) +//! 
- [Action's `cmd`](https://reaction.ppom.me/reference.html#cmd-1) +//! #[derive(Debug, PartialEq, Eq)] enum SendItem { Index(usize), @@ -55,7 +61,7 @@ pub struct PatternLine { impl PatternLine { /// Construct [`PatternLine`] from a template line and the list of patterns of the underlying [Filter](https://reaction.ppom.me/reference.html#filter). /// - /// This list of patterns comes from [`PluginInfo::action_impl`]. + /// This list of patterns comes from [`super::ActionConfig`]. pub fn new(template: String, patterns: Vec) -> Self { let line = Self::_from(patterns, Vec::from([SendItem::Str(template)])); Self { diff --git a/plugins/reaction-plugin/src/shutdown.rs b/plugins/reaction-plugin/src/shutdown.rs index f3c6c96..cc9ee4f 100644 --- a/plugins/reaction-plugin/src/shutdown.rs +++ b/plugins/reaction-plugin/src/shutdown.rs @@ -1,10 +1,49 @@ +//! Helper module that provides structures to ease the quitting process when having multiple tokio tasks. +//! +//! It defines a [`ShutdownController`], that permits to keep track of ongoing tasks, ask them to shutdown and wait for all of them to quit. +//! +//! You can have it as an attribute of your plugin struct. +//! ``` +//! struct MyPlugin { +//! shutdown: ShutdownController +//! } +//! ``` +//! +//! You can then give a [`ShutdownToken`] to other tasks when creating them: +//! +//! ``` +//! impl PluginInfo for MyPlugin { +//! async fn start(&mut self) -> RemoteResult<()> { +//! let token = self.shutdown.token(); +//! +//! tokio::spawn(async move { +//! token.wait().await; +//! eprintln!("DEBUG shutdown asked to quit, now quitting") +//! }) +//! } +//! } +//! ``` +//! +//! On closing, calling [`ShutdownController::ask_shutdown`] will inform all tasks waiting on [`ShutdownToken::wait`] that it's time to leave. +//! Then we can wait for [`ShutdownController::wait_all_task_shutdown`] to complete. +//! +//! ``` +//! impl PluginInfo for MyPlugin { +//! async fn close(self) -> RemoteResult<()> { +//! 
self.shutdown.ask_shutdown(); +//! self.shutdown.wait_all_task_shutdown().await; +//! Ok(()) +//! } +//! } +//! ``` + use tokio_util::{ sync::{CancellationToken, WaitForCancellationFuture}, task::task_tracker::{TaskTracker, TaskTrackerToken}, }; -/// Permits to keep track of ongoing tasks, ask them to shutdown and for all of them to quit. -/// Stupid wrapper around [`tokio_util::CancellationToken`] and [`tokio_util::task_tracker::TaskTracker`]. +/// Permits to keep track of ongoing tasks, ask them to shutdown and wait for all of them to quit. +/// Stupid wrapper around [`tokio_util::sync::CancellationToken`] and [`tokio_util::task::task_tracker::TaskTracker`]. #[derive(Default, Clone)] pub struct ShutdownController { shutdown_notifyer: CancellationToken, @@ -12,12 +51,8 @@ pub struct ShutdownController { } impl ShutdownController { - #[allow(clippy::new_without_default)] pub fn new() -> Self { - Self { - shutdown_notifyer: CancellationToken::new(), - task_tracker: TaskTracker::new(), - } + Self::default() } /// Ask for all tasks to quit @@ -66,7 +101,7 @@ impl ShutdownDelegate { /// /// - Wait for a shutdown request to happen with [`Self::wait`] /// - Keep track of the current task. While this token is held, -/// [`ShutdownController::wait_shutdown`] will block. +/// [`ShutdownController::wait_all_task_shutdown`] will block. #[derive(Clone)] pub struct ShutdownToken { shutdown_notifyer: CancellationToken, @@ -81,6 +116,7 @@ impl ShutdownToken { } } + /// Returns underlying [`CancellationToken`] and [`TaskTrackerToken`], consuming self. pub fn split(self) -> (CancellationToken, TaskTrackerToken) { (self.shutdown_notifyer, self._task_tracker_token) } diff --git a/plugins/reaction-plugin/src/time.rs b/plugins/reaction-plugin/src/time.rs index d682429..60f5914 100644 --- a/plugins/reaction-plugin/src/time.rs +++ b/plugins/reaction-plugin/src/time.rs @@ -1,3 +1,9 @@ +//! This module provides [`parse_duration`], which parses duration in reaction's format (ie. 
`6h`, `3 days`) +//! +//! Like in those reaction core settings: +//! - [Filters' `retryperiod`](https://reaction.ppom.me/reference.html#retryperiod) +//! - [Actions' `after`](https://reaction.ppom.me/reference.html#after). + use std::time::Duration; /// Parses the &str argument as a Duration From a8651bf2e03cf3859b2bfaea0c2fd41bab662170 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 11 Feb 2026 12:00:00 +0100 Subject: [PATCH 210/241] Removal of nft46 and ip46tables --- .gitignore | 2 - Cargo.toml | 2 - Makefile | 2 - README.md | 21 +++------ build.rs | 50 +--------------------- helpers_c/README.md | 12 ------ helpers_c/ip46tables.c | 91 --------------------------------------- helpers_c/nft46.c | 97 ------------------------------------------ packaging/Makefile | 4 +- release.py | 2 - 10 files changed, 8 insertions(+), 275 deletions(-) delete mode 100644 helpers_c/README.md delete mode 100644 helpers_c/ip46tables.c delete mode 100644 helpers_c/nft46.c diff --git a/.gitignore b/.gitignore index f342516..55ecad6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,4 @@ /reaction -/ip46tables -/nft46 reaction*.db reaction*.db.old /data diff --git a/Cargo.toml b/Cargo.toml index 3e72f6b..5f626de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,8 +22,6 @@ systemd-units = { enable = false } assets = [ # Executables [ "target/release/reaction", "/usr/bin/reaction", "755" ], - [ "target/release/ip46tables", "/usr/bin/ip46tables", "755" ], - [ "target/release/nft46", "/usr/bin/nft46", "755" ], # Man pages [ "target/release/reaction*.1", "/usr/share/man/man1/", "644" ], # Shell completions diff --git a/Makefile b/Makefile index 3efd558..9b31a54 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,6 @@ reaction: install: reaction install -m755 target/release/reaction $(DESTDIR)$(BINDIR) - install -m755 target/release/ip46tables $(DESTDIR)$(BINDIR) - install -m755 target/release/nft46 $(DESTDIR)$(BINDIR) install_systemd: install install -m644 packaging/reaction.service 
$(SYSTEMDDIR)/system/reaction.service diff --git a/README.md b/README.md index 5aa33fb..7d33ab5 100644 --- a/README.md +++ b/README.md @@ -136,9 +136,9 @@ local banFor(time) = {
-It is recommended to setup reaction with [`nftables`](https://reaction.ppom.me/actions/nftables.html) -or [`ipset` + `iptables`](https://reaction.ppom.me/actions/ipset.html), which are much more performant -solutions than `iptables` alone. +> It is recommended to setup reaction with [`nftables`](https://reaction.ppom.me/actions/nftables.html) +> or [`ipset` + `iptables`](https://reaction.ppom.me/actions/ipset.html), which are much more performant +> solutions than `iptables` alone. ### Database @@ -155,19 +155,10 @@ If you don't know where to start reaction, `/var/lib/reaction` should be a sane - `reaction test-config` shows loaded configuration - `reaction help` for full usage. -### `ip46tables` and `nft46` +### old binaries -> ⚠️Deprecated since v2.2.0: -> reaction now provides builtin support for executing different actions on ipv4 and ipv6. -> They will be removed in a future version. - -`ip46tables` and `nft46` are two minimal c programs present in the `helpers_c` directory with only standard posix dependencies. - -`ip46tables` permits to configure `iptables` and `ip6tables` at the same time. -It will execute `iptables` when detecting ipv4, `ip6tables` when detecting ipv6 and both if no ip address is present on the command line. - -`nft46` works slightly differently: it will replace the `X` in its argument by 4 or 6 depending on the ip address on the command line. -This permits to have 2 IP sets, one of type `ipv4_addr` and one of type `ipv6_addr`. +`ip46tables` and `nft46` binaries are no longer part of reaction. If you really need them, see +[the last commit that included them](https://framagit.org/ppom/reaction/-/tree/b7d997ca5e9a69c8572bb2ec9d27d0eb03b3cb9f/helpers_c). 
## Wiki diff --git a/build.rs b/build.rs index 97c433e..ff070e0 100644 --- a/build.rs +++ b/build.rs @@ -1,8 +1,6 @@ use std::{ - env::{var, var_os}, + env::var_os, io::{self, ErrorKind}, - path::Path, - process, }; use clap_complete::shells; @@ -10,54 +8,10 @@ use clap_complete::shells; // SubCommand defined here include!("src/cli.rs"); -fn cc() -> String { - // TARGET looks like aarch64-unknown-linux-musl - let cc = match var("TARGET") { - Ok(target) => { - // We're looking for an environment variable looking like - // CC_aarch64_unknown_linux_musl - let target = target.replace("-", "_"); - var(format!("CC_{}", target.replace("-", "_"))).ok() - } - Err(_) => None, - }; - match cc { - Some(cc) => Some(cc), - // Else we're looking for CC environment variable - None => var("CC").ok(), - } - // Else we use `cc` - .unwrap_or("cc".into()) -} - -fn compile_helper(cc: &str, name: &str, out_dir: &Path) -> io::Result<()> { - let mut args = vec![ - format!("helpers_c/{name}.c"), - "-o".into(), - out_dir - .join(name) - .to_str() - .expect("could not join path") - .to_owned(), - ]; - // We can build static executables in cross environment - if cc.ends_with("-gcc") { - args.push("-static".into()); - } - process::Command::new(cc).args(args).spawn()?; - Ok(()) -} - fn main() -> io::Result<()> { if var_os("PROFILE").ok_or(ErrorKind::NotFound)? 
== "release" { let out_dir = PathBuf::from(var_os("OUT_DIR").ok_or(ErrorKind::NotFound)?).join("../../.."); - // Compile C helpers - let cc = cc(); - println!("CC is: {}", cc); - compile_helper(&cc, "ip46tables", &out_dir)?; - compile_helper(&cc, "nft46", &out_dir)?; - // Build CLI let cli = clap::Command::new("reaction"); let cli = SubCommand::augment_subcommands(cli); @@ -80,8 +34,6 @@ See usage examples, service configurations and good practices on the wiki: https println!("cargo::rerun-if-changed=build.rs"); println!("cargo::rerun-if-changed=src/cli.rs"); - println!("cargo::rerun-if-changed=helpers_c/ip46tables.c"); - println!("cargo::rerun-if-changed=helpers_c/nft46.c"); Ok(()) } diff --git a/helpers_c/README.md b/helpers_c/README.md deleted file mode 100644 index 2407f19..0000000 --- a/helpers_c/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# C helpers - -Those helpers permit to handle IPv4 & IPv6 at the same time, waiting for [#79](https://framagit.org/ppom/reaction/-/issues/79) to be addressed. - -Compilation: - -```bash -# Produces nft46 binary -gcc -o nft46 nft46.c -# Produces ip46tables binary -gcc -o ip46tables ip46tables.c -``` diff --git a/helpers_c/ip46tables.c b/helpers_c/ip46tables.c deleted file mode 100644 index 02ce85a..0000000 --- a/helpers_c/ip46tables.c +++ /dev/null @@ -1,91 +0,0 @@ -#include -#include -#include -#include -#include -#include - -// If this programs -// - receives an ipv4 address in its arguments: -// → it will executes iptables with the same arguments in place. -// -// - receives an ipv6 address in its arguments: -// → it will executes ip6tables with the same arguments in place. -// -// - doesn't receive an ipv4 or ipv6 address in its arguments: -// → it will executes both, with the same arguments in place. 
- -int isIPv4(char *tab) { - int i,len; - // IPv4 addresses are at least 7 chars long - len = strlen(tab); - if (len < 7 || !isdigit(tab[0]) || !isdigit(tab[len-1])) { - return 0; - } - // Each char must be a digit or a dot between 2 digits - for (i=1; i= 'a' && tab[i] <= 'f') && !(tab[i] >= 'A' && tab[i] <= 'F')) { - return 0; - } - } - return 1; -} - -int guess_type(int len, char *tab[]) { - int i; - for (i=0; i -#include -#include -#include -#include -#include - -// nft46 'add element inet reaction ipvXbans { 1.2.3.4 }' → nft 'add element inet reaction ipv4bans { 1.2.3.4 }' -// nft46 'add element inet reaction ipvXbans { a:b::c:d }' → nft 'add element inet reaction ipv6bans { a:b::c:d }' -// -// the character X is replaced by 4 or 6 depending on the address family of the specified IP -// -// Limitations: -// - nft46 must receive exactly one argument -// - only one IP must be given per command -// - the IP must be between { braces } - -int isIPv4(char *tab, int len) { - int i; - // IPv4 addresses are at least 7 chars long - if (len < 7 || !isdigit(tab[0]) || !isdigit(tab[len-1])) { - return 0; - } - // Each char must be a digit or a dot between 2 digits - for (i=1; i= 'a' && tab[i] <= 'f') && !(tab[i] >= 'A' && tab[i] <= 'F')) { - return 0; - } - } - return 1; -} - -int findchar(char *tab, char c, int i, int len) { - while (i < len && tab[i] != c) i++; - if (i == len) { - printf("nft46: one %c must be present", c); - exit(1); - } - return i; -} - -void adapt_args(char *tab) { - int i, len, X, startIP, endIP, startedIP; - X = startIP = endIP = -1; - startedIP = 0; - len = strlen(tab); - i = 0; - X = i = findchar(tab, 'X', i, len); - startIP = i = findchar(tab, '{', i, len); - while (startIP + 1 <= (i = findchar(tab, ' ', i, len))) startIP = i + 1; - i = startIP; - endIP = i = findchar(tab, ' ', i, len) - 1; - - if (isIPv4(tab+startIP, endIP-startIP+1)) { - tab[X] = '4'; - return; - } - - if (isIPv6(tab+startIP, endIP-startIP+1)) { - tab[X] = '6'; - return; - } - - 
printf("nft46: no IP address found\n"); - exit(1); -} - -void exec(char *str, char **argv) { - argv[0] = str; - execvp(str, argv); - // returns only if fails - printf("nft46: exec failed %d\n", errno); -} - -int main(int argc, char **argv) { - if (argc != 2) { - printf("nft46: Exactly one argument must be given\n"); - exit(1); - } - adapt_args(argv[1]); - exec("nft", argv); -} diff --git a/packaging/Makefile b/packaging/Makefile index 4727ac8..ead1735 100644 --- a/packaging/Makefile +++ b/packaging/Makefile @@ -4,7 +4,7 @@ MANDIR = $(PREFIX)/share/man/man1 SYSTEMDDIR ?= /etc/systemd install: - install -Dm755 reaction nft46 ip46tables $(DESTDIR)$(BINDIR) + install -Dm755 reaction $(DESTDIR)$(BINDIR) install -Dm644 reaction*.1 -t $(DESTDIR)$(MANDIR)/ install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish @@ -13,8 +13,6 @@ install: remove: rm -f $(DESTDIR)$(BINDIR)/bin/reaction - rm -f $(DESTDIR)$(BINDIR)/bin/nft46 - rm -f $(DESTDIR)$(BINDIR)/bin/ip46tables rm -f $(DESTDIR)$(MANDIR)/reaction*.1 rm -f $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction rm -f $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish diff --git a/release.py b/release.py index 8567551..06c5977 100644 --- a/release.py +++ b/release.py @@ -162,8 +162,6 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service files = [ # Binaries "reaction", - "nft46", - "ip46tables", # Shell completion "reaction.bash", "reaction.fish", From a37a5e5752a2b42f57a813893704e8548c093f94 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 11 Feb 2026 12:00:00 +0100 Subject: [PATCH 211/241] release v2.3.0 - cross-rs project doesn't compile anymore: switching to debian12-amd64 only binary release - package virtual plugin in reaction .deb - package ipset plugin in separate .deb with its required libipset-dev dependency --- Cargo.lock | 6 +- Cargo.toml | 3 +- 
Dockerfile | 11 ++ packaging/Makefile | 8 +- plugins/reaction-plugin-ipset/Cargo.toml | 16 ++- plugins/reaction-plugin-virtual/Cargo.toml | 2 +- release.py | 159 +++++++++++++-------- 7 files changed, 139 insertions(+), 66 deletions(-) create mode 100644 Dockerfile diff --git a/Cargo.lock b/Cargo.lock index dac6c60..f5b71b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2828,7 +2828,7 @@ dependencies = [ [[package]] name = "reaction" -version = "2.2.1" +version = "2.3.0" dependencies = [ "assert_cmd", "assert_fs", @@ -2889,7 +2889,7 @@ dependencies = [ [[package]] name = "reaction-plugin-ipset" -version = "0.1.0" +version = "1.0.0" dependencies = [ "ipset", "reaction-plugin", @@ -2901,7 +2901,7 @@ dependencies = [ [[package]] name = "reaction-plugin-virtual" -version = "0.1.0" +version = "1.0.0" dependencies = [ "reaction-plugin", "remoc", diff --git a/Cargo.toml b/Cargo.toml index 5f626de..dd537aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction" -version = "2.2.1" +version = "2.3.0" edition = "2024" authors = ["ppom "] license = "AGPL-3.0" @@ -22,6 +22,7 @@ systemd-units = { enable = false } assets = [ # Executables [ "target/release/reaction", "/usr/bin/reaction", "755" ], + [ "target/release/reaction-plugin-virtual", "/usr/bin/reaction-plugin-virtual", "755" ], # Man pages [ "target/release/reaction*.1", "/usr/share/man/man1/", "644" ], # Shell completions diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..1c5d54e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,11 @@ +# This Dockerfile permits to build reaction and its plugins + +# Use debian old-stable, so that it runs on both old-stable and stable +FROM rust:bookworm + +RUN apt update && apt install -y \ + clang \ + libipset-dev \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /reaction diff --git a/packaging/Makefile b/packaging/Makefile index ead1735..ac1a064 100644 --- a/packaging/Makefile +++ b/packaging/Makefile @@ -4,15 +4,21 @@ MANDIR = 
$(PREFIX)/share/man/man1 SYSTEMDDIR ?= /etc/systemd install: - install -Dm755 reaction $(DESTDIR)$(BINDIR) + install -Dm755 reaction $(DESTDIR)$(BINDIR) + install -Dm755 reaction-plugin-virtual $(DESTDIR)$(BINDIR) install -Dm644 reaction*.1 -t $(DESTDIR)$(MANDIR)/ install -Dm644 reaction.bash $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction install -Dm644 reaction.fish $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish install -Dm644 _reaction $(DESTDIR)$(PREFIX)/share/zsh/vendor-completions/_reaction install -Dm644 reaction.service $(SYSTEMDDIR)/system/reaction.service +install-ipset: + install -Dm755 reaction-plugin-ipset $(DESTDIR)$(BINDIR) + remove: rm -f $(DESTDIR)$(BINDIR)/bin/reaction + rm -f $(DESTDIR)$(BINDIR)/bin/reaction-plugin-virtual + rm -f $(DESTDIR)$(BINDIR)/bin/reaction-plugin-ipset rm -f $(DESTDIR)$(MANDIR)/reaction*.1 rm -f $(DESTDIR)$(PREFIX)/share/bash-completion/completions/reaction rm -f $(DESTDIR)$(PREFIX)/share/fish/vendor_completions.d/reaction.fish diff --git a/plugins/reaction-plugin-ipset/Cargo.toml b/plugins/reaction-plugin-ipset/Cargo.toml index e6247e2..b709176 100644 --- a/plugins/reaction-plugin-ipset/Cargo.toml +++ b/plugins/reaction-plugin-ipset/Cargo.toml @@ -1,7 +1,14 @@ [package] name = "reaction-plugin-ipset" -version = "0.1.0" +description = "ipset plugin for reaction" +version = "1.0.0" edition = "2024" +authors = ["ppom "] +license = "AGPL-3.0" +homepage = "https://reaction.ppom.me" +repository = "https://framagit.org/ppom/reaction" +keywords = ["security", "sysadmin", "fail2ban", "logs", "monitoring"] +default-run = "reaction-plugin-ipset" [dependencies] tokio = { workspace = true, features = ["rt-multi-thread"] } @@ -10,3 +17,10 @@ reaction-plugin.path = "../reaction-plugin" serde.workspace = true serde_json.workspace = true ipset = "0.9.0" + +[package.metadata.deb] +section = "net" +assets = [ + [ "target/release/reaction-plugin-ipset", "/usr/bin/reaction-plugin-ipset", "755" ], +] +depends 
= ["libipset-dev", "reaction"] diff --git a/plugins/reaction-plugin-virtual/Cargo.toml b/plugins/reaction-plugin-virtual/Cargo.toml index 46e3430..f6a1fca 100644 --- a/plugins/reaction-plugin-virtual/Cargo.toml +++ b/plugins/reaction-plugin-virtual/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reaction-plugin-virtual" -version = "0.1.0" +version = "1.0.0" edition = "2024" [dependencies] diff --git a/release.py b/release.py index 06c5977..5fa870c 100644 --- a/release.py +++ b/release.py @@ -1,11 +1,11 @@ #!/usr/bin/env nix-shell -#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign cargo-cross rustup cargo-deb +#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])" -p debian-devscripts git minisign docker cargo-deb import argparse import http.client import json import os -import subprocess import shutil +import subprocess import sys import tempfile @@ -56,14 +56,14 @@ def main(): print("exiting.") sys.exit(1) + # Minisign password + cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True) + minisign_password = cmd.stdout + if args.publish: # Git push run_command(["git", "push", "--tags"]) - # Minisign password - cmd = subprocess.run(["rbw", "get", "minisign"], capture_output=True, text=True) - minisign_password = cmd.stdout - # Create directory run_command( [ @@ -86,9 +86,11 @@ def main(): pass architectures = { - "x86_64-unknown-linux-musl": "amd64", - "aarch64-unknown-linux-musl": "arm64", - "arm-unknown-linux-gnueabihf": "armhf", + "x86_64-unknown-linux-gnu": "amd64", + # I would like to build for those targets instead: + # "x86_64-unknown-linux-musl": "amd64", + # "aarch64-unknown-linux-musl": "arm64", + # "arm-unknown-linux-gnueabihf": "armhf", } all_files = [] @@ -120,32 +122,55 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service for architecture_rs, architecture_pretty in architectures.items(): # Cargo clean - run_command(["cargo", "clean"]) + 
# run_command(["cargo", "clean"]) - # Install toolchain + # Build docker image + run_command(["docker", "pull", "rust:bookworm"]) + run_command(["docker", "build", "-t", "rust:reaction", "."]) + + binaries = [ + # Binaries + "reaction", + "reaction-plugin-virtual", + "reaction-plugin-ipset", + ] + + # Build run_command( [ - "rustup", - "toolchain", - "install", - f"stable-{architecture_rs}", - "--force-non-host", # I know, I know! - "--profile", - "minimal", + "docker", + "run", + "--rm", + "-u", str(os.getuid()), + "-v", ".:/reaction", + "rust:reaction", + "sh", "-c", + " && ".join([ + f"cargo build --release --target {architecture_rs} --package {binary}" + for binary in binaries + ]) ] ) - # Build - run_command(["cross", "build", "--release", "--target", architecture_rs]) - # Build .deb - cmd = run_command( - ["cargo-deb", f"--target={architecture_rs}", "--no-build", "--no-strip"] - ) + debs = [ + "reaction", + "reaction-plugin-ipset", + ] + for deb in debs: + cmd = run_command( + [ + "cargo-deb", + "--target", architecture_rs, + "--package", deb, + "--no-build", + "--no-strip" + ] + ) deb_dir = os.path.join("./target", architecture_rs, "debian") - deb_name = [f for f in os.listdir(deb_dir) if f.endswith(".deb")][0] - deb_path = os.path.join(deb_dir, deb_name) + deb_names = [f for f in os.listdir(deb_dir) if f.endswith(".deb")] + deb_paths = [os.path.join(deb_dir, deb_name) for deb_name in deb_names] # Archive files_path = os.path.join("./target", architecture_rs, "release") @@ -159,9 +184,7 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service except FileExistsError: pass - files = [ - # Binaries - "reaction", + files = binaries + [ # Shell completion "reaction.bash", "reaction.fish", @@ -187,16 +210,17 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service os.chdir(root_dir) - if args.publish: - # Sign - run_command( - ["minisign", "-Sm", deb_path, tar_path], - text=True, - input=minisign_password, - ) - deb_sig = f"{deb_path}.minisig" 
- tar_sig = f"{tar_path}.minisig" + # Sign + run_command( + ["minisign", "-Sm", tar_path] + deb_paths, + text=True, + input=minisign_password, + ) + deb_sig_paths = [f"{deb_path}.minisig" for deb_path in deb_paths] + deb_sig_names = [f"{deb_name}.minisig" for deb_name in deb_names] + tar_sig = f"{tar_path}.minisig" + if args.publish: # Push run_command( [ @@ -204,18 +228,25 @@ $ sudo systemctl enable --now reaction@reaction.jsonnet.service "-az", # "-e", "ssh -J pica01", tar_path, tar_sig, - deb_path, - deb_sig, + ] + + deb_paths + + deb_sig_paths + + [ f"akesi:/var/www/static/reaction/releases/{tag}/", ] ) + else: + # Copy + run_command(["cp", tar_path, tar_sig] + deb_paths + deb_sig_paths + [local_dir]) - all_files.extend([tar_path, tar_sig, deb_path, deb_sig]) + all_files.extend([tar_path, tar_sig]) + all_files.extend(deb_paths) + all_files.extend(deb_sig_paths) - # Instructions + # Instructions - instructions.append( - f""" + instructions.append( + f""" ## Tar installation ({architecture_pretty} linux) ```bash @@ -223,32 +254,42 @@ curl -O https://static.ppom.me/reaction/releases/{tag}/{tar_name} \\ -O https://static.ppom.me/reaction/releases/{tag}/{tar_name}.minisig \\ && minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {tar_name} \\ && rm {tar_name}.minisig \\ - && cd {tar_name} \\ + && tar xvf {tar_name} \\ + && cd {pkg_name} \\ && sudo make install ``` - """.strip() - ) - instructions.append( - f""" +If you want to install the ipset plugin as well: +```bash +sudo apt install -y libipset-dev && sudo make install-ipset +``` +""".strip() + ) + + instructions.append( + f""" ## Debian installation ({architecture_pretty} linux) ```bash -curl -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\ - -O https://static.ppom.me/reaction/releases/{tag}/{deb_name}.minisig \\ - && minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {deb_name} \\ - && rm {deb_name}.minisig \\ - && sudo apt install ./{deb_name} +curl \\ 
+{"\n".join([ + f" -O https://static.ppom.me/reaction/releases/{tag}/{deb_name} \\" + for deb_name in deb_names + deb_sig_names +])} +{"\n".join([ + f" && minisign -VP RWSpLTPfbvllNqRrXUgZzM7mFjLUA7PQioAItz80ag8uU4A2wtoT2DzX -m {deb_name} \\" + for deb_name in deb_names +])} + && rm {" ".join(deb_sig_names)} \\ + && sudo apt install {" ".join([f"./{deb_name}" for deb_name in deb_names])} ``` *You can also use [this third-party package repository](https://packages.azlux.fr).* - """.strip() - ) - else: - # Copy - run_command(["cp", tar_path, deb_path, local_dir]) +""".strip() + ) if not args.publish: + print("\n\n".join(instructions)) return # Release From 15f923ef6473e5bf34d8233ac0415b52a4eb19db Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 11 Feb 2026 12:00:00 +0100 Subject: [PATCH 212/241] Safeguard against users executing plugins themselves main_loop now first checks that it has been started with the `serve` argument. If not, it prints an info message and quits. --- plugins/reaction-plugin/src/lib.rs | 58 ++++++++++++++++++------------ 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/plugins/reaction-plugin/src/lib.rs b/plugins/reaction-plugin/src/lib.rs index 38a91d1..113494a 100644 --- a/plugins/reaction-plugin/src/lib.rs +++ b/plugins/reaction-plugin/src/lib.rs @@ -127,6 +127,7 @@ use std::{ collections::{BTreeMap, BTreeSet}, + env::args, error::Error, fmt::Display, process::exit, @@ -509,31 +510,44 @@ pub struct Exec { /// } /// ``` pub async fn main_loop(plugin_info: T) { - let (conn, mut tx, _rx): ( - _, - remoc::rch::base::Sender, - remoc::rch::base::Receiver<()>, - ) = Connect::io(remoc::Cfg::default(), stdin(), stdout()) - .await - .unwrap(); + // First check that we're called by reaction + let mut args = args(); + // skip 0th argument + let _skip = args.next(); + if args.next().is_none_or(|arg| arg != "serve") { + eprintln!("This plugin is not meant to be called as-is."); + eprintln!( + "reaction daemon starts plugins itself and 
communicates with them on stdin, stdout and stderr." + ); + eprintln!("See the doc on plugin configuration: https://reaction.ppom.me/plugins/"); + exit(1); + } else { + let (conn, mut tx, _rx): ( + _, + remoc::rch::base::Sender, + remoc::rch::base::Receiver<()>, + ) = Connect::io(remoc::Cfg::default(), stdin(), stdout()) + .await + .unwrap(); - let (server, client) = PluginInfoServer::new(plugin_info, 1); + let (server, client) = PluginInfoServer::new(plugin_info, 1); - let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), conn); - let mut exit_code = 0; - if let Err(err) = res1 { - eprintln!("ERROR could not send plugin info to reaction: {err}"); - exit_code = 1; + let (res1, (_, res2), res3) = tokio::join!(tx.send(client), server.serve(), conn); + let mut exit_code = 0; + if let Err(err) = res1 { + eprintln!("ERROR could not send plugin info to reaction: {err}"); + exit_code = 1; + } + if let Err(err) = res2 { + eprintln!("ERROR could not launch plugin service for reaction: {err}"); + exit_code = 2; + } + if let Err(err) = res3 { + eprintln!("ERROR connection error with reaction: {err}"); + exit_code = 3; + } + exit(exit_code); } - if let Err(err) = res2 { - eprintln!("ERROR could not launch plugin service for reaction: {err}"); - exit_code = 2; - } - if let Err(err) = res3 { - eprintln!("ERROR connection error with reaction: {err}"); - exit_code = 3; - } - exit(exit_code); } // Errors From 270c6cb969320b80c9000c4d3d44db07abdd0a82 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 213/241] systemd service: config file must live in /etc/reaction/ This is a breaking change, but it unifies config for yaml, json, jsonnet and directory users. 
--- config/reaction.service | 2 +- packaging/{reaction@.service => reaction.service} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename packaging/{reaction@.service => reaction.service} (92%) diff --git a/config/reaction.service b/config/reaction.service index 9265118..0e72a1c 100644 --- a/config/reaction.service +++ b/config/reaction.service @@ -7,7 +7,7 @@ Documentation=https://reaction.ppom.me # See `man systemd.exec` and `man systemd.service` for most options below [Service] -ExecStart=/usr/local/bin/reaction start -c /etc/reaction.jsonnet +ExecStart=/usr/local/bin/reaction start -c /etc/reaction/ # Ask systemd to create /var/lib/reaction (/var/lib/ is implicit) StateDirectory=reaction diff --git a/packaging/reaction@.service b/packaging/reaction.service similarity index 92% rename from packaging/reaction@.service rename to packaging/reaction.service index 729b95b..3003b85 100644 --- a/packaging/reaction@.service +++ b/packaging/reaction.service @@ -7,7 +7,7 @@ Documentation=https://reaction.ppom.me # See `man systemd.exec` and `man systemd.service` for most options below [Service] -ExecStart=/usr/bin/reaction start -c /etc/%i +ExecStart=/usr/bin/reaction start -c /etc/reaction/ # Ask systemd to create /var/lib/reaction (/var/lib/ is implicit) StateDirectory=reaction From b4313699df0f6a27e2e74ce048097f525798ffad Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 214/241] systemd: Let reaction stop its subprocesses before killing them systemd by default send SIGTERM to all processes in the cgroup, which doesn't let reaction handle the shutdown of its plugins. This is fixed by adding KillMode=mixed. 
--- config/reaction.service | 2 ++ packaging/reaction.service | 2 ++ 2 files changed, 4 insertions(+) diff --git a/config/reaction.service b/config/reaction.service index 0e72a1c..897e869 100644 --- a/config/reaction.service +++ b/config/reaction.service @@ -15,6 +15,8 @@ StateDirectory=reaction RuntimeDirectory=reaction # Start reaction in its state directory WorkingDirectory=/var/lib/reaction +# Let reaction kill its child processes first +KillMode=mixed [Install] WantedBy=multi-user.target diff --git a/packaging/reaction.service b/packaging/reaction.service index 3003b85..5bd1478 100644 --- a/packaging/reaction.service +++ b/packaging/reaction.service @@ -15,6 +15,8 @@ StateDirectory=reaction RuntimeDirectory=reaction # Start reaction in its state directory WorkingDirectory=/var/lib/reaction +# Let reaction kill its child processes first +KillMode=mixed [Install] WantedBy=multi-user.target From 3a61db9e6f79f115248172bb2d574779711eabd5 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 215/241] plugin: shutdown: add function that permit graceful shutdown by signal Handling SIGTERM (etc) signals permit graceful shutdown, cleaning of resources etc. Added in ipset and cluster. 
--- plugins/reaction-plugin-cluster/src/main.rs | 2 ++ plugins/reaction-plugin-ipset/src/main.rs | 2 ++ plugins/reaction-plugin/Cargo.toml | 2 +- plugins/reaction-plugin/src/shutdown.rs | 24 +++++++++++++++++++++ 4 files changed, 29 insertions(+), 1 deletion(-) diff --git a/plugins/reaction-plugin-cluster/src/main.rs b/plugins/reaction-plugin-cluster/src/main.rs index b931026..d0269e5 100644 --- a/plugins/reaction-plugin-cluster/src/main.rs +++ b/plugins/reaction-plugin-cluster/src/main.rs @@ -237,6 +237,8 @@ impl PluginInfo for Plugin { } async fn start(&mut self) -> RemoteResult<()> { + self.cluster_shutdown.delegate().handle_quit_signals()?; + let mut db = { let path = PathBuf::from("."); let (cancellation_token, task_tracker_token) = self.cluster_shutdown.token().split(); diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 268eb95..1117529 100644 --- a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -104,6 +104,8 @@ impl PluginInfo for Plugin { } async fn start(&mut self) -> RemoteResult<()> { + self.shutdown.delegate().handle_quit_signals()?; + let mut first_error = None; for (i, set) in self.sets.iter().enumerate() { // Retain if error diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index 1c22585..784555c 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -9,6 +9,6 @@ remoc.workspace = true serde.workspace = true serde_json.workspace = true tokio.workspace = true -tokio.features = ["io-std"] +tokio.features = ["io-std", "signal"] tokio-util.workspace = true tokio-util.features = ["rt"] diff --git a/plugins/reaction-plugin/src/shutdown.rs b/plugins/reaction-plugin/src/shutdown.rs index cc9ee4f..fb4bb40 100644 --- a/plugins/reaction-plugin/src/shutdown.rs +++ b/plugins/reaction-plugin/src/shutdown.rs @@ -36,7 +36,10 @@ //! } //! } //! ``` +//! +//! 
[`ShutdownDelegate::handle_quit_signals`] permits to handle SIGHUP, SIGINT and SIGTERM by gracefully shutting down tasks. +use tokio::signal::unix::{SignalKind, signal}; use tokio_util::{ sync::{CancellationToken, WaitForCancellationFuture}, task::task_tracker::{TaskTracker, TaskTrackerToken}, @@ -94,6 +97,27 @@ impl ShutdownDelegate { pub fn ask_shutdown(&self) { self.0.cancel(); } + + /// Ensure [`Self::ask_shutdown`] is called whenever we receive SIGHUP, + /// SIGTERM or SIGINT. Spawns a task that consumes self. + pub fn handle_quit_signals(self) -> Result<(), String> { + let err_str = |err| format!("could not register signal: {err}"); + + let mut sighup = signal(SignalKind::hangup()).map_err(err_str)?; + let mut sigint = signal(SignalKind::interrupt()).map_err(err_str)?; + let mut sigterm = signal(SignalKind::terminate()).map_err(err_str)?; + + tokio::spawn(async move { + let signal = tokio::select! { + _ = sighup.recv() => "SIGHUP", + _ = sigint.recv() => "SIGINT", + _ = sigterm.recv() => "SIGTERM", + }; + eprintln!("received {signal}, closing..."); + self.ask_shutdown(); + }); + Ok(()) + } } /// Created by a [`ShutdownController`]. 
From a4ea173c13b2a1f53e476e36e8286cf657047f21 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 216/241] Do not permit `options` key when stream/action is not a plugin --- src/concepts/action.rs | 7 +++++-- src/concepts/mod.rs | 4 ---- src/concepts/stream.rs | 7 +++++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/concepts/action.rs b/src/concepts/action.rs index 04a2a4b..12b0cad 100644 --- a/src/concepts/action.rs +++ b/src/concepts/action.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio::process::Command; -use super::{Match, Pattern, PatternType, null_value}; +use super::{Match, Pattern, PatternType}; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] @@ -45,7 +45,7 @@ pub struct Action { // Plugin-specific #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")] pub action_type: Option, - #[serde(default = "null_value", skip_serializing_if = "Value::is_null")] + #[serde(default, skip_serializing_if = "Value::is_null")] pub options: Value, } @@ -101,6 +101,9 @@ impl Action { if self.cmd[0].is_empty() { return Err("cmd's first item is empty".into()); } + if !self.options.is_null() { + return Err("can't define options without a plugin type".into()); + } } else if !self.cmd.is_empty() { return Err("can't define a cmd and a plugin type".into()); } diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index 8faf1e8..fc93d56 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -43,10 +43,6 @@ fn merge_attrs( Ok(this) } -fn null_value() -> Value { - Value::Null -} - #[cfg(test)] pub use filter::tests as filter_tests; diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 9a2734e..3b3fdf5 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -4,7 +4,7 @@ use reaction_plugin::StreamConfig; use serde::{Deserialize, Serialize}; use serde_json::Value; -use super::{Filter, Patterns, 
merge_attrs, null_value}; +use super::{Filter, Patterns, merge_attrs}; #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] @@ -22,7 +22,7 @@ pub struct Stream { // Plugin-specific #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")] pub stream_type: Option, - #[serde(default = "null_value", skip_serializing_if = "Value::is_null")] + #[serde(default, skip_serializing_if = "Value::is_null")] pub options: Value, } @@ -75,6 +75,9 @@ impl Stream { if self.cmd[0].is_empty() { return Err("cmd's first item is empty".into()); } + if !self.options.is_null() { + return Err("can't define options without a plugin type".into()); + } } else if !self.cmd.is_empty() { return Err("can't define cmd and a plugin type".into()); } From 5a030ffb7e0d3315691a90942d5887b1c9d74f36 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 217/241] Make systemd default options more accessible for users by moving them up --- src/concepts/plugin.rs | 112 ++++++++++++++++++++--------------------- 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 8365d97..03702ce 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -1,17 +1,17 @@ use std::{collections::BTreeMap, io::Error, process::Stdio}; -#[cfg(target_os = "linux")] -use std::os::linux::fs::MetadataExt; #[cfg(target_os = "freebsd")] use std::os::freebsd::fs::MetadataExt; -#[cfg(target_os = "openbsd")] -use std::os::openbsd::fs::MetadataExt; -#[cfg(target_os = "netbsd")] -use std::os::netbsd::fs::MetadataExt; -#[cfg(target_os = "solaris")] -use std::os::solaris::fs::MetadataExt; #[cfg(target_os = "illumos")] use std::os::illumos::fs::MetadataExt; +#[cfg(target_os = "linux")] +use std::os::linux::fs::MetadataExt; +#[cfg(target_os = "netbsd")] +use std::os::netbsd::fs::MetadataExt; +#[cfg(target_os = "openbsd")] +use std::os::openbsd::fs::MetadataExt; +#[cfg(target_os = "solaris")] 
+use std::os::solaris::fs::MetadataExt; use serde::{Deserialize, Serialize}; use tokio::{ @@ -20,6 +20,54 @@ use tokio::{ }; use tracing::{debug, warn}; +// TODO commented options block execution of program, +// while developping in my home directory. +// Some options may still be useful in production environments. +fn systemd_default_options(working_directory: &str) -> BTreeMap> { + BTreeMap::from( + [ + // reaction slice (does nothing if inexistent) + ("Slice", vec!["reaction.slice"]), + // Started in its own directory + ("WorkingDirectory", vec![working_directory]), + // No file access except own directory + ("ReadWritePaths", vec![working_directory]), + ("ReadOnlyPaths", vec![]), + // ("NoExecPaths", vec!["/"]), + ("InaccessiblePaths", vec!["/boot", "/etc"]), + // Protect special filesystems + ("PrivateDevices", vec!["true"]), + ("PrivateMounts", vec!["true"]), + ("PrivateTmp", vec!["true"]), + // ("PrivateUsers", vec!["true"]), + ("ProcSubset", vec!["pid"]), + ("ProtectClock", vec!["true"]), + ("ProtectControlGroups", vec!["true"]), + // ("ProtectHome", vec!["true"]), + ("ProtectHostname", vec!["true"]), + ("ProtectKernelLogs", vec!["true"]), + ("ProtectKernelModules", vec!["true"]), + ("ProtectKernelTunables", vec!["true"]), + ("ProtectProc", vec!["invisible"]), + ("ProtectSystem", vec!["strict"]), + // Dynamic User + ("DynamicUser", vec!["true"]), + // Various Protections + ("CapabilityBoundingSet", vec![""]), + ("LockPersonality", vec!["true"]), + ("NoNewPrivileges", vec!["true"]), + // Isolate File + ("RemoveIPC", vec!["true"]), + ("RestrictAddressFamilies", vec![""]), + ("RestrictNamespaces", vec!["true"]), + ("RestrictSUIDSGID", vec!["true"]), + ("SystemCallArchitectures", vec!["native"]), + ("SystemCallFilter", vec!["@system-service", "~@privileged"]), + ] + .map(|(k, v)| (k.into(), v.into_iter().map(|v| v.into()).collect())), + ) +} + #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(test, derive(Default))] #[serde(deny_unknown_fields)] @@ 
-145,51 +193,3 @@ impl Plugin { .spawn() } } - -// TODO commented options block execution of program, -// while developping in my home directory. -// Some options may still be useful in production environments. -fn systemd_default_options(working_directory: &str) -> BTreeMap> { - BTreeMap::from( - [ - // reaction slice (does nothing if inexistent) - ("Slice", vec!["reaction.slice"]), - // Started in its own directory - ("WorkingDirectory", vec![working_directory]), - // No file access except own directory - ("ReadWritePaths", vec![working_directory]), - ("ReadOnlyPaths", vec![]), - // ("NoExecPaths", vec!["/"]), - ("InaccessiblePaths", vec!["/boot", "/etc"]), - // Protect special filesystems - ("PrivateDevices", vec!["true"]), - ("PrivateMounts", vec!["true"]), - ("PrivateTmp", vec!["true"]), - // ("PrivateUsers", vec!["true"]), - ("ProcSubset", vec!["pid"]), - ("ProtectClock", vec!["true"]), - ("ProtectControlGroups", vec!["true"]), - // ("ProtectHome", vec!["true"]), - ("ProtectHostname", vec!["true"]), - ("ProtectKernelLogs", vec!["true"]), - ("ProtectKernelModules", vec!["true"]), - ("ProtectKernelTunables", vec!["true"]), - ("ProtectProc", vec!["invisible"]), - ("ProtectSystem", vec!["strict"]), - // Dynamic User - ("DynamicUser", vec!["true"]), - // Various Protections - ("CapabilityBoundingSet", vec![""]), - ("LockPersonality", vec!["true"]), - ("NoNewPrivileges", vec!["true"]), - // Isolate File - ("RemoveIPC", vec!["true"]), - ("RestrictAddressFamilies", vec![""]), - ("RestrictNamespaces", vec!["true"]), - ("RestrictSUIDSGID", vec!["true"]), - ("SystemCallArchitectures", vec!["native"]), - ("SystemCallFilter", vec!["@system-service", "~@privileged"]), - ] - .map(|(k, v)| (k.into(), v.into_iter().map(|v| v.into()).collect())), - ) -} From 3c20d8f0080e1159071f86737410ed84e9adea1b Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 218/241] Fix merging of systemd options --- src/concepts/plugin.rs | 5 +++-- 1 file 
changed, 3 insertions(+), 2 deletions(-) diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 03702ce..fa323cf 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -162,9 +162,10 @@ impl Plugin { let mut command = Command::new("run0"); // --pipe gives direct, non-emulated stdio access, for better performance. command.arg("--pipe"); - self.systemd_setup(&plugin_working_directory); + + let merged_systemd_options = self.systemd_setup(&plugin_working_directory); // run0 options - for (option, values) in self.systemd_options.iter() { + for (option, values) in merged_systemd_options.iter() { for value in values.iter() { command.arg("--property").arg(format!("{option}={value}")); } From d629d57a7e84baf09e0d08cf9e61d3bc62ae4cbf Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 219/241] Change ipset version option from 4/6/46 to ipv4/ipv6/ip --- plugins/reaction-plugin-ipset/src/action.rs | 40 ++++++++++----------- plugins/reaction-plugin-ipset/src/tests.rs | 16 +++++---- tests/test-conf/test-ipset.jsonnet | 2 +- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index 96fd352..8522717 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -8,13 +8,13 @@ use crate::ipset::{CreateSet, IpSet, Order, SetChain, Version}; #[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] pub enum IpVersion { - #[serde(rename = "4")] - V4, - #[serde(rename = "6")] - V6, - #[serde(rename = "46")] #[default] - V46, + #[serde(rename = "ip")] + Ip, + #[serde(rename = "ipv4")] + Ipv4, + #[serde(rename = "ipv6")] + Ipv6, } impl Debug for IpVersion { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -22,9 +22,9 @@ impl Debug for IpVersion { f, "{}", match self { - IpVersion::V4 => "4", - IpVersion::V6 => "6", - IpVersion::V46 => "46", + 
IpVersion::Ipv4 => "ipv4", + IpVersion::Ipv6 => "ipv6", + IpVersion::Ip => "ip", } ) } @@ -84,9 +84,9 @@ impl ActionOptions { pub struct SetOptions { /// The IP type. /// Defaults to `46`. - /// If `4`: creates an IPv4 set with this name - /// If `6`: creates an IPv6 set with this name - /// If `46`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' + /// If `ipv4`: creates an IPv4 set with this name + /// If `ipv6`: creates an IPv6 set with this name + /// If `ip`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' /// *Merged set-wise*. #[serde(default)] version: Option, @@ -242,14 +242,14 @@ impl SetNames { pub fn new(name: String, version: Option) -> Self { Self { ipv4: match version { - Some(IpVersion::V4) => Some(name.clone()), - Some(IpVersion::V6) => None, - None | Some(IpVersion::V46) => Some(format!("{}v4", name)), + Some(IpVersion::Ipv4) => Some(name.clone()), + Some(IpVersion::Ipv6) => None, + None | Some(IpVersion::Ip) => Some(format!("{}v4", name)), }, ipv6: match version { - Some(IpVersion::V4) => None, - Some(IpVersion::V6) => Some(name), - None | Some(IpVersion::V46) => Some(format!("{}v6", name)), + Some(IpVersion::Ipv4) => None, + Some(IpVersion::Ipv6) => Some(name), + None | Some(IpVersion::Ip) => Some(format!("{}v6", name)), }, } } @@ -359,7 +359,7 @@ mod tests { target: None, }; let s2 = SetOptions { - version: Some(IpVersion::V4), + version: Some(IpVersion::Ipv4), chains: Some(vec!["INPUT".into()]), timeout: Some("3h".into()), timeout_u32: Some(3 * 3600), @@ -397,7 +397,7 @@ mod tests { for s3 in [ SetOptions { - version: Some(IpVersion::V6), + version: Some(IpVersion::Ipv6), ..Default::default() }, SetOptions { diff --git a/plugins/reaction-plugin-ipset/src/tests.rs b/plugins/reaction-plugin-ipset/src/tests.rs index f1cef9e..09ce6cf 100644 --- a/plugins/reaction-plugin-ipset/src/tests.rs +++ b/plugins/reaction-plugin-ipset/src/tests.rs @@ -34,7 
+34,7 @@ async fn conf_action_standalone() { (true, json!({ "set": "test" }), &p), // missing set key (false, json!({}), &p), - (false, json!({ "version": 4 }), &p), + (false, json!({ "version": "ipv4" }), &p), // unknown key (false, json!({ "set": "test", "unknown": "yes" }), &p), (false, json!({ "set": "test", "ip_index": 1 }), &p), @@ -66,14 +66,18 @@ async fn conf_action_standalone() { (false, json!({ "set": "test", "action": 1 }), &p), // ip version // // ok - (true, json!({ "set": "test", "version": "4" }), &p), - (true, json!({ "set": "test", "version": "6" }), &p), - (true, json!({ "set": "test", "version": "46" }), &p), + (true, json!({ "set": "test", "version": "ipv4" }), &p), + (true, json!({ "set": "test", "version": "ipv6" }), &p), + (true, json!({ "set": "test", "version": "ip" }), &p), // unknown version (false, json!({ "set": "test", "version": 4 }), &p), (false, json!({ "set": "test", "version": 6 }), &p), (false, json!({ "set": "test", "version": 46 }), &p), (false, json!({ "set": "test", "version": "5" }), &p), + (false, json!({ "set": "test", "version": "ipv5" }), &p), + (false, json!({ "set": "test", "version": "4" }), &p), + (false, json!({ "set": "test", "version": "6" }), &p), + (false, json!({ "set": "test", "version": "46" }), &p), // bad type (false, json!({ "set": "test", "version": true }), &p), // chains // @@ -167,7 +171,7 @@ async fn conf_action_merge() { config: json!({ "set": "test", "target": "DROP", - "version": "46", + "version": "ip", "action": "add", }) .into(), @@ -206,7 +210,7 @@ async fn conf_action_merge() { config: json!({ "set": "test2", "target": "target1", - "version": "6", + "version": "ipv6", }) .into(), patterns: vec!["ip".into()], diff --git a/tests/test-conf/test-ipset.jsonnet b/tests/test-conf/test-ipset.jsonnet index 12ce0c8..7232547 100644 --- a/tests/test-conf/test-ipset.jsonnet +++ b/tests/test-conf/test-ipset.jsonnet @@ -30,7 +30,7 @@ options: { set: 'reactiontest', // pattern: 'ip', - // version: 46, + // 
version: 'ip', // chains: ['INPUT', 'FORWARD'], // target: 'DROP', // action: 'add', From 2f57f73ac9365fa9e09900d573f420d1d194f472 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 220/241] Fix systemd functionality - Non-absolute WorkingDirectory was refused by systemd - Plugin specific-conf updated Improvements: - ReadOnlyPaths=/ - ProtectHome=true in release builds - SystemCallFilter further restricted Disabled: - DynamicUser: breaks stdio communication, FIXME! - RestrictAddressFamilies: seems impossible to override to default. - CapabilityBoundingSet: too restrictive --- src/concepts/mod.rs | 1 - src/concepts/plugin.rs | 37 ++++++++++++++++++++-------- tests/test-conf/test-ipset.jsonnet | 2 +- tests/test-conf/test-virtual.jsonnet | 1 - 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/concepts/mod.rs b/src/concepts/mod.rs index fc93d56..1952b1b 100644 --- a/src/concepts/mod.rs +++ b/src/concepts/mod.rs @@ -8,7 +8,6 @@ mod stream; use std::fmt::Debug; use serde::{Deserialize, Serialize}; -use serde_json::Value; pub use action::Action; pub use config::{Config, Patterns}; diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index fa323cf..204f6f2 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, io::Error, process::Stdio}; +use std::{collections::BTreeMap, io::Error, path, process::Stdio}; #[cfg(target_os = "freebsd")] use std::os::freebsd::fs::MetadataExt; @@ -32,8 +32,7 @@ fn systemd_default_options(working_directory: &str) -> BTreeMap BTreeMap Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 221/241] Set CapabiltyBoundingSet again --- src/concepts/plugin.rs | 3 ++- tests/test-conf/test-ipset.jsonnet | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 204f6f2..c5bc330 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -53,6 +53,8 @@ fn 
systemd_default_options(working_directory: &str) -> BTreeMap BTreeMap Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 222/241] reaction-plugin: Add metadata --- plugins/reaction-plugin/Cargo.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index 784555c..08d1eb6 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -2,6 +2,13 @@ name = "reaction-plugin" version = "1.0.0" edition = "2024" +authors = ["ppom "] +license = "AGPL-3.0" +homepage = "https://reaction.ppom.me" +repository = "https://framagit.org/ppom/reaction" +keywords = ["security", "sysadmin", "logs", "monitoring", "plugin"] +categories = ["security"] +description = "Plugin interface for reaction, a daemon that scans logs and takes action (alternative to fail2ban)" [dependencies] chrono.workspace = true From a7e958f248686335c8a5ff6149fe4f2df5a98bc3 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 223/241] Update ARCHITECTURE.md --- ARCHITECTURE.md | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index f43d2ad..4289e65 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -6,6 +6,7 @@ Here is a high-level overview of the codebase. ## Build +- `bench/`: Configuration that spawns a very high load on reaction. Useful to test performance improvements and regressions. - `build.rs`: permits to create shell completions and man pages on build. - `Cargo.toml`, `Cargo.lock`: manifest and dependencies. - `config/`: example / test configuration files. Look at its git history to discover more. @@ -15,8 +16,7 @@ Here is a high-level overview of the codebase. ## Main source code -- `helpers_c/`: C helpers. I wish to have special IP support in reaction and get rid of them. See #79 and #116. -- `tests/`: Integration tests. 
For now they test basic reaction runtime behavior, persistance, and client-daemon communication. +- `tests/`: Integration tests. They test reaction runtime behavior, persistance, client-daemon communication, plugin integrations. - `src/`: The source code, here we go! ### Top-level files @@ -25,18 +25,13 @@ Here is a high-level overview of the codebase. - `src/lib.rs`: Second main entrypoint - `src/cli.rs`: Command-line arguments - `src/tests.rs`: Test utilities +- `src/protocol.rs`: de/serialization and client/daemon protocol messages. ### `src/concepts/` reaction really is about its configuration, which is at the center of the code. -There is one file for each of its concepts: configuration, streams, filters, actions, patterns. - -### `src/protocol/` - -Low-level serialization/deserialization and client-daemon protocol messages. - -Shared by the client and daemon's socket. Also used by daemon's database. +There is one file for each of its concepts: configuration, streams, filters, actions, patterns, plugins. ### `src/client/` @@ -58,9 +53,9 @@ This code has async code, to handle input streams and communication with clients - `mod.rs`: High-level logic - `state.rs`: Inner state operations - `socket.rs`: The socket task, responsible for communication with clients. -- `shutdown.rs`: Logic for passing shutdown signal across all tasks +- `plugin.rs`: Plugin startup, configuration loading and cleanup. -### `src/tree` +### `crates/treedb` Persistence layer. @@ -68,5 +63,19 @@ This is a database highly adapted to reaction workload, making reaction faster t (heed, sled and fjall crates have been tested). Its design is explained in the comments of its files: -- `mod.rs`: main database code, with its two API structs: Tree and Database. -- `raw.rs` low-level part, directly interacting with de/serializisation and files. +- `lib.rs`: main database code, with its two API structs: Tree and Database. 
+- `raw.rs`: low-level part, directly interacting with de/serialization and files. +- `time.rs`: time definitions shared with reaction. +- `helpers.rs`: utilities to ease db deserialization from disk. + +### `plugins/reaction-plugin` + +Shared plugin interface between reaction daemon and its plugins. + +Also defines some shared logic between them: +- `shutdown.rs`: Logic for passing shutdown signal across all tasks +- `parse_duration.rs`: Duration parsing + +### `plugins/reaction-plugin-*` + +All core plugins. From 645d72ac1ef60ffd52d32628c01d06d970934ac3 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 224/241] .gitignore cleanup --- .gitignore | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 55ecad6..07180cd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,22 +1,15 @@ /reaction -reaction*.db -reaction*.db.old +reaction.db +reaction.db.old /data -/lmdb -reaction*.export.json /reaction*.sock /result /wiki -/deb *.deb *.minisig *.qcow2 -debian-packaging/* *.swp -export-go-db/export-go-db -import-rust-db/target /target -reaction-plugin/target /local .ccls-cache .direnv From 88c99fff0f6782fa57c182def3f6ef7ef6e27ba8 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 12 Feb 2026 12:00:00 +0100 Subject: [PATCH 225/241] Fix install instructions --- release.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release.py b/release.py index 5fa870c..6cc456b 100644 --- a/release.py +++ b/release.py @@ -114,8 +114,8 @@ $ sudo systemctl daemon-reload Then enable and start reaction with this command ```bash -# replace `reaction.jsonnet` with the name of your configuration file in /etc/ -$ sudo systemctl enable --now reaction@reaction.jsonnet.service +# write first your configuration file(s) in /etc/reaction/ +$ sudo systemctl enable --now reaction.service ``` """.strip(), ] From 488dc6c66f792fdbb46b4be6bbc4bb702ae62019 Mon Sep 17 00:00:00 2001 From: ppom Date: Sun, 
15 Feb 2026 12:00:00 +0100 Subject: [PATCH 226/241] Update release instructions --- release.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/release.py b/release.py index 6cc456b..8359c7a 100644 --- a/release.py +++ b/release.py @@ -102,9 +102,8 @@ def main(): You'll need to install minisign to check the authenticity of the package. -After installing reaction, create your configuration file at -`/etc/reaction.json`, `/etc/reaction.jsonnet` or `/etc/reaction.yml`. -You can also provide a directory containing multiple configuration files in the previous formats. +After installing reaction, create your configuration file(s) in JSON, YAML or JSONnet in the +`/etc/reaction/` directory. See for documentation. Reload systemd: From dc51d7d432eeb408551ae84c6b08d172efbf4649 Mon Sep 17 00:00:00 2001 From: ppom Date: Tue, 17 Feb 2026 12:00:00 +0100 Subject: [PATCH 227/241] Add support for macOS --- src/concepts/plugin.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index c5bc330..8c3c142 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -1,5 +1,7 @@ use std::{collections::BTreeMap, io::Error, path, process::Stdio}; +#[cfg(target_os = "macos")] +use std::os::darwin::fs::MetadataExt; #[cfg(target_os = "freebsd")] use std::os::freebsd::fs::MetadataExt; #[cfg(target_os = "illumos")] From 285954f7cd1fe935fb382f09b2716e85460430aa Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 18 Feb 2026 12:00:00 +0100 Subject: [PATCH 228/241] Remove outdated FIXME --- plugins/reaction-plugin-ipset/src/action.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index 8522717..1d33c7b 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -39,9 +39,6 @@ pub enum AddDel { Del, } -// FIXME block configs that have different set options for the same name -// treat 
default values as none? - /// User-facing action options #[derive(Serialize, Deserialize)] #[serde(deny_unknown_fields)] From 26cf3a96e74d856447eb96625c8ad64b5a6ee999 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 20 Feb 2026 12:00:00 +0100 Subject: [PATCH 229/241] First draft of an nftables plugin Not compiling yet but I'm getting there. Must be careful on the unsafe, C-wrapping code! --- Cargo.lock | 105 ++++ Cargo.toml | 1 + plugins/reaction-plugin-nftables/Cargo.toml | 13 + .../reaction-plugin-nftables/src/action.rs | 493 ++++++++++++++++++ .../reaction-plugin-nftables/src/helpers.rs | 15 + plugins/reaction-plugin-nftables/src/main.rs | 169 ++++++ plugins/reaction-plugin-nftables/src/nft.rs | 69 +++ plugins/reaction-plugin-nftables/src/tests.rs | 253 +++++++++ shell.nix | 1 + 9 files changed, 1119 insertions(+) create mode 100644 plugins/reaction-plugin-nftables/Cargo.toml create mode 100644 plugins/reaction-plugin-nftables/src/action.rs create mode 100644 plugins/reaction-plugin-nftables/src/helpers.rs create mode 100644 plugins/reaction-plugin-nftables/src/main.rs create mode 100644 plugins/reaction-plugin-nftables/src/nft.rs create mode 100644 plugins/reaction-plugin-nftables/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index f5b71b2..f138507 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1980,6 +1980,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "libnftables1-sys" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b290d0d41f0ad578660aeed371bcae4cf85f129a6fe31350dbd2e097518cd7f" +dependencies = [ + "bindgen", +] + [[package]] name = "linux-raw-sys" version = "0.11.0" @@ -2248,6 +2257,22 @@ dependencies = [ "wmi", ] +[[package]] +name = "nftables" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c57e7343eed9e9330e084eef12651b15be3c8ed7825915a0ffa33736b852bed" +dependencies = [ + "schemars", + "serde", + "serde_json", + "serde_path_to_error", + 
"strum", + "strum_macros", + "thiserror 2.0.18", + "tokio", +] + [[package]] name = "nix" version = "0.29.0" @@ -2899,6 +2924,19 @@ dependencies = [ "tokio", ] +[[package]] +name = "reaction-plugin-nftables" +version = "0.1.0" +dependencies = [ + "libnftables1-sys", + "nftables", + "reaction-plugin", + "remoc", + "serde", + "serde_json", + "tokio", +] + [[package]] name = "reaction-plugin-virtual" version = "1.0.0" @@ -2919,6 +2957,26 @@ dependencies = [ "bitflags", ] +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "regex" version = "1.12.2" @@ -3194,6 +3252,31 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "schemars" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +dependencies = [ + "dyn-clone", + "ref-cast", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d115b50f4aaeea07e79c1912f645c7513d81715d0420f8bc77a18c6260b307f" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 2.0.114", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -3287,6 +3370,17 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "serde_json" version = "1.0.149" @@ -3300,6 +3394,17 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" diff --git a/Cargo.toml b/Cargo.toml index dd537aa..39be1a6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,7 @@ members = [ "plugins/reaction-plugin", "plugins/reaction-plugin-cluster", "plugins/reaction-plugin-ipset", + "plugins/reaction-plugin-nftables", "plugins/reaction-plugin-virtual" ] diff --git a/plugins/reaction-plugin-nftables/Cargo.toml b/plugins/reaction-plugin-nftables/Cargo.toml new file mode 100644 index 0000000..1de8e6b --- /dev/null +++ b/plugins/reaction-plugin-nftables/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "reaction-plugin-nftables" +version = "0.1.0" +edition = "2024" + +[dependencies] +tokio = { workspace = true, features = ["rt-multi-thread"] } +remoc.workspace = true +reaction-plugin.path = "../reaction-plugin" +serde.workspace = true +serde_json.workspace = true +nftables = { version = "0.6.3", features = ["tokio"] } +libnftables1-sys = { version = "0.1.1" } diff --git a/plugins/reaction-plugin-nftables/src/action.rs b/plugins/reaction-plugin-nftables/src/action.rs new file mode 100644 index 0000000..6649bd1 --- /dev/null +++ b/plugins/reaction-plugin-nftables/src/action.rs @@ -0,0 +1,493 @@ +use std::{ + borrow::Cow, + collections::HashSet, + fmt::{Debug, Display}, + u32, +}; + +use nftables::{ + batch::Batch, + expr::Expression, + helper::apply_ruleset_async, + schema::{Element, NfListObject, Rule, SetFlag, SetType, SetTypeValue}, + stmt::Statement, + types::{NfFamily, 
NfHook}, +}; +use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration}; +use remoc::rch::mpsc as remocMpsc; +use serde::{Deserialize, Serialize}; + +use crate::{helpers::Version, nft::NftClient}; + +#[derive(Default, Serialize, Deserialize, PartialEq, Eq, Clone, Copy)] +pub enum IpVersion { + #[default] + #[serde(rename = "ip")] + Ip, + #[serde(rename = "ipv4")] + Ipv4, + #[serde(rename = "ipv6")] + Ipv6, +} +impl Debug for IpVersion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + IpVersion::Ipv4 => "ipv4", + IpVersion::Ipv6 => "ipv6", + IpVersion::Ip => "ip", + } + ) + } +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub enum AddDel { + #[default] + #[serde(alias = "add")] + Add, + #[serde(alias = "delete")] + Delete, +} + +/// User-facing action options +#[derive(Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ActionOptions { + /// The set that should be used by this action + pub set: String, + /// The pattern name of the IP. + /// Defaults to "ip" + #[serde(default = "serde_ip")] + pub pattern: String, + #[serde(skip)] + ip_index: usize, + // Whether the action is to "add" or "del" the ip from the set + #[serde(default)] + action: AddDel, + + #[serde(flatten)] + pub set_options: SetOptions, +} + +fn serde_ip() -> String { + "ip".into() +} + +impl ActionOptions { + pub fn set_ip_index(&mut self, patterns: Vec) -> Result<(), ()> { + self.ip_index = patterns + .into_iter() + .enumerate() + .filter(|(_, name)| name == &self.pattern) + .next() + .ok_or(())? + .0; + Ok(()) + } +} + +/// Merged set options +#[derive(Default, Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] +pub struct SetOptions { + /// The IP type. + /// Defaults to `46`. 
+ /// If `ipv4`: creates an IPv4 set with this name + /// If `ipv6`: creates an IPv6 set with this name + /// If `ip`: creates an IPv4 set with its name suffixed by 'v4' AND an IPv6 set with its name suffixed by 'v6' + /// *Merged set-wise*. + #[serde(default)] + version: Option, + /// Chains where the IP set should be inserted. + /// Defaults to `["input", "forward"]` + /// *Merged set-wise*. + #[serde(default)] + hooks: Option>, + // Optional timeout, letting linux/netfilter handle set removal instead of reaction + // Note that `reaction show` and `reaction flush` won't work if set instead of an `after` action + // Same syntax as after and retryperiod in reaction. + /// *Merged set-wise*. + #[serde(skip_serializing_if = "Option::is_none")] + timeout: Option, + #[serde(skip)] + timeout_u32: Option, + // Target that iptables should use when the IP is encountered. + // Defaults to DROP, but can also be ACCEPT, RETURN or any user-defined chain + /// *Merged set-wise*. + #[serde(default)] + target: Option, +} + +impl SetOptions { + pub fn merge(&mut self, options: &SetOptions) -> Result<(), String> { + // merge two Option and fail if there is conflict + fn inner_merge( + a: &mut Option, + b: &Option, + name: &str, + ) -> Result<(), String> { + match (&a, &b) { + (Some(aa), Some(bb)) => { + if aa != bb { + return Err(format!( + "Conflicting options for {name}: `{aa:?}` and `{bb:?}`" + )); + } + } + (None, Some(_)) => { + *a = b.clone(); + } + _ => (), + }; + Ok(()) + } + + inner_merge(&mut self.version, &options.version, "version")?; + inner_merge(&mut self.timeout, &options.timeout, "timeout")?; + inner_merge(&mut self.hooks, &options.hooks, "chains")?; + inner_merge(&mut self.target, &options.target, "target")?; + + if let Some(timeout) = &self.timeout { + let duration = parse_duration(timeout) + .map_err(|err| format!("failed to parse timeout: {}", err))? 
+ .as_secs(); + if duration > u32::MAX as u64 { + return Err(format!( + "timeout is limited to {} seconds (approx {} days)", + u32::MAX, + 49_000 + )); + } + self.timeout_u32 = Some(duration as u32); + } + + Ok(()) + } +} + +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum RHook { + Ingress, + Prerouting, + Forward, + Input, + Output, + Postrouting, + Egress, +} + +impl RHook { + pub fn as_str(&self) -> &'static str { + match self { + RHook::Ingress => "ingress", + RHook::Prerouting => "prerouting", + RHook::Forward => "forward", + RHook::Input => "input", + RHook::Output => "output", + RHook::Postrouting => "postrouting", + RHook::Egress => "egress", + } + } +} + +impl Display for RHook { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl From<&RHook> for NfHook { + fn from(value: &RHook) -> Self { + match value { + RHook::Ingress => Self::Ingress, + RHook::Prerouting => Self::Prerouting, + RHook::Forward => Self::Forward, + RHook::Input => Self::Input, + RHook::Output => Self::Output, + RHook::Postrouting => Self::Postrouting, + RHook::Egress => Self::Egress, + } + } +} + +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum RStatement { + Accept, + Drop, + Continue, + Return, +} + +pub struct Set { + pub sets: SetNames, + pub hooks: Vec, + pub timeout: Option, + pub target: RStatement, +} + +impl Set { + pub fn from(name: String, options: SetOptions) -> Self { + Self { + sets: SetNames::new(name, options.version), + timeout: options.timeout_u32, + target: options.target.unwrap_or(RStatement::Drop), + hooks: options.hooks.unwrap_or(vec![RHook::Input, RHook::Forward]), + } + } + + pub fn init<'a>(&self, batch: &mut Batch<'a>) -> Result<(), String> { + for (set, version) in [ + (&self.sets.ipv4, Version::IPv4), + (&self.sets.ipv6, Version::IPv6), + ] 
{ + if let Some(set) = set { + let family = NfFamily::INet; + let table = Cow::from("reaction"); + let name = Cow::from(set.as_str()); + + // create set + batch.add(NfListObject::<'a>::Set(Box::new(nftables::schema::Set { + family, + table: table.to_owned(), + name, + // TODO Try a set which is both ipv4 and ipv6? + set_type: SetTypeValue::Single(match version { + Version::IPv4 => SetType::Ipv4Addr, + Version::IPv6 => SetType::Ipv6Addr, + }), + flags: Some({ + let mut flags = HashSet::from([SetFlag::Interval]); + if self.timeout.is_some() { + flags.insert(SetFlag::Timeout); + } + flags + }), + timeout: self.timeout.clone(), + ..Default::default() + }))); + // insert set in chains + let expr = vec![match self.target { + RStatement::Accept => Statement::Accept(None), + RStatement::Drop => Statement::Drop(None), + RStatement::Continue => Statement::Continue(None), + RStatement::Return => Statement::Return(None), + }]; + for hook in &self.hooks { + batch.add(NfListObject::Rule(Rule { + family, + table: table.to_owned(), + chain: Cow::from(hook.to_string()), + expr: Cow::Owned(expr.clone()), + ..Default::default() + })); + } + } + } + Ok(()) + } +} + +pub struct SetNames { + pub ipv4: Option, + pub ipv6: Option, +} + +impl SetNames { + pub fn new(name: String, version: Option) -> Self { + Self { + ipv4: match version { + Some(IpVersion::Ipv4) => Some(name.clone()), + Some(IpVersion::Ipv6) => None, + None | Some(IpVersion::Ip) => Some(format!("{}v4", name)), + }, + ipv6: match version { + Some(IpVersion::Ipv4) => None, + Some(IpVersion::Ipv6) => Some(name), + None | Some(IpVersion::Ip) => Some(format!("{}v6", name)), + }, + } + } +} + +pub struct Action { + nft: NftClient, + rx: remocMpsc::Receiver, + shutdown: ShutdownToken, + sets: SetNames, + // index of pattern ip in match vec + ip_index: usize, + action: AddDel, +} + +impl Action { + pub fn new( + nft: NftClient, + shutdown: ShutdownToken, + rx: remocMpsc::Receiver, + options: ActionOptions, + ) -> Result { + 
Ok(Action { + nft, + rx, + shutdown, + sets: SetNames::new(options.set, options.set_options.version), + ip_index: options.ip_index, + action: options.action, + }) + } + + pub async fn serve(mut self) { + loop { + let event = tokio::select! { + exec = self.rx.recv() => Some(exec), + _ = self.shutdown.wait() => None, + }; + match event { + // shutdown asked + None => break, + // channel closed + Some(Ok(None)) => break, + // error from channel + Some(Err(err)) => { + eprintln!("ERROR {err}"); + break; + } + // ok + Some(Ok(Some(exec))) => { + if let Err(err) = self.handle_exec(exec).await { + eprintln!("ERROR {err}"); + break; + } + } + } + } + // eprintln!("DEBUG Asking for shutdown"); + // self.shutdown.ask_shutdown(); + } + + async fn handle_exec(&mut self, mut exec: Exec) -> Result<(), String> { + // safeguard against Vec::remove's panic + if exec.match_.len() <= self.ip_index { + return Err(format!( + "match received from reaction is smaller than expected. looking for index {} but size is {}. 
this is a bug!", + self.ip_index, + exec.match_.len() + )); + } + let ip = exec.match_.remove(self.ip_index); + // select set + let set = match (&self.sets.ipv4, &self.sets.ipv6) { + (None, None) => return Err(format!("action is neither IPv4 nor IPv6, this is a bug!")), + (None, Some(set)) => set, + (Some(set), None) => set, + (Some(set4), Some(set6)) => { + if ip.contains(':') { + set6 + } else { + set4 + } + } + }; + // add/remove ip to set + let element = NfListObject::Element(Element { + family: NfFamily::INet, + table: Cow::from("reaction"), + name: Cow::from(set), + elem: Cow::from(vec![Expression::String(Cow::from(ip.clone()))]), + }); + let mut batch = Batch::new(); + match self.action { + AddDel::Add => batch.add(element), + AddDel::Delete => batch.delete(element), + }; + match self.nft.send(batch).await { + Ok(ok) => { + eprintln!("DEBUG action ok {:?} {ip}: {ok}", self.action); + Ok(()) + } + Err(err) => Err(format!("action ko {:?} {ip}: {err}", self.action)), + } + } +} + +#[cfg(test)] +mod tests { + use crate::action::{IpVersion, RHook, RStatement, SetOptions}; + + #[tokio::test] + async fn set_options_merge() { + let s1 = SetOptions { + version: None, + hooks: None, + timeout: None, + timeout_u32: None, + target: None, + }; + let s2 = SetOptions { + version: Some(IpVersion::Ipv4), + hooks: Some(vec![RHook::Input]), + timeout: Some("3h".into()), + timeout_u32: Some(3 * 3600), + target: Some(RStatement::Drop), + }; + assert_ne!(s1, s2); + assert_eq!(s1, SetOptions::default()); + + { + // s2 can be merged in s1 + let mut s1 = s1.clone(); + assert!(s1.merge(&s2).is_ok()); + assert_eq!(s1, s2); + } + + { + // s1 can be merged in s2 + let mut s2 = s2.clone(); + assert!(s2.merge(&s1).is_ok()); + } + + { + // s1 can be merged in itself + let mut s3 = s1.clone(); + assert!(s3.merge(&s1).is_ok()); + assert_eq!(s1, s3); + } + + { + // s2 can be merged in itself + let mut s3 = s2.clone(); + assert!(s3.merge(&s2).is_ok()); + assert_eq!(s2, s3); + } + + for s3 in [ 
+ SetOptions { + version: Some(IpVersion::Ipv6), + ..Default::default() + }, + SetOptions { + hooks: Some(vec![RHook::Output]), + ..Default::default() + }, + SetOptions { + timeout: Some("30min".into()), + ..Default::default() + }, + SetOptions { + target: Some(RStatement::Continue), + ..Default::default() + }, + ] { + // none with some is ok + assert!(s3.clone().merge(&s1).is_ok(), "s3: {s3:?}"); + assert!(s1.clone().merge(&s3).is_ok(), "s3: {s3:?}"); + // different some is ko + assert!(s3.clone().merge(&s2).is_err(), "s3: {s3:?}"); + assert!(s2.clone().merge(&s3).is_err(), "s3: {s3:?}"); + } + } +} diff --git a/plugins/reaction-plugin-nftables/src/helpers.rs b/plugins/reaction-plugin-nftables/src/helpers.rs new file mode 100644 index 0000000..b8b97b2 --- /dev/null +++ b/plugins/reaction-plugin-nftables/src/helpers.rs @@ -0,0 +1,15 @@ +use std::fmt::Display; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] +pub enum Version { + IPv4, + IPv6, +} +impl Display for Version { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Version::IPv4 => "IPv4", + Version::IPv6 => "IPv6", + }) + } +} diff --git a/plugins/reaction-plugin-nftables/src/main.rs b/plugins/reaction-plugin-nftables/src/main.rs new file mode 100644 index 0000000..ac93eba --- /dev/null +++ b/plugins/reaction-plugin-nftables/src/main.rs @@ -0,0 +1,169 @@ +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, +}; + +use libnftables1_sys::Nftables; +use nftables::{ + batch::Batch, + schema::{Chain, NfListObject, Table}, + types::{NfChainType, NfFamily}, +}; +use reaction_plugin::{ + ActionConfig, ActionImpl, Hello, Manifest, PluginInfo, RemoteResult, StreamConfig, StreamImpl, + shutdown::ShutdownController, +}; +use remoc::rtc; + +use crate::action::{Action, ActionOptions, Set, SetOptions}; + +#[cfg(test)] +mod tests; + +mod action; +pub mod helpers; +mod nft; + +#[tokio::main] +async fn main() { + let plugin = Plugin::default(); + 
reaction_plugin::main_loop(plugin).await; } + +#[derive(Default)] +struct Plugin { + sets: Vec, + actions: Vec, + shutdown: ShutdownController, +} + +impl PluginInfo for Plugin { + async fn manifest(&mut self) -> Result { + Ok(Manifest { + hello: Hello::new(), + streams: BTreeSet::default(), + actions: BTreeSet::from(["nftables".into()]), + }) + } + + async fn load_config( + &mut self, + streams: Vec, + actions: Vec, + ) -> RemoteResult<(Vec, Vec)> { + if !streams.is_empty() { + return Err("This plugin can't handle any stream type".into()); + } + + let mut ret_actions = Vec::with_capacity(actions.len()); + let mut set_options: BTreeMap = BTreeMap::new(); + + for ActionConfig { + stream_name, + filter_name, + action_name, + action_type, + config, + patterns, + } in actions + { + if &action_type != "nftables" { + return Err("This plugin can't handle other action types than nftables".into()); + } + + let mut options: ActionOptions = serde_json::from_value(config.into()).map_err(|err| { + format!("invalid options for action {stream_name}.{filter_name}.{action_name}: {err}") + })?; + + options.set_ip_index(patterns).map_err(|_| + format!( + "No pattern with name {} in filter {stream_name}.{filter_name}. 
Try setting the option `pattern` to your pattern name of type 'ip'", + &options.pattern + ) + )?; + + // Merge option + set_options + .entry(options.set.clone()) + .or_default() + .merge(&options.set_options) + .map_err(|err| format!("set {}: {err}", options.set))?; + + let (tx, rx) = remoc::rch::mpsc::channel(1); + self.actions + .push(Action::new(self.shutdown.token(), rx, options)?); + + ret_actions.push(ActionImpl { tx }); + } + + // Init all sets + while let Some((name, options)) = set_options.pop_first() { + self.sets.push(Set::from(name, options)); + } + + Ok((vec![], ret_actions)) + } + + async fn start(&mut self) -> RemoteResult<()> { + self.shutdown.delegate().handle_quit_signals()?; + + let mut batch = Batch::new(); + batch.add(reaction_table()); + + // Create a chain for each registered netfilter hook + for hook in self + .sets + .iter() + .flat_map(|set| &set.hooks) + .collect::>() + { + batch.add(NfListObject::Chain(Chain { + family: NfFamily::INet, + table: Cow::Borrowed("reaction"), + name: Cow::from(hook.as_str()), + _type: Some(NfChainType::Filter), + hook: Some(hook.into()), + prio: Some(0), + ..Default::default() + })); + } + + for set in &self.sets { + set.init(&mut batch)?; + } + + // TODO apply batch + Nftables::new(); + + // Launch a task that will destroy the table on shutdown + { + let token = self.shutdown.token(); + tokio::spawn(async move { + token.wait().await; + Batch::new().delete(reaction_table()); + }); + } + + // Launch all actions + while let Some(action) = self.actions.pop() { + tokio::spawn(async move { action.serve().await }); + } + self.actions = Default::default(); + + Ok(()) + } + + async fn close(self) -> RemoteResult<()> { + self.shutdown.ask_shutdown(); + self.shutdown.wait_all_task_shutdown().await; + Ok(()) + } +} + +fn reaction_table() -> NfListObject<'static> { + NfListObject::Table(Table { + family: NfFamily::INet, + name: Cow::Borrowed("reaction"), + handle: None, + }) +} diff --git 
a/plugins/reaction-plugin-nftables/src/nft.rs b/plugins/reaction-plugin-nftables/src/nft.rs new file mode 100644 index 0000000..45c4f5c --- /dev/null +++ b/plugins/reaction-plugin-nftables/src/nft.rs @@ -0,0 +1,69 @@ +use std::{ + ffi::{CStr, CString}, + thread, +}; + +use libnftables1_sys::Nftables; +use nftables::batch::Batch; +use tokio::sync::{mpsc, oneshot}; + +pub fn nftables_thread() -> NftClient { + let (tx, mut rx) = mpsc::channel(10); + + thread::spawn(move || { + let mut conn = Nftables::new(); + + while let Some(NftCommand { json, ret }) = rx.blocking_recv() { + let (rc, output, error) = conn.run_cmd(json.as_ptr()); + let res = match rc { + 0 => to_rust_string(output) + .ok_or_else(|| "unknown ok (rc = 0 but no output buffer)".into()), + _ => to_rust_string(error) + .map(|err| format!("error (rc = {rc}: {err})")) + .ok_or_else(|| format!("unknown error (rc = {rc} but no error buffer)")), + }; + ret.send(res); + } + }); + + NftClient { tx } +} + +fn to_rust_string(c_ptr: *const i8) -> Option { + if c_ptr.is_null() { + None + } else { + Some( + unsafe { CStr::from_ptr(c_ptr) } + .to_string_lossy() + .into_owned(), + ) + } +} + +pub struct NftClient { + tx: mpsc::Sender, +} + +impl NftClient { + pub async fn send(&self, batch: Batch<'_>) -> Result { + // convert JSON to CString + let mut json = serde_json::to_vec(&batch.to_nftables()) + .map_err(|err| format!("couldn't build json to send to nftables: {err}"))?; + json.push('\0' as u8); + let json = CString::from_vec_with_nul(json) + .map_err(|err| format!("invalid json with null char: {err}"))?; + // Send command + let (tx, rx) = oneshot::channel(); + let command = NftCommand { json, ret: tx }; + self.tx.send(command).await; + // Wait for result + rx.await + .map_err(|err| format!("nftables thread has quit: {err}"))? 
+ } +} + +struct NftCommand { + json: CString, + ret: oneshot::Sender>, +} diff --git a/plugins/reaction-plugin-nftables/src/tests.rs b/plugins/reaction-plugin-nftables/src/tests.rs new file mode 100644 index 0000000..09ce6cf --- /dev/null +++ b/plugins/reaction-plugin-nftables/src/tests.rs @@ -0,0 +1,253 @@ +use reaction_plugin::{ActionConfig, PluginInfo, StreamConfig, Value}; +use serde_json::json; + +use crate::Plugin; + +#[tokio::test] +async fn conf_stream() { + // No stream is supported by ipset + assert!( + Plugin::default() + .load_config( + vec![StreamConfig { + stream_name: "stream".into(), + stream_type: "ipset".into(), + config: Value::Null + }], + vec![] + ) + .await + .is_err() + ); + + // Nothing is ok + assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok()); +} + +#[tokio::test] +async fn conf_action_standalone() { + let p = vec!["name".into(), "ip".into(), "ip2".into()]; + let p_noip = vec!["name".into(), "ip2".into()]; + + for (is_ok, conf, patterns) in [ + // minimal set + (true, json!({ "set": "test" }), &p), + // missing set key + (false, json!({}), &p), + (false, json!({ "version": "ipv4" }), &p), + // unknown key + (false, json!({ "set": "test", "unknown": "yes" }), &p), + (false, json!({ "set": "test", "ip_index": 1 }), &p), + (false, json!({ "set": "test", "timeout_u32": 1 }), &p), + // pattern // + (true, json!({ "set": "test" }), &p), + (true, json!({ "set": "test", "pattern": "ip" }), &p), + (true, json!({ "set": "test", "pattern": "ip2" }), &p), + (true, json!({ "set": "test", "pattern": "ip2" }), &p_noip), + // unknown pattern "ip" + (false, json!({ "set": "test" }), &p_noip), + (false, json!({ "set": "test", "pattern": "ip" }), &p_noip), + // unknown pattern + (false, json!({ "set": "test", "pattern": "unknown" }), &p), + (false, json!({ "set": "test", "pattern": "uwu" }), &p_noip), + // bad type + (false, json!({ "set": "test", "pattern": 0 }), &p_noip), + (false, json!({ "set": "test", "pattern": true }), &p_noip), + 
// action // + (true, json!({ "set": "test", "action": "add" }), &p), + (true, json!({ "set": "test", "action": "del" }), &p), + // unknown action + (false, json!({ "set": "test", "action": "create" }), &p), + (false, json!({ "set": "test", "action": "insert" }), &p), + (false, json!({ "set": "test", "action": "delete" }), &p), + (false, json!({ "set": "test", "action": "destroy" }), &p), + // bad type + (false, json!({ "set": "test", "action": true }), &p), + (false, json!({ "set": "test", "action": 1 }), &p), + // ip version // + // ok + (true, json!({ "set": "test", "version": "ipv4" }), &p), + (true, json!({ "set": "test", "version": "ipv6" }), &p), + (true, json!({ "set": "test", "version": "ip" }), &p), + // unknown version + (false, json!({ "set": "test", "version": 4 }), &p), + (false, json!({ "set": "test", "version": 6 }), &p), + (false, json!({ "set": "test", "version": 46 }), &p), + (false, json!({ "set": "test", "version": "5" }), &p), + (false, json!({ "set": "test", "version": "ipv5" }), &p), + (false, json!({ "set": "test", "version": "4" }), &p), + (false, json!({ "set": "test", "version": "6" }), &p), + (false, json!({ "set": "test", "version": "46" }), &p), + // bad type + (false, json!({ "set": "test", "version": true }), &p), + // chains // + // everything is fine really + (true, json!({ "set": "test", "chains": [] }), &p), + (true, json!({ "set": "test", "chains": ["INPUT"] }), &p), + (true, json!({ "set": "test", "chains": ["FORWARD"] }), &p), + ( + true, + json!({ "set": "test", "chains": ["custom_chain"] }), + &p, + ), + ( + true, + json!({ "set": "test", "chains": ["INPUT", "FORWARD"] }), + &p, + ), + ( + true, + json!({ + "set": "test", + "chains": ["INPUT", "FORWARD", "my_iptables_chain"] + }), + &p, + ), + // timeout // + (true, json!({ "set": "test", "timeout": "1m" }), &p), + (true, json!({ "set": "test", "timeout": "3 days" }), &p), + // bad + (false, json!({ "set": "test", "timeout": "3 dayz"}), &p), + (false, json!({ "set": "test", 
"timeout": 12 }), &p), + // target // + // anything is fine too + (true, json!({ "set": "test", "target": "DROP" }), &p), + (true, json!({ "set": "test", "target": "ACCEPT" }), &p), + (true, json!({ "set": "test", "target": "RETURN" }), &p), + (true, json!({ "set": "test", "target": "custom_chain" }), &p), + // bad + (false, json!({ "set": "test", "target": 11 }), &p), + (false, json!({ "set": "test", "target": ["DROP"] }), &p), + ] { + let res = Plugin::default() + .load_config( + vec![], + vec![ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action".into(), + action_type: "ipset".into(), + config: conf.clone().into(), + patterns: patterns.clone(), + }], + ) + .await; + + assert!( + res.is_ok() == is_ok, + "conf: {:?}, must be ok: {is_ok}, result: {:?}", + conf, + // empty Result::Ok because ActionImpl is not Debug + res.map(|_| ()) + ); + } +} + +// TODO +#[tokio::test] +async fn conf_action_merge() { + let mut plugin = Plugin::default(); + + let set1 = ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action1".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "target": "DROP", + "chains": ["INPUT"], + "action": "add", + }) + .into(), + patterns: vec!["ip".into()], + }; + + let set2 = ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action2".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "target": "DROP", + "version": "ip", + "action": "add", + }) + .into(), + patterns: vec!["ip".into()], + }; + + let set3 = ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action2".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "action": "del", + }) + .into(), + patterns: vec!["ip".into()], + }; + + let res = plugin + .load_config( + vec![], + vec![ + // First set + set1.clone(), + // Same set, adding options, no 
conflict + set2.clone(), + // Same set, no new options, no conflict + set3.clone(), + // Unrelated set, so no conflict + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action3".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test2", + "target": "target1", + "version": "ipv6", + }) + .into(), + patterns: vec!["ip".into()], + }, + ], + ) + .await; + + assert!(res.is_ok(), "res: {:?}", res.map(|_| ())); + + // Another set with conflict is not ok + let res = plugin + .load_config( + vec![], + vec![ + // First set + set1, + // Same set, adding options, no conflict + set2, + // Same set, no new options, no conflict + set3, + // Another set with conflict + ActionConfig { + stream_name: "stream".into(), + filter_name: "filter".into(), + action_name: "action3".into(), + action_type: "ipset".into(), + config: json!({ + "set": "test", + "target": "target2", + "action": "del", + }) + .into(), + patterns: vec!["ip".into()], + }, + ], + ) + .await; + assert!(res.is_err(), "res: {:?}", res.map(|_| ())); +} diff --git a/shell.nix b/shell.nix index 27dac77..ecb4318 100644 --- a/shell.nix +++ b/shell.nix @@ -4,6 +4,7 @@ pkgs.mkShell { name = "libipset"; buildInputs = [ ipset + nftables clang ]; src = null; From 0cd765251a270ae1cf8b157c626ed72f74a1c840 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 20 Feb 2026 12:00:00 +0100 Subject: [PATCH 230/241] run plugins in the same slice as reaction And reaction should be started in system-reaction.slice. The plugins could then be grouped together with the daemon --- src/concepts/plugin.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 8c3c142..3f3936c 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -172,6 +172,8 @@ impl Plugin { let mut command = Command::new("run0"); // --pipe gives direct, non-emulated stdio access, for better performance. 
command.arg("--pipe"); + // run the command inside the same slice as reaction + command.arg("--slice-intherit"); // Make path absolute for systemd let full_workdir = path::absolute(&plugin_working_directory)?; From 5b6cc35deb80b73db83ce077231d2ade034b6caf Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 25 Feb 2026 12:00:00 +0100 Subject: [PATCH 231/241] nftables: Fix compilation errors and actually use libnftables --- .../reaction-plugin-nftables/src/action.rs | 8 +- plugins/reaction-plugin-nftables/src/main.rs | 17 ++- plugins/reaction-plugin-nftables/src/nft.rs | 100 ++++++++++-------- 3 files changed, 72 insertions(+), 53 deletions(-) diff --git a/plugins/reaction-plugin-nftables/src/action.rs b/plugins/reaction-plugin-nftables/src/action.rs index 6649bd1..3898ab5 100644 --- a/plugins/reaction-plugin-nftables/src/action.rs +++ b/plugins/reaction-plugin-nftables/src/action.rs @@ -8,7 +8,6 @@ use std::{ use nftables::{ batch::Batch, expr::Expression, - helper::apply_ruleset_async, schema::{Element, NfListObject, Rule, SetFlag, SetType, SetTypeValue}, stmt::Statement, types::{NfFamily, NfHook}, @@ -247,13 +246,14 @@ impl Set { if let Some(set) = set { let family = NfFamily::INet; let table = Cow::from("reaction"); - let name = Cow::from(set.as_str()); // create set - batch.add(NfListObject::<'a>::Set(Box::new(nftables::schema::Set { + batch.add(NfListObject::<'a>::Set(Box::new(nftables::schema::Set::< + 'a, + > { family, table: table.to_owned(), - name, + name: Cow::Owned(set.to_owned()), // TODO Try a set which is both ipv4 and ipv6? 
set_type: SetTypeValue::Single(match version { Version::IPv4 => SetType::Ipv4Addr, diff --git a/plugins/reaction-plugin-nftables/src/main.rs b/plugins/reaction-plugin-nftables/src/main.rs index ac93eba..13e14a7 100644 --- a/plugins/reaction-plugin-nftables/src/main.rs +++ b/plugins/reaction-plugin-nftables/src/main.rs @@ -3,7 +3,6 @@ use std::{ collections::{BTreeMap, BTreeSet}, }; -use libnftables1_sys::Nftables; use nftables::{ batch::Batch, schema::{Chain, NfListObject, Table}, @@ -15,7 +14,10 @@ use reaction_plugin::{ }; use remoc::rtc; -use crate::action::{Action, ActionOptions, Set, SetOptions}; +use crate::{ + action::{Action, ActionOptions, Set, SetOptions}, + nft::NftClient, +}; #[cfg(test)] mod tests; @@ -32,6 +34,7 @@ async fn main() { #[derive(Default)] struct Plugin { + nft: NftClient, sets: Vec, actions: Vec, shutdown: ShutdownController, @@ -90,8 +93,12 @@ impl PluginInfo for Plugin { .map_err(|err| format!("set {}: {err}", options.set))?; let (tx, rx) = remoc::rch::mpsc::channel(1); - self.actions - .push(Action::new(self.shutdown.token(), rx, options)?); + self.actions.push(Action::new( + self.nft.clone(), + self.shutdown.token(), + rx, + options, + )?); ret_actions.push(ActionImpl { tx }); } @@ -133,7 +140,7 @@ impl PluginInfo for Plugin { } // TODO apply batch - Nftables::new(); + self.nft.send(batch).await?; // Launch a task that will destroy the table on shutdown { diff --git a/plugins/reaction-plugin-nftables/src/nft.rs b/plugins/reaction-plugin-nftables/src/nft.rs index 45c4f5c..a1641d2 100644 --- a/plugins/reaction-plugin-nftables/src/nft.rs +++ b/plugins/reaction-plugin-nftables/src/nft.rs @@ -7,26 +7,65 @@ use libnftables1_sys::Nftables; use nftables::batch::Batch; use tokio::sync::{mpsc, oneshot}; -pub fn nftables_thread() -> NftClient { - let (tx, mut rx) = mpsc::channel(10); +/// A client with a dedicated server thread to libnftables. +/// Calling [`Default::default()`] spawns a new server thread. 
+/// Cloning just creates a new client to the same server thread. +#[derive(Clone)] +pub struct NftClient { + tx: mpsc::Sender, +} - thread::spawn(move || { - let mut conn = Nftables::new(); +impl Default for NftClient { + fn default() -> Self { + let (tx, mut rx) = mpsc::channel(10); - while let Some(NftCommand { json, ret }) = rx.blocking_recv() { - let (rc, output, error) = conn.run_cmd(json.as_ptr()); - let res = match rc { - 0 => to_rust_string(output) - .ok_or_else(|| "unknown ok (rc = 0 but no output buffer)".into()), - _ => to_rust_string(error) - .map(|err| format!("error (rc = {rc}: {err})")) - .ok_or_else(|| format!("unknown error (rc = {rc} but no error buffer)")), - }; - ret.send(res); - } - }); + thread::spawn(move || { + let mut conn = Nftables::new(); - NftClient { tx } + while let Some(NftCommand { json, ret }) = rx.blocking_recv() { + let (rc, output, error) = conn.run_cmd(json.as_ptr()); + let res = match rc { + 0 => to_rust_string(output) + .ok_or_else(|| "unknown ok (rc = 0 but no output buffer)".into()), + _ => to_rust_string(error) + .map(|err| format!("error (rc = {rc}: {err})")) + .ok_or_else(|| format!("unknown error (rc = {rc} but no error buffer)")), + }; + let _ = ret.send(res); + } + }); + + NftClient { tx } + } +} + +impl NftClient { + /// Send a batch to nftables. 
+ pub async fn send(&self, batch: Batch<'_>) -> Result { + // convert JSON to CString + let mut json = serde_json::to_vec(&batch.to_nftables()) + .map_err(|err| format!("couldn't build json to send to nftables: {err}"))?; + json.push('\0' as u8); + let json = CString::from_vec_with_nul(json) + .map_err(|err| format!("invalid json with null char: {err}"))?; + + // Send command + let (tx, rx) = oneshot::channel(); + let command = NftCommand { json, ret: tx }; + self.tx + .send(command) + .await + .map_err(|err| format!("nftables thread has quit, can't send command: {err}"))?; + + // Wait for result + rx.await + .map_err(|_| format!("nftables thread has quit, no response for command"))? + } +} + +struct NftCommand { + json: CString, + ret: oneshot::Sender>, } fn to_rust_string(c_ptr: *const i8) -> Option { @@ -40,30 +79,3 @@ fn to_rust_string(c_ptr: *const i8) -> Option { ) } } - -pub struct NftClient { - tx: mpsc::Sender, -} - -impl NftClient { - pub async fn send(&self, batch: Batch<'_>) -> Result { - // convert JSON to CString - let mut json = serde_json::to_vec(&batch.to_nftables()) - .map_err(|err| format!("couldn't build json to send to nftables: {err}"))?; - json.push('\0' as u8); - let json = CString::from_vec_with_nul(json) - .map_err(|err| format!("invalid json with null char: {err}"))?; - // Send command - let (tx, rx) = oneshot::channel(); - let command = NftCommand { json, ret: tx }; - self.tx.send(command).await; - // Wait for result - rx.await - .map_err(|err| format!("nftables thread has quit: {err}"))? 
- } -} - -struct NftCommand { - json: CString, - ret: oneshot::Sender>, -} From 3d7e647ef7f18d95354b7c63b0decf37c6cd8e47 Mon Sep 17 00:00:00 2001 From: ppom Date: Wed, 25 Feb 2026 12:00:00 +0100 Subject: [PATCH 232/241] Adapt tests to nftables configuration --- plugins/reaction-plugin-nftables/src/tests.rs | 64 +++++++++---------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/plugins/reaction-plugin-nftables/src/tests.rs b/plugins/reaction-plugin-nftables/src/tests.rs index 09ce6cf..7c2de26 100644 --- a/plugins/reaction-plugin-nftables/src/tests.rs +++ b/plugins/reaction-plugin-nftables/src/tests.rs @@ -5,13 +5,13 @@ use crate::Plugin; #[tokio::test] async fn conf_stream() { - // No stream is supported by ipset + // No stream is supported by nftables assert!( Plugin::default() .load_config( vec![StreamConfig { stream_name: "stream".into(), - stream_type: "ipset".into(), + stream_type: "nftables".into(), config: Value::Null }], vec![] @@ -20,7 +20,7 @@ async fn conf_stream() { .is_err() ); - // Nothing is ok + // Empty config is ok assert!(Plugin::default().load_config(vec![], vec![]).await.is_ok()); } @@ -55,11 +55,11 @@ async fn conf_action_standalone() { (false, json!({ "set": "test", "pattern": true }), &p_noip), // action // (true, json!({ "set": "test", "action": "add" }), &p), - (true, json!({ "set": "test", "action": "del" }), &p), + (true, json!({ "set": "test", "action": "delete" }), &p), // unknown action (false, json!({ "set": "test", "action": "create" }), &p), (false, json!({ "set": "test", "action": "insert" }), &p), - (false, json!({ "set": "test", "action": "delete" }), &p), + (false, json!({ "set": "test", "action": "del" }), &p), (false, json!({ "set": "test", "action": "destroy" }), &p), // bad type (false, json!({ "set": "test", "action": true }), &p), @@ -80,27 +80,19 @@ async fn conf_action_standalone() { (false, json!({ "set": "test", "version": "46" }), &p), // bad type (false, json!({ "set": "test", "version": true }), &p), 
- // chains // + // hooks // // everything is fine really - (true, json!({ "set": "test", "chains": [] }), &p), - (true, json!({ "set": "test", "chains": ["INPUT"] }), &p), - (true, json!({ "set": "test", "chains": ["FORWARD"] }), &p), + (true, json!({ "set": "test", "hooks": [] }), &p), ( true, - json!({ "set": "test", "chains": ["custom_chain"] }), + json!({ "set": "test", "hooks": ["input", "forward", "ingress", "prerouting", "output", "postrouting", "egress"] }), &p, ), + (false, json!({ "set": "test", "hooks": ["INPUT"] }), &p), + (false, json!({ "set": "test", "hooks": ["FORWARD"] }), &p), ( - true, - json!({ "set": "test", "chains": ["INPUT", "FORWARD"] }), - &p, - ), - ( - true, - json!({ - "set": "test", - "chains": ["INPUT", "FORWARD", "my_iptables_chain"] - }), + false, + json!({ "set": "test", "hooks": ["unknown_hook"] }), &p, ), // timeout // @@ -111,11 +103,13 @@ async fn conf_action_standalone() { (false, json!({ "set": "test", "timeout": 12 }), &p), // target // // anything is fine too - (true, json!({ "set": "test", "target": "DROP" }), &p), - (true, json!({ "set": "test", "target": "ACCEPT" }), &p), - (true, json!({ "set": "test", "target": "RETURN" }), &p), - (true, json!({ "set": "test", "target": "custom_chain" }), &p), + (true, json!({ "set": "test", "target": "drop" }), &p), + (true, json!({ "set": "test", "target": "accept" }), &p), + (true, json!({ "set": "test", "target": "return" }), &p), + (true, json!({ "set": "test", "target": "continue" }), &p), // bad + (false, json!({ "set": "test", "target": "custom" }), &p), + (false, json!({ "set": "test", "target": "DROP" }), &p), (false, json!({ "set": "test", "target": 11 }), &p), (false, json!({ "set": "test", "target": ["DROP"] }), &p), ] { @@ -126,7 +120,7 @@ async fn conf_action_standalone() { stream_name: "stream".into(), filter_name: "filter".into(), action_name: "action".into(), - action_type: "ipset".into(), + action_type: "nftables".into(), config: conf.clone().into(), patterns: 
patterns.clone(), }], @@ -152,11 +146,11 @@ async fn conf_action_merge() { stream_name: "stream".into(), filter_name: "filter".into(), action_name: "action1".into(), - action_type: "ipset".into(), + action_type: "nftables".into(), config: json!({ "set": "test", - "target": "DROP", - "chains": ["INPUT"], + "target": "drop", + "hooks": ["input"], "action": "add", }) .into(), @@ -167,10 +161,10 @@ async fn conf_action_merge() { stream_name: "stream".into(), filter_name: "filter".into(), action_name: "action2".into(), - action_type: "ipset".into(), + action_type: "nftables".into(), config: json!({ "set": "test", - "target": "DROP", + "target": "drop", "version": "ip", "action": "add", }) @@ -182,10 +176,10 @@ async fn conf_action_merge() { stream_name: "stream".into(), filter_name: "filter".into(), action_name: "action2".into(), - action_type: "ipset".into(), + action_type: "nftables".into(), config: json!({ "set": "test", - "action": "del", + "action": "delete", }) .into(), patterns: vec!["ip".into()], @@ -206,10 +200,10 @@ async fn conf_action_merge() { stream_name: "stream".into(), filter_name: "filter".into(), action_name: "action3".into(), - action_type: "ipset".into(), + action_type: "nftables".into(), config: json!({ "set": "test2", - "target": "target1", + "target": "return", "version": "ipv6", }) .into(), @@ -237,7 +231,7 @@ async fn conf_action_merge() { stream_name: "stream".into(), filter_name: "filter".into(), action_name: "action3".into(), - action_type: "ipset".into(), + action_type: "nftables".into(), config: json!({ "set": "test", "target": "target2", From c41c89101dddfc8c7fdcbbfea16d456ac5051f00 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 26 Feb 2026 12:00:00 +0100 Subject: [PATCH 233/241] Fix #151: Move RegexSet creation from StreamManager to config Stream This move the potential error of a too big regex set to the config setup, a place where it can be gracefully handled, instead of the place it was, where this would make reaction mess up with 
start/stop, etc. --- src/concepts/stream.rs | 21 +++++++++++++++++++++ src/daemon/stream.rs | 32 ++++++++++++++++---------------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/src/concepts/stream.rs b/src/concepts/stream.rs index 3b3fdf5..97e6ece 100644 --- a/src/concepts/stream.rs +++ b/src/concepts/stream.rs @@ -1,6 +1,7 @@ use std::{cmp::Ordering, collections::BTreeMap, hash::Hash}; use reaction_plugin::StreamConfig; +use regex::RegexSet; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -19,6 +20,11 @@ pub struct Stream { #[serde(skip)] pub name: String, + #[serde(skip)] + pub compiled_regex_set: RegexSet, + #[serde(skip)] + pub regex_index_to_filter_name: Vec, + // Plugin-specific #[serde(default, rename = "type", skip_serializing_if = "Option::is_none")] pub stream_type: Option, @@ -90,6 +96,21 @@ impl Stream { filter.setup(name, key, patterns)?; } + let all_regexes: BTreeMap<_, _> = self + .filters + .values() + .flat_map(|filter| { + filter + .regex + .iter() + .map(|regex| (regex, filter.name.clone())) + }) + .collect(); + + self.compiled_regex_set = RegexSet::new(all_regexes.keys()) + .map_err(|err| format!("too much regexes on the filters of this stream: {err}"))?; + self.regex_index_to_filter_name = all_regexes.into_values().collect(); + Ok(()) } diff --git a/src/daemon/stream.rs b/src/daemon/stream.rs index 7d50b54..3ed0d56 100644 --- a/src/daemon/stream.rs +++ b/src/daemon/stream.rs @@ -1,11 +1,10 @@ use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeSet, HashMap}, process::Stdio, }; use futures::{FutureExt, Stream as AsyncStream, StreamExt, future::join_all}; use reaction_plugin::{StreamImpl, shutdown::ShutdownToken}; -use regex::RegexSet; use tokio::{ io::{AsyncBufReadExt, BufReader}, process::{Child, ChildStderr, ChildStdout, Command}, @@ -45,7 +44,6 @@ pub fn reader_to_stream( } pub struct StreamManager { - compiled_regex_set: RegexSet, regex_index_to_filter_manager: Vec, stream: &'static 
Stream, stream_plugin: Option, @@ -59,16 +57,6 @@ impl StreamManager { shutdown: ShutdownToken, plugins: &mut Plugins, ) -> Result { - let all_regexes: BTreeMap<_, _> = filter_managers - .iter() - .flat_map(|(filter, filter_manager)| { - filter - .regex - .iter() - .map(|regex| (regex, filter_manager.clone())) - }) - .collect(); - let stream_plugin = if stream.is_plugin() { Some( plugins @@ -84,11 +72,23 @@ impl StreamManager { None }; + let regex_index_to_filter_manager = stream + .regex_index_to_filter_name + .iter() + .map(|filter_name| { + filter_managers + .iter() + .find(|(filter, _)| filter_name == &filter.name) + .unwrap() + .1 + .clone() + }) + .collect(); + debug!("successfully initialized stream {}", stream.name); Ok(StreamManager { - compiled_regex_set: RegexSet::new(all_regexes.keys()).map_err(|err| err.to_string())?, - regex_index_to_filter_manager: all_regexes.into_values().collect(), + regex_index_to_filter_manager, stream, stream_plugin, shutdown, @@ -230,7 +230,7 @@ impl StreamManager { } fn matching_filters(&self, line: &str) -> BTreeSet<&FilterManager> { - let matches = self.compiled_regex_set.matches(line); + let matches = self.stream.compiled_regex_set.matches(line); matches .into_iter() .map(|match_| &self.regex_index_to_filter_manager[match_]) From ea0e7177d95816829b9259de7358075d93b4e707 Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 26 Feb 2026 12:00:00 +0100 Subject: [PATCH 234/241] nftables: Fix bad action advertised --- plugins/reaction-plugin-nftables/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/reaction-plugin-nftables/src/main.rs b/plugins/reaction-plugin-nftables/src/main.rs index 13e14a7..6a7f067 100644 --- a/plugins/reaction-plugin-nftables/src/main.rs +++ b/plugins/reaction-plugin-nftables/src/main.rs @@ -45,7 +45,7 @@ impl PluginInfo for Plugin { Ok(Manifest { hello: Hello::new(), streams: BTreeSet::default(), - actions: BTreeSet::from(["ipset".into()]), + actions: 
BTreeSet::from(["nftables".into()]), }) } From 00725ed9e212d0ecb08901a9cb2569ba7ed5d29d Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 26 Feb 2026 12:00:00 +0100 Subject: [PATCH 235/241] notif test: add a filter that shouldn't match --- tests/notif.jsonnet | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/notif.jsonnet b/tests/notif.jsonnet index 2e46471..14d76c0 100644 --- a/tests/notif.jsonnet +++ b/tests/notif.jsonnet @@ -41,6 +41,23 @@ }, }, }, + f2: { + regex: [ + "^can't found $", + ], + retry: 2, + retryperiod: '60s', + actions: { + damn: { + cmd: ['notify-send', 'you should not see that', 'ban '], + }, + undamn: { + cmd: ['notify-send', 'you should not see that', 'unban '], + after: '3s', + onexit: true, + }, + }, + }, }, }, }, From f2b1accec07fe8262368f26134d0edf253ededfb Mon Sep 17 00:00:00 2001 From: ppom Date: Thu, 26 Feb 2026 12:00:00 +0100 Subject: [PATCH 236/241] Fix slice-inherit option --- src/concepts/plugin.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/concepts/plugin.rs b/src/concepts/plugin.rs index 3f3936c..5e22287 100644 --- a/src/concepts/plugin.rs +++ b/src/concepts/plugin.rs @@ -173,7 +173,7 @@ impl Plugin { // --pipe gives direct, non-emulated stdio access, for better performance. 
command.arg("--pipe"); // run the command inside the same slice as reaction - command.arg("--slice-intherit"); + command.arg("--slice-inherit"); // Make path absolute for systemd let full_workdir = path::absolute(&plugin_working_directory)?; From 5a6c203c016daf752b30d3754bd2afc1073d9ef6 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 27 Feb 2026 12:00:00 +0100 Subject: [PATCH 237/241] Add system-reaction.slice --- Cargo.toml | 2 ++ packaging/reaction.service | 4 +++- packaging/system-reaction.slice | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 packaging/system-reaction.slice diff --git a/Cargo.toml b/Cargo.toml index 39be1a6..66e95e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,8 @@ assets = [ [ "target/release/reaction.bash", "/usr/share/bash-completion/completions/reaction", "644" ], [ "target/release/reaction.fish", "/usr/share/fish/completions/", "644" ], [ "target/release/_reaction", "/usr/share/zsh/vendor-completions/", "644" ], + # Slice + [ "packaging/system-reaction.slice", "/usr/lib/systemd/system/", "644" ], ] [dependencies] diff --git a/packaging/reaction.service b/packaging/reaction.service index 5bd1478..343015d 100644 --- a/packaging/reaction.service +++ b/packaging/reaction.service @@ -1,6 +1,6 @@ # vim: ft=systemd [Unit] -Description=A daemon that scans program outputs for repeated patterns, and takes action. +Description=reaction daemon Documentation=https://reaction.ppom.me # Ensure reaction will insert its chain after docker has inserted theirs. Only useful when iptables & docker are used # After=docker.service @@ -17,6 +17,8 @@ RuntimeDirectory=reaction WorkingDirectory=/var/lib/reaction # Let reaction kill its child processes first KillMode=mixed +# Put reaction in its own slice so that plugins can be grouped within. 
+Slice=system-reaction.slice [Install] WantedBy=multi-user.target diff --git a/packaging/system-reaction.slice b/packaging/system-reaction.slice new file mode 100644 index 0000000..732f276 --- /dev/null +++ b/packaging/system-reaction.slice @@ -0,0 +1 @@ +[Slice] From 938a366576aad22ccd7b8bbe72b3c3560a133de8 Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 27 Feb 2026 12:00:00 +0100 Subject: [PATCH 238/241] More useful error message when plugin can't launch and systemd=true --- src/daemon/plugin/mod.rs | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/src/daemon/plugin/mod.rs b/src/daemon/plugin/mod.rs index 52ad0e8..c32e283 100644 --- a/src/daemon/plugin/mod.rs +++ b/src/daemon/plugin/mod.rs @@ -1,5 +1,6 @@ use std::{ collections::{BTreeMap, BTreeSet}, + fmt::Display, io, ops::{Deref, DerefMut}, process::ExitStatus, @@ -52,7 +53,7 @@ impl PluginManager { let mut child = plugin .launch(state_directory) .await - .map_err(|err| format!("could not launch plugin: {err}"))?; + .map_err(|err| systemd_error(plugin, "could not launch plugin", err))?; { let stderr = child.stderr.take().unwrap(); @@ -70,10 +71,7 @@ impl PluginManager { ) = Connect::io(remoc::Cfg::default(), stdout, stdin) .await .map_err(|err| { - format!( - "could not init communication with plugin {}: {err}", - plugin.name - ) + systemd_error(plugin, "could not init communication with plugin", err) })?; tokio::spawn(conn); @@ -166,6 +164,20 @@ impl PluginManager { } } +fn systemd_error(plugin: &Plugin, message: &str, err: impl Display) -> String { + if plugin.systemd { + format!( + "{message}: {err}. \ +`plugins.{0}.systemd` is set to true, so this may be an issue with systemd's run0. \ +please make sure `sudo run0 ls /` returns the same thing as `sudo ls /` as a test. 
\ +if run0 can't be found or doesn't output anything, set `plugins.{0}.systemd` to false.", + plugin.name, + ) + } else { + format!("{message}: {err}") + } +} + async fn handle_stderr(stderr: ChildStderr, plugin_name: String) { // read lines until shutdown let lines = reader_to_stream(stderr); From 16692731f0fdaf4fe613a24933e66f555605bd71 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 2 Mar 2026 12:00:00 +0100 Subject: [PATCH 239/241] Remove useless chrono dependency from reaction-plugin --- Cargo.lock | 1 - plugins/reaction-plugin/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f138507..c9a7b56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2886,7 +2886,6 @@ dependencies = [ name = "reaction-plugin" version = "1.0.0" dependencies = [ - "chrono", "remoc", "serde", "serde_json", diff --git a/plugins/reaction-plugin/Cargo.toml b/plugins/reaction-plugin/Cargo.toml index 08d1eb6..0fa3a38 100644 --- a/plugins/reaction-plugin/Cargo.toml +++ b/plugins/reaction-plugin/Cargo.toml @@ -11,7 +11,6 @@ categories = ["security"] description = "Plugin interface for reaction, a daemon that scans logs and takes action (alternative to fail2ban)" [dependencies] -chrono.workspace = true remoc.workspace = true serde.workspace = true serde_json.workspace = true From 3ca54c6c43ab6cae209b8cf15aa2685ca9d3c670 Mon Sep 17 00:00:00 2001 From: ppom Date: Mon, 2 Mar 2026 12:00:00 +0100 Subject: [PATCH 240/241] ipset: Better error handling and messages - Clearer messages. - Make sure logs are showed in order. - When cleaning after an error on startup, do not try to undo an action that failed. 
--- plugins/reaction-plugin-ipset/src/action.rs | 38 +++++++------- plugins/reaction-plugin-ipset/src/ipset.rs | 56 ++++++++++++--------- plugins/reaction-plugin-ipset/src/main.rs | 18 ++++--- 3 files changed, 62 insertions(+), 50 deletions(-) diff --git a/plugins/reaction-plugin-ipset/src/action.rs b/plugins/reaction-plugin-ipset/src/action.rs index 1d33c7b..c820e28 100644 --- a/plugins/reaction-plugin-ipset/src/action.rs +++ b/plugins/reaction-plugin-ipset/src/action.rs @@ -1,4 +1,4 @@ -use std::{fmt::Debug, u32}; +use std::{fmt::Debug, u32, usize}; use reaction_plugin::{Exec, shutdown::ShutdownToken, time::parse_duration}; use remoc::rch::mpsc as remocMpsc; @@ -173,7 +173,7 @@ impl Set { } } - pub async fn init(&self, ipset: &mut IpSet) -> Result<(), String> { + pub async fn init(&self, ipset: &mut IpSet) -> Result<(), (usize, String)> { for (set, version) in [ (&self.sets.ipv4, Version::IPv4), (&self.sets.ipv6, Version::IPv6), @@ -186,44 +186,42 @@ impl Set { version, timeout: self.timeout, })) - .await?; + .await + .map_err(|err| (0, err.to_string()))?; // insert set in chains - for chain in &self.chains { + for (i, chain) in self.chains.iter().enumerate() { ipset .order(Order::InsertSet(SetChain { set: set.clone(), chain: chain.clone(), target: self.target.clone(), })) - .await?; + .await + .map_err(|err| (i + 1, err.to_string()))?; } } } Ok(()) } - pub async fn destroy(&self, ipset: &mut IpSet) { - for (set, version) in [ - (&self.sets.ipv4, Version::IPv4), - (&self.sets.ipv6, Version::IPv6), - ] { + pub async fn destroy(&self, ipset: &mut IpSet, until: Option) { + for set in [&self.sets.ipv4, &self.sets.ipv6] { if let Some(set) = set { - for chain in &self.chains { - if let Err(err) = ipset + for chain in self + .chains + .iter() + .take(until.map(|until| until - 1).unwrap_or(usize::MAX)) + { + let _ = ipset .order(Order::RemoveSet(SetChain { set: set.clone(), chain: chain.clone(), target: self.target.clone(), })) - .await - { - eprintln!( - "ERROR while 
removing {version} set {set} from chain {chain}: {err}" - ); - } + .await; } - if let Err(err) = ipset.order(Order::DestroySet(set.clone())).await { - eprintln!("ERROR while destroying {version} set {set}: {err}"); + if until.is_none_or(|until| until != 0) { + let _ = ipset.order(Order::DestroySet(set.clone())).await; } } } diff --git a/plugins/reaction-plugin-ipset/src/ipset.rs b/plugins/reaction-plugin-ipset/src/ipset.rs index b2fcb78..81b1061 100644 --- a/plugins/reaction-plugin-ipset/src/ipset.rs +++ b/plugins/reaction-plugin-ipset/src/ipset.rs @@ -72,7 +72,7 @@ impl IpSet { pub enum IpSetError { Thread(String), - IpSet(String), + IpSet(()), } impl Display for IpSetError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -81,7 +81,7 @@ impl Display for IpSetError { "{}", match self { IpSetError::Thread(err) => err, - IpSetError::IpSet(err) => err, + IpSetError::IpSet(()) => "ipset error", } ) } @@ -90,12 +90,12 @@ impl From for String { fn from(value: IpSetError) -> Self { match value { IpSetError::Thread(err) => err, - IpSetError::IpSet(err) => err, + IpSetError::IpSet(()) => "ipset error".to_string(), } } } -pub type OrderType = (Order, oneshot::Sender>); +pub type OrderType = (Order, oneshot::Sender>); struct Set { session: Session, @@ -121,7 +121,7 @@ impl IPsetManager { } } - fn handle_order(&mut self, order: Order) -> Result<(), String> { + fn handle_order(&mut self, order: Order) -> Result<(), ()> { match order { Order::CreateSet(CreateSet { name, @@ -139,7 +139,7 @@ impl IPsetManager { }; builder.with_ipv6(version == Version::IPv6)?.build() }) - .map_err(|err| format!("Could not create set {name}: {err}"))?; + .map_err(|err| eprintln!("ERROR Could not create set {name}: {err}"))?; self.sessions.insert(name, Set { session, version }); } @@ -149,7 +149,7 @@ impl IPsetManager { session .session .destroy() - .map_err(|err| format!("Could not destroy set {set}: {err}"))?; + .map_err(|err| eprintln!("ERROR Could not destroy set {set}: 
{err}"))?; } } @@ -162,9 +162,13 @@ impl IPsetManager { Ok(()) } - fn insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), String> { + fn insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), ()> { + self._insert_remove_ip(set, ip, insert) + .map_err(|err| eprintln!("ERROR {err}")) + } + fn _insert_remove_ip(&mut self, set: String, ip: String, insert: bool) -> Result<(), String> { let session = self.sessions.get_mut(&set).ok_or(format!( - "No set handled by us with this name: {set}. This likely is a bug." + "No set handled by this plugin with this name: {set}. This likely is a bug." ))?; let mut net_data = NetDataType::new(Ipv4Addr::LOCALHOST, 0); @@ -182,24 +186,28 @@ impl IPsetManager { Ok(()) } - fn insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), String> { - let SetChain { - set, - chain, - target: action, - } = options; + fn insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), ()> { + self._insert_remove_set(options, insert) + .map_err(|err| eprintln!("ERROR {err}")) + } + fn _insert_remove_set(&self, options: SetChain, insert: bool) -> Result<(), String> { + let SetChain { set, chain, target } = options; let version = self .sessions .get(&set) - .ok_or(format!("No set managed by us with this name: {set}"))? + .ok_or(format!( + "No set managed by this plugin with this name: {set}" + ))? 
.version; - if insert { - eprintln!("INFO inserting {version} set {set} in chain {chain}"); + let (verb, verbing, from) = if insert { + ("insert", "inserting", "in") } else { - eprintln!("INFO removing {version} set {set} from chain {chain}"); - } + ("remove", "removing", "from") + }; + + eprintln!("INFO {verbing} {version} set {set} {from} chain {chain}"); let command = match version { Version::IPv4 => "iptables", @@ -217,20 +225,20 @@ impl IPsetManager { &set, "src", "-j", - &action, + &target, ]) .spawn() - .map_err(|err| format!("Could not insert ipset {set} in chain {chain}: {err}"))?; + .map_err(|err| format!("Could not {verb} ipset {set} {from} chain {chain}: Could not execute {command}: {err}"))?; let exit = child .wait() - .map_err(|err| format!("Could not insert ipset: {err}"))?; + .map_err(|err| format!("Could not {verb} ipset {set} {from} chain {chain}: {err}"))?; if exit.success() { Ok(()) } else { Err(format!( - "Could not insert ipset: exit code {}", + "Could not {verb} ipset: exit code {}", exit.code() .map(|c| c.to_string()) .unwrap_or_else(|| "".to_string()) diff --git a/plugins/reaction-plugin-ipset/src/main.rs b/plugins/reaction-plugin-ipset/src/main.rs index 1117529..828c3a8 100644 --- a/plugins/reaction-plugin-ipset/src/main.rs +++ b/plugins/reaction-plugin-ipset/src/main.rs @@ -109,15 +109,21 @@ impl PluginInfo for Plugin { let mut first_error = None; for (i, set) in self.sets.iter().enumerate() { // Retain if error - if let Err(err) = set.init(&mut self.ipset).await { - first_error = Some((i, RemoteError::Plugin(err))); + if let Err((failed_step, err)) = set.init(&mut self.ipset).await { + first_error = Some((i, failed_step, RemoteError::Plugin(err))); break; } } // Destroy initialized sets if error - if let Some((i, err)) = first_error { - for set in self.sets.iter().take(i + 1) { - let _ = set.destroy(&mut self.ipset).await; + if let Some((last_set, failed_step, err)) = first_error { + eprintln!("DEBUG last_set: {last_set} failed_step: 
{failed_step} err: {err}"); + for (curr_set, set) in self.sets.iter().enumerate().take(last_set + 1) { + let until = if last_set == curr_set { + Some(failed_step) + } else { + None + }; + let _ = set.destroy(&mut self.ipset, until).await; } return Err(err); } @@ -148,6 +154,6 @@ impl PluginInfo for Plugin { async fn destroy_sets_at_shutdown(mut ipset: IpSet, sets: Vec, shutdown: ShutdownToken) { shutdown.wait().await; for set in sets { - set.destroy(&mut ipset).await; + set.destroy(&mut ipset, None).await; } } From 8a34a1fa11077146d7e6b4856d18755ea6be9edc Mon Sep 17 00:00:00 2001 From: ppom Date: Fri, 13 Mar 2026 12:00:00 +0100 Subject: [PATCH 241/241] Remove useless gitlab ci file --- .gitlab-ci.yml | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 .gitlab-ci.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index 78d7601..0000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -image: golang:1.20-bookworm -stages: - - build - -variables: - DEBIAN_FRONTEND: noninteractive - -test_building: - stage: build - before_script: - - apt-get -qq -y update - - apt-get -qq -y install build-essential devscripts debhelper quilt wget - script: - - make reaction ip46tables nft46