mirror of
https://github.com/dnote/dnote
synced 2026-03-18 08:19:55 +01:00
Auto vacuum and manage connections (#705)
* Test concurrent sync * Auto vacuum and manage connections
This commit is contained in:
parent
a46afb821f
commit
ae290a226f
8 changed files with 175 additions and 1 deletions
|
|
@ -63,7 +63,8 @@ func InitTestMemoryDB(t *testing.T) *DB {
|
|||
|
||||
// InitTestFileDB initializes a file-based test database with the default schema.
|
||||
func InitTestFileDB(t *testing.T) (*DB, string) {
|
||||
dbPath := filepath.Join(t.TempDir(), "dnote.db")
|
||||
uuid := mustGenerateTestUUID(t)
|
||||
dbPath := filepath.Join(t.TempDir(), fmt.Sprintf("dnote-%s.db", uuid))
|
||||
db := InitTestFileDBRaw(t, dbPath)
|
||||
return db, dbPath
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@
|
|||
package sync
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/dnote/dnote/pkg/assert"
|
||||
|
|
@ -26,6 +27,7 @@ import (
|
|||
cliDatabase "github.com/dnote/dnote/pkg/cli/database"
|
||||
clitest "github.com/dnote/dnote/pkg/cli/testutils"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// TestSync_EmptyFragmentPreservesLastMaxUSN verifies that last_max_usn is not reset to 0
|
||||
|
|
@ -71,3 +73,94 @@ func TestSync_EmptyFragmentPreservesLastMaxUSN(t *testing.T) {
|
|||
|
||||
assert.Equal(t, lastMaxUSN, 3, "last_max_usn should be 3 after syncing")
|
||||
}
|
||||
|
||||
// TestSync_ConcurrentInitialSync reproduces the issue where two clients with identical
// local data syncing simultaneously to an empty server results in 500 errors.
//
// This demonstrates the race condition:
//   - Client1 starts sync to empty server, gets empty server state
//   - Client2 syncs.
//   - Client1 tries to create same books → 409 "duplicate"
//   - Client1 tries to create notes with wrong UUIDs → 500 "record not found"
//   - stepSync recovers by renaming local books with _2 suffix
func TestSync_ConcurrentInitialSync(t *testing.T) {
	env := setupTestEnv(t)

	user := setupUserAndLogin(t, env)

	// Step 1: Create local data and sync
	clitest.RunDnoteCmd(t, env.CmdOpts, cliBinaryName, "add", "javascript", "-c", "js note from client1")
	clitest.RunDnoteCmd(t, env.CmdOpts, cliBinaryName, "sync")
	// Baseline sanity check: 1 book + 1 note pushed, so max USN is 2 on both sides.
	checkState(t, env.DB, user, env.ServerDB, systemState{
		clientNoteCount:  1,
		clientBookCount:  1,
		clientLastMaxUSN: 2,
		clientLastSyncAt: serverTime.Unix(),
		serverNoteCount:  1,
		serverBookCount:  1,
		serverUserMaxUSN: 2,
	})

	// Step 2: Switch to new empty server to simulate concurrent initial sync scenario
	switchToEmptyServer(t, &env)
	user = setupUserAndLogin(t, env)

	// Set up client2 with separate database
	client2DB, client2DBPath := cliDatabase.InitTestFileDB(t)
	login(t, client2DB, env.ServerDB, user)
	client2DB.Close() // Close so CLI can access the database

	// Step 3: Client1 syncs to empty server, but during sync Client2 uploads same data
	// This simulates the race condition deterministically: the callback runs while
	// Client1's sync is blocked on its interactive prompt, i.e. after Client1 has
	// fetched the (empty) server state but before it uploads anything.
	raceCallback := func(stdout io.Reader, stdin io.WriteCloser) error {
		// Wait for empty server prompt to ensure Client1 has called GetSyncState
		clitest.MustWaitForPrompt(t, stdout, clitest.PromptEmptyServer)

		// Now Client2 creates the same book and note via CLI (creating the race condition)
		clitest.RunDnoteCmd(t, env.CmdOpts, cliBinaryName, "--dbPath", client2DBPath, "add", "javascript", "-c", "js note from client2")
		clitest.RunDnoteCmd(t, env.CmdOpts, cliBinaryName, "--dbPath", client2DBPath, "sync")

		// User confirms sync
		if _, err := io.WriteString(stdin, "y\n"); err != nil {
			return errors.Wrap(err, "confirming sync")
		}

		return nil
	}

	// Client1 continues sync - will hit 409 conflict, then 500 error, then recover
	clitest.MustWaitDnoteCmd(t, env.CmdOpts, raceCallback, cliBinaryName, "sync")

	// After sync:
	// - Server has 2 books: "javascript" (from client2) and "javascript_2" (from client1 renamed)
	// - Server has 2 notes
	// - Both clients should converge to the same state
	expectedState := systemState{
		clientNoteCount:  2, // both notes
		clientBookCount:  2, // javascript and javascript_2
		clientLastMaxUSN: 4, // 2 books + 2 notes
		clientLastSyncAt: serverTime.Unix(),
		serverNoteCount:  2,
		serverBookCount:  2,
		serverUserMaxUSN: 4,
	}
	checkState(t, env.DB, user, env.ServerDB, expectedState)

	// Client2 syncs again to download client1's data
	clitest.RunDnoteCmd(t, env.CmdOpts, cliBinaryName, "--dbPath", client2DBPath, "sync")
	client2DB = clitest.MustOpenDatabase(t, client2DBPath)
	defer client2DB.Close()

	// Client2 should have converged to the same state as client1
	checkState(t, client2DB, user, env.ServerDB, expectedState)

	// Verify no orphaned notes on server, i.e. notes whose book row was never
	// created because of the mid-sync conflict.
	var orphanedCount int
	if err := env.ServerDB.Raw(`
		SELECT COUNT(*) FROM notes
		WHERE book_uuid NOT IN (SELECT uuid FROM books)
	`).Scan(&orphanedCount).Error; err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, orphanedCount, 0, "server should have no orphaned notes after sync")
}
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ func (a *App) CreateBook(user database.User, name string) (database.Book, error)
|
|||
|
||||
uuid, err := helpers.GenUUID()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return database.Book{}, err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -55,6 +55,7 @@ func (a *App) CreateNote(user database.User, bookUUID, content string, addedOn *
|
|||
|
||||
uuid, err := helpers.GenUUID()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return database.Note{}, err
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -66,9 +66,11 @@ func (a *App) CreateUser(email, password string, passwordConfirmation string) (d
|
|||
|
||||
var count int64
|
||||
if err := tx.Model(&database.User{}).Where("email = ?", email).Count(&count).Error; err != nil {
|
||||
tx.Rollback()
|
||||
return database.User{}, pkgErrors.Wrap(err, "counting user")
|
||||
}
|
||||
if count > 0 {
|
||||
tx.Rollback()
|
||||
return database.User{}, ErrDuplicateEmail
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -22,10 +22,12 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/dnote/dnote/pkg/server/buildinfo"
|
||||
"github.com/dnote/dnote/pkg/server/config"
|
||||
"github.com/dnote/dnote/pkg/server/controllers"
|
||||
"github.com/dnote/dnote/pkg/server/database"
|
||||
"github.com/dnote/dnote/pkg/server/log"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
|
@ -67,6 +69,12 @@ func startCmd(args []string) {
|
|||
}
|
||||
}()
|
||||
|
||||
// Start WAL checkpointing to prevent WAL file from growing unbounded.
|
||||
database.StartWALCheckpointing(app.DB, 5*time.Minute)
|
||||
|
||||
// Start periodic VACUUM to reclaim space and defragment database.
|
||||
database.StartPeriodicVacuum(app.DB, 24*time.Hour)
|
||||
|
||||
ctl := controllers.New(&app)
|
||||
rc := controllers.RouteConfig{
|
||||
WebRoutes: controllers.NewWebRoutes(&app, ctl),
|
||||
|
|
|
|||
|
|
@ -203,11 +203,13 @@ func (b *Books) update(r *http.Request) (database.Book, error) {
|
|||
|
||||
var book database.Book
|
||||
if err := tx.Where("user_id = ? AND uuid = ?", user.ID, uuid).First(&book).Error; err != nil {
|
||||
tx.Rollback()
|
||||
return database.Book{}, pkgErrors.Wrap(err, "finding book")
|
||||
}
|
||||
|
||||
var params updateBookPayload
|
||||
if err := parseRequestData(r, ¶ms); err != nil {
|
||||
tx.Rollback()
|
||||
return database.Book{}, pkgErrors.Wrap(err, "decoding payload")
|
||||
}
|
||||
|
||||
|
|
@ -253,11 +255,13 @@ func (b *Books) del(r *http.Request) (database.Book, error) {
|
|||
|
||||
var book database.Book
|
||||
if err := tx.Where("user_id = ? AND uuid = ?", user.ID, uuid).First(&book).Error; err != nil {
|
||||
tx.Rollback()
|
||||
return database.Book{}, pkgErrors.Wrap(err, "finding a book")
|
||||
}
|
||||
|
||||
var notes []database.Note
|
||||
if err := tx.Where("book_uuid = ? AND NOT deleted", uuid).Order("usn ASC").Find(¬es).Error; err != nil {
|
||||
tx.Rollback()
|
||||
return database.Book{}, pkgErrors.Wrap(err, "finding notes for the book")
|
||||
}
|
||||
|
||||
|
|
@ -270,6 +274,7 @@ func (b *Books) del(r *http.Request) (database.Book, error) {
|
|||
|
||||
book, err := b.app.DeleteBook(tx, *user, book)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return database.Book{}, pkgErrors.Wrap(err, "deleting the book")
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ package database
|
|||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"gorm.io/driver/sqlite"
|
||||
|
|
@ -58,5 +59,67 @@ func Open(dbPath string) *gorm.DB {
|
|||
panic(errors.Wrap(err, "opening database conection"))
|
||||
}
|
||||
|
||||
// Get underlying *sql.DB to configure connection pool
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
panic(errors.Wrap(err, "getting underlying database connection"))
|
||||
}
|
||||
|
||||
// Configure connection pool for SQLite with WAL mode
|
||||
sqlDB.SetMaxOpenConns(25)
|
||||
sqlDB.SetMaxIdleConns(5)
|
||||
sqlDB.SetConnMaxLifetime(0) // Doesn't expire.
|
||||
|
||||
// Apply performance PRAGMAs
|
||||
pragmas := []string{
|
||||
"PRAGMA journal_mode=WAL", // Enable WAL mode for better concurrency
|
||||
"PRAGMA synchronous=NORMAL", // Balance between safety and speed
|
||||
"PRAGMA cache_size=-64000", // 64MB cache (negative = KB)
|
||||
"PRAGMA busy_timeout=5000", // Wait up to 5s for locks
|
||||
"PRAGMA foreign_keys=ON", // Enforce foreign key constraints
|
||||
"PRAGMA temp_store=MEMORY", // Store temp tables in memory
|
||||
}
|
||||
|
||||
for _, pragma := range pragmas {
|
||||
if err := db.Exec(pragma).Error; err != nil {
|
||||
panic(errors.Wrapf(err, "executing pragma: %s", pragma))
|
||||
}
|
||||
}
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
// StartWALCheckpointing starts a background goroutine that periodically
|
||||
// checkpoints the WAL file to prevent it from growing unbounded
|
||||
func StartWALCheckpointing(db *gorm.DB, interval time.Duration) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
// TRUNCATE mode removes the WAL file after checkpointing
|
||||
if err := db.Exec("PRAGMA wal_checkpoint(TRUNCATE)").Error; err != nil {
|
||||
// Log error but don't panic - this is a background maintenance task
|
||||
// TODO: Use proper logging once available
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// StartPeriodicVacuum runs full VACUUM on a schedule to reclaim space and defragment.
|
||||
// WARNING: VACUUM acquires an exclusive lock and blocks all database operations briefly.
|
||||
func StartPeriodicVacuum(db *gorm.DB, interval time.Duration) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
if err := db.Exec("VACUUM").Error; err != nil {
|
||||
// Log error but don't panic - this is a background maintenance task
|
||||
// TODO: Use proper logging once available
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue