All files / crypto lifecycle.ts

91.66% Statements 66/72
76.92% Branches 30/39
72.72% Functions 8/11
92.85% Lines 65/70

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199                                                                                      47x   47x                 47x     3x 3x       3x   3x 2x   3x 2x   3x 2x   3x 3x     47x                 15x 15x       16x                     42x   40x     40x     40x     9x 9x 7x 7x   2x 9x 9x 9x     9x     40x   12x   12x   12x 5x 5x                 12x 12x 12x 9x 9x 9x   3x       3x 3x 1x     1x   2x 2x 2x   2x     1x 1x 1x     1x     40x 40x   40x                   40x 40x   40x 40x 40x 40x    
/**
 * Container boot lifecycle for encrypted stork instances.
 *
 * State machine:
 *   setup   → no stork.keys file; only /api/health and /api/setup are accessible
 *   locked  → stork.keys exists but vault key not yet loaded; 423 on all data routes
 *   unlocked → vault key loaded, database open, sync running
 */
 
import type Database from "better-sqlite3-multiple-ciphers";
import type { Hono } from "hono";
import { openDatabase } from "../storage/db.js";
import { R2Poller } from "../sync/r2-poller.js";
import { SyncScheduler } from "../sync/sync-scheduler.js";
import { keysFileExists } from "./keys.js";
 
/** Lock state of the container — see the state machine in the file header. */
export type ContainerState = "setup" | "locked" | "unlocked";
 
/**
 * Shared mutable context handed to the HTTP app factory. The nullable fields
 * are populated by transitionToUnlocked() once the vault key is loaded.
 */
export interface ContainerContext {
	/** Current lock state */
	state: ContainerState;
	/** Directory holding the keys file and encrypted database. */
	dataDir: string;
	/** Database instance, null until unlocked */
	db: Database.Database | null;
	/** Sync scheduler, null until unlocked */
	scheduler: SyncScheduler | null;
	/** R2 queue poller, null until unlocked */
	r2Poller: R2Poller | null;
	/** Vault key in memory, zeroed after DB open */
	_vaultKeyInMemory: Buffer | null;
}
 
/** Builds the HTTP app around the shared container context. */
export type AppFactory = (context: ContainerContext) => { app: Hono };
 
/** Result of bootContainer(): the HTTP app plus a graceful-shutdown hook. */
export interface BootResult {
	app: Hono;
	/** Stops sync components, closes the database, and exits the process. */
	shutdown: () => Promise<void>;
}
 
/**
 * Boots the container in its initial lock state.
 *
 * Starts "locked" when a stork.keys file already exists in dataDir, otherwise
 * "setup". The db/scheduler/r2Poller fields stay null until
 * transitionToUnlocked() is called.
 *
 * @param dataDir   Directory holding the keys file and database.
 * @param createApp Factory that builds the Hono app around the shared context.
 * @returns The app plus a shutdown function that stops sync components,
 *          closes the database, and exits the process.
 */
export async function bootContainer(
	dataDir: string,
	createApp: (context: ContainerContext) => { app: Hono },
): Promise<BootResult> {
	const initialState: ContainerState = keysFileExists(dataDir) ? "locked" : "setup";

	const context: ContainerContext = {
		state: initialState,
		dataDir,
		db: null,
		scheduler: null,
		r2Poller: null,
		_vaultKeyInMemory: null,
	};

	const { app } = createApp(context);

	async function shutdown(): Promise<void> {
		console.log("\nShutting down...");
		// Safety net: force-exit if graceful cleanup hangs. unref() lets the
		// process exit naturally if cleanup finishes before the timer fires.
		const forceTimer = setTimeout(() => {
			console.error("Graceful shutdown timed out, forcing exit");
			process.exit(1);
		}, 10_000);
		forceTimer.unref();

		// Each step is isolated so a failure in one component cannot skip the
		// remaining cleanup: previously a rejected stop() propagated out of
		// shutdown(), leaving the database open and the force timer armed.
		try {
			if (context.r2Poller) {
				await context.r2Poller.stop();
			}
		} catch (err) {
			console.error("Error stopping R2 poller during shutdown:", err);
		}
		try {
			if (context.scheduler) {
				await context.scheduler.stop();
			}
		} catch (err) {
			console.error("Error stopping sync scheduler during shutdown:", err);
		}
		try {
			if (context.db) {
				context.db.close();
			}
		} catch (err) {
			console.error("Error closing database during shutdown:", err);
		}
		clearTimeout(forceTimer);
		process.exit(0);
	}

	return { app, shutdown };
}
 
/**
 * (Documents transitionToUnlocked, defined below after the logging helpers.)
 * Called after successful unlock: opens the database with the vault key,
 * starts the sync scheduler, transitions to unlocked state.
 */
/** Returns a short HH:MM:SS timestamp for log lines. */
/** Returns a short HH:MM:SS timestamp for log lines. */
function ts(): string {
	const now = new Date();
	const pad = (value: number): string => String(value).padStart(2, "0");
	return [now.getHours(), now.getMinutes(), now.getSeconds()].map(pad).join(":");
}
 
/** Number of consecutive identical errors to show individually before batching. */
const BATCH_SHOW_LIMIT = 3;

/** Tracks a run of consecutive identical sync errors for one connector. */
interface ErrorBatch {
	/** Folder the errors occurred in, or null when not folder-specific. */
	folder: string | null;
	/** Error type tag; a differing type/folder starts a new batch. */
	errorType: string;
	/** Whether the error is retriable — rendered as "(will retry)" vs "(permanent)". */
	retriable: boolean;
	/** Errors already logged individually (capped at BATCH_SHOW_LIMIT). */
	shownCount: number;
	/** Errors counted silently past the cap; reported in one summary line on flush. */
	suppressedCount: number;
}
 
export function transitionToUnlocked(context: ContainerContext, vaultKey: Buffer): void {
	if (context.state === "unlocked") return;
 
	const db = openDatabase("stork.db", context.dataDir, vaultKey);
 
	// Zero vault key immediately after passing to SQLCipher
	vaultKey.fill(0);
 
	// Per-connector batching state for consecutive identical sync errors
	const errorBatches = new Map<number, ErrorBatch>();
 
	function flushErrorBatch(connectorId: number): void {
		const batch = errorBatches.get(connectorId);
		if (!batch || batch.suppressedCount === 0) {
			errorBatches.delete(connectorId);
			return;
		}
		const retry = batch.retriable ? "(will retry)" : "(permanent)";
		const total = batch.shownCount + batch.suppressedCount;
		const folder = batch.folder ? `"${batch.folder}"` : "sync";
		console.error(
			`  ${ts()} [${batch.errorType}] ${folder} — ${total} batches failed, retrying automatically ${retry}`,
		);
		errorBatches.delete(connectorId);
	}
 
	const scheduler = new SyncScheduler(db, {
		onSyncRecordError: (connectorId, err) => {
			const batch = errorBatches.get(connectorId);
			const matchesBatch =
				batch && batch.folder === err.folderPath && batch.errorType === err.errorType;
 
			if (!matchesBatch) {
				flushErrorBatch(connectorId);
				errorBatches.set(connectorId, {
					folder: err.folderPath,
					errorType: err.errorType,
					retriable: err.retriable,
					shownCount: 0,
					suppressedCount: 0,
				});
			}
 
			const current = errorBatches.get(connectorId);
			Iif (current === undefined) return;
			if (current.shownCount < BATCH_SHOW_LIMIT) {
				const retry = err.retriable ? "(will retry)" : "(permanent)";
				console.error(`  ${ts()} [${err.errorType}] ${err.message} ${retry}`);
				current.shownCount++;
			} else {
				current.suppressedCount++;
			}
		},
		onSyncComplete: (connectorId, result) => {
			flushErrorBatch(connectorId);
			if (result.aborted) {
				console.log(
					`${ts()} Sync interrupted for connector ${connectorId}: ${result.totalNew} new (aborted)`,
				);
				return;
			}
			const parts = [`${ts()} Sync complete for connector ${connectorId}: ${result.totalNew} new`];
			Eif (result.totalErrors > 0) {
				parts.push(`${result.totalErrors} errors`);
			}
			console.log(parts.join(", "));
		},
		onSyncError: (connectorId, error) => {
			flushErrorBatch(connectorId);
			const imapErr = error as Error & { responseText?: string; responseStatus?: string };
			const detail = imapErr.responseText
				? `${imapErr.responseStatus ?? "ERROR"}: ${imapErr.responseText}`
				: error.message;
			console.error(`${ts()} Sync failed for connector ${connectorId}: ${detail}`);
		},
	});
	scheduler.loadConnectorsFromDb();
	scheduler.start();
 
	const r2Poller = new R2Poller(db, {
		onPollComplete: (connectorId, stored) => {
			if (stored > 0) {
				console.log(`${ts()} R2 poll complete for connector ${connectorId}: ${stored} new`);
			}
		},
		onPollError: (connectorId, error) => {
			console.error(`${ts()} R2 poll failed for connector ${connectorId}: ${error.message}`);
		},
	});
	r2Poller.loadConnectorsFromDb();
	r2Poller.start();
 
	context.db = db;
	context.scheduler = scheduler;
	context.r2Poller = r2Poller;
	context.state = "unlocked";
}