// screentinker/server/db/database.js
// (file-listing metadata from extraction: 523 lines, 23 KiB, JavaScript)
const Database = require('better-sqlite3');
const fs = require('fs');
const path = require('path');
const config = require('../config');
// Ensure the directory that will hold the SQLite file exists before opening it.
const dbDir = path.dirname(config.dbPath);
if (!fs.existsSync(dbDir)) fs.mkdirSync(dbDir, { recursive: true });
// Single shared synchronous connection for the whole process; exported at the
// bottom of this file and used by every migration below.
const db = new Database(config.dbPath);
// Enable WAL mode and foreign keys.
// WAL allows concurrent readers during writes; foreign_keys is OFF by default
// in SQLite and must be enabled per-connection.
db.pragma('journal_mode = WAL');
db.pragma('foreign_keys = ON');
// Run schema on every boot — presumably idempotent (IF NOT EXISTS style);
// verify against schema.sql. Must run before the migrations below, which
// ALTER the tables it creates.
const schema = fs.readFileSync(path.join(__dirname, 'schema.sql'), 'utf8');
db.exec(schema);
// Auto-apply Phase 1 multi-tenancy migration if not yet applied. Without this
// a self-hoster who pulls latest and restarts hits a crash in
// migrateFolderWorkspaceIds (queries workspaces table that doesn't exist).
// Pre-existing data is snapshotted to db/remote_display.pre-migration-<ts>.db
// before the migration runs - clear restore path on failure. Fresh installs
// run against empty data (creates tables, no rows to backfill).
/**
 * Auto-applies the Phase 1 multi-tenancy migration when the marker row is
 * missing from schema_migrations. Takes a file-level snapshot of the database
 * first so a failed migration has an obvious restore path; exits the process
 * on snapshot or migration failure rather than continuing in a broken state.
 */
function ensureMultitenancyMigration() {
  // Probe the migration ledger. A missing schema_migrations table simply
  // means nothing has been applied yet.
  let alreadyApplied = false;
  try {
    const marker = db
      .prepare("SELECT 1 FROM schema_migrations WHERE id = 'phase5_multitenancy_backfill'")
      .get();
    alreadyApplied = marker !== undefined;
  } catch {
    // schema_migrations may not exist yet; treat as not applied
  }
  if (alreadyApplied) return;

  console.warn('[boot] Multi-tenancy schema not present - applying migration...');

  // Snapshot the database file before touching anything. Checkpoint the WAL
  // first so the copy of the main file contains all committed data.
  const stamp = new Date().toISOString().replace(/[:.]/g, '-');
  const snapshotPath = path.join(dbDir, `remote_display.pre-migration-${stamp}.db`);
  try {
    db.pragma('wal_checkpoint(TRUNCATE)');
    fs.copyFileSync(config.dbPath, snapshotPath);
    console.warn(`[boot] Pre-migration snapshot: ${snapshotPath}`);
  } catch (e) {
    console.error(`[boot] Snapshot failed: ${e.message}`);
    process.exit(1);
  }

  // Delegate the actual schema work to the standalone migration script.
  try {
    const { runMigration } = require('../../scripts/migrate-multitenancy');
    runMigration({ db });
    console.warn('[boot] Migration complete, continuing startup');
  } catch (e) {
    console.error(`[boot] Migration FAILED: ${e.message}`);
    console.error(`[boot] Restore with: cp ${snapshotPath} ${config.dbPath}`);
    process.exit(1);
  }
}
// Note: ensureMultitenancyMigration() is called LATER, after the inline
// migrations array has added team_id and workspace_id columns. The Phase 1
// migration script reads team_id from resource tables during its backfill
// loop, so those columns must exist first. Definition kept here near the
// top so the auto-migration logic is easy to find when reading the file.
// Migrations for existing databases. Every entry is raw SQL attempted once
// per boot by the loop below; an ALTER that already ran fails with
// "duplicate column" and is swallowed, and the CREATEs are guarded with
// IF NOT EXISTS, so the list is effectively idempotent. Append-only — never
// reorder or edit entries that have shipped.
const migrations = [
  'ALTER TABLE content ADD COLUMN remote_url TEXT',
  'ALTER TABLE devices ADD COLUMN user_id TEXT REFERENCES users(id)',
  'ALTER TABLE content ADD COLUMN user_id TEXT REFERENCES users(id)',
  "ALTER TABLE users ADD COLUMN plan_id TEXT DEFAULT 'free'",
  'ALTER TABLE users ADD COLUMN stripe_customer_id TEXT',
  'ALTER TABLE users ADD COLUMN stripe_subscription_id TEXT',
  "ALTER TABLE users ADD COLUMN subscription_status TEXT DEFAULT 'active'",
  'ALTER TABLE users ADD COLUMN subscription_ends INTEGER',
  // Layout & zone support on devices and assignments
  'ALTER TABLE devices ADD COLUMN layout_id TEXT',
  'ALTER TABLE devices ADD COLUMN timezone TEXT DEFAULT \'UTC\'',
  'ALTER TABLE devices ADD COLUMN wall_id TEXT',
  'ALTER TABLE devices ADD COLUMN team_id TEXT',
  'ALTER TABLE assignments ADD COLUMN zone_id TEXT',
  'ALTER TABLE assignments ADD COLUMN widget_id TEXT',
  // Team support on content
  'ALTER TABLE content ADD COLUMN team_id TEXT',
  // Device notes
  'ALTER TABLE devices ADD COLUMN notes TEXT',
  // Email settings on users
  "ALTER TABLE users ADD COLUMN email_alerts INTEGER DEFAULT 1",
  // Content folders (legacy flat TEXT label; superseded by the hierarchical
  // content_folders table further down)
  'ALTER TABLE content ADD COLUMN folder TEXT',
  // Device orientation and default content
  "ALTER TABLE devices ADD COLUMN orientation TEXT DEFAULT 'landscape'",
  'ALTER TABLE devices ADD COLUMN default_content_id TEXT',
  // Audio control per assignment
  "ALTER TABLE assignments ADD COLUMN muted INTEGER DEFAULT 0",
  // Trial tracking
  "ALTER TABLE users ADD COLUMN trial_started INTEGER",
  "ALTER TABLE users ADD COLUMN trial_plan TEXT DEFAULT 'pro'",
  // Stripe price IDs on plans
  "ALTER TABLE plans ADD COLUMN stripe_price_monthly TEXT",
  "ALTER TABLE plans ADD COLUMN stripe_price_yearly TEXT",
  // Last login tracking
  "ALTER TABLE users ADD COLUMN last_login INTEGER",
  // Phase 2: every device gets a playlist, schedules can override with a playlist
  "ALTER TABLE devices ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  "ALTER TABLE schedules ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  "ALTER TABLE playlists ADD COLUMN is_auto_generated INTEGER NOT NULL DEFAULT 0",
  // Device authentication token
  "ALTER TABLE devices ADD COLUMN device_token TEXT",
  // Phase 3: playlist publish/draft state
  "ALTER TABLE playlists ADD COLUMN status TEXT NOT NULL DEFAULT 'draft'",
  "ALTER TABLE playlists ADD COLUMN published_snapshot TEXT",
  // Phase 4: group scheduling (column add only — full migration with CHECK below)
  "ALTER TABLE schedules ADD COLUMN group_id TEXT REFERENCES device_groups(id) ON DELETE SET NULL",
  // Hierarchical content folders (per-user)
  `CREATE TABLE IF NOT EXISTS content_folders (
    id TEXT PRIMARY KEY,
    user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    parent_id TEXT REFERENCES content_folders(id) ON DELETE CASCADE,
    name TEXT NOT NULL,
    created_at INTEGER NOT NULL DEFAULT (strftime('%s','now'))
  )`,
  "CREATE INDEX IF NOT EXISTS idx_content_folders_user ON content_folders(user_id, parent_id)",
  "ALTER TABLE content ADD COLUMN folder_id TEXT REFERENCES content_folders(id) ON DELETE SET NULL",
  "CREATE INDEX IF NOT EXISTS idx_content_folder ON content(folder_id)",
  // Group-level playlist: when set, devices added to the group inherit it.
  "ALTER TABLE device_groups ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  // Wall-level playlist: video walls now play a playlist (not just one content).
  "ALTER TABLE video_walls ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  // Free-form canvas layout: walls store a player rect; member devices store
  // their own rect. Coordinates are in arbitrary canvas units (effectively px).
  "ALTER TABLE video_walls ADD COLUMN player_x REAL",
  "ALTER TABLE video_walls ADD COLUMN player_y REAL",
  "ALTER TABLE video_walls ADD COLUMN player_width REAL",
  "ALTER TABLE video_walls ADD COLUMN player_height REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_x REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_y REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_width REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_height REAL",
  // Phase 2.2c: content_folders gets workspace_id. Phase 1 missed this table.
  "ALTER TABLE content_folders ADD COLUMN workspace_id TEXT REFERENCES workspaces(id)",
  "CREATE INDEX IF NOT EXISTS idx_content_folders_workspace ON content_folders(workspace_id)",
];
// Apply each migration. Errors that are expected on an already-migrated
// database ("duplicate column name" from a re-run ALTER, "already exists"
// from a CREATE) are ignored; anything else is surfaced as a warning so a
// genuinely broken statement (typo, missing table) is not silently hidden.
// Still non-fatal either way: later entries must get their chance to run.
for (const sql of migrations) {
  try {
    db.exec(sql);
  } catch (e) {
    if (!/duplicate column name|already exists/i.test(e.message)) {
      console.warn(`[migrations] statement failed (${e.message}): ${sql.slice(0, 80)}`);
    }
  }
}
// Fix assignments table: make content_id nullable. SQLite cannot relax a
// NOT NULL constraint in place, so the table is rebuilt: create the new
// shape, copy rows (explicit column list matches assignments_new's column
// order positionally), drop the old table, rename. Only runs when the live
// table still carries the old constraint.
try {
  const colInfo = db.prepare("PRAGMA table_info(assignments)").all();
  const contentCol = colInfo.find(c => c.name === 'content_id');
  if (contentCol && contentCol.notnull === 1) {
    console.log('Migrating assignments table: making content_id nullable...');
    // Run the whole rebuild atomically: without a transaction, a crash
    // between DROP TABLE and RENAME would leave the database with no
    // assignments table at all.
    db.transaction(() => {
      db.exec(`
        CREATE TABLE assignments_new (
          id INTEGER PRIMARY KEY AUTOINCREMENT,
          device_id TEXT NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
          content_id TEXT REFERENCES content(id) ON DELETE CASCADE,
          widget_id TEXT REFERENCES widgets(id) ON DELETE CASCADE,
          zone_id TEXT,
          sort_order INTEGER NOT NULL DEFAULT 0,
          duration_sec INTEGER NOT NULL DEFAULT 10,
          schedule_start TEXT,
          schedule_end TEXT,
          schedule_days TEXT,
          enabled INTEGER NOT NULL DEFAULT 1,
          muted INTEGER DEFAULT 0,
          created_at INTEGER NOT NULL DEFAULT (strftime('%s','now'))
        );
        INSERT INTO assignments_new SELECT id, device_id, content_id, widget_id, zone_id, sort_order, duration_sec, schedule_start, schedule_end, schedule_days, enabled, muted, created_at FROM assignments;
        DROP TABLE assignments;
        ALTER TABLE assignments_new RENAME TO assignments;
      `);
    })();
    console.log('Assignments table migrated successfully.');
  }
} catch (e) {
  // Non-fatal: log and continue startup; the old NOT NULL table keeps working.
  console.error('Assignments migration error:', e.message);
}
// Phase 2 migration: convert existing assignments into per-device playlists
const MIGRATION_ID = 'phase2_playlist_migration';

/**
 * One-shot migration from legacy per-device `assignments` rows to the
 * playlist model: every claimed device with assignments gets an
 * auto-generated playlist mirroring its enabled assignments in sort order,
 * and the device is pointed at it.
 *
 * Video durations are probed with ffprobe BEFORE the inserts, because
 * better-sqlite3 transactions are synchronous and cannot contain an await.
 * Recorded in schema_migrations so it runs at most once.
 */
async function migrateAssignmentsToPlaylists() {
  // Skip if already ran (tracked in schema_migrations table)
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(MIGRATION_ID);
  if (already) return;
  const { v4: uuidv4 } = require('uuid');
  const { execFile } = require('child_process');
  // Find claimed devices that have at least one assignment
  const devicesWithAssignments = db.prepare(`
    SELECT DISTINCT d.id, d.name, d.user_id
    FROM devices d
    INNER JOIN assignments a ON a.device_id = d.id
    WHERE d.user_id IS NOT NULL
  `).all();
  if (devicesWithAssignments.length === 0) {
    // Nothing to migrate. On a genuinely fresh install (no legacy rows at
    // all) record the marker — consistent with migratePublishSnapshots — so
    // subsequent boots skip this scan entirely. If assignments DO exist but
    // belong to unclaimed devices (user_id IS NULL), leave the marker unset
    // so a later boot can still migrate them once the device is claimed.
    const legacyCount = db.prepare('SELECT COUNT(*) AS n FROM assignments').get().n;
    if (legacyCount === 0) {
      db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(MIGRATION_ID);
    }
    return;
  }
  console.log(`Migrating ${devicesWithAssignments.length} device(s) from assignments to playlists...`);
  // Async ffprobe — matches the pattern in playlists.js probeAndUpdateDuration.
  // Returns a whole-second duration for video content (caching the raw value
  // back onto the content row), or null when it cannot be determined.
  async function probeVideoDuration(content) {
    if (!content || !content.mime_type || !content.mime_type.startsWith('video/')) return null;
    if (content.duration_sec) return Math.ceil(content.duration_sec);
    if (!content.filepath) return null;
    try {
      const fullPath = path.join(config.contentDir, content.filepath);
      const stdout = await new Promise((resolve, reject) => {
        execFile('ffprobe', [
          '-v', 'quiet', '-print_format', 'json', '-show_format', fullPath
        ], { timeout: 15000 }, (err, out) => err ? reject(err) : resolve(out));
      });
      const info = JSON.parse(stdout);
      if (info.format?.duration) {
        const dur = parseFloat(info.format.duration);
        db.prepare('UPDATE content SET duration_sec = ? WHERE id = ?').run(dur, content.id);
        return Math.ceil(dur);
      }
    } catch (e) {
      // Best-effort: a probe failure just means the assignment's own
      // duration_sec is used instead.
      console.warn(` ffprobe failed for ${content.id}:`, e.message);
    }
    return null;
  }
  const getAssignments = db.prepare(`
    SELECT a.content_id, a.widget_id, a.sort_order, a.duration_sec,
      c.mime_type, c.filepath, c.duration_sec as content_duration
    FROM assignments a
    LEFT JOIN content c ON a.content_id = c.id
    WHERE a.device_id = ? AND a.enabled = 1
    ORDER BY a.sort_order ASC
  `);
  // Probe durations outside the transaction (async ffprobe can't run inside a
  // SQLite transaction).
  const devicePlaylists = [];
  let videosProbed = 0;
  let totalItems = 0;
  for (const device of devicesWithAssignments) {
    const playlistId = uuidv4();
    const assignments = getAssignments.all(device.id);
    const items = [];
    for (const a of assignments) {
      let duration = a.duration_sec;
      if (a.content_id && a.mime_type?.startsWith('video/')) {
        const probed = await probeVideoDuration({ id: a.content_id, mime_type: a.mime_type, filepath: a.filepath, duration_sec: a.content_duration });
        if (probed) { duration = probed; videosProbed++; }
      }
      items.push({ content_id: a.content_id, widget_id: a.widget_id, sort_order: a.sort_order, duration_sec: duration });
      totalItems++;
    }
    devicePlaylists.push({ device, playlistId, items });
  }
  // Insert everything in a single transaction
  const insertPlaylist = db.prepare(`INSERT INTO playlists (id, user_id, name, description, is_auto_generated) VALUES (?, ?, ?, ?, 1)`);
  const insertItem = db.prepare(`INSERT INTO playlist_items (playlist_id, content_id, widget_id, sort_order, duration_sec) VALUES (?, ?, ?, ?, ?)`);
  const setDevicePlaylist = db.prepare('UPDATE devices SET playlist_id = ? WHERE id = ?');
  const migrate = db.transaction(() => {
    for (const { device, playlistId, items } of devicePlaylists) {
      insertPlaylist.run(playlistId, device.user_id, `${device.name} (migrated)`, 'Auto-generated from previous assignments');
      for (const item of items) {
        insertItem.run(playlistId, item.content_id || null, item.widget_id || null, item.sort_order, item.duration_sec);
      }
      setDevicePlaylist.run(playlistId, device.id);
    }
  });
  migrate();
  // Record that this migration has run
  db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(MIGRATION_ID);
  const scheduleCount = db.prepare('SELECT COUNT(*) as count FROM schedules').get().count;
  console.log(`Migration complete: ${devicesWithAssignments.length} device(s), ${totalItems} playlist item(s), ${videosProbed} video(s) probed, ${scheduleCount} schedule(s).`);
}
// Fire-and-forget: module consumers get the db handle immediately; the
// migration completes asynchronously and logs any failure.
migrateAssignmentsToPlaylists().catch(e => console.error('Migration error:', e));
// Phase 3 migration: snapshot existing playlist items into published_snapshot
const PHASE3_MIGRATION_ID = 'phase3_publish_snapshot';

/**
 * One-shot: mark every pre-existing playlist as published, storing its
 * current item list (joined with content/widget metadata) as a JSON snapshot
 * in playlists.published_snapshot. Recorded in schema_migrations either way
 * so it never scans twice.
 */
function migratePublishSnapshots() {
  const done = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE3_MIGRATION_ID);
  if (done) return;

  const allPlaylists = db.prepare('SELECT id FROM playlists').all();
  if (allPlaylists.length === 0) {
    // Fresh install: just record the marker so this scan never repeats.
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE3_MIGRATION_ID);
    return;
  }

  console.log(`Phase 3 migration: snapshotting ${allPlaylists.length} playlist(s) as published...`);
  const getItems = db.prepare(`
    SELECT pi.content_id, pi.widget_id, pi.sort_order, pi.duration_sec,
      COALESCE(c.filename, w.name) as filename, c.mime_type, c.filepath, c.file_size,
      c.duration_sec as content_duration, c.remote_url,
      w.name as widget_name, w.widget_type, w.config as widget_config
    FROM playlist_items pi
    LEFT JOIN content c ON pi.content_id = c.id
    LEFT JOIN widgets w ON pi.widget_id = w.id
    WHERE pi.playlist_id = ?
    ORDER BY pi.sort_order ASC
  `);
  const updatePlaylist = db.prepare("UPDATE playlists SET status = 'published', published_snapshot = ? WHERE id = ?");

  // Snapshots plus the marker row commit atomically.
  const applyAll = db.transaction(() => {
    let snapshotted = 0;
    allPlaylists.forEach((playlist) => {
      updatePlaylist.run(JSON.stringify(getItems.all(playlist.id)), playlist.id);
      snapshotted += 1;
    });
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE3_MIGRATION_ID);
    console.log(`Phase 3 migration complete: ${snapshotted} playlist(s) snapshotted as published.`);
  });
  applyAll();
}
migratePublishSnapshots();
// Phase 4 migration: add group_id to schedules, make device_id nullable, add CHECK constraint
const PHASE4_MIGRATION_ID = 'phase4_group_schedules';
/**
 * One-shot table rebuild. SQLite cannot relax NOT NULL or add a table-level
 * CHECK in place, so schedules is recreated with:
 *   - device_id nullable,
 *   - group_id (ON DELETE SET NULL),
 *   - a CHECK enforcing exactly one of device_id / group_id is set.
 * Rows are copied with group_id left NULL, which assumes every legacy row has
 * a non-NULL device_id — otherwise the CHECK aborts the transaction
 * (TODO confirm against the pre-Phase-4 schema). Indexes are recreated
 * because DROP TABLE removes them. The rebuild and the schema_migrations
 * marker commit atomically, so a failure leaves the old table untouched.
 */
function migrateGroupSchedules() {
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE4_MIGRATION_ID);
  if (already) return;
  console.log('Phase 4 migration: adding group_id to schedules, making device_id nullable...');
  const migrate = db.transaction(() => {
    db.exec(`
      CREATE TABLE schedules_new (
        id TEXT PRIMARY KEY,
        user_id TEXT NOT NULL REFERENCES users(id),
        device_id TEXT REFERENCES devices(id) ON DELETE CASCADE,
        group_id TEXT REFERENCES device_groups(id) ON DELETE SET NULL,
        zone_id TEXT REFERENCES layout_zones(id) ON DELETE CASCADE,
        content_id TEXT REFERENCES content(id) ON DELETE CASCADE,
        widget_id TEXT REFERENCES widgets(id) ON DELETE CASCADE,
        layout_id TEXT REFERENCES layouts(id) ON DELETE SET NULL,
        playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL,
        title TEXT NOT NULL DEFAULT '',
        start_time TEXT NOT NULL,
        end_time TEXT NOT NULL,
        timezone TEXT NOT NULL DEFAULT 'UTC',
        recurrence TEXT,
        recurrence_end TEXT,
        priority INTEGER NOT NULL DEFAULT 0,
        enabled INTEGER NOT NULL DEFAULT 1,
        color TEXT DEFAULT '#3B82F6',
        created_at INTEGER NOT NULL DEFAULT (strftime('%s','now')),
        updated_at INTEGER NOT NULL DEFAULT (strftime('%s','now')),
        CHECK ((device_id IS NOT NULL AND group_id IS NULL) OR (device_id IS NULL AND group_id IS NOT NULL))
      );
      INSERT INTO schedules_new (id, user_id, device_id, zone_id, content_id, widget_id, layout_id, playlist_id,
        title, start_time, end_time, timezone, recurrence, recurrence_end, priority, enabled, color, created_at, updated_at)
      SELECT id, user_id, device_id, zone_id, content_id, widget_id, layout_id, playlist_id,
        title, start_time, end_time, timezone, recurrence, recurrence_end, priority, enabled, color, created_at, updated_at
      FROM schedules;
      DROP TABLE schedules;
      ALTER TABLE schedules_new RENAME TO schedules;
      CREATE INDEX idx_schedules_device ON schedules(device_id, enabled);
      CREATE INDEX idx_schedules_group ON schedules(group_id, enabled);
    `);
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE4_MIGRATION_ID);
    console.log('Phase 4 migration complete: schedules table rebuilt with group_id support.');
  });
  migrate();
}
migrateGroupSchedules();
// Phase 1 multi-tenancy migration (auto-applies if not yet run). Must come
// AFTER the inline migrations above so that team_id / workspace_id columns
// exist on resource tables - the Phase 1 backfill loop reads team_id and
// updates workspace_id. The function itself is defined near the top of this
// file; see the comment there for its snapshot/restore behaviour.
ensureMultitenancyMigration();
// Phase 2.2c migration: backfill content_folders.workspace_id from owner's
// default workspace. The ALTER lives in the migrations array above; this
// one-shot populates the column for any rows that pre-date it.
const PHASE6_MIGRATION_ID = 'phase6_content_folders_workspace';

/**
 * One-shot backfill: every legacy folder row inherits the workspace its owner
 * joined first. Skips WITHOUT recording the marker when multi-tenancy tables
 * or the workspace_id column are absent, so a later boot retries.
 */
function migrateFolderWorkspaceIds() {
  if (db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE6_MIGRATION_ID)) return;

  // Belt-and-suspenders: bail out cleanly when the multi-tenancy tables are
  // absent (auto-runner somehow skipped) instead of crashing on the JOIN below.
  const workspacesTable = db
    .prepare("SELECT 1 FROM sqlite_master WHERE type='table' AND name='workspaces'")
    .get();
  if (!workspacesTable) {
    console.warn('migrateFolderWorkspaceIds: workspaces table missing, skipping');
    return;
  }

  // Defensive: confirm the ALTER above actually added the column before
  // attempting the UPDATE; skip rather than throw if it did not.
  const folderColumns = db.prepare("PRAGMA table_info(content_folders)").all();
  const hasWorkspaceColumn = folderColumns.some((col) => col.name === 'workspace_id');
  if (!hasWorkspaceColumn) {
    console.warn('Phase 2.2c migration: content_folders.workspace_id column missing, skipping backfill');
    return;
  }

  const backfill = db.prepare(`
    UPDATE content_folders SET workspace_id = (
      SELECT w.id FROM workspaces w
      JOIN workspace_members wm ON wm.workspace_id = w.id
      WHERE wm.user_id = content_folders.user_id
      ORDER BY wm.joined_at ASC LIMIT 1
    )
    WHERE workspace_id IS NULL AND user_id IS NOT NULL
  `);
  // The UPDATE and the marker row commit atomically.
  const runBackfill = db.transaction(() => {
    const { changes } = backfill.run();
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE6_MIGRATION_ID);
    return changes;
  });
  const updated = runBackfill();
  if (updated > 0) console.log(`Phase 2.2c migration: backfilled workspace_id on ${updated} content_folders row(s).`);
}
migrateFolderWorkspaceIds();
const PHASE_2_2_ACTIVITY_STOP_ID = 'phase_2_2_activity_log_stop_bleeding';
// One-time backfill of activity_log rows that were written between the
// Phase 1 schema migration and the writer-leak fix in this commit. Strategy:
// * Rows with device_id: derive workspace_id from devices.workspace_id
//   (the activity is about a specific device, so this is unambiguous).
// * Rows with no device_id but a user_id: derive from the user's oldest
//   workspace_members row (pre-flight confirmed 0 affected users have
//   more than one workspace, so the choice is unambiguous).
// Rows with user_id IS NULL (auth:login_failed and similar pre-tenancy
// system events) are left alone - they have no tenant context.
function backfillActivityLogWorkspace() {
  // One-shot: the schema_migrations marker makes this idempotent across boots.
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE_2_2_ACTIVITY_STOP_ID);
  if (already) return;
  // Belt-and-suspenders: if multi-tenancy tables aren't present (auto-runner
  // somehow skipped), skip cleanly instead of crashing on workspace_members.
  // The marker is deliberately NOT recorded on this path, so the backfill
  // retries on the next boot once the tables exist.
  const hasMembers = db.prepare(
    "SELECT 1 FROM sqlite_master WHERE type='table' AND name='workspace_members'"
  ).get();
  if (!hasMembers) {
    console.warn('backfillActivityLogWorkspace: workspace_members table missing, skipping');
    return;
  }
  // Case 1: device-scoped rows inherit the device's workspace. The EXISTS
  // guard skips devices whose own workspace_id is still NULL — otherwise the
  // correlated subquery would write NULL over NULL and inflate `changes`.
  const viaDevice = db.prepare(`
    UPDATE activity_log SET workspace_id = (
      SELECT workspace_id FROM devices WHERE devices.id = activity_log.device_id
    )
    WHERE workspace_id IS NULL AND device_id IS NOT NULL
      AND EXISTS (SELECT 1 FROM devices WHERE devices.id = activity_log.device_id AND devices.workspace_id IS NOT NULL)
  `);
  // Case 2: user-scoped rows (no device) inherit the user's oldest workspace
  // membership; the EXISTS guard skips users with no membership rows at all.
  const viaMembers = db.prepare(`
    UPDATE activity_log SET workspace_id = (
      SELECT wm.workspace_id FROM workspace_members wm
      WHERE wm.user_id = activity_log.user_id
      ORDER BY wm.joined_at ASC LIMIT 1
    )
    WHERE workspace_id IS NULL AND user_id IS NOT NULL AND device_id IS NULL
      AND EXISTS (SELECT 1 FROM workspace_members wm WHERE wm.user_id = activity_log.user_id)
  `);
  // Both updates plus the marker commit atomically.
  const tx = db.transaction(() => {
    const d = viaDevice.run().changes;
    const m = viaMembers.run().changes;
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE_2_2_ACTIVITY_STOP_ID);
    return { d, m };
  });
  const { d, m } = tx();
  if (d + m > 0) console.log(`activity_log backfill: ${d} via device.workspace_id, ${m} via workspace_members lookup`);
}
backfillActivityLogWorkspace();
// Prune old telemetry (keep last 24h worth at 15s intervals = ~5760, cap at 6000)

/**
 * Delete all but the newest `keep` telemetry rows for one device.
 * @param {string} deviceId - devices.id whose telemetry is pruned.
 * @param {number} [keep=6000] - how many of the most recent rows (by
 *   reported_at) to retain; default matches the historical hard-coded cap.
 */
function pruneTelemetry(deviceId, keep = 6000) {
  // LIMIT is bound as a parameter so callers can tune retention per device.
  db.prepare(`
    DELETE FROM device_telemetry
    WHERE device_id = ? AND id NOT IN (
      SELECT id FROM device_telemetry
      WHERE device_id = ?
      ORDER BY reported_at DESC LIMIT ?
    )
  `).run(deviceId, deviceId, keep);
}
// Prune old screenshots (keep only latest per device)

/**
 * Delete all but the newest `keep` screenshot rows for a device, removing the
 * underlying image files as well. A failure to unlink one file is logged and
 * skipped so the remaining files and the DB rows still get cleaned up.
 * @param {string} deviceId - devices.id whose screenshots are pruned.
 * @param {number} [keep=1] - how many of the most recent screenshots
 *   (by captured_at) to retain; default matches historical behavior.
 */
function pruneScreenshots(deviceId, keep = 1) {
  const stale = db.prepare(`
    SELECT filepath FROM screenshots
    WHERE device_id = ? AND id NOT IN (
      SELECT id FROM screenshots WHERE device_id = ? ORDER BY captured_at DESC LIMIT ?
    )
  `).all(deviceId, deviceId, keep);
  for (const row of stale) {
    const fullPath = path.join(config.screenshotsDir, row.filepath);
    try {
      // Unlink directly rather than existsSync-then-unlink: avoids the TOCTOU
      // race, and a missing file simply means it was already removed.
      fs.unlinkSync(fullPath);
    } catch (e) {
      if (e.code !== 'ENOENT') {
        console.warn(`pruneScreenshots: could not delete ${fullPath}: ${e.message}`);
      }
    }
  }
  // Always prune the DB rows, even if some file deletions failed above.
  db.prepare(`
    DELETE FROM screenshots
    WHERE device_id = ? AND id NOT IN (
      SELECT id FROM screenshots WHERE device_id = ? ORDER BY captured_at DESC LIMIT ?
    )
  `).run(deviceId, deviceId, keep);
}
// Public API: the shared connection plus the per-device maintenance helpers.
module.exports = { db, pruneTelemetry, pruneScreenshots };