Phase 2 (assignments -> playlist_items) dropped zone_id during the conversion: migrateAssignmentsToPlaylists INSERTed only (playlist_id, content_id, widget_id, sort_order, duration_sec), and the new playlist_items DDL omitted the zone_id column entirely. Every write path on top of playlist_items inherited that omission, and the multi-zone layout assignment feature stopped working. The frontend always sent zone_id correctly (device-detail.js:1015 and :1072; POST and PUT both include it, and api.addAssignment / api.updateAssignment forward the body verbatim); the server silently dropped it. The assignments.js PUT route was the most direct evidence: it destructured zone_id from req.body but never added it to the updates array.

Schema:
- schema.sql: add zone_id TEXT REFERENCES layout_zones(id) ON DELETE SET NULL to the fresh-install DDL.
- database.js migrations[]: add an idempotent ALTER TABLE for existing installs (the surrounding try/catch loop handles the duplicate-column error).

Backfill (new gated migration phase2_zone_id_backfill):
- Pre-migration snapshot copied to db/remote_display.pre-zone-id-backfill-<ts>.db (a one-off for this migration; the general every-migration-snapshot framework is a separate concern, not built here).
- Best-effort UPDATE of playlist_items.zone_id from surviving assignments rows via a device.playlist_id + content_id/widget_id match, LIMIT 1 for the multi-match edge case.
- Regenerates published_snapshot for every published playlist so the JSON the player consumes carries zone_id going forward. Even with zero rows backfilled (the common case post-Phase-2 cleanup), this closes the snapshot-staleness gap.
- Stamps schema_migrations regardless, so it won't re-run on next boot.
- On the live local DB: 0 playlist_items backfilled, 18 published_snapshots regenerated. On the April 13 prod fixture (sandboxed copy): 0 backfilled, 7 regenerated. Expected, and it matches our pre-flight finding that assignments was effectively scrubbed of zone_id everywhere.

Route wiring (7 sites + 1 shared constant):
- assignments.js ITEM_SELECT: project pi.zone_id (the read path; the frontend display at device-detail.js:500 surfaces the value).
- assignments.js POST INSERT: include the zone_id column + value.
- assignments.js PUT: actually use the already-destructured zone_id in the updates allow-list. Treats undefined as "no change", so a PUT that omits zone_id leaves the existing value intact; any explicit value (including null) is written. (See the sketch after this message.)
- assignments.js copy-to INSERT: preserve a.zone_id during device-to-device playlist copy.
- playlists.js buildSnapshotItems: project pi.zone_id so the snapshot JSON carries it. This is what the player's renderZones loop reads (player/index.html:1338 matches a.zone_id === zone.id).
- playlists.js discard-revert INSERT: restore zone_id from the snapshot item on revert.

Out of scope (verified safe by SQL semantics + UI inspection):
- playlists.js POST item-add and PUT item-update on the playlist-detail surface: the UI there doesn't expose zone editing, and their SQL leaves zone_id NULL on insert / untouched on update. No regression.
- Other playlists.js SELECT projections (lines 141, 190, 240, 265, 334, 379, 419) all use SELECT pi.* and pick up zone_id automatically once the column exists.
- The kiosk-page assign at device-detail.js:1027 doesn't send zone_id; that's a separate pre-existing gap, not part of this regression.

Tests (all local, no push, no prod deploy):
- Migration boot on the live local DB: clean and idempotent (a second boot skips the gated function).
- Migration boot on the April 13 prod fixture (sandboxed copy at /tmp/zone-fix-fixtures/test-run.db): cleanly runs the full migration stack (multi-tenancy + 5 other phases the fixture predated), then the new zone_id backfill. Live local DB untouched.
- 8 SQL-level route behavior tests pass: INSERT stores zone_id; PUT changes and clears zone_id; the ITEM_SELECT and buildSnapshotItems projections include zone_id; copy-to preserves it; discard-revert restores it from the snapshot JSON; an undefined zone_id in a PUT leaves the existing value intact.

Not verified: end-to-end multi-zone playback on a real device. The SQL + snapshot JSON layer is correct (the player consumes playlist.find(a => a.zone_id === zone.id) and now gets the right zone_id back from the snapshot); confirming render-to-correct-zone on actual hardware is the next step before a prod deploy.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
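Note: the PUT allow-list semantics referenced above ("undefined = no change; any explicit value, including null, is written") amount to the following minimal sketch. The variable names and route shape are illustrative only, not the actual assignments.js code:

    // Hypothetical sketch of the zone_id allow-list handling in a PUT route.
    const { zone_id } = req.body;
    const updates = [];
    const params = [];
    if (zone_id !== undefined) {   // key omitted entirely -> leave stored value alone
      updates.push('zone_id = ?');
      params.push(zone_id);        // explicit null is written, clearing the zone
    }
    if (updates.length > 0) {
      db.prepare(`UPDATE playlist_items SET ${updates.join(', ')} WHERE id = ?`)
        .run(...params, req.params.id);
    }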
631 lines
28 KiB
JavaScript
const Database = require('better-sqlite3');
const fs = require('fs');
const path = require('path');
const config = require('../config');

const dbDir = path.dirname(config.dbPath);
if (!fs.existsSync(dbDir)) fs.mkdirSync(dbDir, { recursive: true });

const db = new Database(config.dbPath);

// Enable WAL mode and foreign keys
db.pragma('journal_mode = WAL');
db.pragma('foreign_keys = ON');

// Run schema
const schema = fs.readFileSync(path.join(__dirname, 'schema.sql'), 'utf8');
db.exec(schema);

// Auto-apply Phase 1 multi-tenancy migration if not yet applied. Without this
// a self-hoster who pulls latest and restarts hits a crash in
// migrateFolderWorkspaceIds (queries workspaces table that doesn't exist).
// Pre-existing data is snapshotted to db/remote_display.pre-migration-<ts>.db
// before the migration runs - clear restore path on failure. Fresh installs
// run against empty data (creates tables, no rows to backfill).
function ensureMultitenancyMigration() {
  let applied = false;
  try {
    applied = !!db.prepare(
      "SELECT 1 FROM schema_migrations WHERE id = 'phase5_multitenancy_backfill'"
    ).get();
  } catch { /* schema_migrations may not exist yet; treat as not applied */ }
  if (applied) return;

  console.warn('[boot] Multi-tenancy schema not present - applying migration...');
  const ts = new Date().toISOString().replace(/[:.]/g, '-');
  const snapshotPath = path.join(dbDir, `remote_display.pre-migration-${ts}.db`);
  try {
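    // wal_checkpoint(TRUNCATE) folds any pending WAL frames back into the main
    // DB file, so the plain file copy below is a complete, consistent snapshot.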
    db.pragma('wal_checkpoint(TRUNCATE)');
    fs.copyFileSync(config.dbPath, snapshotPath);
    console.warn(`[boot] Pre-migration snapshot: ${snapshotPath}`);
  } catch (e) {
    console.error(`[boot] Snapshot failed: ${e.message}`);
    process.exit(1);
  }

  try {
    const { runMigration } = require('../../scripts/migrate-multitenancy');
    runMigration({ db });
    console.warn('[boot] Migration complete, continuing startup');
  } catch (e) {
    console.error(`[boot] Migration FAILED: ${e.message}`);
    console.error(`[boot] Restore with: cp ${snapshotPath} ${config.dbPath}`);
    process.exit(1);
  }
}

// Note: ensureMultitenancyMigration() is called LATER, after the inline
// migrations array has added team_id and workspace_id columns. The Phase 1
// migration script reads team_id from resource tables during its backfill
// loop, so those columns must exist first. Definition kept here near the
// top so the auto-migration logic is easy to find when reading the file.

// Migrations for existing databases
const migrations = [
  'ALTER TABLE content ADD COLUMN remote_url TEXT',
  'ALTER TABLE devices ADD COLUMN user_id TEXT REFERENCES users(id)',
  'ALTER TABLE content ADD COLUMN user_id TEXT REFERENCES users(id)',
  "ALTER TABLE users ADD COLUMN plan_id TEXT DEFAULT 'free'",
  'ALTER TABLE users ADD COLUMN stripe_customer_id TEXT',
  'ALTER TABLE users ADD COLUMN stripe_subscription_id TEXT',
  "ALTER TABLE users ADD COLUMN subscription_status TEXT DEFAULT 'active'",
  'ALTER TABLE users ADD COLUMN subscription_ends INTEGER',
  // Layout & zone support on devices and assignments
  'ALTER TABLE devices ADD COLUMN layout_id TEXT',
  "ALTER TABLE devices ADD COLUMN timezone TEXT DEFAULT 'UTC'",
  'ALTER TABLE devices ADD COLUMN wall_id TEXT',
  'ALTER TABLE devices ADD COLUMN team_id TEXT',
  'ALTER TABLE assignments ADD COLUMN zone_id TEXT',
  'ALTER TABLE assignments ADD COLUMN widget_id TEXT',
  // Team support on content
  'ALTER TABLE content ADD COLUMN team_id TEXT',
  // Device notes
  'ALTER TABLE devices ADD COLUMN notes TEXT',
  // Email settings on users
  "ALTER TABLE users ADD COLUMN email_alerts INTEGER DEFAULT 1",
  // Content folders
  'ALTER TABLE content ADD COLUMN folder TEXT',
  // Device orientation and default content
  "ALTER TABLE devices ADD COLUMN orientation TEXT DEFAULT 'landscape'",
  'ALTER TABLE devices ADD COLUMN default_content_id TEXT',
  // Audio control per assignment
  "ALTER TABLE assignments ADD COLUMN muted INTEGER DEFAULT 0",
  // Trial tracking
  "ALTER TABLE users ADD COLUMN trial_started INTEGER",
  "ALTER TABLE users ADD COLUMN trial_plan TEXT DEFAULT 'pro'",
  // Stripe price IDs on plans
  "ALTER TABLE plans ADD COLUMN stripe_price_monthly TEXT",
  "ALTER TABLE plans ADD COLUMN stripe_price_yearly TEXT",
  // Last login tracking
  "ALTER TABLE users ADD COLUMN last_login INTEGER",
  // Phase 2: every device gets a playlist, schedules can override with a playlist
  "ALTER TABLE devices ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  "ALTER TABLE schedules ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  "ALTER TABLE playlists ADD COLUMN is_auto_generated INTEGER NOT NULL DEFAULT 0",
  // Device authentication token
  "ALTER TABLE devices ADD COLUMN device_token TEXT",
  // Phase 3: playlist publish/draft state
  "ALTER TABLE playlists ADD COLUMN status TEXT NOT NULL DEFAULT 'draft'",
  "ALTER TABLE playlists ADD COLUMN published_snapshot TEXT",
  // Phase 4: group scheduling (column add only - full migration with CHECK below)
  "ALTER TABLE schedules ADD COLUMN group_id TEXT REFERENCES device_groups(id) ON DELETE SET NULL",
  // Hierarchical content folders (per-user)
  `CREATE TABLE IF NOT EXISTS content_folders (
    id TEXT PRIMARY KEY,
    user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    parent_id TEXT REFERENCES content_folders(id) ON DELETE CASCADE,
    name TEXT NOT NULL,
    created_at INTEGER NOT NULL DEFAULT (strftime('%s','now'))
  )`,
  "CREATE INDEX IF NOT EXISTS idx_content_folders_user ON content_folders(user_id, parent_id)",
  "ALTER TABLE content ADD COLUMN folder_id TEXT REFERENCES content_folders(id) ON DELETE SET NULL",
  "CREATE INDEX IF NOT EXISTS idx_content_folder ON content(folder_id)",
  // Group-level playlist: when set, devices added to the group inherit it.
  "ALTER TABLE device_groups ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  // Wall-level playlist: video walls now play a playlist (not just one content).
  "ALTER TABLE video_walls ADD COLUMN playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL",
  // Free-form canvas layout: walls store a player rect; member devices store
  // their own rect. Coordinates are in arbitrary canvas units (effectively px).
  "ALTER TABLE video_walls ADD COLUMN player_x REAL",
  "ALTER TABLE video_walls ADD COLUMN player_y REAL",
  "ALTER TABLE video_walls ADD COLUMN player_width REAL",
  "ALTER TABLE video_walls ADD COLUMN player_height REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_x REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_y REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_width REAL",
  "ALTER TABLE video_wall_devices ADD COLUMN canvas_height REAL",
  // Phase 2.2c: content_folders gets workspace_id. Phase 1 missed this table.
  "ALTER TABLE content_folders ADD COLUMN workspace_id TEXT REFERENCES workspaces(id)",
  "CREATE INDEX IF NOT EXISTS idx_content_folders_workspace ON content_folders(workspace_id)",
  // Phase 2 zone_id regression fix: playlist_items needs zone_id so the
  // multi-zone-layout assignment feature works. The Phase 2 assignments->
  // playlist_items conversion (migrateAssignmentsToPlaylists) dropped this
  // column. Column ADD is idempotent via the surrounding try/catch loop.
  "ALTER TABLE playlist_items ADD COLUMN zone_id TEXT REFERENCES layout_zones(id) ON DELETE SET NULL",
];

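// SQLite has no ADD COLUMN IF NOT EXISTS, so each ALTER runs unconditionally
// and the duplicate-column error on an already-migrated DB is swallowed; the
// CREATE TABLE/INDEX entries are independently idempotent via IF NOT EXISTS.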
for (const sql of migrations) {
  try { db.exec(sql); } catch (e) { /* already exists */ }
}

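// (SQLite cannot drop a NOT NULL constraint in place; the block below uses the
// standard rebuild recipe: create the new shape, copy rows, drop the old table,
// rename.)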
// Fix assignments table: make content_id nullable (SQLite requires table rebuild)
try {
  const colInfo = db.prepare("PRAGMA table_info(assignments)").all();
  const contentCol = colInfo.find(c => c.name === 'content_id');
  if (contentCol && contentCol.notnull === 1) {
    console.log('Migrating assignments table: making content_id nullable...');
    db.exec(`
      CREATE TABLE assignments_new (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        device_id TEXT NOT NULL REFERENCES devices(id) ON DELETE CASCADE,
        content_id TEXT REFERENCES content(id) ON DELETE CASCADE,
        widget_id TEXT REFERENCES widgets(id) ON DELETE CASCADE,
        zone_id TEXT,
        sort_order INTEGER NOT NULL DEFAULT 0,
        duration_sec INTEGER NOT NULL DEFAULT 10,
        schedule_start TEXT,
        schedule_end TEXT,
        schedule_days TEXT,
        enabled INTEGER NOT NULL DEFAULT 1,
        muted INTEGER DEFAULT 0,
        created_at INTEGER NOT NULL DEFAULT (strftime('%s','now'))
      );
      INSERT INTO assignments_new SELECT id, device_id, content_id, widget_id, zone_id, sort_order, duration_sec, schedule_start, schedule_end, schedule_days, enabled, muted, created_at FROM assignments;
      DROP TABLE assignments;
      ALTER TABLE assignments_new RENAME TO assignments;
    `);
    console.log('Assignments table migrated successfully.');
  }
} catch (e) {
  console.error('Assignments migration error:', e.message);
}

// Phase 2 migration: convert existing assignments into per-device playlists
const MIGRATION_ID = 'phase2_playlist_migration';

async function migrateAssignmentsToPlaylists() {
  // Skip if already ran (tracked in schema_migrations table)
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(MIGRATION_ID);
  if (already) return;

  const { v4: uuidv4 } = require('uuid');
  const { execFile } = require('child_process');

  // Find devices that have at least one assignment
  const devicesWithAssignments = db.prepare(`
    SELECT DISTINCT d.id, d.name, d.user_id
    FROM devices d
    INNER JOIN assignments a ON a.device_id = d.id
    WHERE d.user_id IS NOT NULL
  `).all();

  if (devicesWithAssignments.length === 0) return;

  console.log(`Migrating ${devicesWithAssignments.length} device(s) from assignments to playlists...`);

  // Async ffprobe - matches the pattern in playlists.js probeAndUpdateDuration
  async function probeVideoDuration(content) {
    if (!content || !content.mime_type || !content.mime_type.startsWith('video/')) return null;
    if (content.duration_sec) return Math.ceil(content.duration_sec);
    if (!content.filepath) return null;
    try {
      const fullPath = path.join(config.contentDir, content.filepath);
      const stdout = await new Promise((resolve, reject) => {
        execFile('ffprobe', [
          '-v', 'quiet', '-print_format', 'json', '-show_format', fullPath
        ], { timeout: 15000 }, (err, out) => err ? reject(err) : resolve(out));
      });
      const info = JSON.parse(stdout);
      if (info.format?.duration) {
        const dur = parseFloat(info.format.duration);
        db.prepare('UPDATE content SET duration_sec = ? WHERE id = ?').run(dur, content.id);
        return Math.ceil(dur);
      }
    } catch (e) {
      console.warn(` ffprobe failed for ${content.id}:`, e.message);
    }
    return null;
  }

  const getAssignments = db.prepare(`
    SELECT a.content_id, a.widget_id, a.sort_order, a.duration_sec,
           c.mime_type, c.filepath, c.duration_sec as content_duration
    FROM assignments a
    LEFT JOIN content c ON a.content_id = c.id
    WHERE a.device_id = ? AND a.enabled = 1
    ORDER BY a.sort_order ASC
  `);

  // Probe durations outside the transaction (async ffprobe can't run inside SQLite transaction)
  const devicePlaylists = [];
  let videosProbed = 0;
  let totalItems = 0;
  for (const device of devicesWithAssignments) {
    const playlistId = uuidv4();
    const assignments = getAssignments.all(device.id);
    const items = [];
    for (const a of assignments) {
      let duration = a.duration_sec;
      if (a.content_id && a.mime_type?.startsWith('video/')) {
        const probed = await probeVideoDuration({ id: a.content_id, mime_type: a.mime_type, filepath: a.filepath, duration_sec: a.content_duration });
        if (probed) { duration = probed; videosProbed++; }
      }
      items.push({ content_id: a.content_id, widget_id: a.widget_id, sort_order: a.sort_order, duration_sec: duration });
      totalItems++;
    }
    devicePlaylists.push({ device, playlistId, items });
  }

  // Insert everything in a single transaction
  const insertPlaylist = db.prepare(`INSERT INTO playlists (id, user_id, name, description, is_auto_generated) VALUES (?, ?, ?, ?, 1)`);
  const insertItem = db.prepare(`INSERT INTO playlist_items (playlist_id, content_id, widget_id, sort_order, duration_sec) VALUES (?, ?, ?, ?, ?)`);
  const setDevicePlaylist = db.prepare('UPDATE devices SET playlist_id = ? WHERE id = ?');

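  // better-sqlite3's transaction() wraps the body in BEGIN/COMMIT and rolls
  // back automatically if it throws. The body must be synchronous, which is
  // why all the (async) ffprobe work happened in the loop above.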
  const migrate = db.transaction(() => {
    for (const { device, playlistId, items } of devicePlaylists) {
      insertPlaylist.run(playlistId, device.user_id, `${device.name} (migrated)`, 'Auto-generated from previous assignments');
      for (const item of items) {
        insertItem.run(playlistId, item.content_id || null, item.widget_id || null, item.sort_order, item.duration_sec);
      }
      setDevicePlaylist.run(playlistId, device.id);
    }
  });
  migrate();

  // Record that this migration has run
  db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(MIGRATION_ID);

  const scheduleCount = db.prepare('SELECT COUNT(*) as count FROM schedules').get().count;
  console.log(`Migration complete: ${devicesWithAssignments.length} device(s), ${totalItems} playlist item(s), ${videosProbed} video(s) probed, ${scheduleCount} schedule(s).`);
}

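// Fire-and-forget with a logged failure: unlike the snapshot-gated migrations
// in this file, an error here does not abort startup.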
migrateAssignmentsToPlaylists().catch(e => console.error('Migration error:', e));

// Phase 3 migration: snapshot existing playlist items into published_snapshot
const PHASE3_MIGRATION_ID = 'phase3_publish_snapshot';

function migratePublishSnapshots() {
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE3_MIGRATION_ID);
  if (already) return;

  const playlists = db.prepare('SELECT id FROM playlists').all();
  if (playlists.length === 0) {
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE3_MIGRATION_ID);
    return;
  }

  console.log(`Phase 3 migration: snapshotting ${playlists.length} playlist(s) as published...`);

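  // Note: this projection predates zone_id. Snapshots written here are later
  // regenerated with zone_id by the phase2_zone_id_backfill migration at the
  // bottom of this file.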
  const getItems = db.prepare(`
    SELECT pi.content_id, pi.widget_id, pi.sort_order, pi.duration_sec,
           COALESCE(c.filename, w.name) as filename, c.mime_type, c.filepath, c.file_size,
           c.duration_sec as content_duration, c.remote_url,
           w.name as widget_name, w.widget_type, w.config as widget_config
    FROM playlist_items pi
    LEFT JOIN content c ON pi.content_id = c.id
    LEFT JOIN widgets w ON pi.widget_id = w.id
    WHERE pi.playlist_id = ?
    ORDER BY pi.sort_order ASC
  `);
  const updatePlaylist = db.prepare("UPDATE playlists SET status = 'published', published_snapshot = ? WHERE id = ?");

  const migrate = db.transaction(() => {
    let snapshotted = 0;
    for (const playlist of playlists) {
      const items = getItems.all(playlist.id);
      updatePlaylist.run(JSON.stringify(items), playlist.id);
      snapshotted++;
    }
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE3_MIGRATION_ID);
    console.log(`Phase 3 migration complete: ${snapshotted} playlist(s) snapshotted as published.`);
  });
  migrate();
}

migratePublishSnapshots();

// Phase 4 migration: add group_id to schedules, make device_id nullable, add CHECK constraint
const PHASE4_MIGRATION_ID = 'phase4_group_schedules';

function migrateGroupSchedules() {
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE4_MIGRATION_ID);
  if (already) return;

  console.log('Phase 4 migration: adding group_id to schedules, making device_id nullable...');

  const migrate = db.transaction(() => {
    db.exec(`
      CREATE TABLE schedules_new (
        id TEXT PRIMARY KEY,
        user_id TEXT NOT NULL REFERENCES users(id),
        device_id TEXT REFERENCES devices(id) ON DELETE CASCADE,
        group_id TEXT REFERENCES device_groups(id) ON DELETE SET NULL,
        zone_id TEXT REFERENCES layout_zones(id) ON DELETE CASCADE,
        content_id TEXT REFERENCES content(id) ON DELETE CASCADE,
        widget_id TEXT REFERENCES widgets(id) ON DELETE CASCADE,
        layout_id TEXT REFERENCES layouts(id) ON DELETE SET NULL,
        playlist_id TEXT REFERENCES playlists(id) ON DELETE SET NULL,
        title TEXT NOT NULL DEFAULT '',
        start_time TEXT NOT NULL,
        end_time TEXT NOT NULL,
        timezone TEXT NOT NULL DEFAULT 'UTC',
        recurrence TEXT,
        recurrence_end TEXT,
        priority INTEGER NOT NULL DEFAULT 0,
        enabled INTEGER NOT NULL DEFAULT 1,
        color TEXT DEFAULT '#3B82F6',
        created_at INTEGER NOT NULL DEFAULT (strftime('%s','now')),
        updated_at INTEGER NOT NULL DEFAULT (strftime('%s','now')),
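        -- Exactly one of device_id / group_id: a schedule targets a device XOR a group.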
        CHECK ((device_id IS NOT NULL AND group_id IS NULL) OR (device_id IS NULL AND group_id IS NOT NULL))
      );

      INSERT INTO schedules_new (id, user_id, device_id, zone_id, content_id, widget_id, layout_id, playlist_id,
          title, start_time, end_time, timezone, recurrence, recurrence_end, priority, enabled, color, created_at, updated_at)
      SELECT id, user_id, device_id, zone_id, content_id, widget_id, layout_id, playlist_id,
          title, start_time, end_time, timezone, recurrence, recurrence_end, priority, enabled, color, created_at, updated_at
      FROM schedules;

      DROP TABLE schedules;
      ALTER TABLE schedules_new RENAME TO schedules;

      CREATE INDEX idx_schedules_device ON schedules(device_id, enabled);
      CREATE INDEX idx_schedules_group ON schedules(group_id, enabled);
    `);

    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE4_MIGRATION_ID);
    console.log('Phase 4 migration complete: schedules table rebuilt with group_id support.');
  });
  migrate();
}

migrateGroupSchedules();

// Phase 1 multi-tenancy migration (auto-applies if not yet run). Must come
// AFTER the inline migrations above so that team_id / workspace_id columns
// exist on resource tables - the Phase 1 backfill loop reads team_id and
// updates workspace_id.
ensureMultitenancyMigration();

// Phase 2.2c migration: backfill content_folders.workspace_id from owner's
// default workspace. The ALTER lives in the migrations array above; this
// one-shot populates the column for any rows that pre-date it.
const PHASE6_MIGRATION_ID = 'phase6_content_folders_workspace';

function migrateFolderWorkspaceIds() {
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE6_MIGRATION_ID);
  if (already) return;

  // Belt-and-suspenders: if multi-tenancy tables aren't present (auto-runner
  // somehow skipped), skip cleanly instead of crashing on the JOIN below.
  const hasWorkspaces = db.prepare(
    "SELECT 1 FROM sqlite_master WHERE type='table' AND name='workspaces'"
  ).get();
  if (!hasWorkspaces) {
    console.warn('migrateFolderWorkspaceIds: workspaces table missing, skipping');
    return;
  }

  // Check the column exists before trying to backfill. (Defensive: on a fresh
  // install the schema.sql defines content_folders without the column, the
  // ALTER above adds it, and we proceed; but if anything went sideways we
  // skip rather than throw.)
  const cols = db.prepare("PRAGMA table_info(content_folders)").all();
  if (!cols.some(c => c.name === 'workspace_id')) {
    console.warn('Phase 2.2c migration: content_folders.workspace_id column missing, skipping backfill');
    return;
  }

  const stmt = db.prepare(`
    UPDATE content_folders SET workspace_id = (
      SELECT w.id FROM workspaces w
      JOIN workspace_members wm ON wm.workspace_id = w.id
      WHERE wm.user_id = content_folders.user_id
      ORDER BY wm.joined_at ASC LIMIT 1
    )
    WHERE workspace_id IS NULL AND user_id IS NOT NULL
  `);

  const tx = db.transaction(() => {
    const result = stmt.run();
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE6_MIGRATION_ID);
    return result.changes;
  });
  const changes = tx();
  if (changes > 0) console.log(`Phase 2.2c migration: backfilled workspace_id on ${changes} content_folders row(s).`);
}

migrateFolderWorkspaceIds();

const PHASE_2_2_ACTIVITY_STOP_ID = 'phase_2_2_activity_log_stop_bleeding';

// One-time backfill of activity_log rows that were written between the
// Phase 1 schema migration and the writer-leak fix in this commit. Strategy:
// * Rows with device_id: derive workspace_id from devices.workspace_id
//   (the activity is about a specific device, so this is unambiguous).
// * Rows with no device_id but a user_id: derive from the user's oldest
//   workspace_members row (pre-flight confirmed 0 affected users have
//   more than one workspace, so the choice is unambiguous).
// Rows with user_id IS NULL (auth:login_failed and similar pre-tenancy
// system events) are left alone - they have no tenant context.
function backfillActivityLogWorkspace() {
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE_2_2_ACTIVITY_STOP_ID);
  if (already) return;

  // Belt-and-suspenders: if multi-tenancy tables aren't present (auto-runner
  // somehow skipped), skip cleanly instead of crashing on workspace_members.
  const hasMembers = db.prepare(
    "SELECT 1 FROM sqlite_master WHERE type='table' AND name='workspace_members'"
  ).get();
  if (!hasMembers) {
    console.warn('backfillActivityLogWorkspace: workspace_members table missing, skipping');
    return;
  }

  const viaDevice = db.prepare(`
    UPDATE activity_log SET workspace_id = (
      SELECT workspace_id FROM devices WHERE devices.id = activity_log.device_id
    )
    WHERE workspace_id IS NULL AND device_id IS NOT NULL
      AND EXISTS (SELECT 1 FROM devices WHERE devices.id = activity_log.device_id AND devices.workspace_id IS NOT NULL)
  `);

  const viaMembers = db.prepare(`
    UPDATE activity_log SET workspace_id = (
      SELECT wm.workspace_id FROM workspace_members wm
      WHERE wm.user_id = activity_log.user_id
      ORDER BY wm.joined_at ASC LIMIT 1
    )
    WHERE workspace_id IS NULL AND user_id IS NOT NULL AND device_id IS NULL
      AND EXISTS (SELECT 1 FROM workspace_members wm WHERE wm.user_id = activity_log.user_id)
  `);

  const tx = db.transaction(() => {
    const d = viaDevice.run().changes;
    const m = viaMembers.run().changes;
    db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE_2_2_ACTIVITY_STOP_ID);
    return { d, m };
  });
  const { d, m } = tx();
  if (d + m > 0) console.log(`activity_log backfill: ${d} via device.workspace_id, ${m} via workspace_members lookup`);
}

backfillActivityLogWorkspace();

// Phase 2 zone_id backfill. Companion to the ADD COLUMN above. Attempts to
// recover zone_id values for playlist_items rows by joining back to the
// (legacy) assignments table on device+content/widget. On installs where
// assignments is empty or never had zone_id populated this is a no-op; the
// migration row is stamped regardless so it doesn't re-run.
//
// Also regenerates published_snapshot JSON for every published playlist so
// the snapshot the player consumes carries zone_id going forward (the
// player resolves a.zone_id === zone.id in renderZones). Even with zero
// rows backfilled, this republish closes the snapshot-staleness gap.
//
// Pre-migration snapshot is a one-off for this migration only - the general
// "every migration backs up first" framework is tracked as a separate
// concern, not built here.
const PHASE2_ZONE_ID_BACKFILL_ID = 'phase2_zone_id_backfill';

function backfillPlaylistItemsZoneId() {
  const already = db.prepare('SELECT 1 FROM schema_migrations WHERE id = ?').get(PHASE2_ZONE_ID_BACKFILL_ID);
  if (already) return;

  const ts = new Date().toISOString().replace(/[:.]/g, '-');
  const snapshotPath = path.join(dbDir, `remote_display.pre-zone-id-backfill-${ts}.db`);
  try {
    db.pragma('wal_checkpoint(TRUNCATE)');
    fs.copyFileSync(config.dbPath, snapshotPath);
    console.warn(`[zone-id backfill] Pre-migration snapshot: ${snapshotPath}`);
  } catch (e) {
    console.error(`[zone-id backfill] Snapshot failed: ${e.message}`);
    process.exit(1);
  }

  try {
    const tx = db.transaction(() => {
      // Backfill: best-effort match playlist_items back to assignments via
      // device.playlist_id and content/widget identity. LIMIT 1 covers the
      // unlikely "same content assigned twice in different zones on one
      // device" edge case. Items with no matching legacy assignment, or
      // matches that themselves had zone_id NULL, are left as NULL.
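      // The EXISTS guard repeats the correlated match so the UPDATE only
      // touches rows with a recoverable zone_id; without it, every NULL-zone
      // row would be rewritten to NULL and inflate the reported change count.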
      const backfilled = db.prepare(`
        UPDATE playlist_items
        SET zone_id = (
          SELECT a.zone_id FROM assignments a
          JOIN devices d ON d.id = a.device_id
          WHERE d.playlist_id = playlist_items.playlist_id
            AND a.zone_id IS NOT NULL
            AND (
              (a.content_id IS NOT NULL AND a.content_id = playlist_items.content_id)
              OR
              (a.widget_id IS NOT NULL AND a.widget_id = playlist_items.widget_id)
            )
          LIMIT 1
        )
        WHERE zone_id IS NULL
          AND EXISTS (
            SELECT 1 FROM assignments a
            JOIN devices d ON d.id = a.device_id
            WHERE d.playlist_id = playlist_items.playlist_id
              AND a.zone_id IS NOT NULL
              AND (
                (a.content_id IS NOT NULL AND a.content_id = playlist_items.content_id)
                OR
                (a.widget_id IS NOT NULL AND a.widget_id = playlist_items.widget_id)
              )
          )
      `).run().changes;

      // Republish: regenerate published_snapshot for every published playlist
      // so the snapshot JSON carries zone_id. Mirrors buildSnapshotItems in
      // routes/playlists.js - kept inline here to avoid pulling routes/* in
      // at migration time (circular require).
      const publishedPlaylists = db.prepare("SELECT id FROM playlists WHERE status = 'published'").all();
      const buildSnapshot = db.prepare(`
        SELECT pi.content_id, pi.widget_id, pi.zone_id, pi.sort_order, pi.duration_sec,
               COALESCE(c.filename, w.name) as filename, c.mime_type, c.filepath, c.file_size,
               c.duration_sec as content_duration, c.remote_url,
               w.name as widget_name, w.widget_type, w.config as widget_config
        FROM playlist_items pi
        LEFT JOIN content c ON pi.content_id = c.id
        LEFT JOIN widgets w ON pi.widget_id = w.id
        WHERE pi.playlist_id = ?
        ORDER BY pi.sort_order ASC
      `);
      const updateSnap = db.prepare("UPDATE playlists SET published_snapshot = ?, updated_at = strftime('%s','now') WHERE id = ?");
      let republished = 0;
      for (const pl of publishedPlaylists) {
        const items = buildSnapshot.all(pl.id);
        updateSnap.run(JSON.stringify(items), pl.id);
        republished++;
      }

      db.prepare('INSERT OR IGNORE INTO schema_migrations (id) VALUES (?)').run(PHASE2_ZONE_ID_BACKFILL_ID);
      return { backfilled, republished };
    });
    const { backfilled, republished } = tx();
    console.log(`[zone-id backfill] ${backfilled} playlist_items recovered zone_id, ${republished} published_snapshots regenerated`);
  } catch (e) {
    console.error(`[zone-id backfill] Migration FAILED: ${e.message}`);
    console.error(`[zone-id backfill] Restore with: cp ${snapshotPath} ${config.dbPath}`);
    process.exit(1);
  }
}

backfillPlaylistItemsZoneId();

// Prune old telemetry (keep last 24h worth at 15s intervals = ~5760, cap at 6000)
function pruneTelemetry(deviceId) {
  db.prepare(`
    DELETE FROM device_telemetry
    WHERE device_id = ? AND id NOT IN (
      SELECT id FROM device_telemetry
      WHERE device_id = ?
      ORDER BY reported_at DESC LIMIT 6000
    )
  `).run(deviceId, deviceId);
}

// Prune old screenshots (keep only latest per device)
function pruneScreenshots(deviceId) {
  const old = db.prepare(`
    SELECT filepath FROM screenshots
    WHERE device_id = ? AND id NOT IN (
      SELECT id FROM screenshots WHERE device_id = ? ORDER BY captured_at DESC LIMIT 1
    )
  `).all(deviceId, deviceId);

  for (const row of old) {
    const fullPath = path.join(config.screenshotsDir, row.filepath);
    if (fs.existsSync(fullPath)) fs.unlinkSync(fullPath);
  }

  db.prepare(`
    DELETE FROM screenshots
    WHERE device_id = ? AND id NOT IN (
      SELECT id FROM screenshots WHERE device_id = ? ORDER BY captured_at DESC LIMIT 1
    )
  `).run(deviceId, deviceId);
}

module.exports = { db, pruneTelemetry, pruneScreenshots };