From 1aee4f2d5b0480bf87c336effae4059e1c6600d3 Mon Sep 17 00:00:00 2001 From: ScreenTinker Date: Thu, 14 May 2026 13:02:34 -0500 Subject: [PATCH 1/5] fix(socket): raise Engine.IO ping/pong + prefer WebSocket transport Connection-stability layer for issue #3. LG webOS WebKit (and other TV-grade clients) miss Engine.IO pongs under decode load with the Socket.IO defaults of 25s ping / 20s timeout, causing spurious transport drops and a connect/reconnect/evict/disconnect loop on the device. Default polling-first transport adds another fragility layer via the polling->WebSocket upgrade dance. - pingInterval / pingTimeout default to 30000 / 30000 (worst-case dead-socket detection 60s, up from ~45s). Both env-configurable via PING_INTERVAL / PING_TIMEOUT. - Player Socket.IO client: transports: ['websocket', 'polling']. Tries WebSocket first; falls back to polling on the same connect attempt if WebSocket fails. Polling fallback preserved for firewall-restricted networks. App-level heartbeat checker is unchanged and remains the safety net for clients that miss the transport-level ping/pong window. Tradeoffs documented in inline comments. README env table extended with PING_INTERVAL and PING_TIMEOUT rows. Refs #3 Co-Authored-By: Claude Opus 4.7 (1M context) --- README.md | 2 ++ server/config.js | 6 ++++++ server/player/index.html | 8 ++++++++ server/server.js | 4 +++- 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0b7b063..cf344c9 100644 --- a/README.md +++ b/README.md @@ -115,6 +115,8 @@ Schema migrations run automatically on first boot — no manual migration comman | `JWT_SECRET` | JWT signing key (auto-generated if not set) | _(auto)_ | | `SSL_CERT` | Path to SSL certificate | `server/certs/cert.pem` | | `SSL_KEY` | Path to SSL private key | `server/certs/key.pem` | +| `PING_INTERVAL` | Socket.IO Engine.IO ping interval (ms). Raise for slow TV WebKits that miss pongs under decode load. | `30000` | +| `PING_TIMEOUT` | Socket.IO Engine.IO pong wait (ms). Lower = faster dead-socket detection; higher = more forgiving of laggy clients. | `30000` | ### Optional Integrations diff --git a/server/config.js b/server/config.js index 5c541bf..946e72a 100644 --- a/server/config.js +++ b/server/config.js @@ -10,6 +10,12 @@ module.exports = { frontendDir: path.join(__dirname, '..', 'frontend'), heartbeatInterval: 10000, // Check every 10s heartbeatTimeout: 45000, // Offline after 45s (3 missed 15s beats) + // Engine.IO transport-level ping/pong. Raised from Socket.IO defaults + // (25000/20000) because TV WebKits (LG webOS, older Tizen) miss pongs + // under decode load - tighter values cause spurious transport drops. + // Worst-case dead-socket detection: pingInterval + pingTimeout = 60s. + pingInterval: parseInt(process.env.PING_INTERVAL) || 30000, + pingTimeout: parseInt(process.env.PING_TIMEOUT) || 30000, maxFileSize: 500 * 1024 * 1024, // 500MB thumbnailWidth: 320, screenshotQuality: 70, diff --git a/server/player/index.html b/server/player/index.html index 6ba350e..165b7ee 100644 --- a/server/player/index.html +++ b/server/player/index.html @@ -467,6 +467,14 @@ reconnectionDelay: 2000, reconnectionDelayMax: 10000, timeout: 20000, + // Prefer WebSocket but allow polling fallback. Socket.IO default is + // polling-first with an upgrade dance that's fragile on TV WebKits + // (LG webOS especially). Reversing the order opens a WebSocket directly; + // if that fails (rare - blocked by firewall), it falls back to polling + // on the same connect attempt. 
Tradeoff: WS-blocked networks add a few + // seconds to first connect while WS times out. Worth it for the common + // case where WS is fine but the upgrade dance was hanging the device. + transports: ['websocket', 'polling'], }); socket.on('connect', () => { diff --git a/server/server.js b/server/server.js index 2435a42..bc52464 100644 --- a/server/server.js +++ b/server/server.js @@ -43,7 +43,9 @@ const io = new Server(server, { origin: (origin, cb) => corsOriginCheck(origin, cb), credentials: true, }, - maxHttpBufferSize: 10 * 1024 * 1024 // 10MB for screenshot uploads + maxHttpBufferSize: 10 * 1024 * 1024, // 10MB for screenshot uploads + pingInterval: config.pingInterval, + pingTimeout: config.pingTimeout, }); // Middleware From 3da49ec79c099a1610de2a247ed932efbad8186c Mon Sep 17 00:00:00 2001 From: ScreenTinker Date: Thu, 14 May 2026 13:03:02 -0500 Subject: [PATCH 2/5] chore(config): env-configurable heartbeat timing Make HEARTBEAT_INTERVAL and HEARTBEAT_TIMEOUT env-tunable so self-hosters with slow/jittery networks don't have to edit config.js (issue #3 reporter did exactly this to confirm the diagnosis). Defaults unchanged at 10000ms / 45000ms so existing deployments keep current behavior. Same parseInt(env) || default pattern as PORT/HTTPS_PORT/PING_*. README env table extended. Refs #3 Co-Authored-By: Claude Opus 4.7 (1M context) --- README.md | 2 ++ server/config.js | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cf344c9..b7e49bc 100644 --- a/README.md +++ b/README.md @@ -117,6 +117,8 @@ Schema migrations run automatically on first boot — no manual migration comman | `SSL_KEY` | Path to SSL private key | `server/certs/key.pem` | | `PING_INTERVAL` | Socket.IO Engine.IO ping interval (ms). Raise for slow TV WebKits that miss pongs under decode load. | `30000` | | `PING_TIMEOUT` | Socket.IO Engine.IO pong wait (ms). Lower = faster dead-socket detection; higher = more forgiving of laggy clients. | `30000` | +| `HEARTBEAT_INTERVAL` | App-level offline-checker frequency (ms). How often the server sweeps the device list looking for stale heartbeats. | `10000` | +| `HEARTBEAT_TIMEOUT` | How long without an app-level heartbeat (ms) before marking a device offline. Raise for slow/jittery networks. | `45000` | ### Optional Integrations diff --git a/server/config.js b/server/config.js index 946e72a..405698b 100644 --- a/server/config.js +++ b/server/config.js @@ -8,8 +8,12 @@ module.exports = { contentDir: path.join(__dirname, 'uploads', 'content'), screenshotsDir: path.join(__dirname, 'uploads', 'screenshots'), frontendDir: path.join(__dirname, '..', 'frontend'), - heartbeatInterval: 10000, // Check every 10s - heartbeatTimeout: 45000, // Offline after 45s (3 missed 15s beats) + // App-level heartbeat. Checker runs every heartbeatInterval and marks + // devices offline if last_heartbeat is older than heartbeatTimeout. + // Env override for self-hosters on slow/jittery networks (issue #3: + // reporter found raising HEARTBEAT_TIMEOUT to 60s reduced false offlines). + heartbeatInterval: parseInt(process.env.HEARTBEAT_INTERVAL) || 10000, + heartbeatTimeout: parseInt(process.env.HEARTBEAT_TIMEOUT) || 45000, // Engine.IO transport-level ping/pong. Raised from Socket.IO defaults // (25000/20000) because TV WebKits (LG webOS, older Tizen) miss pongs // under decode load - tighter values cause spurious transport drops. 
From 742d8c4b09d5284f790579b9c2665ce9c64e6dba Mon Sep 17 00:00:00 2001 From: ScreenTinker Date: Thu, 14 May 2026 13:06:43 -0500 Subject: [PATCH 3/5] feat(socket): delivery queue for offline-device emits Short-lived per-device queue covers the TV-flap window (issue #3): when a device is mid-reconnect, prior code emitted to an empty room and the event vanished. Now playlist-updates and commands targeting an offline device are queued and flushed in order on the next device:register for that device_id. server/lib/command-queue.js (new): - pendingPlaylistUpdate: per-device marker (rebuild via builder on flush -> always fresh DB state, no stale snapshots) - pendingCommands: per-device Map with last-of-type dedup (most recent screen_off wins) - TTL via COMMAND_QUEUE_TTL_MS env (default 30000) - Active sweep every 30s prunes expired entries Memory bounds: ~6 entries per device worst case (1 playlist marker + 5 command types), unref'd sweep timer. Wired emit sites (8 total; the four direct socket.emit calls in deviceSocket register handlers are intentionally NOT queued because the socket is alive by definition at those points): - server/routes/video-walls.js (pushWallPayloadToDevice) - server/routes/device-groups.js (pushPlaylistToDevice) - server/routes/content.js (content-delete fan-out) - server/routes/playlists.js (pushToDevices + assign) - server/services/scheduler.js (scheduled rotations) - server/ws/deviceSocket.js x2 (wall leader reclaim/reassign) server/ws/deviceSocket.js register paths now call flushQueue after heartbeat.registerConnection + socket.join. Existing socket.emit('device:playlist-update', ...) lines kept - they send the initial state on register; the flush replays any queued events. Player's handlePlaylistUpdate fingerprint check dedupes the overlap. Refs #3 Co-Authored-By: Claude Opus 4.7 (1M context) --- README.md | 1 + server/config.js | 4 + server/lib/command-queue.js | 153 +++++++++++++++++++++++++++++++++ server/routes/content.js | 4 +- server/routes/device-groups.js | 4 +- server/routes/playlists.js | 7 +- server/routes/video-walls.js | 3 +- server/server.js | 4 + server/services/scheduler.js | 4 +- server/ws/deviceSocket.js | 9 +- 10 files changed, 183 insertions(+), 10 deletions(-) create mode 100644 server/lib/command-queue.js diff --git a/README.md b/README.md index b7e49bc..42b4fb7 100644 --- a/README.md +++ b/README.md @@ -119,6 +119,7 @@ Schema migrations run automatically on first boot — no manual migration comman | `PING_TIMEOUT` | Socket.IO Engine.IO pong wait (ms). Lower = faster dead-socket detection; higher = more forgiving of laggy clients. | `30000` | | `HEARTBEAT_INTERVAL` | App-level offline-checker frequency (ms). How often the server sweeps the device list looking for stale heartbeats. | `10000` | | `HEARTBEAT_TIMEOUT` | How long without an app-level heartbeat (ms) before marking a device offline. Raise for slow/jittery networks. | `45000` | +| `COMMAND_QUEUE_TTL_MS` | How long the server holds commands and playlist-updates for a device that's offline at emit time (ms). Flushed in order on reconnect within this window; dropped past TTL. | `30000` | ### Optional Integrations diff --git a/server/config.js b/server/config.js index 405698b..02b5d82 100644 --- a/server/config.js +++ b/server/config.js @@ -14,6 +14,10 @@ module.exports = { // reporter found raising HEARTBEAT_TIMEOUT to 60s reduced false offlines). 
heartbeatInterval: parseInt(process.env.HEARTBEAT_INTERVAL) || 10000, heartbeatTimeout: parseInt(process.env.HEARTBEAT_TIMEOUT) || 45000, + // How long the server holds commands/playlist-updates for a device that's + // offline at emit time (ms). On reconnect within this window, queued events + // are flushed in order. Past TTL they're dropped. See lib/command-queue.js. + commandQueueTtlMs: parseInt(process.env.COMMAND_QUEUE_TTL_MS) || 30000, // Engine.IO transport-level ping/pong. Raised from Socket.IO defaults // (25000/20000) because TV WebKits (LG webOS, older Tizen) miss pongs // under decode load - tighter values cause spurious transport drops. diff --git a/server/lib/command-queue.js b/server/lib/command-queue.js new file mode 100644 index 0000000..59d82a5 --- /dev/null +++ b/server/lib/command-queue.js @@ -0,0 +1,153 @@ +// Short-lived per-device queue for events that target a currently-offline +// device. Designed for the TV-flap case where a device disconnects for a few +// seconds (Engine.IO ping miss, Wi-Fi blip, decode stall) and reconnects via +// Socket.IO's auto-reconnect. Without this queue, any device:command or +// device:playlist-update emitted during the disconnect window goes nowhere - +// the room is empty, the emit is silently dropped. +// +// Two structures, both keyed by device_id, both pruned by TTL: +// +// pendingPlaylistUpdate: Map<deviceId, { expiresAt }> +// We don't store the payload. On flush we rebuild via buildPlaylistPayload +// so the device gets the LATEST DB state, not a stale snapshot from when +// the update was first queued. +// +// pendingCommands: Map<deviceId, Map<type, { payload, expiresAt }>> +// One entry per command type per device. Last-of-type wins (the most +// recent screen_off supersedes any earlier ones). Payloads stored verbatim +// because commands are stateless declarations. +// +// Memory bounds: worst-case ~6 entries per device (1 playlist marker + 5 +// command types), each ~200 bytes. 10,000 offline devices = ~12MB. A sweep +// timer prunes empty per-device records every 30s. + +const config = require('../config'); + +const pendingPlaylistUpdate = new Map(); +const pendingCommands = new Map(); + +let _sweepTimer = null; + +// Internal helper - drop expired entries for a single device. Called lazily +// from queue/flush paths AND from the sweep timer. +function pruneDevice(deviceId) { + const now = Date.now(); + const pu = pendingPlaylistUpdate.get(deviceId); + if (pu && pu.expiresAt <= now) pendingPlaylistUpdate.delete(deviceId); + + const cmds = pendingCommands.get(deviceId); + if (cmds) { + for (const [type, entry] of cmds) { + if (entry.expiresAt <= now) cmds.delete(type); + } + if (cmds.size === 0) pendingCommands.delete(deviceId); + } +} + +// Mark a pending playlist-update for a device. Callers used to call +// deviceNs.to(deviceId).emit('device:playlist-update', buildPlaylistPayload(deviceId)); +// directly. Now they call queueOrEmitPlaylistUpdate, which checks room presence +// first and queues only if the device is offline. +function queueOrEmitPlaylistUpdate(deviceNs, deviceId, buildPayload) { + if (!deviceNs || !deviceId || typeof buildPayload !== 'function') return { delivered: false }; + const room = deviceNs.adapter.rooms.get(deviceId); + if (room && room.size > 0) { + deviceNs.to(deviceId).emit('device:playlist-update', buildPayload(deviceId)); + return { delivered: true }; + } + pendingPlaylistUpdate.set(deviceId, { expiresAt: Date.now() + config.commandQueueTtlMs }); + return { delivered: false, queued: true }; +} + +// Queue a single command for an offline device. 
Returns true if accepted +// (always true under current logic; reserved for future "rejected because +// stale/full" cases). Used by the dashboard:device-command handler (PATCH 4/5), +// which calls this when the device room is empty. +function queueCommand(deviceId, type, payload) { + if (!deviceId || !type) return false; + let perDevice = pendingCommands.get(deviceId); + if (!perDevice) { + perDevice = new Map(); + pendingCommands.set(deviceId, perDevice); + } + perDevice.set(type, { payload: payload || {}, expiresAt: Date.now() + config.commandQueueTtlMs }); + return true; +} + +// Called on device:register success, after heartbeat.registerConnection and +// socket.join. Drains both queues to the just-reconnected device. +// +// buildPayload is the buildPlaylistPayload function from deviceSocket.js, +// passed in to avoid a circular require. We call it at flush time so the +// playlist reflects current DB state, not whatever it was when queued. +function flushQueue(deviceNs, deviceId, buildPayload) { + if (!deviceNs || !deviceId) return { playlistUpdate: false, commands: 0 }; + pruneDevice(deviceId); + + let playlistUpdate = false; + let commands = 0; + + const pu = pendingPlaylistUpdate.get(deviceId); + if (pu) { + pendingPlaylistUpdate.delete(deviceId); + if (typeof buildPayload === 'function') { + deviceNs.to(deviceId).emit('device:playlist-update', buildPayload(deviceId)); + playlistUpdate = true; + } + } + + const cmds = pendingCommands.get(deviceId); + if (cmds) { + pendingCommands.delete(deviceId); + for (const [type, entry] of cmds) { + deviceNs.to(deviceId).emit('device:command', { type, payload: entry.payload }); + commands++; + } + } + + if (playlistUpdate || commands > 0) { + console.log(`Flushed queue for ${deviceId}: playlistUpdate=${playlistUpdate}, commands=${commands}`); + } + return { playlistUpdate, commands }; +} + +function getQueueDepth(deviceId) { + pruneDevice(deviceId); + const hasPlaylist = pendingPlaylistUpdate.has(deviceId) ? 1 : 0; + const cmdCount = pendingCommands.get(deviceId)?.size || 0; + return hasPlaylist + cmdCount; +} + +// Active sweep prunes devices that never come back. Without this, a device +// that goes permanently offline leaves its expired entries sitting in the Maps +// indefinitely - logically dead after the TTL but never freed. Cheap to walk. +function startSweep() { + if (_sweepTimer) return; + _sweepTimer = setInterval(() => { + for (const deviceId of pendingPlaylistUpdate.keys()) pruneDevice(deviceId); + for (const deviceId of pendingCommands.keys()) pruneDevice(deviceId); + }, 30000); + if (_sweepTimer.unref) _sweepTimer.unref(); +} + +function stopSweep() { + if (_sweepTimer) { clearInterval(_sweepTimer); _sweepTimer = null; } +} + +// Test helper - reset internal state between runs. Exported with a leading +// underscore to mark it as test-harness-only; production code never calls it. 
+function _resetForTests() { + pendingPlaylistUpdate.clear(); + pendingCommands.clear(); + stopSweep(); +} + +module.exports = { + queueOrEmitPlaylistUpdate, + queueCommand, + flushQueue, + getQueueDepth, + startSweep, + stopSweep, + _resetForTests, +}; diff --git a/server/routes/content.js b/server/routes/content.js index 3c0f07e..affe22e 100644 --- a/server/routes/content.js +++ b/server/routes/content.js @@ -432,8 +432,10 @@ router.delete('/:id', (req, res) => { const io = req.app.get('io'); if (io) { const { buildPlaylistPayload } = require('../ws/deviceSocket'); + const commandQueue = require('../lib/command-queue'); + const deviceNs = io.of('/device'); for (const d of affectedDevices) { - io.of('/device').to(d.device_id).emit('device:playlist-update', buildPlaylistPayload(d.device_id)); + commandQueue.queueOrEmitPlaylistUpdate(deviceNs, d.device_id, buildPlaylistPayload); } } } catch (e) { /* silent */ } diff --git a/server/routes/device-groups.js b/server/routes/device-groups.js index 7b210f5..5893d7c 100644 --- a/server/routes/device-groups.js +++ b/server/routes/device-groups.js @@ -217,8 +217,8 @@ function pushPlaylistToDevice(req, deviceId) { const io = req.app.get('io'); if (!io) return; const { buildPlaylistPayload } = require('../ws/deviceSocket'); - const deviceNs = io.of('/device'); - deviceNs.to(deviceId).emit('device:playlist-update', buildPlaylistPayload(deviceId)); + const commandQueue = require('../lib/command-queue'); + commandQueue.queueOrEmitPlaylistUpdate(io.of('/device'), deviceId, buildPlaylistPayload); } catch (e) { /* silent */ } } diff --git a/server/routes/playlists.js b/server/routes/playlists.js index 8542450..834ff74 100644 --- a/server/routes/playlists.js +++ b/server/routes/playlists.js @@ -89,9 +89,11 @@ function pushToDevices(playlistId, req) { const io = req.app.get('io'); if (!io) return; const { buildPlaylistPayload } = require('../ws/deviceSocket'); + const commandQueue = require('../lib/command-queue'); + const deviceNs = io.of('/device'); const devices = db.prepare('SELECT id FROM devices WHERE playlist_id = ?').all(playlistId); for (const d of devices) { - io.of('/device').to(d.id).emit('device:playlist-update', buildPlaylistPayload(d.id)); + commandQueue.queueOrEmitPlaylistUpdate(deviceNs, d.id, buildPlaylistPayload); } } catch (e) { /* silent */ } } @@ -449,7 +451,8 @@ router.post('/:id/assign', requirePlaylistWrite, (req, res) => { const io = req.app.get('io'); if (io) { const { buildPlaylistPayload } = require('../ws/deviceSocket'); - io.of('/device').to(device_id).emit('device:playlist-update', buildPlaylistPayload(device_id)); + const commandQueue = require('../lib/command-queue'); + commandQueue.queueOrEmitPlaylistUpdate(io.of('/device'), device_id, buildPlaylistPayload); } } catch (e) { /* silent */ } diff --git a/server/routes/video-walls.js b/server/routes/video-walls.js index d711bd4..55041ac 100644 --- a/server/routes/video-walls.js +++ b/server/routes/video-walls.js @@ -81,7 +81,8 @@ function pushWallPayloadToDevice(req, deviceId) { const io = req.app.get('io'); if (!io) return; const { buildPlaylistPayload } = require('../ws/deviceSocket'); - io.of('/device').to(deviceId).emit('device:playlist-update', buildPlaylistPayload(deviceId)); + const commandQueue = require('../lib/command-queue'); + commandQueue.queueOrEmitPlaylistUpdate(io.of('/device'), deviceId, buildPlaylistPayload); } catch (e) { /* silent */ } } diff --git a/server/server.js b/server/server.js index bc52464..cc60142 100644 --- a/server/server.js +++ 
b/server/server.js @@ -436,6 +436,10 @@ app.set('io', io); const { startHeartbeatChecker } = require('./services/heartbeat'); startHeartbeatChecker(io); +// Start command-queue sweep (prunes expired entries for offline devices) +const commandQueue = require('./lib/command-queue'); +commandQueue.startSweep(); + // Start scheduler const { startScheduler } = require('./services/scheduler'); startScheduler(io); diff --git a/server/services/scheduler.js b/server/services/scheduler.js index 085ef26..268172f 100644 --- a/server/services/scheduler.js +++ b/server/services/scheduler.js @@ -106,8 +106,8 @@ function parseSimpleRRule(rrule) { function pushPlaylistToDevice(deviceId, deviceNs) { // Use the single-source buildPlaylistPayload from deviceSocket const { buildPlaylistPayload } = require('../ws/deviceSocket'); - const payload = buildPlaylistPayload(deviceId); - deviceNs.to(deviceId).emit('device:playlist-update', payload); + const commandQueue = require('../lib/command-queue'); + commandQueue.queueOrEmitPlaylistUpdate(deviceNs, deviceId, buildPlaylistPayload); } module.exports = { startScheduler, pushPlaylistToDevice }; diff --git a/server/ws/deviceSocket.js b/server/ws/deviceSocket.js index 089f609..a7fceeb 100644 --- a/server/ws/deviceSocket.js +++ b/server/ws/deviceSocket.js @@ -5,6 +5,7 @@ const fs = require('fs'); const { db, pruneTelemetry, pruneScreenshots } = require('../db/database'); const config = require('../config'); const heartbeat = require('../services/heartbeat'); +const commandQueue = require('../lib/command-queue'); const { getUserPlan, getUserDeviceCount } = require('../middleware/subscription'); // Phase 2.3: deviceRoom() resolves a device_id to its workspace room so // dashboardNs.emit can be scoped instead of broadcast platform-wide. @@ -255,6 +256,8 @@ module.exports = function setupDeviceSocket(io) { socket.join(existing.device_id); logDeviceStatus(existing.device_id, 'online'); emitToDeviceWorkspace(dashboardNs, existing.device_id, 'dashboard:device-status', { device_id: existing.device_id, status: 'online' }); + // Flush any commands/playlist-updates queued while this device was offline. + commandQueue.flushQueue(deviceNs, existing.device_id, buildPlaylistPayload); // Send playlist const access = checkDeviceAccess(existing.device_id); if (!access.allowed) { @@ -307,6 +310,8 @@ module.exports = function setupDeviceSocket(io) { socket.join(device_id); socket.emit('device:registered', { device_id, device_token: tokenToSend, status: 'online' }); logDeviceStatus(device_id, 'online'); + // Flush any commands/playlist-updates queued while this device was offline. + commandQueue.flushQueue(deviceNs, device_id, buildPlaylistPayload); // If this device is part of a wall, re-evaluate leadership. 
// Preferred leader = online member with smallest (canvas_x + @@ -333,7 +338,7 @@ module.exports = function setupDeviceSocket(io) { const members = db.prepare('SELECT device_id FROM video_wall_devices WHERE wall_id = ?').all(wall.id); for (const m of members) { if (m.device_id !== device_id) { - deviceNs.to(m.device_id).emit('device:playlist-update', buildPlaylistPayload(m.device_id)); + commandQueue.queueOrEmitPlaylistUpdate(deviceNs, m.device_id, buildPlaylistPayload); } } } @@ -595,7 +600,7 @@ module.exports = function setupDeviceSocket(io) { const members = db.prepare('SELECT device_id FROM video_wall_devices WHERE wall_id = ?').all(wall.id); for (const m of members) { if (m.device_id !== currentDeviceId) { - deviceNs.to(m.device_id).emit('device:playlist-update', buildPlaylistPayload(m.device_id)); + commandQueue.queueOrEmitPlaylistUpdate(deviceNs, m.device_id, buildPlaylistPayload); } } } From f5ca26ae2d261a70df42cb1975c7ee4f51ae9363 Mon Sep 17 00:00:00 2001 From: ScreenTinker Date: Thu, 14 May 2026 13:11:40 -0500 Subject: [PATCH 4/5] fix(socket): offline debounce + truthful single-device command feedback Two dashboard-accuracy improvements for issue #3. Disconnect debounce (5s): - Brief transient flaps (Engine.IO ping miss, eviction-then-reconnect, Wi-Fi blip) no longer immediately flip the device to offline in the dashboard. Disconnect handler now defers the offline transition; register handlers cancel the pending timer if the reconnect lands within the window. - Existing stale-disconnect guard kept as fast-path for the eviction case (no timer scheduled at all when the active heartbeat conn is already a different socket). - Re-check at timer fire compares socketIds: aborts only if a GENUINELY DIFFERENT socket reclaimed the device. If the only entry is the closing socket's own (not-yet-cleaned-up) one, it is treated as stale and the offline transition proceeds. - Server-restart mid-grace is handled by the heartbeat checker safety net (existing component): any 'online' row with last_heartbeat older than heartbeatTimeout gets marked offline on the next sweep. Truthful single-device command feedback: - dashboard:device-command handler now checks deviceNs.adapter.rooms for an active socket before emitting (matches the group-command route's pattern). - If the room is empty, falls through to commandQueue.queueCommand (lazy require - if the queue commit, PATCH 3/5, is reverted, MODULE_NOT_FOUND is cached and every subsequent call gets consistent queued=false behavior). - Returns a three-state ack to the caller: { delivered, queued, reason }. - Server log line was misleading - now logs 'Command delivered to device X' vs 'Command for offline device X (queued=true/false)'. Frontend: - sendCommand() takes an optional callback. Without one, it fires and forgets (no behavior change for non-wired callers). With one, it uses Socket.IO's .timeout(5000).emit so the callback always fires (ack or no_ack). - Six device-detail command buttons wired to three-state toasts: reboot, shutdown, screen_off, screen_on, launch, update. 
- delivered: green/success toast (existing localized message) - queued: amber/warning toast (new generic message) - no_ack: red/error toast - fallback: red/error toast - Two callers intentionally left fire-and-forget: - window._sendCmd (generic remote-overlay keypress/touch helper) - enable_system_capture (has its own visual state machine; out of scope for this commit) Three new i18n keys (en.js only; other locales follow later): - device.toast.command_queued - device.toast.command_undeliverable - device.toast.command_no_ack Refs #3 Co-Authored-By: Claude Opus 4.7 (1M context) --- frontend/js/i18n/en.js | 3 + frontend/js/socket.js | 16 ++++- frontend/js/views/device-detail.js | 31 ++++++---- server/ws/dashboardSocket.js | 27 +++++++-- server/ws/deviceSocket.js | 96 ++++++++++++++++++++---------- 5 files changed, 125 insertions(+), 48 deletions(-) diff --git a/frontend/js/i18n/en.js b/frontend/js/i18n/en.js index d6088f9..b2e9b40 100644 --- a/frontend/js/i18n/en.js +++ b/frontend/js/i18n/en.js @@ -369,6 +369,9 @@ export default { 'device.toast.launch_sent': 'Launch command sent', 'device.toast.update_triggered': 'Update check triggered', 'device.toast.remote_started': 'Remote session started', + 'device.toast.command_queued': '{cmd} — device offline, will deliver on reconnect', + 'device.toast.command_undeliverable': '{cmd} — device offline and queue unavailable', + 'device.toast.command_no_ack': '{cmd} — no server response', // Settings 'settings.title': 'Settings', diff --git a/frontend/js/socket.js b/frontend/js/socket.js index 3ba61bf..069428a 100644 --- a/frontend/js/socket.js +++ b/frontend/js/socket.js @@ -119,8 +119,20 @@ export function sendKey(deviceId, keycode) { if (dashboardSocket) dashboardSocket.emit('dashboard:remote-key', { device_id: deviceId, keycode }); } -export function sendCommand(deviceId, type, payload) { - if (dashboardSocket) dashboardSocket.emit('dashboard:device-command', { device_id: deviceId, type, payload }); +// Optional callback receives the server-side ack: { delivered, queued, reason }. +// Callers without a callback keep firing-and-forgetting (no behavior change). +// With a callback, we use Socket.IO's .timeout() so the callback always fires - +// either with the ack or with an Error if the server doesn't respond in 5s. +export function sendCommand(deviceId, type, payload, callback) { + if (!dashboardSocket) return; + if (typeof callback === 'function') { + dashboardSocket.timeout(5000).emit('dashboard:device-command', { device_id: deviceId, type, payload }, (err, ack) => { + if (err) callback({ delivered: false, reason: 'no_ack' }); + else callback(ack || { delivered: false, reason: 'no_ack' }); + }); + } else { + dashboardSocket.emit('dashboard:device-command', { device_id: deviceId, type, payload }); + } } export function getSocket() { return dashboardSocket; } diff --git a/frontend/js/views/device-detail.js b/frontend/js/views/device-detail.js index dc9356e..c44ba51 100644 --- a/frontend/js/views/device-detail.js +++ b/frontend/js/views/device-detail.js @@ -705,14 +705,26 @@ async function setupActions(device) { }, 3000); }); + // Send a command and surface the three-state ack as a toast. 
+ // - delivered: device received it (green/success) + // - queued: device is offline, will deliver on reconnect (amber/warning) + // - no_ack / fallback: server didn't respond or queue unavailable (red/error) + function sendWithFeedback(type, cmdLabel, successKey) { + sendCommand(device.id, type, {}, (ack) => { + if (ack?.delivered) showToast(t(successKey), 'success'); + else if (ack?.queued) showToast(t('device.toast.command_queued', { cmd: cmdLabel }), 'warning'); + else if (ack?.reason === 'no_ack') showToast(t('device.toast.command_no_ack', { cmd: cmdLabel }), 'error'); + else showToast(t('device.toast.command_undeliverable', { cmd: cmdLabel }), 'error'); + }); + } + // Reboot (double-click to confirm) const rebootBtn = document.getElementById('rebootBtn'); let rebootConfirming = false; let rebootTimeout = null; rebootBtn?.addEventListener('click', () => { if (rebootConfirming) { - sendCommand(device.id, 'reboot', {}); - showToast(t('device.toast.reboot_sent'), 'info'); + sendWithFeedback('reboot', 'Reboot', 'device.toast.reboot_sent'); rebootConfirming = false; rebootBtn.textContent = t('device.ctl.reboot_device'); return; @@ -732,8 +744,7 @@ async function setupActions(device) { let shutdownTimeout = null; shutdownBtn?.addEventListener('click', () => { if (shutdownConfirming) { - sendCommand(device.id, 'shutdown', {}); - showToast(t('device.toast.shutdown_sent'), 'info'); + sendWithFeedback('shutdown', 'Shutdown', 'device.toast.shutdown_sent'); shutdownConfirming = false; shutdownBtn.textContent = t('device.ctl.shutdown'); return; @@ -753,26 +764,22 @@ async function setupActions(device) { // Screen Off document.getElementById('screenOffBtn')?.addEventListener('click', () => { - sendCommand(device.id, 'screen_off', {}); - showToast(t('device.toast.screen_off_sent'), 'info'); + sendWithFeedback('screen_off', 'Screen off', 'device.toast.screen_off_sent'); }); // Screen On document.getElementById('screenOnBtn')?.addEventListener('click', () => { - sendCommand(device.id, 'screen_on', {}); - showToast(t('device.toast.screen_on_sent'), 'info'); + sendWithFeedback('screen_on', 'Screen on', 'device.toast.screen_on_sent'); }); // Launch Player document.getElementById('launchAppBtn')?.addEventListener('click', () => { - sendCommand(device.id, 'launch', {}); - showToast(t('device.toast.launch_sent'), 'info'); + sendWithFeedback('launch', 'Launch', 'device.toast.launch_sent'); }); // Force Update document.getElementById('forceUpdateBtn')?.addEventListener('click', () => { - sendCommand(device.id, 'update', {}); - showToast(t('device.toast.update_triggered'), 'info'); + sendWithFeedback('update', 'Update', 'device.toast.update_triggered'); }); } diff --git a/server/ws/dashboardSocket.js b/server/ws/dashboardSocket.js index f941a7f..bef4d05 100644 --- a/server/ws/dashboardSocket.js +++ b/server/ws/dashboardSocket.js @@ -92,11 +92,30 @@ module.exports = function setupDashboardSocket(io) { console.log(`Remote session stopped for device ${device_id}`); }); - socket.on('dashboard:device-command', (data) => { + socket.on('dashboard:device-command', (data, ack) => { const { device_id, type, payload } = data; - if (!canActOnDevice(socket, device_id, 'write')) return; - deviceNs.to(device_id).emit('device:command', { type, payload }); - console.log(`Command sent to device ${device_id}: ${type}`); + if (!canActOnDevice(socket, device_id, 'write')) { + if (typeof ack === 'function') ack({ delivered: false, reason: 'forbidden' }); + return; + } + const room = deviceNs.adapter.rooms.get(device_id); + if 
(room && room.size > 0) { + deviceNs.to(device_id).emit('device:command', { type, payload }); + console.log(`Command delivered to device ${device_id}: ${type}`); + if (typeof ack === 'function') ack({ delivered: true }); + return; + } + // Device offline at emit time. Try to queue (lazy require so reverting + // the queue commit doesn't break this commit - MODULE_NOT_FOUND on the + // first try gets cached by Node's module loader, giving consistent + // queued=false behavior on every subsequent call). + let queued = false; + try { + const queue = require('../lib/command-queue'); + queued = queue.queueCommand(device_id, type, payload); + } catch (e) { /* command-queue module absent; fall through to lost */ } + console.log(`Command for offline device ${device_id}: ${type} (queued=${queued})`); + if (typeof ack === 'function') ack({ delivered: false, queued, reason: 'offline' }); }); socket.on('disconnect', () => { diff --git a/server/ws/deviceSocket.js b/server/ws/deviceSocket.js index a7fceeb..0a7868c 100644 --- a/server/ws/deviceSocket.js +++ b/server/ws/deviceSocket.js @@ -6,6 +6,18 @@ const { db, pruneTelemetry, pruneScreenshots } = require('../db/database'); const config = require('../config'); const heartbeat = require('../services/heartbeat'); const commandQueue = require('../lib/command-queue'); + +// Debounce window for marking a device offline on socket disconnect. Brief +// flap (Wi-Fi blip, Engine.IO ping miss, server-side eviction-then-reconnect) +// shouldn't toggle the dashboard. If a fresh register lands within this +// window, the pending offline transition is cancelled. Per-device timer is +// stored here; cleared by the register handlers and by stale-disconnect +// guards. In-memory only - the heartbeat checker is the safety net for +// server-restart-during-grace-window edge cases (any 'online' rows whose +// last_heartbeat is older than heartbeatTimeout get marked offline by the +// next checker sweep within heartbeatInterval). +const pendingOfflines = new Map(); +const OFFLINE_DEBOUNCE_MS = 5000; const { getUserPlan, getUserDeviceCount } = require('../middleware/subscription'); // Phase 2.3: deviceRoom() resolves a device_id to its workspace room so // dashboardNs.emit can be scoped instead of broadcast platform-wide. @@ -243,6 +255,11 @@ module.exports = function setupDeviceSocket(io) { db.prepare('UPDATE devices SET device_token = ? 
WHERE id = ?').run(newToken, existing.device_id); console.log(`Fingerprint match: linking reinstalled app to existing device ${existing.device_id} (new token issued)`); authenticated = true; + // Cancel any pending offline timer - device is back in the grace window + if (pendingOfflines.has(existing.device_id)) { + clearTimeout(pendingOfflines.get(existing.device_id)); + pendingOfflines.delete(existing.device_id); + } evictPriorSocket(existing.device_id, socket.id); db.prepare("UPDATE devices SET status = 'online', last_heartbeat = strftime('%s','now'), ip_address = ?, updated_at = strftime('%s','now') WHERE id = ?") .run(getClientIp(socket), existing.device_id); @@ -290,6 +307,11 @@ module.exports = function setupDeviceSocket(io) { currentDeviceId = device_id; authenticated = true; + // Cancel any pending offline timer - device is back in the grace window + if (pendingOfflines.has(device_id)) { + clearTimeout(pendingOfflines.get(device_id)); + pendingOfflines.delete(device_id); + } evictPriorSocket(device_id, socket.id); db.prepare("UPDATE devices SET status = 'online', last_heartbeat = strftime('%s','now'), ip_address = ?, updated_at = strftime('%s','now') WHERE id = ?") .run(getClientIp(socket), device_id); @@ -565,41 +587,57 @@ module.exports = function setupDeviceSocket(io) { }); socket.on('disconnect', () => { - if (currentDeviceId) { - // If a newer socket has already taken over this device_id, this is a stale - // disconnect from a replaced socket — skip the offline transition so we don't - // flip an actively-connected device offline or clobber the new heartbeat entry. - const activeConn = heartbeat.getConnection(currentDeviceId); - if (activeConn && activeConn.socketId !== socket.id) { - console.log(`Stale disconnect for ${currentDeviceId} (socket ${socket.id}); active is ${activeConn.socketId}, skipping offline`); - return; - } + if (!currentDeviceId) return; - console.log(`Device disconnected: ${currentDeviceId}`); - db.prepare("UPDATE devices SET status = 'offline', updated_at = strftime('%s','now') WHERE id = ?") - .run(currentDeviceId); - heartbeat.removeConnection(currentDeviceId); - logDeviceStatus(currentDeviceId, 'offline'); - emitToDeviceWorkspace(dashboardNs, currentDeviceId, 'dashboard:device-status', { device_id: currentDeviceId, status: 'offline' }); + // Stale-disconnect guard: a newer socket already took over this device_id + // via eviction. Skip the offline transition entirely - don't even start a + // debounce timer. + const activeConn = heartbeat.getConnection(currentDeviceId); + if (activeConn && activeConn.socketId !== socket.id) { + console.log(`Stale disconnect for ${currentDeviceId} (socket ${socket.id}); active is ${activeConn.socketId}, skipping offline`); + return; + } + + const deviceId = currentDeviceId; + const closingSocketId = socket.id; + console.log(`Device disconnected: ${deviceId} (offline transition deferred ${OFFLINE_DEBOUNCE_MS}ms)`); + + // Defensive: clear any existing timer for this device. Shouldn't happen + // (register would have cleared it), but if two disconnects fire in + // sequence we want the second to refresh the window, not double up. + if (pendingOfflines.has(deviceId)) clearTimeout(pendingOfflines.get(deviceId)); + + pendingOfflines.set(deviceId, setTimeout(() => { + pendingOfflines.delete(deviceId); + // Re-check at fire time: did a DIFFERENT socket reclaim during the + // grace window? 
If activeConn exists but it's still our (now-closed) + // socket's entry, the entry is just stale - heartbeat.removeConnection + // hasn't run yet because we defer it inside this same block. Only + // abort if a genuinely different socket has registered. + const activeNow = heartbeat.getConnection(deviceId); + if (activeNow && activeNow.socketId !== closingSocketId) return; + + db.prepare("UPDATE devices SET status = 'offline', updated_at = strftime('%s','now') WHERE id = ?").run(deviceId); + heartbeat.removeConnection(deviceId); + logDeviceStatus(deviceId, 'offline'); + emitToDeviceWorkspace(dashboardNs, deviceId, 'dashboard:device-status', { device_id: deviceId, status: 'offline' }); // If this device was leading a wall, reassign leadership to the next - // online member so playback stays driven. Without this the wall freezes - // when the leader drops. + // online member so playback stays driven. try { - const wall = db.prepare('SELECT id FROM video_walls WHERE leader_device_id = ?').get(currentDeviceId); + const wall = db.prepare('SELECT id FROM video_walls WHERE leader_device_id = ?').get(deviceId); if (wall) { const candidates = db.prepare(` SELECT vwd.device_id FROM video_wall_devices vwd JOIN devices d ON d.id = vwd.device_id WHERE vwd.wall_id = ? AND d.status = 'online' AND vwd.device_id != ? ORDER BY vwd.grid_row, vwd.grid_col LIMIT 1 - `).all(wall.id, currentDeviceId); + `).all(wall.id, deviceId); const newLeader = candidates[0]?.device_id || null; db.prepare('UPDATE video_walls SET leader_device_id = ? WHERE id = ?').run(newLeader, wall.id); - // Notify the new leader (and refresh peers' is_leader flags). const members = db.prepare('SELECT device_id FROM video_wall_devices WHERE wall_id = ?').all(wall.id); for (const m of members) { - if (m.device_id !== currentDeviceId) { + if (m.device_id !== deviceId) { commandQueue.queueOrEmitPlaylistUpdate(deviceNs, m.device_id, buildPlaylistPayload); } } @@ -607,26 +645,24 @@ module.exports = function setupDeviceSocket(io) { } catch (e) { console.error('Wall leader reassign failed:', e.message); } // Save last screenshot to disk as offline snapshot - const lastB64 = lastScreenshots[currentDeviceId]; + const lastB64 = lastScreenshots[deviceId]; if (lastB64) { try { - const filename = `${currentDeviceId}_latest.jpg`; + const filename = `${deviceId}_latest.jpg`; const buffer = Buffer.from(lastB64, 'base64'); fs.writeFileSync(path.join(config.screenshotsDir, filename), buffer); - // Upsert screenshot record - const existing = db.prepare('SELECT id FROM screenshots WHERE device_id = ?').get(currentDeviceId); + const existing = db.prepare('SELECT id FROM screenshots WHERE device_id = ?').get(deviceId); if (existing) { - db.prepare('UPDATE screenshots SET filepath = ?, captured_at = strftime(\'%s\',\'now\') WHERE device_id = ?') - .run(filename, currentDeviceId); + db.prepare('UPDATE screenshots SET filepath = ?, captured_at = strftime(\'%s\',\'now\') WHERE device_id = ?').run(filename, deviceId); } else { - db.prepare('INSERT INTO screenshots (device_id, filepath) VALUES (?, ?)').run(currentDeviceId, filename); + db.prepare('INSERT INTO screenshots (device_id, filepath) VALUES (?, ?)').run(deviceId, filename); } } catch (e) { console.error('Failed to save offline screenshot:', e.message); } - delete lastScreenshots[currentDeviceId]; + delete lastScreenshots[deviceId]; } - } + }, OFFLINE_DEBOUNCE_MS)); }); }); From 8439f2bf18506f31c47c993136ece7ffc8d998d5 Mon Sep 17 00:00:00 2001 From: ScreenTinker Date: Thu, 14 May 2026 13:52:24 -0500 Subject: 
[PATCH 5/5] fix(landing): replace broken Custom pricing card with enterprise contact form The "Custom" tier on the public pricing page was misrendering as a better-than-Free tier: headline "Custom", price "Free", "Unlimited devices/storage", "Get Started" button. Root cause is in DB data, not markup - the 'enterprise' plan row has price_monthly=0 and max_devices/storage=-1, and the dynamic render in landing.html maps those to "Free" + "Unlimited" with the wrong CTA. Fix: filter the 'enterprise' plan out of the public landing render (client-side, in landing.html only) and replace it with a hardcoded Enterprise / Custom marketing card whose Contact Us button opens a new lead-capture modal. The DB row itself stays - it is actively used elsewhere: - auth.js: first user in SELF_HOSTED=true mode is assigned to it - settings.js: white-label feature is gated on enterprise plan - 1 user (the dev account) is currently assigned to it - /api/subscription/plans is also consumed by billing.js, settings.js, admin.js (logged-in surfaces); they keep getting the full plan list. The filter is scoped to landing.html's render only. The in-app billing page renders the same plan with the same cosmetic bug; that's a logged-in admin surface, out of scope for this commit. Other 4 cards (Free, Starter, Pro, Business) unchanged. Frontend (landing.html): - Filter 'enterprise' from public render - Hardcoded Enterprise / Custom card. Uses .price class with "Let's talk" + empty .yearly spacer to match Free card's vertical baseline so the feature list aligns with the paid cards' baselines. - Modal markup, CSS (mirrored from frontend/css/main.css conventions since landing.html doesn't import main.css), and inline JS for open/close/submit/escape/background-click. - Honeypot field: hidden 'fax_number' input (off-screen + aria-hidden + tabindex=-1). Picked over the obvious 'website' name to catch mid-tier bots that explicitly skip the well-known honeypot names. Backend (new server/routes/contact.js): - POST /api/contact/enterprise, public (unauthenticated) - Rate limited 5/min/IP+path via the existing rateLimit middleware - Honeypot check: populated fax_number returns 200 silently, no email - Server-side validation: required fields, email format, screens 1-100000, multi_tenant in {single,multi}, hosting in {hosted,self, unsure}. Length caps prevent textarea-bomb abuse. - Sends via existing services/email.js (Microsoft Graph) to dan@bytetinker.net from the support@screentinker.com Graph sender. - Log lines: "[contact] enterprise inquiry from EMAIL (COMPANY) delivered" or "[contact] honeypot triggered from IP; dropping". Wired in server.js alongside other public routes (before requireAuth). Build-time tests passed locally: - Module loads, server boots clean - Validation: missing fields, bad email, bad multi_tenant, bad hosting, screens out of range - all return 400 with the right error message - Honeypot: populated fax_number returns 200 success, no email sent, log line confirms drop - Rate limit: kicks in at 6th request within a minute as expected - Real end-to-end send: one test submission delivered to dan@bytetinker.net via Graph (subject "[ScreenTinker] Enterprise inquiry: ScreenTinker Build Verification", body formatted with all fields). GRAPH_DEV_RESTRICT_TO was temporarily widened to include the recipient for the test and restored to dw5304@gmail.com immediately after. 
- Card render order verified against live API: Free (outline, Get Started) | Starter | Pro (featured, Most Popular badge) | Business | Enterprise / Custom (Contact Us -> modal). Co-Authored-By: Claude Opus 4.7 (1M context) --- frontend/landing.html | 166 ++++++++++++++++++++++++++++++++++++++- server/routes/contact.js | 85 ++++++++++++++++++++ server/server.js | 5 ++ 3 files changed, 254 insertions(+), 2 deletions(-) create mode 100644 server/routes/contact.js diff --git a/frontend/landing.html b/frontend/landing.html index 3ff6fd3..30d64db 100644 --- a/frontend/landing.html +++ b/frontend/landing.html @@ -85,6 +85,24 @@ .platform-item .icon { font-size:40px; margin-bottom:8px; } .platform-item .name { font-size:13px; color:var(--muted); } + /* Modal (mirrors frontend/css/main.css conventions so screenshots look + consistent across landing and dashboard; copied inline since + landing.html doesn't import main.css). */ + .modal-overlay { position:fixed; inset:0; background:rgba(0,0,0,0.6); display:flex; align-items:center; justify-content:center; z-index:1000; padding:16px; } + .modal { background:var(--card); border:1px solid var(--border); border-radius:12px; width:100%; max-width:560px; max-height:90vh; overflow-y:auto; } + .modal-header { padding:20px 24px; border-bottom:1px solid var(--border); display:flex; justify-content:space-between; align-items:center; } + .modal-header h3 { font-size:18px; margin:0; } + .modal-close { background:none; border:none; color:var(--muted); font-size:24px; cursor:pointer; padding:0; line-height:1; } + .modal-body { padding:20px 24px; } + .modal-description { color:var(--muted); font-size:14px; margin-bottom:16px; } + .modal-footer { padding:16px 24px; border-top:1px solid var(--border); display:flex; gap:12px; justify-content:flex-end; } + .modal-body label { display:block; margin-bottom:12px; font-size:13px; color:var(--text); } + .modal-body input, .modal-body select, .modal-body textarea { width:100%; margin-top:4px; padding:8px 10px; background:var(--bg); color:var(--text); border:1px solid var(--border); border-radius:6px; font-size:14px; font-family:inherit; box-sizing:border-box; } + .modal-body input:focus, .modal-body select:focus, .modal-body textarea:focus { outline:none; border-color:var(--accent); } + .modal-body textarea { resize:vertical; } + .contact-status-success { color:#10b981; font-size:13px; } + .contact-status-error { color:#f87171; font-size:13px; } + /* Pricing */ .pricing { max-width:1200px; margin:0 auto; padding:80px 24px; } .pricing h2 { text-align:center; font-size:36px; margin-bottom:12px; } @@ -376,6 +394,56 @@ + + + diff --git a/server/routes/contact.js b/server/routes/contact.js new file mode 100644 index 0000000..be47ffb --- /dev/null +++ b/server/routes/contact.js @@ -0,0 +1,85 @@ +// Public (unauthenticated) contact form endpoint. Used by the Enterprise / +// Custom card on the marketing landing page to send a lead to Dan's inbox via +// the existing Microsoft Graph email service. +// +// Honeypot strategy: the form has a hidden 'fax_number' field that real users +// never see (off-screen + aria-hidden + tabindex=-1). If a submission arrives +// with that field populated, we return success to the bot but drop the +// submission silently. Combined with the rate limit applied in server.js +// (5 req/min/IP+path), this is enough friction for a low-traffic public form. 
+ +const express = require('express'); +const router = express.Router(); +const { sendEmail } = require('../services/email'); + +function isEmail(s) { + return typeof s === 'string' && /^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(s); +} +function clamp(s, max) { + return String(s || '').slice(0, max); +} + +router.post('/enterprise', async (req, res) => { + const { name, email, company, screens, multi_tenant, hosting, message, fax_number } = req.body || {}; + + // Honeypot. Real users can't see or tab to this field; only bots fill it. + // Return 200 so the bot's retry logic doesn't kick in, but skip the send. + if (fax_number && String(fax_number).trim() !== '') { + console.log(`[contact] honeypot triggered from ${req.ip}; dropping`); + return res.json({ success: true }); + } + + // Server-side validation. Client validates too but we never trust that. + if (!name || !email || !company || !screens || !multi_tenant || !hosting) { + return res.status(400).json({ error: 'Missing required fields' }); + } + if (!isEmail(email)) { + return res.status(400).json({ error: 'Invalid email address' }); + } + const screensNum = parseInt(screens); + if (!Number.isFinite(screensNum) || screensNum < 1 || screensNum > 100000) { + return res.status(400).json({ error: 'Screens must be a positive number' }); + } + if (!['single', 'multi'].includes(multi_tenant)) { + return res.status(400).json({ error: 'Invalid multi-tenant selection' }); + } + if (!['hosted', 'self', 'unsure'].includes(hosting)) { + return res.status(400).json({ error: 'Invalid hosting selection' }); + } + + // Length caps - keeps a 10MB textarea from filling the mailbox + const cleanName = clamp(name, 200); + const cleanEmail = clamp(email, 200); + const cleanCompany = clamp(company, 200); + const cleanMessage = clamp(message, 5000); + + const tenantLabel = multi_tenant === 'multi' ? 'Multiple organizations' : 'Single organization'; + const hostingLabel = { hosted: 'Hosted for me', self: 'Self-host', unsure: 'Not sure yet' }[hosting]; + + const subject = `Enterprise inquiry: ${cleanCompany}`; + const text = +`New enterprise inquiry from ${cleanName} (${cleanEmail}) + +Company: ${cleanCompany} +Estimated screens: ${screensNum} +Multi-tenant: ${tenantLabel} +Hosting preference: ${hostingLabel} + +Message: +${cleanMessage || '(none)'} + +--- +Submitted from screentinker.com pricing page +Source IP: ${req.ip} +`; + + const result = await sendEmail({ to: 'dan@bytetinker.net', subject, text }); + if (!result.sent) { + console.error(`[contact] email send failed for ${cleanEmail}: reason=${result.reason} error=${result.error || ''}`); + return res.status(500).json({ error: 'Could not send your message. Please email dan@bytetinker.net directly.' }); + } + console.log(`[contact] enterprise inquiry from ${cleanEmail} (${cleanCompany}) delivered`); + res.json({ success: true }); +}); + +module.exports = router; diff --git a/server/server.js b/server/server.js index cc60142..22fab0e 100644 --- a/server/server.js +++ b/server/server.js @@ -234,6 +234,11 @@ app.use('/api/content', rateLimit(60000, 30)); // 30 content operations per minu // Subscription routes (mixed auth) app.use('/api/subscription', require('./routes/subscription')); +// Public contact form (enterprise inquiries from landing page). Rate limited +// to 5 submissions per minute per IP; honeypot enforced inside the route. 
+app.use('/api/contact', rateLimit(60000, 5)); +app.use('/api/contact', require('./routes/contact')); + // Stripe billing routes (checkout, portal) app.use('/api/stripe', stripeRouter);
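For reference, a minimal sketch of how the command-queue API introduced in PATCH 3/5 behaves across an offline window. Not part of the patch series: the namespace stub, the 'tv-lobby' device id, and the payload builder are illustrative stand-ins, and only the two members the queue actually touches (adapter.rooms and to().emit()) are faked. Assumes the file is saved at the repo root next to server/ and run with node.

// queue-sketch.js - exercise server/lib/command-queue.js against a fake namespace
const commandQueue = require('./server/lib/command-queue');

const rooms = new Map(); // empty map -> every device looks offline to the queue
const fakeDeviceNs = {
  adapter: { rooms },
  to: (room) => ({ emit: (event, payload) => console.log('emit', event, '->', room, payload) }),
};
// Stand-in for buildPlaylistPayload; the real one rebuilds from the DB at flush time.
const buildPayload = (deviceId) => ({ device_id: deviceId, items: [] });

// Device offline: the playlist update is only marked pending, the command is stored
// (last-of-type wins), and nothing is emitted yet.
console.log(commandQueue.queueOrEmitPlaylistUpdate(fakeDeviceNs, 'tv-lobby', buildPayload)); // { delivered: false, queued: true }
commandQueue.queueCommand('tv-lobby', 'screen_off', {});
console.log(commandQueue.getQueueDepth('tv-lobby')); // 2

// Device re-registers within COMMAND_QUEUE_TTL_MS: the register handler joins the room
// and calls flushQueue; both queued events are replayed to the device in order.
rooms.set('tv-lobby', new Set(['socket-abc']));
console.log(commandQueue.flushQueue(fakeDeviceNs, 'tv-lobby', buildPayload)); // { playlistUpdate: true, commands: 1 }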