mirror of
https://github.com/screentinker/screentinker.git
synced 2026-05-15 07:32:23 -06:00
fix(socket): offline debounce + truthful single-device command feedback
Two dashboard-accuracy improvements for issue #3. Disconnect debounce (5s): - Brief transient flaps (Engine.IO ping miss, eviction-then-reconnect, Wi-Fi blip) no longer immediately flip the device to offline in the dashboard. Disconnect handler now defers the offline transition; register handlers cancel the pending timer if reconnect lands in window. - Existing stale-disconnect guard kept as fast-path for the eviction case (no timer scheduled at all when the active heartbeat conn is already a different socket). - Re-check at timer fire compares socketIds: aborts only if a GENUINELY DIFFERENT socket reclaimed the device. Just the closing socket's own (not-yet-cleaned-up) entry is treated as stale and proceeds with offline transition. - Server-restart mid-grace is handled by the heartbeat checker safety net (existing component): any 'online' row with last_heartbeat older than heartbeatTimeout gets marked offline on next sweep. Truthful single-device command feedback: - dashboard:device-command handler now checks deviceNs.adapter.rooms for an active socket before emitting (matches the group-command route's pattern). - If room is empty, falls through to commandQueue.queueCommand (lazy require - if commit C is reverted, MODULE_NOT_FOUND is cached and every subsequent call gets consistent queued=false behavior). - Returns three-state ack to caller: { delivered, queued, reason }. - Server log line was misleading - now logs 'Command delivered to device X' vs 'Command for offline device X (queued=true/false)'. Frontend: - sendCommand() takes optional callback. Without one, fires-and-forgets (no behavior change for non-wired callers). With one, uses Socket.IO .timeout(5000).emit so the callback always fires (ack or no_ack). - Six device-detail command buttons wired to three-state toasts: reboot, shutdown, screen_off, screen_on, launch, update. 
- delivered: green/success toast (existing localized message) - queued: amber/warning toast (new generic message) - no_ack: red/error toast - fallback: red/error toast - Two callers intentionally left fire-and-forget: - window._sendCmd (generic remote-overlay keypress/touch helper) - enable_system_capture (has its own visual state machine; out of scope for this commit) Three new i18n keys (en.js only; other locales follow later): - device.toast.command_queued - device.toast.command_undeliverable - device.toast.command_no_ack Refs #3 Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
742d8c4b09
commit
f5ca26ae2d
|
|
@ -369,6 +369,9 @@ export default {
|
|||
'device.toast.launch_sent': 'Launch command sent',
|
||||
'device.toast.update_triggered': 'Update check triggered',
|
||||
'device.toast.remote_started': 'Remote session started',
|
||||
'device.toast.command_queued': '{cmd} — device offline, will deliver on reconnect',
|
||||
'device.toast.command_undeliverable': '{cmd} — device offline and queue unavailable',
|
||||
'device.toast.command_no_ack': '{cmd} — no server response',
|
||||
|
||||
// Settings
|
||||
'settings.title': 'Settings',
|
||||
|
|
|
|||
|
|
@ -119,8 +119,20 @@ export function sendKey(deviceId, keycode) {
|
|||
if (dashboardSocket) dashboardSocket.emit('dashboard:remote-key', { device_id: deviceId, keycode });
|
||||
}
|
||||
|
||||
// Send a device command over the dashboard socket.
//
// The optional callback receives the server-side ack: { delivered, queued, reason }.
// Callers without a callback keep firing-and-forgetting (no behavior change).
// With a callback, Socket.IO's .timeout() guarantees the callback always fires -
// either with the server's ack or, when no response arrives within 5s, with a
// synthesized { delivered: false, reason: 'no_ack' } result.
export function sendCommand(deviceId, type, payload, callback) {
  // No socket yet (connection not initialized) - nothing we can do.
  if (!dashboardSocket) return;
  if (typeof callback === 'function') {
    dashboardSocket.timeout(5000).emit('dashboard:device-command', { device_id: deviceId, type, payload }, (err, ack) => {
      // err is set when the 5s ack window elapsed; normalize both the
      // timeout and a missing ack payload to the same three-state shape.
      if (err) callback({ delivered: false, reason: 'no_ack' });
      else callback(ack || { delivered: false, reason: 'no_ack' });
    });
  } else {
    // Fire-and-forget path: identical to the pre-callback behavior.
    dashboardSocket.emit('dashboard:device-command', { device_id: deviceId, type, payload });
  }
}
|
||||
|
||||
// Expose the module's dashboard Socket.IO client to callers.
// NOTE(review): presumably null/undefined until the connection is established
// elsewhere in this module - confirm callers guard against that.
export function getSocket() { return dashboardSocket; }
|
||||
|
|
|
|||
|
|
@ -705,14 +705,26 @@ async function setupActions(device) {
|
|||
}, 3000);
|
||||
});
|
||||
|
||||
// Dispatch a device command and surface the three-state ack as a toast:
//   delivered            -> success toast using the command's own i18n key
//   queued               -> warning toast (offline, will deliver on reconnect)
//   no_ack / everything  -> error toast (no server response / queue unavailable)
function sendWithFeedback(type, cmdLabel, successKey) {
  sendCommand(device.id, type, {}, (ack) => {
    if (ack?.delivered) {
      showToast(t(successKey), 'success');
      return;
    }
    if (ack?.queued) {
      showToast(t('device.toast.command_queued', { cmd: cmdLabel }), 'warning');
      return;
    }
    // Neither delivered nor queued: distinguish a silent server from an
    // offline device whose queue is unavailable.
    const failureKey = ack?.reason === 'no_ack'
      ? 'device.toast.command_no_ack'
      : 'device.toast.command_undeliverable';
    showToast(t(failureKey, { cmd: cmdLabel }), 'error');
  });
}
|
||||
|
||||
// Reboot (double-click to confirm)
|
||||
const rebootBtn = document.getElementById('rebootBtn');
|
||||
let rebootConfirming = false;
|
||||
let rebootTimeout = null;
|
||||
rebootBtn?.addEventListener('click', () => {
|
||||
if (rebootConfirming) {
|
||||
sendCommand(device.id, 'reboot', {});
|
||||
showToast(t('device.toast.reboot_sent'), 'info');
|
||||
sendWithFeedback('reboot', 'Reboot', 'device.toast.reboot_sent');
|
||||
rebootConfirming = false;
|
||||
rebootBtn.textContent = t('device.ctl.reboot_device');
|
||||
return;
|
||||
|
|
@ -732,8 +744,7 @@ async function setupActions(device) {
|
|||
let shutdownTimeout = null;
|
||||
shutdownBtn?.addEventListener('click', () => {
|
||||
if (shutdownConfirming) {
|
||||
sendCommand(device.id, 'shutdown', {});
|
||||
showToast(t('device.toast.shutdown_sent'), 'info');
|
||||
sendWithFeedback('shutdown', 'Shutdown', 'device.toast.shutdown_sent');
|
||||
shutdownConfirming = false;
|
||||
shutdownBtn.textContent = t('device.ctl.shutdown');
|
||||
return;
|
||||
|
|
@ -753,26 +764,22 @@ async function setupActions(device) {
|
|||
|
||||
// Screen Off
|
||||
document.getElementById('screenOffBtn')?.addEventListener('click', () => {
|
||||
sendCommand(device.id, 'screen_off', {});
|
||||
showToast(t('device.toast.screen_off_sent'), 'info');
|
||||
sendWithFeedback('screen_off', 'Screen off', 'device.toast.screen_off_sent');
|
||||
});
|
||||
|
||||
// Screen On
|
||||
document.getElementById('screenOnBtn')?.addEventListener('click', () => {
|
||||
sendCommand(device.id, 'screen_on', {});
|
||||
showToast(t('device.toast.screen_on_sent'), 'info');
|
||||
sendWithFeedback('screen_on', 'Screen on', 'device.toast.screen_on_sent');
|
||||
});
|
||||
|
||||
// Launch Player
|
||||
document.getElementById('launchAppBtn')?.addEventListener('click', () => {
|
||||
sendCommand(device.id, 'launch', {});
|
||||
showToast(t('device.toast.launch_sent'), 'info');
|
||||
sendWithFeedback('launch', 'Launch', 'device.toast.launch_sent');
|
||||
});
|
||||
|
||||
// Force Update
|
||||
document.getElementById('forceUpdateBtn')?.addEventListener('click', () => {
|
||||
sendCommand(device.id, 'update', {});
|
||||
showToast(t('device.toast.update_triggered'), 'info');
|
||||
sendWithFeedback('update', 'Update', 'device.toast.update_triggered');
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -92,11 +92,30 @@ module.exports = function setupDashboardSocket(io) {
|
|||
console.log(`Remote session stopped for device ${device_id}`);
|
||||
});
|
||||
|
||||
// Dashboard-initiated command for a single device. Acks with a three-state
// result so the UI can show truthful feedback:
//   { delivered: true }                              - device socket got the emit
//   { delivered: false, queued, reason: 'offline' }  - offline; queued when possible
//   { delivered: false, reason: 'forbidden' }        - caller lacks write access
// The ack argument is only present when the client emitted with a callback;
// fire-and-forget callers pass no ack, hence every typeof guard below.
socket.on('dashboard:device-command', (data, ack) => {
  const { device_id, type, payload } = data;
  if (!canActOnDevice(socket, device_id, 'write')) {
    if (typeof ack === 'function') ack({ delivered: false, reason: 'forbidden' });
    return;
  }
  // Deliver immediately when the device has at least one live socket in its
  // room (same room-presence check the group-command route uses).
  const room = deviceNs.adapter.rooms.get(device_id);
  if (room && room.size > 0) {
    deviceNs.to(device_id).emit('device:command', { type, payload });
    console.log(`Command delivered to device ${device_id}: ${type}`);
    if (typeof ack === 'function') ack({ delivered: true });
    return;
  }
  // Device offline at emit time. Try to queue (lazy require so reverting
  // the queue commit doesn't break this commit - if the module is absent,
  // every call takes this same catch path and consistently reports
  // queued=false).
  let queued = false;
  try {
    const queue = require('../lib/command-queue');
    queued = queue.queueCommand(device_id, type, payload);
  } catch (e) { /* command-queue module absent; fall through to lost */ }
  console.log(`Command for offline device ${device_id}: ${type} (queued=${queued})`);
  if (typeof ack === 'function') ack({ delivered: false, queued, reason: 'offline' });
});
|
||||
|
||||
socket.on('disconnect', () => {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,18 @@ const { db, pruneTelemetry, pruneScreenshots } = require('../db/database');
|
|||
const config = require('../config');
|
||||
const heartbeat = require('../services/heartbeat');
|
||||
const commandQueue = require('../lib/command-queue');
|
||||
|
||||
// Debounce window for marking a device offline on socket disconnect. Brief
|
||||
// flap (Wi-Fi blip, Engine.IO ping miss, server-side eviction-then-reconnect)
|
||||
// shouldn't toggle the dashboard. If a fresh register lands within this
|
||||
// window, the pending offline transition is cancelled. Per-device timer is
|
||||
// stored here; cleared by the register handlers and by stale-disconnect
|
||||
// guards. In-memory only - the heartbeat checker is the safety net for
|
||||
// server-restart-during-grace-window edge cases (any 'online' rows whose
|
||||
// last_heartbeat is older than heartbeatTimeout get marked offline by the
|
||||
// next checker sweep within heartbeatInterval).
|
||||
const pendingOfflines = new Map();
|
||||
const OFFLINE_DEBOUNCE_MS = 5000;
|
||||
const { getUserPlan, getUserDeviceCount } = require('../middleware/subscription');
|
||||
// Phase 2.3: deviceRoom() resolves a device_id to its workspace room so
|
||||
// dashboardNs.emit can be scoped instead of broadcast platform-wide.
|
||||
|
|
@ -243,6 +255,11 @@ module.exports = function setupDeviceSocket(io) {
|
|||
db.prepare('UPDATE devices SET device_token = ? WHERE id = ?').run(newToken, existing.device_id);
|
||||
console.log(`Fingerprint match: linking reinstalled app to existing device ${existing.device_id} (new token issued)`);
|
||||
authenticated = true;
|
||||
// Cancel any pending offline timer - device is back in the grace window
|
||||
if (pendingOfflines.has(existing.device_id)) {
|
||||
clearTimeout(pendingOfflines.get(existing.device_id));
|
||||
pendingOfflines.delete(existing.device_id);
|
||||
}
|
||||
evictPriorSocket(existing.device_id, socket.id);
|
||||
db.prepare("UPDATE devices SET status = 'online', last_heartbeat = strftime('%s','now'), ip_address = ?, updated_at = strftime('%s','now') WHERE id = ?")
|
||||
.run(getClientIp(socket), existing.device_id);
|
||||
|
|
@ -290,6 +307,11 @@ module.exports = function setupDeviceSocket(io) {
|
|||
|
||||
currentDeviceId = device_id;
|
||||
authenticated = true;
|
||||
// Cancel any pending offline timer - device is back in the grace window
|
||||
if (pendingOfflines.has(device_id)) {
|
||||
clearTimeout(pendingOfflines.get(device_id));
|
||||
pendingOfflines.delete(device_id);
|
||||
}
|
||||
evictPriorSocket(device_id, socket.id);
|
||||
db.prepare("UPDATE devices SET status = 'online', last_heartbeat = strftime('%s','now'), ip_address = ?, updated_at = strftime('%s','now') WHERE id = ?")
|
||||
.run(getClientIp(socket), device_id);
|
||||
|
|
@ -565,41 +587,57 @@ module.exports = function setupDeviceSocket(io) {
|
|||
});
|
||||
|
||||
socket.on('disconnect', () => {
|
||||
if (currentDeviceId) {
|
||||
// If a newer socket has already taken over this device_id, this is a stale
|
||||
// disconnect from a replaced socket — skip the offline transition so we don't
|
||||
// flip an actively-connected device offline or clobber the new heartbeat entry.
|
||||
if (!currentDeviceId) return;
|
||||
|
||||
// Stale-disconnect guard: a newer socket already took over this device_id
|
||||
// via eviction. Skip the offline transition entirely - don't even start a
|
||||
// debounce timer.
|
||||
const activeConn = heartbeat.getConnection(currentDeviceId);
|
||||
if (activeConn && activeConn.socketId !== socket.id) {
|
||||
console.log(`Stale disconnect for ${currentDeviceId} (socket ${socket.id}); active is ${activeConn.socketId}, skipping offline`);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`Device disconnected: ${currentDeviceId}`);
|
||||
db.prepare("UPDATE devices SET status = 'offline', updated_at = strftime('%s','now') WHERE id = ?")
|
||||
.run(currentDeviceId);
|
||||
heartbeat.removeConnection(currentDeviceId);
|
||||
logDeviceStatus(currentDeviceId, 'offline');
|
||||
emitToDeviceWorkspace(dashboardNs, currentDeviceId, 'dashboard:device-status', { device_id: currentDeviceId, status: 'offline' });
|
||||
const deviceId = currentDeviceId;
|
||||
const closingSocketId = socket.id;
|
||||
console.log(`Device disconnected: ${deviceId} (offline transition deferred ${OFFLINE_DEBOUNCE_MS}ms)`);
|
||||
|
||||
// Defensive: clear any existing timer for this device. Shouldn't happen
|
||||
// (register would have cleared it), but if two disconnects fire in
|
||||
// sequence we want the second to refresh the window, not double up.
|
||||
if (pendingOfflines.has(deviceId)) clearTimeout(pendingOfflines.get(deviceId));
|
||||
|
||||
pendingOfflines.set(deviceId, setTimeout(() => {
|
||||
pendingOfflines.delete(deviceId);
|
||||
// Re-check at fire time: did a DIFFERENT socket reclaim during the
|
||||
// grace window? If activeConn exists but it's still our (now-closed)
|
||||
// socket's entry, the entry is just stale - heartbeat.removeConnection
|
||||
// hasn't run yet because we defer it inside this same block. Only
|
||||
// abort if a genuinely different socket has registered.
|
||||
const activeNow = heartbeat.getConnection(deviceId);
|
||||
if (activeNow && activeNow.socketId !== closingSocketId) return;
|
||||
|
||||
db.prepare("UPDATE devices SET status = 'offline', updated_at = strftime('%s','now') WHERE id = ?").run(deviceId);
|
||||
heartbeat.removeConnection(deviceId);
|
||||
logDeviceStatus(deviceId, 'offline');
|
||||
emitToDeviceWorkspace(dashboardNs, deviceId, 'dashboard:device-status', { device_id: deviceId, status: 'offline' });
|
||||
|
||||
// If this device was leading a wall, reassign leadership to the next
|
||||
// online member so playback stays driven. Without this the wall freezes
|
||||
// when the leader drops.
|
||||
// online member so playback stays driven.
|
||||
try {
|
||||
const wall = db.prepare('SELECT id FROM video_walls WHERE leader_device_id = ?').get(currentDeviceId);
|
||||
const wall = db.prepare('SELECT id FROM video_walls WHERE leader_device_id = ?').get(deviceId);
|
||||
if (wall) {
|
||||
const candidates = db.prepare(`
|
||||
SELECT vwd.device_id FROM video_wall_devices vwd
|
||||
JOIN devices d ON d.id = vwd.device_id
|
||||
WHERE vwd.wall_id = ? AND d.status = 'online' AND vwd.device_id != ?
|
||||
ORDER BY vwd.grid_row, vwd.grid_col LIMIT 1
|
||||
`).all(wall.id, currentDeviceId);
|
||||
`).all(wall.id, deviceId);
|
||||
const newLeader = candidates[0]?.device_id || null;
|
||||
db.prepare('UPDATE video_walls SET leader_device_id = ? WHERE id = ?').run(newLeader, wall.id);
|
||||
// Notify the new leader (and refresh peers' is_leader flags).
|
||||
const members = db.prepare('SELECT device_id FROM video_wall_devices WHERE wall_id = ?').all(wall.id);
|
||||
for (const m of members) {
|
||||
if (m.device_id !== currentDeviceId) {
|
||||
if (m.device_id !== deviceId) {
|
||||
commandQueue.queueOrEmitPlaylistUpdate(deviceNs, m.device_id, buildPlaylistPayload);
|
||||
}
|
||||
}
|
||||
|
|
@ -607,26 +645,24 @@ module.exports = function setupDeviceSocket(io) {
|
|||
} catch (e) { console.error('Wall leader reassign failed:', e.message); }
|
||||
|
||||
// Save last screenshot to disk as offline snapshot
|
||||
const lastB64 = lastScreenshots[currentDeviceId];
|
||||
const lastB64 = lastScreenshots[deviceId];
|
||||
if (lastB64) {
|
||||
try {
|
||||
const filename = `${currentDeviceId}_latest.jpg`;
|
||||
const filename = `${deviceId}_latest.jpg`;
|
||||
const buffer = Buffer.from(lastB64, 'base64');
|
||||
fs.writeFileSync(path.join(config.screenshotsDir, filename), buffer);
|
||||
// Upsert screenshot record
|
||||
const existing = db.prepare('SELECT id FROM screenshots WHERE device_id = ?').get(currentDeviceId);
|
||||
const existing = db.prepare('SELECT id FROM screenshots WHERE device_id = ?').get(deviceId);
|
||||
if (existing) {
|
||||
db.prepare('UPDATE screenshots SET filepath = ?, captured_at = strftime(\'%s\',\'now\') WHERE device_id = ?')
|
||||
.run(filename, currentDeviceId);
|
||||
db.prepare('UPDATE screenshots SET filepath = ?, captured_at = strftime(\'%s\',\'now\') WHERE device_id = ?').run(filename, deviceId);
|
||||
} else {
|
||||
db.prepare('INSERT INTO screenshots (device_id, filepath) VALUES (?, ?)').run(currentDeviceId, filename);
|
||||
db.prepare('INSERT INTO screenshots (device_id, filepath) VALUES (?, ?)').run(deviceId, filename);
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('Failed to save offline screenshot:', e.message);
|
||||
}
|
||||
delete lastScreenshots[currentDeviceId];
|
||||
}
|
||||
delete lastScreenshots[deviceId];
|
||||
}
|
||||
}, OFFLINE_DEBOUNCE_MS));
|
||||
});
|
||||
});
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue