mirror of
https://github.com/screentinker/screentinker.git
synced 2026-05-15 07:32:23 -06:00
Two dashboard-accuracy improvements for issue #3. Disconnect debounce (5s): - Brief transient flaps (Engine.IO ping miss, eviction-then-reconnect, Wi-Fi blip) no longer immediately flip the device to offline in the dashboard. Disconnect handler now defers the offline transition; register handlers cancel the pending timer if reconnect lands in window. - Existing stale-disconnect guard kept as fast-path for the eviction case (no timer scheduled at all when the active heartbeat conn is already a different socket). - Re-check at timer fire compares socketIds: aborts only if a GENUINELY DIFFERENT socket reclaimed the device. Just the closing socket's own (not-yet-cleaned-up) entry is treated as stale and proceeds with offline transition. - Server-restart mid-grace is handled by the heartbeat checker safety net (existing component): any 'online' row with last_heartbeat older than heartbeatTimeout gets marked offline on next sweep. Truthful single-device command feedback: - dashboard:device-command handler now checks deviceNs.adapter.rooms for an active socket before emitting (matches the group-command route's pattern). - If room is empty, falls through to commandQueue.queueCommand (lazy require - if commit C is reverted, MODULE_NOT_FOUND is cached and every subsequent call gets consistent queued=false behavior). - Returns three-state ack to caller: { delivered, queued, reason }. - Server log line was misleading - now logs 'Command delivered to device X' vs 'Command for offline device X (queued=true/false)'. Frontend: - sendCommand() takes optional callback. Without one, fires-and-forgets (no behavior change for non-wired callers). With one, uses Socket.IO .timeout(5000).emit so the callback always fires (ack or no_ack). - Six device-detail command buttons wired to three-state toasts: reboot, shutdown, screen_off, screen_on, launch, update. 
- delivered: green/success toast (existing localized message) - queued: amber/warning toast (new generic message) - no_ack: red/error toast - fallback: red/error toast - Two callers intentionally left fire-and-forget: - window._sendCmd (generic remote-overlay keypress/touch helper) - enable_system_capture (has its own visual state machine; out of scope for this commit) Three new i18n keys (en.js only; other locales follow later): - device.toast.command_queued - device.toast.command_undeliverable - device.toast.command_no_ack Refs #3 Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
129 lines · 5.7 KiB · JavaScript
const heartbeat = require('../services/heartbeat');
|
|
const { verifyToken } = require('../middleware/auth');
|
|
const { db } = require('../db/database');
|
|
const { accessContext, accessibleWorkspaceIds } = require('../lib/tenancy');
|
|
const { workspaceRoom } = require('../lib/socket-rooms');
|
|
|
|
// Phase 2.3: workspace-scoped socket rooms + per-command permission gates.
// Replaces the previous flat dashboardNs.emit broadcast (which leaked every
// device's status/screenshot/playback events to every connected dashboard)
// and the legacy admin/superadmin role bypass (dead code post-Phase-1
// rename - admin -> user, superadmin -> platform_admin).
//
// On connect: enumerate the user's accessible workspace_ids and socket.join
// a room per workspace. Outbound broadcasts route via dashboardNs.to(room).
// Inbound commands check permission against the target device's workspace.
|
/**
 * Permission gate for inbound socket commands.
 *
 * Read tier: any workspace role (workspace_viewer and above).
 * Write tier: workspace_editor or workspace_admin only.
 * Elevated contexts (platform_admin, org owner/admin) always pass via actingAs.
 *
 * @param {object} socket - authenticated dashboard socket (userId/userRole set by middleware)
 * @param {string} deviceId - target device primary key
 * @param {'read'|'write'} tier - permission tier required for the command
 * @returns {boolean} true when the user may act on the device at this tier
 */
function canActOnDevice(socket, deviceId, tier /* 'read' | 'write' */) {
  const deviceRow = db
    .prepare('SELECT workspace_id FROM devices WHERE id = ?')
    .get(deviceId);
  // Unknown device, or device not yet assigned to a workspace: deny.
  if (!deviceRow?.workspace_id) return false;

  const workspace = db
    .prepare('SELECT * FROM workspaces WHERE id = ?')
    .get(deviceRow.workspace_id);
  if (!workspace) return false;

  const ctx = accessContext(socket.userId, socket.userRole, workspace);
  if (!ctx) return false;

  // platform_admin or org admin: full access regardless of workspace role.
  if (ctx.actingAs) return true;

  switch (tier) {
    case 'read':
      // Any workspace membership (viewer/editor/admin) suffices for reads.
      return Boolean(ctx.workspaceRole);
    default:
      // Write tier: only editor or admin within the workspace.
      return ['workspace_editor', 'workspace_admin'].includes(ctx.workspaceRole);
  }
}
|
|
|
|
module.exports = function setupDashboardSocket(io) {
|
|
const dashboardNs = io.of('/dashboard');
|
|
const deviceNs = io.of('/device');
|
|
|
|
dashboardNs.use((socket, next) => {
|
|
const token = socket.handshake.auth?.token;
|
|
if (!token) return next(new Error('Authentication required'));
|
|
try {
|
|
const decoded = verifyToken(token);
|
|
socket.userId = decoded.id;
|
|
socket.userRole = decoded.role;
|
|
next();
|
|
} catch {
|
|
next(new Error('Invalid token'));
|
|
}
|
|
});
|
|
|
|
dashboardNs.on('connection', (socket) => {
|
|
// Note on workspace-switch lifecycle: the switcher (Phase 3 MVP) calls
|
|
// window.location.reload() after switching, which forces a new socket
|
|
// connection with fresh JWT claims. So workspace memberships are
|
|
// re-evaluated at connect time and we don't need to re-evaluate per-emit.
|
|
const wsIds = accessibleWorkspaceIds(socket.userId, socket.userRole);
|
|
for (const wsId of wsIds) socket.join(workspaceRoom(wsId));
|
|
console.log(`Dashboard client connected: ${socket.id} (user: ${socket.userId}, rooms: ${wsIds.length})`);
|
|
|
|
socket.on('dashboard:request-screenshot', (data) => {
|
|
const { device_id } = data;
|
|
if (!canActOnDevice(socket, device_id, 'read')) return;
|
|
const conn = heartbeat.getConnection(device_id);
|
|
if (conn) deviceNs.to(device_id).emit('device:screenshot-request', {});
|
|
});
|
|
|
|
socket.on('dashboard:remote-touch', (data) => {
|
|
const { device_id, x, y, action } = data;
|
|
if (!canActOnDevice(socket, device_id, 'write')) return;
|
|
deviceNs.to(device_id).emit('device:remote-touch', { x, y, action });
|
|
});
|
|
|
|
socket.on('dashboard:remote-key', (data) => {
|
|
const { device_id, keycode } = data;
|
|
if (!canActOnDevice(socket, device_id, 'write')) return;
|
|
console.log(`Remote key: ${keycode} -> ${device_id}`);
|
|
deviceNs.to(device_id).emit('device:remote-key', { keycode });
|
|
});
|
|
|
|
socket.on('dashboard:remote-start', (data) => {
|
|
const { device_id } = data;
|
|
if (!canActOnDevice(socket, device_id, 'write')) return;
|
|
const room = deviceNs.adapter.rooms.get(device_id);
|
|
console.log(`Remote start for ${device_id}, room has ${room?.size || 0} socket(s)`);
|
|
deviceNs.to(device_id).emit('device:remote-start', {});
|
|
console.log(`Remote session started for device ${device_id}`);
|
|
});
|
|
|
|
socket.on('dashboard:remote-stop', (data) => {
|
|
const { device_id } = data;
|
|
if (!canActOnDevice(socket, device_id, 'write')) return;
|
|
deviceNs.to(device_id).emit('device:remote-stop', {});
|
|
console.log(`Remote session stopped for device ${device_id}`);
|
|
});
|
|
|
|
socket.on('dashboard:device-command', (data, ack) => {
|
|
const { device_id, type, payload } = data;
|
|
if (!canActOnDevice(socket, device_id, 'write')) {
|
|
if (typeof ack === 'function') ack({ delivered: false, reason: 'forbidden' });
|
|
return;
|
|
}
|
|
const room = deviceNs.adapter.rooms.get(device_id);
|
|
if (room && room.size > 0) {
|
|
deviceNs.to(device_id).emit('device:command', { type, payload });
|
|
console.log(`Command delivered to device ${device_id}: ${type}`);
|
|
if (typeof ack === 'function') ack({ delivered: true });
|
|
return;
|
|
}
|
|
// Device offline at emit time. Try to queue (lazy require so reverting
|
|
// the queue commit doesn't break this commit - MODULE_NOT_FOUND on the
|
|
// first try gets cached by Node's module loader, giving consistent
|
|
// queued=false behavior on every subsequent call).
|
|
let queued = false;
|
|
try {
|
|
const queue = require('../lib/command-queue');
|
|
queued = queue.queueCommand(device_id, type, payload);
|
|
} catch (e) { /* command-queue module absent; fall through to lost */ }
|
|
console.log(`Command for offline device ${device_id}: ${type} (queued=${queued})`);
|
|
if (typeof ack === 'function') ack({ delivered: false, queued, reason: 'offline' });
|
|
});
|
|
|
|
socket.on('disconnect', () => {
|
|
console.log(`Dashboard client disconnected: ${socket.id}`);
|
|
});
|
|
});
|
|
|
|
return dashboardNs;
|
|
};
|
|
|