From 3f05b6655c9b28511fdf1c3d2963d80c2cec5efd Mon Sep 17 00:00:00 2001
From: ed
Date: Mon, 11 Mar 2024 01:32:02 +0100
Subject: [PATCH] add UI to abort an unfinished upload; suggested in #77
to abort an upload, refresh the page and access the unpost tab,
which now includes unfinished uploads (sorted before completed ones).
the abort behavior can be configured through u2abort (global option or volflag);
by default it requires both the IP and account to match
https://a.ocv.me/pub/g/nerd-stuff/2024-0310-stoltzekleiven.jpg
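
a quick sketch of how this might be enabled (volume path and account name are
made up for illustration; the numeric values are the ones defined by --u2abort):

  # global: clients may abort their unfinished uploads if the IP matches,
  # even when they are not logged into the same account
  python3 -m copyparty -e2d --u2abort 2

  # per-volume override: only the account name has to match
  python3 -m copyparty -e2d -v srv/inc:inc:rwd,ed:c,u2abort=3

-e2d is needed either way, since the list of unfinished uploads is read from
the up2k registry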
---
copyparty/__main__.py | 1 +
copyparty/authsrv.py | 9 +++--
copyparty/cfg.py | 2 +
copyparty/ftpd.py | 2 +-
copyparty/httpcli.py | 25 ++++++++----
copyparty/metrics.py | 3 ++
copyparty/smbd.py | 2 +-
copyparty/tftpd.py | 2 +-
copyparty/up2k.py | 84 +++++++++++++++++++++++++++++++++------
copyparty/web/browser.css | 4 ++
copyparty/web/browser.js | 58 ++++++++++++++++++++-------
docs/TODO.md | 4 --
tests/util.py | 4 +-
13 files changed, 153 insertions(+), 47 deletions(-)
diff --git a/copyparty/__main__.py b/copyparty/__main__.py
index 437ddd65..cd684544 100755
--- a/copyparty/__main__.py
+++ b/copyparty/__main__.py
@@ -871,6 +871,7 @@ def add_upload(ap):
ap2.add_argument("--dotpart", action="store_true", help="dotfile incomplete uploads, hiding them from clients unless \033[33m-ed\033[0m")
ap2.add_argument("--plain-ip", action="store_true", help="when avoiding filename collisions by appending the uploader's ip to the filename: append the plaintext ip instead of salting and hashing the ip")
ap2.add_argument("--unpost", metavar="SEC", type=int, default=3600*12, help="grace period where uploads can be deleted by the uploader, even without delete permissions; 0=disabled, default=12h")
+ ap2.add_argument("--u2abort", metavar="NUM", type=int, default=1, help="clients can abort incomplete uploads by using the unpost tab (requires \033[33m-e2d\033[0m). [\033[32m0\033[0m] = never allowed (disable feature), [\033[32m1\033[0m] = allow if client has the same IP as the upload AND is using the same account, [\033[32m2\033[0m] = just check the IP, [\033[32m3\033[0m] = just check account-name (volflag=u2abort)")
ap2.add_argument("--blank-wt", metavar="SEC", type=int, default=300, help="file write grace period (any client can write to a blank file last-modified more recently than \033[33mSEC\033[0m seconds ago)")
ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without \033[33m-e2d\033[0m; roughly 1 MiB RAM per 600")
ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (bad idea to enable this on windows and/or cow filesystems)")
diff --git a/copyparty/authsrv.py b/copyparty/authsrv.py
index cf7b066a..38ea34d1 100644
--- a/copyparty/authsrv.py
+++ b/copyparty/authsrv.py
@@ -1485,7 +1485,7 @@ class AuthSrv(object):
if k not in vol.flags:
vol.flags[k] = getattr(self.args, k)
- for k in ("nrand",):
+ for k in ("nrand", "u2abort"):
if k in vol.flags:
vol.flags[k] = int(vol.flags[k])
@@ -2101,7 +2101,9 @@ def split_cfg_ln(ln: str) -> dict[str, Any]:
return ret
-def expand_config_file(log: Optional["NamedLogger"], ret: list[str], fp: str, ipath: str) -> None:
+def expand_config_file(
+ log: Optional["NamedLogger"], ret: list[str], fp: str, ipath: str
+) -> None:
"""expand all % file includes"""
fp = absreal(fp)
if len(ipath.split(" -> ")) > 64:
@@ -2137,7 +2139,8 @@ def expand_config_file(log: Optional["NamedLogger"], ret: list[str], fp: str, ip
return
if not os.path.exists(fp):
- t = "warning: tried to read config from '%s' but the file/folder does not exist" % (fp,)
+ t = "warning: tried to read config from '%s' but the file/folder does not exist"
+ t = t % (fp,)
if log:
log(t, 3)
diff --git a/copyparty/cfg.py b/copyparty/cfg.py
index abfb6c13..9781979f 100644
--- a/copyparty/cfg.py
+++ b/copyparty/cfg.py
@@ -66,6 +66,7 @@ def vf_vmap() -> dict[str, str]:
"rm_retry",
"sort",
"unlist",
+ "u2abort",
"u2ts",
):
ret[k] = k
@@ -131,6 +132,7 @@ flagcats = {
"rand": "force randomized filenames, 9 chars long by default",
"nrand=N": "randomized filenames are N chars long",
"u2ts=fc": "[f]orce [c]lient-last-modified or [u]pload-time",
+ "u2abort=1": "allow aborting unfinished uploads? 0=no 1=strict 2=ip-chk 3=acct-chk",
"sz=1k-3m": "allow filesizes between 1 KiB and 3MiB",
"df=1g": "ensure 1 GiB free disk space",
},
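
for config files, the volflag form would presumably look something like this
(hypothetical volume, assuming the usual volfile layout with accs/flags blocks):

  [/inc]
    srv/inc
    accs:
      rwd: ed
    flags:
      e2d
      u2abort: 2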
diff --git a/copyparty/ftpd.py b/copyparty/ftpd.py
index 4d72c4b1..4d9c4879 100644
--- a/copyparty/ftpd.py
+++ b/copyparty/ftpd.py
@@ -300,7 +300,7 @@ class FtpFs(AbstractedFS):
vp = join(self.cwd, path).lstrip("/")
try:
- self.hub.up2k.handle_rm(self.uname, self.h.cli_ip, [vp], [], False)
+ self.hub.up2k.handle_rm(self.uname, self.h.cli_ip, [vp], [], False, False)
except Exception as ex:
raise FSE(str(ex))
diff --git a/copyparty/httpcli.py b/copyparty/httpcli.py
index 818498d0..a6c20348 100644
--- a/copyparty/httpcli.py
+++ b/copyparty/httpcli.py
@@ -3550,8 +3550,7 @@ class HttpCli(object):
return ret
def tx_ups(self) -> bool:
- if not self.args.unpost:
- raise Pebkac(403, "the unpost feature is disabled in server config")
+ have_unpost = self.args.unpost and "e2d" in self.vn.flags
idx = self.conn.get_u2idx()
if not idx or not hasattr(idx, "p_end"):
@@ -3570,7 +3569,14 @@ class HttpCli(object):
if "fk" in vol.flags
and (self.uname in vol.axs.uread or self.uname in vol.axs.upget)
}
- for vol in self.asrv.vfs.all_vols.values():
+
+ x = self.conn.hsrv.broker.ask(
+ "up2k.get_unfinished_by_user", self.uname, self.ip
+ )
+ uret = x.get()
+
+ allvols = self.asrv.vfs.all_vols if have_unpost else {}
+ for vol in allvols.values():
cur = idx.get_cur(vol.realpath)
if not cur:
continue
@@ -3622,9 +3628,13 @@ class HttpCli(object):
for v in ret:
v["vp"] = self.args.SR + v["vp"]
- jtxt = json.dumps(ret, indent=2, sort_keys=True).encode("utf-8", "replace")
- self.log("{} #{} {:.2f}sec".format(lm, len(ret), time.time() - t0))
- self.reply(jtxt, mime="application/json")
+ if not have_unpost:
+ ret = [{"kinshi":1}]
+
+ jtxt = '{"u":%s,"c":%s}' % (uret, json.dumps(ret, indent=0))
+ zi = len(uret.split('\n"pd":')) - 1
+ self.log("%s #%d+%d %.2fsec" % (lm, zi, len(ret), time.time() - t0))
+ self.reply(jtxt.encode("utf-8", "replace"), mime="application/json")
return True
def handle_rm(self, req: list[str]) -> bool:
@@ -3639,11 +3649,12 @@ class HttpCli(object):
elif self.is_vproxied:
req = [x[len(self.args.SR) :] for x in req]
+ unpost = "unpost" in self.uparam
nlim = int(self.uparam.get("lim") or 0)
lim = [nlim, nlim] if nlim else []
x = self.conn.hsrv.broker.ask(
- "up2k.handle_rm", self.uname, self.ip, req, lim, False
+ "up2k.handle_rm", self.uname, self.ip, req, lim, False, unpost
)
self.loud_reply(x.get())
return True
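
the unpost listing built by tx_ups (above) is no longer a flat json list; it is
now a dict with unfinished uploads under "u" and completed ones under "c".
a rough sketch of the new shape, with invented values:

  ups = {
      "u": [  # from up2k.get_unfinished_by_user(); pd = percent done
          {"at": 1710115922, "vp": "/inc/big.iso", "pd": 69, "sz": 3133731337},
      ],
      "c": [  # same entries as the old flat list,
              # or [{"kinshi": 1}] when unpost of completed uploads is disabled;
              # "u" becomes [{"timeout": 1}] if the up2k mutex was busy
      ],
  }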
diff --git a/copyparty/metrics.py b/copyparty/metrics.py
index 72e86fdb..3af8be9d 100644
--- a/copyparty/metrics.py
+++ b/copyparty/metrics.py
@@ -206,6 +206,9 @@ class Metrics(object):
try:
x = self.hsrv.broker.ask("up2k.get_unfinished")
xs = x.get()
+ if not xs:
+ raise Exception("up2k mutex acquisition timed out")
+
xj = json.loads(xs)
for ptop, (nbytes, nfiles) in xj.items():
tnbytes += nbytes
diff --git a/copyparty/smbd.py b/copyparty/smbd.py
index 1e97386d..979c11df 100644
--- a/copyparty/smbd.py
+++ b/copyparty/smbd.py
@@ -340,7 +340,7 @@ class SMB(object):
yeet("blocked delete (no-del-acc): " + vpath)
vpath = vpath.replace("\\", "/").lstrip("/")
- self.hub.up2k.handle_rm(uname, "1.7.6.2", [vpath], [], False)
+ self.hub.up2k.handle_rm(uname, "1.7.6.2", [vpath], [], False, False)
def _utime(self, vpath: str, times: tuple[float, float]) -> None:
if not self.args.smbw:
diff --git a/copyparty/tftpd.py b/copyparty/tftpd.py
index 0020e96a..7b09533a 100644
--- a/copyparty/tftpd.py
+++ b/copyparty/tftpd.py
@@ -360,7 +360,7 @@ class Tftpd(object):
yeet("attempted delete of non-empty file")
vpath = vpath.replace("\\", "/").lstrip("/")
- self.hub.up2k.handle_rm("*", "8.3.8.7", [vpath], [], False)
+ self.hub.up2k.handle_rm("*", "8.3.8.7", [vpath], [], False, False)
def _access(self, *a: Any) -> bool:
return True
diff --git a/copyparty/up2k.py b/copyparty/up2k.py
index 372d0c6a..5c26e6a5 100644
--- a/copyparty/up2k.py
+++ b/copyparty/up2k.py
@@ -282,9 +282,44 @@ class Up2k(object):
}
return json.dumps(ret, indent=4)
+ def get_unfinished_by_user(self, uname: str, ip: str) -> str:
+ if PY2 or not self.mutex.acquire(timeout=2):
+ return '[{"timeout":1}]'
+
+ ret: list[tuple[int, str, int, int, int]] = []
+ try:
+ for ptop, tab2 in self.registry.items():
+ cfg = self.flags.get(ptop, {}).get("u2abort", 1)
+ if not cfg:
+ continue
+ addr = (ip or "\n") if cfg in (1, 2) else ""
+ user = (uname or "\n") if cfg in (1, 3) else ""
+ drp = self.droppable.get(ptop, {})
+ for wark, job in tab2.items():
+ if wark in drp or (user and user != job["user"]) or (addr and addr != job["addr"]):
+ continue
+
+ zt5 = (
+ int(job["t0"]),
+ djoin(job["vtop"], job["prel"], job["name"]),
+ job["size"],
+ len(job["need"]),
+ len(job["hash"]),
+ )
+ ret.append(zt5)
+ finally:
+ self.mutex.release()
+
+ ret.sort(reverse=True)
+ ret2 = [
+ {"at": at, "vp": "/" + vp, "pd": 100 - ((nn * 100) // (nh or 1)), "sz": sz}
+ for (at, vp, sz, nn, nh) in ret
+ ]
+ return json.dumps(ret2, indent=0)
+
def get_unfinished(self) -> str:
if PY2 or not self.mutex.acquire(timeout=0.5):
- return "{}"
+ return ""
ret: dict[str, tuple[int, int]] = {}
try:
@@ -463,7 +498,7 @@ class Up2k(object):
if vp:
fvp = "%s/%s" % (vp, fvp)
- self._handle_rm(LEELOO_DALLAS, "", fvp, [], True)
+ self._handle_rm(LEELOO_DALLAS, "", fvp, [], True, False)
nrm += 1
if nrm:
@@ -2690,6 +2725,9 @@ class Up2k(object):
a = [job[x] for x in zs.split()]
self.db_add(cur, vfs.flags, *a)
cur.connection.commit()
+ elif wark in reg:
+ # checks out, but client may have hopped IPs
+ job["addr"] = cj["addr"]
if not job:
ap1 = djoin(cj["ptop"], cj["prel"])
@@ -3226,7 +3264,7 @@ class Up2k(object):
pass
def handle_rm(
- self, uname: str, ip: str, vpaths: list[str], lim: list[int], rm_up: bool
+ self, uname: str, ip: str, vpaths: list[str], lim: list[int], rm_up: bool, unpost: bool
) -> str:
n_files = 0
ok = {}
@@ -3236,7 +3274,7 @@ class Up2k(object):
self.log("hit delete limit of {} files".format(lim[1]), 3)
break
- a, b, c = self._handle_rm(uname, ip, vp, lim, rm_up)
+ a, b, c = self._handle_rm(uname, ip, vp, lim, rm_up, unpost)
n_files += a
for k in b:
ok[k] = 1
@@ -3250,25 +3288,42 @@ class Up2k(object):
return "deleted {} files (and {}/{} folders)".format(n_files, iok, iok + ing)
def _handle_rm(
- self, uname: str, ip: str, vpath: str, lim: list[int], rm_up: bool
+ self, uname: str, ip: str, vpath: str, lim: list[int], rm_up: bool, unpost: bool
) -> tuple[int, list[str], list[str]]:
self.db_act = time.time()
- try:
+ partial = ""
+ if not unpost:
permsets = [[True, False, False, True]]
vn, rem = self.asrv.vfs.get(vpath, uname, *permsets[0])
vn, rem = vn.get_dbv(rem)
- unpost = False
- except:
+ else:
# unpost with missing permissions? verify with db
- if not self.args.unpost:
- raise Pebkac(400, "the unpost feature is disabled in server config")
-
- unpost = True
permsets = [[False, True]]
vn, rem = self.asrv.vfs.get(vpath, uname, *permsets[0])
vn, rem = vn.get_dbv(rem)
+ ptop = vn.realpath
with self.mutex:
- _, _, _, _, dip, dat = self._find_from_vpath(vn.realpath, rem)
+ abrt_cfg = self.flags.get(ptop, {}).get("u2abort", 1)
+ addr = (ip or "\n") if abrt_cfg in (1, 2) else ""
+ user = (uname or "\n") if abrt_cfg in (1, 3) else ""
+ reg = self.registry.get(ptop, {}) if abrt_cfg else {}
+ for wark, job in reg.items():
+ if (user and user != job["user"]) or (addr and addr != job["addr"]):
+ continue
+ if djoin(job["prel"], job["name"]) == rem:
+ if job["ptop"] != ptop:
+ t = "job.ptop [%s] != vol.ptop [%s] ??"
+ raise Exception(t % (job["ptop"], ptop))
+ partial = vn.canonical(vjoin(job["prel"], job["tnam"]))
+ break
+ if partial:
+ dip = ip
+ dat = time.time()
+ else:
+ if not self.args.unpost:
+ raise Pebkac(400, "the unpost feature is disabled in server config")
+
+ _, _, _, _, dip, dat = self._find_from_vpath(ptop, rem)
t = "you cannot delete this: "
if not dip:
@@ -3361,6 +3416,9 @@ class Up2k(object):
cur.connection.commit()
wunlink(self.log, abspath, dbv.flags)
+ if partial:
+ wunlink(self.log, partial, dbv.flags)
+ partial = ""
if xad:
runhook(
self.log,
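
the ip/account gate used by both get_unfinished_by_user() and _handle_rm() above
boils down to the following rule; a standalone paraphrase, not part of the patch:

  def may_abort(cfg: int, req_ip: str, req_user: str, job: dict) -> bool:
      # cfg is the u2abort value: 0=never, 1=ip+account, 2=ip only, 3=account only
      if not cfg:
          return False
      if cfg in (1, 2) and req_ip != job["addr"]:
          return False
      if cfg in (1, 3) and req_user != job["user"]:
          return False
      return True

the "or \n" fallbacks in the real code just make sure that an empty ip or
username can never match anything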
diff --git a/copyparty/web/browser.css b/copyparty/web/browser.css
index f7a98132..c1e8595f 100644
--- a/copyparty/web/browser.css
+++ b/copyparty/web/browser.css
@@ -1839,6 +1839,10 @@ html.y #tree.nowrap .ntree a+a:hover {
margin: 0;
padding: 0;
}
+#unpost td:nth-child(3),
+#unpost td:nth-child(4) {
+ text-align: right;
+}
#rui {
background: #fff;
background: var(--bg);
diff --git a/copyparty/web/browser.js b/copyparty/web/browser.js
index 03907e5a..5c9ec3bc 100644
--- a/copyparty/web/browser.js
+++ b/copyparty/web/browser.js
@@ -102,7 +102,7 @@ var Ls = {
"access": " access",
"ot_close": "close submenu",
"ot_search": "search for files by attributes, path / name, music tags, or any combination of those$N$N<code>foo bar</code> = must contain both «foo» and «bar»,$N<code>foo -bar</code> = must contain «foo» but not «bar»,$N<code>^yana .opus$</code> = start with «yana» and be an «opus» file$N<code>"try unite"</code> = contain exactly «try unite»$N$Nthe date format is iso-8601, like$N<code>2009-12-31</code> or <code>2020-09-12 23:30:00</code>",
- "ot_unpost": "unpost: delete your recent uploads",
+ "ot_unpost": "unpost: delete your recent uploads, or abort unfinished ones",
"ot_bup": "bup: basic uploader, even supports netscape 4.0",
"ot_mkdir": "mkdir: create a new directory",
"ot_md": "new-md: create a new markdown document",
@@ -412,7 +412,7 @@ var Ls = {
"fz_zipd": "zip with traditional cp437 filenames, for really old software",
"fz_zipc": "cp437 with crc32 computed early,$Nfor MS-DOS PKZIP v2.04g (october 1993)$N(takes longer to process before download can start)",
- "un_m1": "you can delete your recent uploads below",
+ "un_m1": "you can delete your recent uploads (or abort unfinished ones) below",
"un_upd": "refresh",
"un_m4": "or share the files visible below:",
"un_ulist": "show",
@@ -421,12 +421,15 @@ var Ls = {
"un_fclr": "clear filter",
"un_derr": 'unpost-delete failed:\n',
"un_f5": 'something broke, please try a refresh or hit F5',
+ "un_nou": 'warning: server too busy to show unfinished uploads; click the "refresh" link in a bit',
+ "un_noc": 'warning: unpost of fully uploaded files is not enabled/permitted in server config',
"un_max": "showing first 2000 files (use the filter)",
- "un_avail": "{0} uploads can be deleted",
- "un_m2": "sorted by upload time – most recent first:",
+ "un_avail": "{0} recent uploads can be deleted
{1} unfinished ones can be aborted",
+ "un_m2": "sorted by upload time; most recent first:",
"un_no1": "sike! no uploads are sufficiently recent",
"un_no2": "sike! no uploads matching that filter are sufficiently recent",
"un_next": "delete the next {0} files below",
+ "un_abrt": "abort",
"un_del": "delete",
"un_m3": "loading your recent uploads...",
"un_busy": "deleting {0} files...",
@@ -912,7 +915,7 @@ var Ls = {
"fz_zipd": "zip med filnavn i cp437, for høggamle maskiner",
"fz_zipc": "cp437 med tidlig crc32,$Nfor MS-DOS PKZIP v2.04g (oktober 1993)$N(øker behandlingstid på server)",
- "un_m1": "nedenfor kan du angre / slette filer som du nylig har lastet opp",
+ "un_m1": "nedenfor kan du angre / slette filer som du nylig har lastet opp, eller avbryte ufullstendige opplastninger",
"un_upd": "oppdater",
"un_m4": "eller hvis du vil dele nedlastnings-lenkene:",
"un_ulist": "vis",
@@ -921,12 +924,15 @@ var Ls = {
"un_fclr": "nullstill filter",
"un_derr": 'unpost-sletting feilet:\n',
"un_f5": 'noe gikk galt, prøv å oppdatere listen eller trykk F5',
+ "un_nou": 'advarsel: kan ikke vise ufullstendige opplastninger akkurat nå; klikk på oppdater-linken om litt',
+ "un_noc": 'advarsel: angring av fullførte opplastninger er deaktivert i serverkonfigurasjonen',
"un_max": "viser de første 2000 filene (bruk filteret for å innsnevre)",
- "un_avail": "{0} filer kan slettes",
- "un_m2": "sortert etter opplastningstid – nyeste først:",
+ "un_avail": "{0} nylig opplastede filer kan slettes
{1} ufullstendige opplastninger kan avbrytes",
+ "un_m2": "sortert etter opplastningstid; nyeste først:",
"un_no1": "men nei, her var det jaggu ikkeno som slettes kan",
"un_no2": "men nei, her var det jaggu ingenting som passet overens med filteret",
"un_next": "slett de neste {0} filene nedenfor",
+ "un_abrt": "avbryt",
"un_del": "slett",
"un_m3": "henter listen med nylig opplastede filer...",
"un_busy": "sletter {0} filer...",
@@ -1030,7 +1036,7 @@ modal.load();
ebi('ops').innerHTML = (
'--' +
'🔎' +
- (have_del && have_unpost ? '🧯' : '') +
+ (have_del ? '🧯' : '') +
'🚀' +
'🎈' +
'📂' +
@@ -7883,19 +7889,38 @@ var unpost = (function () {
return ebi('op_unpost').innerHTML = L.fu_xe1;
try {
- var res = JSON.parse(this.responseText);
+ var ores = JSON.parse(this.responseText);
}
catch (ex) {
return ebi('op_unpost').innerHTML = '<p>' + L.badreply + ':</p>' + unpre(this.responseText);
}
+
+ if (ores.u.length == 1 && ores.u[0].timeout) {
+ html.push('<p>' + L.un_nou + '</p>');
+ ores.u = [];
+ }
+
+ if (ores.c.length == 1 && ores.c[0].kinshi) {
+ html.push('<p>' + L.un_noc + '</p>');
+ ores.c = [];
+ }
+
+ for (var a = 0; a < ores.u.length; a++)
+ ores.u[a].k = 'u';
+
+ for (var a = 0; a < ores.c.length; a++)
+ ores.c[a].k = 'c';
+
+ var res = ores.u.concat(ores.c);
+
 if (res.length) {
 if (res.length == 2000)
 html.push("<p>" + L.un_max);
 else
- html.push("<p>" + L.un_avail.format(res.length));
+ html.push("<p>" + L.un_avail.format(ores.c.length, ores.u.length));
 html.push(" – " + L.un_m2 + "</p>");
- html.push("<table><thead><tr><td></td><td>time</td><td>size</td><td>file</td></tr></thead><tbody>");
+ html.push("<table><thead><tr><td></td><td>time</td><td>size</td><td>done</td><td>file</td></tr></thead><tbody>");
 html.push('<tr><td></td><td><a href="#">' + L.un_next.format(Math.min(mods[b], res.length - a)) + '</a></td><td></td><td></td><td></td></tr>');
+ var done = res[a].k == 'c';
 html.push(
- '<tr><td><a href="#">' + L.un_del + '</a></td>' +
+ '<tr><td><a href="#">' + (done ? L.un_del : L.un_abrt) + '</a></td>' +
 '<td>' + unix2iso(res[a].at) + '</td>' +
- '<td>' + res[a].sz + '</td>' +
+ '<td>' + ('' + res[a].sz).replace(/\B(?=(\d{3})+(?!\d))/g, " ") + '</td>' +
+ (done ? '<td>100%</td>' : '<td>' + res[a].pd + '%</td>') +
 '<td>' + linksplit(res[a].vp).join(' / ') + '</td></tr>');