add UI to abort an unfinished upload; suggested in #77

to abort an upload, refresh the page and access the unpost tab,
which now includes unfinished uploads (sorted before completed ones)

can be configured through u2abort (global or volflag);
by default it requires both the IP and account to match

https://a.ocv.me/pub/g/nerd-stuff/2024-0310-stoltzekleiven.jpg
ed 2024-03-11 01:32:02 +01:00
parent 51a83b04a0
commit 3f05b6655c
13 changed files with 153 additions and 47 deletions
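
(editor's sketch, not part of the commit: a minimal client for the new flow, listing unfinished uploads and then aborting one; it assumes the unpost listing is still served at /?ups and that no authentication is needed, while the ?delete&unpost query string and the JSON request body are taken from the browser-side changes further down)

import json
from urllib.request import Request, urlopen

BASE = "http://127.0.0.1:3923"  # default copyparty port

# fetch the unpost listing; "u" = unfinished uploads, "c" = completed ones
with urlopen(BASE + "/?ups") as r:
    ores = json.loads(r.read().decode("utf-8"))

abortable = [x for x in ores["u"] if "timeout" not in x]
if abortable:
    vp = abortable[0]["vp"]  # server-side path of the incomplete upload
    req = Request(
        BASE + "/?delete&unpost&lim=1",
        data=json.dumps([vp]).encode("utf-8"),
        method="POST",
    )
    with urlopen(req) as r:
        print(r.read().decode("utf-8"))  # "deleted 1 files (and 0/0 folders)"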

@@ -871,6 +871,7 @@ def add_upload(ap):
ap2.add_argument("--dotpart", action="store_true", help="dotfile incomplete uploads, hiding them from clients unless \033[33m-ed\033[0m")
ap2.add_argument("--plain-ip", action="store_true", help="when avoiding filename collisions by appending the uploader's ip to the filename: append the plaintext ip instead of salting and hashing the ip")
ap2.add_argument("--unpost", metavar="SEC", type=int, default=3600*12, help="grace period where uploads can be deleted by the uploader, even without delete permissions; 0=disabled, default=12h")
ap2.add_argument("--u2abort", metavar="NUM", type=int, default=1, help="clients can abort incomplete uploads by using the unpost tab (requires \033[33m-e2d\033[0m). [\033[32m0\033[0m] = never allowed (disable feature), [\033[32m1\033[0m] = allow if client has the same IP as the upload AND is using the same account, [\033[32m2\033[0m] = just check the IP, [\033[32m3\033[0m] = just check account-name (volflag=u2abort)")
ap2.add_argument("--blank-wt", metavar="SEC", type=int, default=300, help="file write grace period (any client can write to a blank file last-modified more recently than \033[33mSEC\033[0m seconds ago)")
ap2.add_argument("--reg-cap", metavar="N", type=int, default=38400, help="max number of uploads to keep in memory when running without \033[33m-e2d\033[0m; roughly 1 MiB RAM per 600")
ap2.add_argument("--no-fpool", action="store_true", help="disable file-handle pooling -- instead, repeatedly close and reopen files during upload (bad idea to enable this on windows and/or cow filesystems)")

@@ -1485,7 +1485,7 @@ class AuthSrv(object):
if k not in vol.flags:
vol.flags[k] = getattr(self.args, k)
for k in ("nrand",):
for k in ("nrand", "u2abort"):
if k in vol.flags:
vol.flags[k] = int(vol.flags[k])
@@ -2101,7 +2101,9 @@ def split_cfg_ln(ln: str) -> dict[str, Any]:
return ret
def expand_config_file(log: Optional["NamedLogger"], ret: list[str], fp: str, ipath: str) -> None:
def expand_config_file(
log: Optional["NamedLogger"], ret: list[str], fp: str, ipath: str
) -> None:
"""expand all % file includes"""
fp = absreal(fp)
if len(ipath.split(" -> ")) > 64:
@@ -2137,7 +2139,8 @@ def expand_config_file(log: Optional["NamedLogger"], ret: list[str], fp: str, ip
return
if not os.path.exists(fp):
t = "warning: tried to read config from '%s' but the file/folder does not exist" % (fp,)
t = "warning: tried to read config from '%s' but the file/folder does not exist"
t = t % (fp,)
if log:
log(t, 3)

@@ -66,6 +66,7 @@ def vf_vmap() -> dict[str, str]:
"rm_retry",
"sort",
"unlist",
"u2abort",
"u2ts",
):
ret[k] = k
@@ -131,6 +132,7 @@ flagcats = {
"rand": "force randomized filenames, 9 chars long by default",
"nrand=N": "randomized filenames are N chars long",
"u2ts=fc": "[f]orce [c]lient-last-modified or [u]pload-time",
"u2abort=1": "allow aborting unfinished uploads? 0=no 1=strict 2=ip-chk 3=acct-chk",
"sz=1k-3m": "allow filesizes between 1 KiB and 3MiB",
"df=1g": "ensure 1 GiB free disk space",
},

@@ -300,7 +300,7 @@ class FtpFs(AbstractedFS):
vp = join(self.cwd, path).lstrip("/")
try:
self.hub.up2k.handle_rm(self.uname, self.h.cli_ip, [vp], [], False)
self.hub.up2k.handle_rm(self.uname, self.h.cli_ip, [vp], [], False, False)
except Exception as ex:
raise FSE(str(ex))

@@ -3550,8 +3550,7 @@ class HttpCli(object):
return ret
def tx_ups(self) -> bool:
if not self.args.unpost:
raise Pebkac(403, "the unpost feature is disabled in server config")
have_unpost = self.args.unpost and "e2d" in self.vn.flags
idx = self.conn.get_u2idx()
if not idx or not hasattr(idx, "p_end"):
@@ -3570,7 +3569,14 @@
if "fk" in vol.flags
and (self.uname in vol.axs.uread or self.uname in vol.axs.upget)
}
for vol in self.asrv.vfs.all_vols.values():
x = self.conn.hsrv.broker.ask(
"up2k.get_unfinished_by_user", self.uname, self.ip
)
uret = x.get()
allvols = self.asrv.vfs.all_vols if have_unpost else {}
for vol in allvols.values():
cur = idx.get_cur(vol.realpath)
if not cur:
continue
@@ -3622,9 +3628,13 @@
for v in ret:
v["vp"] = self.args.SR + v["vp"]
jtxt = json.dumps(ret, indent=2, sort_keys=True).encode("utf-8", "replace")
self.log("{} #{} {:.2f}sec".format(lm, len(ret), time.time() - t0))
self.reply(jtxt, mime="application/json")
if not have_unpost:
ret = [{"kinshi":1}]
jtxt = '{"u":%s,"c":%s}' % (uret, json.dumps(ret, indent=0))
zi = len(uret.split('\n"pd":')) - 1
self.log("%s #%d+%d %.2fsec" % (lm, zi, len(ret), time.time() - t0))
self.reply(jtxt.encode("utf-8", "replace"), mime="application/json")
return True
def handle_rm(self, req: list[str]) -> bool:
@@ -3639,11 +3649,12 @@
elif self.is_vproxied:
req = [x[len(self.args.SR) :] for x in req]
unpost = "unpost" in self.uparam
nlim = int(self.uparam.get("lim") or 0)
lim = [nlim, nlim] if nlim else []
x = self.conn.hsrv.broker.ask(
"up2k.handle_rm", self.uname, self.ip, req, lim, False
"up2k.handle_rm", self.uname, self.ip, req, lim, False, unpost
)
self.loud_reply(x.get())
return True

@@ -206,6 +206,9 @@ class Metrics(object):
try:
x = self.hsrv.broker.ask("up2k.get_unfinished")
xs = x.get()
if not xs:
raise Exception("up2k mutex acquisition timed out")
xj = json.loads(xs)
for ptop, (nbytes, nfiles) in xj.items():
tnbytes += nbytes

@@ -340,7 +340,7 @@ class SMB(object):
yeet("blocked delete (no-del-acc): " + vpath)
vpath = vpath.replace("\\", "/").lstrip("/")
self.hub.up2k.handle_rm(uname, "1.7.6.2", [vpath], [], False)
self.hub.up2k.handle_rm(uname, "1.7.6.2", [vpath], [], False, False)
def _utime(self, vpath: str, times: tuple[float, float]) -> None:
if not self.args.smbw:

@@ -360,7 +360,7 @@ class Tftpd(object):
yeet("attempted delete of non-empty file")
vpath = vpath.replace("\\", "/").lstrip("/")
self.hub.up2k.handle_rm("*", "8.3.8.7", [vpath], [], False)
self.hub.up2k.handle_rm("*", "8.3.8.7", [vpath], [], False, False)
def _access(self, *a: Any) -> bool:
return True

@@ -282,9 +282,44 @@ class Up2k(object):
}
return json.dumps(ret, indent=4)
def get_unfinished_by_user(self, uname, ip) -> str:
if PY2 or not self.mutex.acquire(timeout=2):
return '[{"timeout":1}]'
ret: list[tuple[int, str, int, int, int]] = []
try:
for ptop, tab2 in self.registry.items():
cfg = self.flags.get(ptop, {}).get("u2abort", 1)
if not cfg:
continue
addr = (ip or "\n") if cfg in (1, 2) else ""
user = (uname or "\n") if cfg in (1, 3) else ""
drp = self.droppable.get(ptop, {})
for wark, job in tab2.items():
if wark in drp or (user and user != job["user"]) or (addr and addr != job["addr"]):
continue
zt5 = (
int(job["t0"]),
djoin(job["vtop"], job["prel"], job["name"]),
job["size"],
len(job["need"]),
len(job["hash"]),
)
ret.append(zt5)
finally:
self.mutex.release()
ret.sort(reverse=True)
ret2 = [
{"at": at, "vp": "/" + vp, "pd": 100 - ((nn * 100) // (nh or 1)), "sz": sz}
for (at, vp, sz, nn, nh) in ret
]
return json.dumps(ret2, indent=0)
def get_unfinished(self) -> str:
if PY2 or not self.mutex.acquire(timeout=0.5):
return "{}"
return ""
ret: dict[str, tuple[int, int]] = {}
try:
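
(standalone restatement of the u2abort mode handling above, not from the diff: mode 1 requires both the IP and the account to match, 2 checks only the IP, 3 only the account, 0 disables aborting; the "\n" fallback makes an enabled check fail closed when the client's IP or username is unknown)

def may_abort(cfg: int, ip: str, uname: str, job: dict) -> bool:
    if not cfg:
        return False  # 0 = aborting disabled for this volume
    addr = (ip or "\n") if cfg in (1, 2) else ""     # 1 and 2 require same IP
    user = (uname or "\n") if cfg in (1, 3) else ""  # 1 and 3 require same account
    if user and user != job["user"]:
        return False
    if addr and addr != job["addr"]:
        return False
    return True

assert may_abort(2, "10.1.2.3", "anon", {"user": "ed", "addr": "10.1.2.3"})
assert not may_abort(1, "10.1.2.3", "anon", {"user": "ed", "addr": "10.1.2.3"})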
@@ -463,7 +498,7 @@ class Up2k(object):
if vp:
fvp = "%s/%s" % (vp, fvp)
self._handle_rm(LEELOO_DALLAS, "", fvp, [], True)
self._handle_rm(LEELOO_DALLAS, "", fvp, [], True, False)
nrm += 1
if nrm:
@@ -2690,6 +2725,9 @@ class Up2k(object):
a = [job[x] for x in zs.split()]
self.db_add(cur, vfs.flags, *a)
cur.connection.commit()
elif wark in reg:
# checks out, but client may have hopped IPs
job["addr"] = cj["addr"]
if not job:
ap1 = djoin(cj["ptop"], cj["prel"])
@@ -3226,7 +3264,7 @@
pass
def handle_rm(
self, uname: str, ip: str, vpaths: list[str], lim: list[int], rm_up: bool
self, uname: str, ip: str, vpaths: list[str], lim: list[int], rm_up: bool, unpost: bool
) -> str:
n_files = 0
ok = {}
@@ -3236,7 +3274,7 @@
self.log("hit delete limit of {} files".format(lim[1]), 3)
break
a, b, c = self._handle_rm(uname, ip, vp, lim, rm_up)
a, b, c = self._handle_rm(uname, ip, vp, lim, rm_up, unpost)
n_files += a
for k in b:
ok[k] = 1
@@ -3250,25 +3288,42 @@
return "deleted {} files (and {}/{} folders)".format(n_files, iok, iok + ing)
def _handle_rm(
self, uname: str, ip: str, vpath: str, lim: list[int], rm_up: bool
self, uname: str, ip: str, vpath: str, lim: list[int], rm_up: bool, unpost: bool
) -> tuple[int, list[str], list[str]]:
self.db_act = time.time()
try:
partial = ""
if not unpost:
permsets = [[True, False, False, True]]
vn, rem = self.asrv.vfs.get(vpath, uname, *permsets[0])
vn, rem = vn.get_dbv(rem)
unpost = False
except:
else:
# unpost with missing permissions? verify with db
if not self.args.unpost:
raise Pebkac(400, "the unpost feature is disabled in server config")
unpost = True
permsets = [[False, True]]
vn, rem = self.asrv.vfs.get(vpath, uname, *permsets[0])
vn, rem = vn.get_dbv(rem)
ptop = vn.realpath
with self.mutex:
_, _, _, _, dip, dat = self._find_from_vpath(vn.realpath, rem)
abrt_cfg = self.flags.get(ptop, {}).get("u2abort", 1)
addr = (ip or "\n") if abrt_cfg in (1, 2) else ""
user = (uname or "\n") if abrt_cfg in (1, 3) else ""
reg = self.registry.get(ptop, {}) if abrt_cfg else {}
for wark, job in reg.items():
if (user and user != job["user"]) or (addr and addr != job["addr"]):
continue
if djoin(job["prel"], job["name"]) == rem:
if job["ptop"] != ptop:
t = "job.ptop [%s] != vol.ptop [%s] ??"
raise Exception(t % (job["ptop"], ptop))
partial = vn.canonical(vjoin(job["prel"], job["tnam"]))
break
if partial:
dip = ip
dat = time.time()
else:
if not self.args.unpost:
raise Pebkac(400, "the unpost feature is disabled in server config")
_, _, _, _, dip, dat = self._find_from_vpath(ptop, rem)
t = "you cannot delete this: "
if not dip:
@@ -3361,6 +3416,9 @@
cur.connection.commit()
wunlink(self.log, abspath, dbv.flags)
if partial:
wunlink(self.log, partial, dbv.flags)
partial = ""
if xad:
runhook(
self.log,

@@ -1839,6 +1839,10 @@ html.y #tree.nowrap .ntree a+a:hover {
margin: 0;
padding: 0;
}
#unpost td:nth-child(3),
#unpost td:nth-child(4) {
text-align: right;
}
#rui {
background: #fff;
background: var(--bg);

@@ -102,7 +102,7 @@ var Ls = {
"access": " access",
"ot_close": "close submenu",
"ot_search": "search for files by attributes, path / name, music tags, or any combination of those$N$N<code>foo bar</code> = must contain both «foo» and «bar»,$N<code>foo -bar</code> = must contain «foo» but not «bar»,$N<code>^yana .opus$</code> = start with «yana» and be an «opus» file$N<code>"try unite"</code> = contain exactly «try unite»$N$Nthe date format is iso-8601, like$N<code>2009-12-31</code> or <code>2020-09-12 23:30:00</code>",
"ot_unpost": "unpost: delete your recent uploads",
"ot_unpost": "unpost: delete your recent uploads, or abort unfinished ones",
"ot_bup": "bup: basic uploader, even supports netscape 4.0",
"ot_mkdir": "mkdir: create a new directory",
"ot_md": "new-md: create a new markdown document",
@@ -412,7 +412,7 @@ var Ls = {
"fz_zipd": "zip with traditional cp437 filenames, for really old software",
"fz_zipc": "cp437 with crc32 computed early,$Nfor MS-DOS PKZIP v2.04g (october 1993)$N(takes longer to process before download can start)",
"un_m1": "you can delete your recent uploads below",
"un_m1": "you can delete your recent uploads (or abort unfinished ones) below",
"un_upd": "refresh",
"un_m4": "or share the files visible below:",
"un_ulist": "show",
@@ -421,12 +421,15 @@
"un_fclr": "clear filter",
"un_derr": 'unpost-delete failed:\n',
"un_f5": 'something broke, please try a refresh or hit F5',
"un_nou": '<b>warning:</b> server too busy to show unfinished uploads; click the "refresh" link in a bit',
"un_noc": '<b>warning:</b> unpost of fully uploaded files is not enabled/permitted in server config',
"un_max": "showing first 2000 files (use the filter)",
"un_avail": "{0} uploads can be deleted",
"un_m2": "sorted by upload time &ndash; most recent first:",
"un_avail": "{0} recent uploads can be deleted<br />{1} unfinished ones can be aborted",
"un_m2": "sorted by upload time; most recent first:",
"un_no1": "sike! no uploads are sufficiently recent",
"un_no2": "sike! no uploads matching that filter are sufficiently recent",
"un_next": "delete the next {0} files below",
"un_abrt": "abort",
"un_del": "delete",
"un_m3": "loading your recent uploads...",
"un_busy": "deleting {0} files...",
@@ -912,7 +915,7 @@ var Ls = {
"fz_zipd": "zip med filnavn i cp437, for høggamle maskiner",
"fz_zipc": "cp437 med tidlig crc32,$Nfor MS-DOS PKZIP v2.04g (oktober 1993)$N(øker behandlingstid på server)",
"un_m1": "nedenfor kan du angre / slette filer som du nylig har lastet opp",
"un_m1": "nedenfor kan du angre / slette filer som du nylig har lastet opp, eller avbryte ufullstendige opplastninger",
"un_upd": "oppdater",
"un_m4": "eller hvis du vil dele nedlastnings-lenkene:",
"un_ulist": "vis",
@@ -921,12 +924,15 @@
"un_fclr": "nullstill filter",
"un_derr": 'unpost-sletting feilet:\n',
"un_f5": 'noe gikk galt, prøv å oppdatere listen eller trykk F5',
"un_nou": '<b>advarsel:</b> kan ikke vise ufullstendige opplastninger akkurat nå; klikk på oppdater-linken om litt',
"un_noc": '<b>advarsel:</b> angring av fullførte opplastninger er deaktivert i serverkonfigurasjonen',
"un_max": "viser de første 2000 filene (bruk filteret for å innsnevre)",
"un_avail": "{0} filer kan slettes",
"un_m2": "sortert etter opplastningstid &ndash; nyeste først:",
"un_avail": "{0} nylig opplastede filer kan slettes<br />{1} ufullstendige opplastninger kan avbrytes",
"un_m2": "sortert etter opplastningstid; nyeste først:",
"un_no1": "men nei, her var det jaggu ikkeno som slettes kan",
"un_no2": "men nei, her var det jaggu ingenting som passet overens med filteret",
"un_next": "slett de neste {0} filene nedenfor",
"un_abrt": "avbryt",
"un_del": "slett",
"un_m3": "henter listen med nylig opplastede filer...",
"un_busy": "sletter {0} filer...",
@@ -1030,7 +1036,7 @@ modal.load();
ebi('ops').innerHTML = (
'<a href="#" data-dest="" tt="' + L.ot_close + '">--</a>' +
'<a href="#" data-perm="read" data-dep="idx" data-dest="search" tt="' + L.ot_search + '">🔎</a>' +
(have_del && have_unpost ? '<a href="#" data-dest="unpost" data-dep="idx" tt="' + L.ot_unpost + '">🧯</a>' : '') +
(have_del ? '<a href="#" data-dest="unpost" tt="' + L.ot_unpost + '">🧯</a>' : '') +
'<a href="#" data-dest="up2k">🚀</a>' +
'<a href="#" data-perm="write" data-dest="bup" tt="' + L.ot_bup + '">🎈</a>' +
'<a href="#" data-perm="write" data-dest="mkdir" tt="' + L.ot_mkdir + '">📂</a>' +
@@ -7883,19 +7889,38 @@ var unpost = (function () {
return ebi('op_unpost').innerHTML = L.fu_xe1;
try {
var res = JSON.parse(this.responseText);
var ores = JSON.parse(this.responseText);
}
catch (ex) {
return ebi('op_unpost').innerHTML = '<p>' + L.badreply + ':</p>' + unpre(this.responseText);
}
if (ores.u.length == 1 && ores.u[0].timeout) {
html.push('<p>' + L.un_nou + '</p>');
ores.u = [];
}
if (ores.c.length == 1 && ores.c[0].kinshi) {
html.push('<p>' + L.un_noc + '</p>');
ores.c = [];
}
for (var a = 0; a < ores.u.length; a++)
ores.u[a].k = 'u';
for (var a = 0; a < ores.c.length; a++)
ores.c[a].k = 'c';
var res = ores.u.concat(ores.c);
if (res.length) {
if (res.length == 2000)
html.push("<p>" + L.un_max);
else
html.push("<p>" + L.un_avail.format(res.length));
html.push("<p>" + L.un_avail.format(ores.c.length, ores.u.length));
html.push(" &ndash; " + L.un_m2 + "</p>");
html.push("<table><thead><tr><td></td><td>time</td><td>size</td><td>file</td></tr></thead><tbody>");
html.push("<br />" + L.un_m2 + "</p>");
html.push("<table><thead><tr><td></td><td>time</td><td>size</td><td>done</td><td>file</td></tr></thead><tbody>");
}
else
html.push('-- <em>' + (filt.value ? L.un_no2 : L.un_no1) + '</em>');
@@ -7908,10 +7933,13 @@
'<tr><td></td><td colspan="3" style="padding:.5em">' +
'<a me="' + me + '" class="n' + a + '" n2="' + (a + mods[b]) +
'" href="#">' + L.un_next.format(Math.min(mods[b], res.length - a)) + '</a></td></tr>');
var done = res[a].k == 'c';
html.push(
'<tr><td><a me="' + me + '" class="n' + a + '" href="#">' + L.un_del + '</a></td>' +
'<tr><td><a me="' + me + '" class="n' + a + '" href="#">' + (done ? L.un_del : L.un_abrt) + '</a></td>' +
'<td>' + unix2iso(res[a].at) + '</td>' +
'<td>' + res[a].sz + '</td>' +
'<td>' + ('' + res[a].sz).replace(/\B(?=(\d{3})+(?!\d))/g, " ") + '</td>' +
(done ? '<td>100%</td>' : '<td>' + res[a].pd + '%</td>') +
'<td>' + linksplit(res[a].vp).join('<span> / </span>') + '</td></tr>');
}
@@ -7997,7 +8025,7 @@
var xhr = new XHR();
xhr.n = n;
xhr.n2 = n2;
xhr.open('POST', SR + '/?delete&lim=' + req.length, true);
xhr.open('POST', SR + '/?delete&unpost&lim=' + req.length, true);
xhr.onload = xhr.onerror = unpost_delete_cb;
xhr.send(JSON.stringify(req));
};
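
(assumed shape of the merged unpost response that the browser code above parses; field names come from tx_ups and get_unfinished_by_user in this commit, the values are made up)

example = {
    "u": [  # unfinished uploads, newest first; [{"timeout": 1}] on mutex timeout
        {"at": 1710115200, "vp": "/pub/big.iso", "pd": 37, "sz": 734003200},
    ],
    "c": [  # completed recent uploads; [{"kinshi": 1}] if unpost is not permitted
        {"at": 1710112600, "vp": "/pub/2024-0310-stoltzekleiven.jpg", "sz": 48211},
    ],
}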

@@ -13,10 +13,6 @@ a living list of upcoming features / fixes / changes, very roughly in order of p
* sanchk that autogenerated volumes below inaccessible parent
* disable logout links if idp detected
* [github discussion #77](https://github.com/9001/copyparty/discussions/77) - cancel-buttons for uploads
* definitely included in the unpost list
* probably an X-button next to each progressbar
* download accelerator
* definitely download chunks in parallel
* maybe resumable downloads (chrome-only, jank api)

@@ -119,13 +119,13 @@ class Cfg(Namespace):
ex = "ah_cli ah_gen css_browser hist ipa_re js_browser no_forget no_hash no_idx nonsus_urls"
ka.update(**{k: None for k in ex.split()})
ex = "hash_mt srch_time u2j"
ex = "hash_mt srch_time u2abort u2j"
ka.update(**{k: 1 for k in ex.split()})
ex = "reg_cap s_thead s_tbody th_convt"
ka.update(**{k: 9 for k in ex.split()})
ex = "db_act df loris re_maxage rproxy rsp_jtr rsp_slp s_wr_slp snap_wri theme themes turbo"
ex = "db_act df k304 loris re_maxage rproxy rsp_jtr rsp_slp s_wr_slp snap_wri theme themes turbo"
ka.update(**{k: 0 for k in ex.split()})
ex = "ah_alg bname doctitle exit favico idp_h_usr html_head lg_sbf log_fk md_sbf name textfiles unlist vname R RS SR"