diff --git a/bin/u2c.py b/bin/u2c.py
index 2d29a75b..8f75c28d 100755
--- a/bin/u2c.py
+++ b/bin/u2c.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals
-S_VERSION = "1.18"
-S_BUILD_DT = "2024-06-01"
+S_VERSION = "1.19"
+S_BUILD_DT = "2024-07-21"
"""
u2c.py: upload to copyparty
@@ -119,6 +119,7 @@ class File(object):
self.nhs = 0
# set by upload
+ self.nojoin = 0 # type: int
self.up_b = 0 # type: int
self.up_c = 0 # type: int
self.cd = 0
@@ -130,10 +131,20 @@ class File(object):
class FileSlice(object):
"""file-like object providing a fixed window into a file"""
- def __init__(self, file, cid):
+ def __init__(self, file, cids):
- # type: (File, str) -> None
+ # type: (File, list[str]) -> None
- self.car, self.len = file.kchunks[cid]
+ self.file = file
+ self.cids = cids
+
+ self.car, tlen = file.kchunks[cids[0]]
+ for cid in cids[1:]:
+ ofs, clen = file.kchunks[cid]
+ if ofs != self.car + tlen:
+ raise Exception(9) # not contiguous; caller falls back to a shorter slice
+ tlen += clen
+
+ self.len = tlen
self.cdr = self.car + self.len
self.ofs = 0 # type: int
self.f = open(file.abs, "rb", 512 * 1024)
@@ -636,13 +647,13 @@ def handshake(ar, file, search):
return r["hash"], r["sprs"]
-def upload(file, cid, pw, stats):
- # type: (File, str, str, str) -> None
- """upload one specific chunk, `cid` (a chunk-hash)"""
+def upload(fsl, pw, stats):
+ # type: (FileSlice, str, str) -> None
+ """upload a range of file data, defined by one or more `cid` (chunk-hash)"""
headers = {
- "X-Up2k-Hash": cid,
- "X-Up2k-Wark": file.wark,
+ "X-Up2k-Hash": ",".join(fsl.cids),
+ "X-Up2k-Wark": fsl.file.wark,
"Content-Type": "application/octet-stream",
}
@@ -652,15 +663,20 @@ def upload(file, cid, pw, stats):
if pw:
headers["Cookie"] = "=".join(["cppwd", pw])
- f = FileSlice(file, cid)
try:
- r = req_ses.post(file.url, headers=headers, data=f)
+ r = req_ses.post(fsl.file.url, headers=headers, data=fsl)
+
+ if r.status_code == 400:
+ txt = r.text
+ if "already got that" in txt or "already being written" in txt:
+ fsl.file.nojoin = 1
+
if not r:
raise Exception(repr(r))
_ = r.content
finally:
- f.f.close()
+ fsl.f.close()
class Ctl(object):
@@ -743,7 +759,7 @@ class Ctl(object):
self.mutex = threading.Lock()
self.q_handshake = Queue() # type: Queue[File]
- self.q_upload = Queue() # type: Queue[tuple[File, str]]
+ self.q_upload = Queue() # type: Queue[FileSlice]
self.st_hash = [None, "(idle, starting...)"] # type: tuple[File, int]
self.st_up = [None, "(idle, starting...)"] # type: tuple[File, int]
@@ -788,7 +804,8 @@ class Ctl(object):
for nc, cid in enumerate(hs):
print(" {0} up {1}".format(ncs - nc, cid))
stats = "{0}/0/0/{1}".format(nf, self.nfiles - nf)
- upload(file, cid, self.ar.a, stats)
+ fslice = FileSlice(file, [cid])
+ upload(fslice, self.ar.a, stats)
print(" ok!")
if file.recheck:
@@ -1062,13 +1079,24 @@ class Ctl(object):
if not hs:
kw = "uploaded" if file.up_b else " found"
print("{0} {1}".format(kw, upath))
- for cid in hs:
- self.q_upload.put([file, cid])
+
+ cs = hs[:]
+ while cs:
+ fsl = FileSlice(file, cs[:1])
+ try:
+ if file.nojoin:
+ raise Exception() # server rejected a stitched post; one chunk at a time
+ # grow the slice while the chunks stay contiguous; FileSlice
+ # raises on the first gap, keeping the last good fsl
+ for n in range(2, self.ar.sz + 1):
+ fsl = FileSlice(file, cs[:n])
+ except:
+ pass
+ cs = cs[len(fsl.cids):]
+ self.q_upload.put(fsl)
def uploader(self):
while True:
- task = self.q_upload.get()
- if not task:
+ fsl = self.q_upload.get()
+ if not fsl:
self.st_up = [None, "(finished)"]
break
@@ -1086,22 +1114,23 @@ class Ctl(object):
self.eta,
)
- file, cid = task
+ file = fsl.file
+ cids = fsl.cids
try:
- upload(file, cid, self.ar.a, stats)
+ upload(fsl, self.ar.a, stats)
except Exception as ex:
- t = "upload failed, retrying: {0} #{1} ({2})\n"
- eprint(t.format(file.name, cid[:8], ex))
+ t = "upload failed, retrying: %s #%d+%d (%s)\n"
+ eprint(t % (file.name, cids[0][:8], len(cids) - 1, ex))
file.cd = time.time() + self.ar.cd
# handshake will fix it
with self.mutex:
- sz = file.kchunks[cid][1]
- file.ucids = [x for x in file.ucids if x != cid]
+ sz = fsl.len
+ file.ucids = [x for x in file.ucids if x not in cids]
if not file.ucids:
self.q_handshake.put(file)
- self.st_up = [file, cid]
+ self.st_up = [file, cids[0]]
file.up_b += sz
self.up_b += sz
self.up_br += sz
@@ -1164,6 +1193,7 @@ source file/folder selection uses rsync syntax, meaning that:
ap = app.add_argument_group("performance tweaks")
ap.add_argument("-j", type=int, metavar="CONNS", default=2, help="parallel connections")
ap.add_argument("-J", type=int, metavar="CORES", default=hcores, help="num cpu-cores to use for hashing; set 0 or 1 for single-core hashing")
+ ap.add_argument("--sz", type=int, metavar="MiB", default=64, help="try to make each POST this big")
ap.add_argument("-nh", action="store_true", help="disable hashing while uploading")
ap.add_argument("-ns", action="store_true", help="no status panel (for slow consoles and macos)")
ap.add_argument("--cd", type=float, metavar="SEC", default=5, help="delay before reattempting a failed handshake/upload")
diff --git a/copyparty/__main__.py b/copyparty/__main__.py
index 8846d5e1..a655c0e2 100644
--- a/copyparty/__main__.py
+++ b/copyparty/__main__.py
@@ -942,6 +942,7 @@ def add_upload(ap):
ap2.add_argument("--sparse", metavar="MiB", type=int, default=4, help="windows-only: minimum size of incoming uploads through up2k before they are made into sparse files")
ap2.add_argument("--turbo", metavar="LVL", type=int, default=0, help="configure turbo-mode in up2k client; [\033[32m-1\033[0m] = forbidden/always-off, [\033[32m0\033[0m] = default-off and warn if enabled, [\033[32m1\033[0m] = default-off, [\033[32m2\033[0m] = on, [\033[32m3\033[0m] = on and disable datecheck")
ap2.add_argument("--u2j", metavar="JOBS", type=int, default=2, help="web-client: number of file chunks to upload in parallel; 1 or 2 is good for low-latency (same-country) connections, 4-8 for android clients, 16 for cross-atlantic (max=64)")
+ ap2.add_argument("--u2sz", metavar="N,N,N", type=u, default="1,64,96", help="web-client: default upload chunksize (MiB); sets \033[33mmin,default,max\033[0m in the settings gui. Each HTTP POST will aim for this size. Cloudflare max is 96. Big values are good for cross-atlantic but may increase HDD fragmentation on some FS. Disable this optimization with [\033[32m1,1,1\033[0m]")
ap2.add_argument("--u2sort", metavar="TXT", type=u, default="s", help="upload order; [\033[32ms\033[0m]=smallest-first, [\033[32mn\033[0m]=alphabetical, [\033[32mfs\033[0m]=force-s, [\033[32mfn\033[0m]=force-n -- alphabetical is a bit slower on fiber/LAN but makes it easier to eyeball if everything went fine")
ap2.add_argument("--write-uplog", action="store_true", help="write POST reports to textfiles in working-directory")
diff --git a/copyparty/httpcli.py b/copyparty/httpcli.py
index decaae7e..83ebb634 100644
--- a/copyparty/httpcli.py
+++ b/copyparty/httpcli.py
@@ -2199,33 +2199,36 @@ class HttpCli(object):
def handle_post_binary(self) -> bool:
try:
- remains = int(self.headers["content-length"])
+ postsize = remains = int(self.headers["content-length"])
except:
raise Pebkac(400, "you must supply a content-length for binary POST")
try:
- chash = self.headers["x-up2k-hash"]
+ chashes = self.headers["x-up2k-hash"].split(",")
wark = self.headers["x-up2k-wark"]
except KeyError:
raise Pebkac(400, "need hash and wark headers for binary POST")
+ chashes = [x.strip() for x in chashes]
+
vfs, _ = self.asrv.vfs.get(self.vpath, self.uname, False, True)
ptop = (vfs.dbv or vfs).realpath
- x = self.conn.hsrv.broker.ask("up2k.handle_chunk", ptop, wark, chash)
+ x = self.conn.hsrv.broker.ask("up2k.handle_chunks", ptop, wark, chashes)
response = x.get()
- chunksize, cstart, path, lastmod, sprs = response
+ chunksize, cstarts, path, lastmod, sprs = response
+ maxsize = chunksize * len(chashes)
+ cstart0 = cstarts[0]
try:
if self.args.nw:
path = os.devnull
- if remains > chunksize:
- raise Pebkac(400, "your chunk is too big to fit")
+ if remains > maxsize:
+ t = "your client is sending %d bytes which is too much (server expected %d bytes at most)"
+ raise Pebkac(400, t % (remains, maxsize))
- self.log("writing {} #{} @{} len {}".format(path, chash, cstart, remains))
-
- reader = read_socket(self.sr, self.args.s_rd_sz, remains)
+ self.log("writing {} {} @{} len {}".format(path, chashes, cstart0, remains))
f = None
fpool = not self.args.no_fpool and sprs
@@ -2239,37 +2242,43 @@ class HttpCli(object):
f = f or open(fsenc(path), "rb+", self.args.iobuf)
try:
- f.seek(cstart[0])
- post_sz, _, sha_b64 = hashcopy(reader, f, self.args.s_wr_slp)
-
- if sha_b64 != chash:
- try:
- self.bakflip(f, cstart[0], post_sz, sha_b64, vfs.flags)
- except:
- self.log("bakflip failed: " + min_ex())
-
- t = "your chunk got corrupted somehow (received {} bytes); expected vs received hash:\n{}\n{}"
- raise Pebkac(400, t.format(post_sz, chash, sha_b64))
-
- if len(cstart) > 1 and path != os.devnull:
- self.log(
- "clone {} to {}".format(
- cstart[0], " & ".join(unicode(x) for x in cstart[1:])
- )
+ for chash, cstart in zip(chashes, cstarts):
+ f.seek(cstart[0])
+ reader = read_socket(
+ self.sr, self.args.s_rd_sz, min(remains, chunksize)
)
- ofs = 0
- while ofs < chunksize:
- bufsz = max(4 * 1024 * 1024, self.args.iobuf)
- bufsz = min(chunksize - ofs, bufsz)
- f.seek(cstart[0] + ofs)
- buf = f.read(bufsz)
- for wofs in cstart[1:]:
- f.seek(wofs + ofs)
- f.write(buf)
+ post_sz, _, sha_b64 = hashcopy(reader, f, self.args.s_wr_slp)
- ofs += len(buf)
+ if sha_b64 != chash:
+ try:
+ self.bakflip(f, cstart[0], post_sz, sha_b64, vfs.flags)
+ except:
+ self.log("bakflip failed: " + min_ex())
- self.log("clone {} done".format(cstart[0]))
+ t = "your chunk got corrupted somehow (received {} bytes); expected vs received hash:\n{}\n{}"
+ raise Pebkac(400, t.format(post_sz, chash, sha_b64))
+
+ remains -= chunksize
+
+ if len(cstart) > 1 and path != os.devnull:
+ self.log(
+ "clone {} to {}".format(
+ cstart[0], " & ".join(unicode(x) for x in cstart[1:])
+ )
+ )
+ ofs = 0
+ while ofs < chunksize:
+ bufsz = max(4 * 1024 * 1024, self.args.iobuf)
+ bufsz = min(chunksize - ofs, bufsz)
+ f.seek(cstart[0] + ofs)
+ buf = f.read(bufsz)
+ for wofs in cstart[1:]:
+ f.seek(wofs + ofs)
+ f.write(buf)
+
+ ofs += len(buf)
+
+ self.log("clone {} done".format(cstart[0]))
if not fpool:
f.close()
@@ -2281,10 +2290,10 @@ class HttpCli(object):
f.close()
raise
finally:
- x = self.conn.hsrv.broker.ask("up2k.release_chunk", ptop, wark, chash)
+ x = self.conn.hsrv.broker.ask("up2k.release_chunks", ptop, wark, chashes)
x.get() # block client until released
- x = self.conn.hsrv.broker.ask("up2k.confirm_chunk", ptop, wark, chash)
+ x = self.conn.hsrv.broker.ask("up2k.confirm_chunks", ptop, wark, chashes)
ztis = x.get()
try:
num_left, fin_path = ztis
@@ -2303,7 +2312,7 @@ class HttpCli(object):
cinf = self.headers.get("x-up2k-stat", "")
- spd = self._spd(post_sz)
+ spd = self._spd(postsize)
self.log("{:70} thank {}".format(spd, cinf))
self.reply(b"thank")
return True
@@ -4500,6 +4509,7 @@ class HttpCli(object):
"themes": self.args.themes,
"turbolvl": self.args.turbo,
"u2j": self.args.u2j,
+ "u2sz": self.args.u2sz,
"idxh": int(self.args.ih),
"u2sort": self.args.u2sort,
}
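In other words, the server now walks a single POST body and peels off one chunksize-worth of data per hash. A standalone sketch of that loop, where `digest` stands in for whatever `hashcopy` computes (a base64'd sha512 in up2k, elided here) and `stream` for the socket reader; all names are illustrative:

```python
def write_stitched(f, stream, chashes, cstarts, chunksize, remains, digest):
    """read len(chashes) consecutive chunks from one POST body,
    verify each against its hash, and write it at its offset"""
    for chash, cstart in zip(chashes, cstarts):
        f.seek(cstart[0])  # primary offset; dupe offsets are cloned afterwards
        buf = stream.read(min(remains, chunksize))  # the final chunk may be short
        if digest(buf) != chash:
            raise ValueError("chunk %s arrived corrupted" % (chash,))
        f.write(buf)
        remains -= len(buf)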
diff --git a/copyparty/up2k.py b/copyparty/up2k.py
index fbf16f4e..ea739441 100644
--- a/copyparty/up2k.py
+++ b/copyparty/up2k.py
@@ -3013,9 +3013,9 @@ class Up2k(object):
times = (int(time.time()), int(lmod))
bos.utime(dst, times, False)
- def handle_chunk(
- self, ptop: str, wark: str, chash: str
- ) -> tuple[int, list[int], str, float, bool]:
+ def handle_chunks(
+ self, ptop: str, wark: str, chashes: list[str]
+ ) -> tuple[int, list[list[int]], str, float, bool]:
with self.mutex, self.reg_mutex:
self.db_act = self.vol_act[ptop] = time.time()
job = self.registry[ptop].get(wark)
@@ -3024,26 +3024,37 @@ class Up2k(object):
self.log("unknown wark [{}], known: {}".format(wark, known))
raise Pebkac(400, "unknown wark" + SSEELOG)
- if chash not in job["need"]:
- msg = "chash = {} , need:\n".format(chash)
- msg += "\n".join(job["need"])
- self.log(msg)
- raise Pebkac(400, "already got that but thanks??")
+ for chash in chashes:
+ if chash not in job["need"]:
+ msg = "chash = {} , need:\n".format(chash)
+ msg += "\n".join(job["need"])
+ self.log(msg)
+ raise Pebkac(400, "already got that (%s) but thanks??" % (chash,))
- nchunk = [n for n, v in enumerate(job["hash"]) if v == chash]
- if not nchunk:
- raise Pebkac(400, "unknown chunk")
-
- if chash in job["busy"]:
- nh = len(job["hash"])
- idx = job["hash"].index(chash)
- t = "that chunk is already being written to:\n {}\n {} {}/{}\n {}"
- raise Pebkac(400, t.format(wark, chash, idx, nh, job["name"]))
-
- path = djoin(job["ptop"], job["prel"], job["tnam"])
+ if chash in job["busy"]:
+ nh = len(job["hash"])
+ idx = job["hash"].index(chash)
+ t = "that chunk is already being written to:\n {}\n {} {}/{}\n {}"
+ raise Pebkac(400, t.format(wark, chash, idx, nh, job["name"]))
chunksize = up2k_chunksize(job["size"])
- ofs = [chunksize * x for x in nchunk]
+
+ coffsets = []
+ for chash in chashes:
+ nchunk = [n for n, v in enumerate(job["hash"]) if v == chash]
+ if not nchunk:
+ raise Pebkac(400, "unknown chunk %s" % (chash))
+
+ ofs = [chunksize * x for x in nchunk]
+ coffsets.append(ofs)
+
+ for ofs1, ofs2 in zip(coffsets, coffsets[1:]):
+ gap = (ofs2[0] - ofs1[0]) - chunksize
+ if gap:
+ t = "only sibling chunks can be stitched; gap of %d bytes between offsets %d and %d in %s"
+ raise Pebkac(400, t % (gap, ofs1[0], ofs2[0], job["name"]))
+
+ path = djoin(job["ptop"], job["prel"], job["tnam"])
if not job["sprs"]:
cur_sz = bos.path.getsize(path)
@@ -3056,17 +3067,20 @@ class Up2k(object):
job["poke"] = time.time()
- return chunksize, ofs, path, job["lmod"], job["sprs"]
+ return chunksize, coffsets, path, job["lmod"], job["sprs"]
- def release_chunk(self, ptop: str, wark: str, chash: str) -> bool:
+ def release_chunks(self, ptop: str, wark: str, chashes: list[str]) -> bool:
with self.reg_mutex:
job = self.registry[ptop].get(wark)
if job:
- job["busy"].pop(chash, None)
+ for chash in chashes:
+ job["busy"].pop(chash, None)
return True
- def confirm_chunk(self, ptop: str, wark: str, chash: str) -> tuple[int, str]:
+ def confirm_chunks(
+ self, ptop: str, wark: str, chashes: list[str]
+ ) -> tuple[int, str]:
with self.mutex, self.reg_mutex:
self.db_act = self.vol_act[ptop] = time.time()
try:
@@ -3075,14 +3089,16 @@ class Up2k(object):
src = djoin(pdir, job["tnam"])
dst = djoin(pdir, job["name"])
except Exception as ex:
- return "confirm_chunk, wark, " + repr(ex) # type: ignore
+ return "confirm_chunk, wark(%r)" % (ex,) # type: ignore
- job["busy"].pop(chash, None)
+ for chash in chashes:
+ job["busy"].pop(chash, None)
try:
- job["need"].remove(chash)
+ for chash in chashes:
+ job["need"].remove(chash)
except Exception as ex:
- return "confirm_chunk, chash, " + repr(ex) # type: ignore
+ return "confirm_chunk, chash(%s) %r" % (chash, ex) # type: ignore
ret = len(job["need"])
if ret > 0:
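The `zip(coffsets, coffsets[1:])` check above is the server-side contiguity rule: with a fixed chunksize, stitched neighbors must sit exactly one chunksize apart. The same arithmetic in isolation:

```python
def check_contiguous(coffsets, chunksize):
    """coffsets is one list of file-offsets per chunk, first entry primary"""
    for ofs1, ofs2 in zip(coffsets, coffsets[1:]):
        gap = (ofs2[0] - ofs1[0]) - chunksize
        if gap:
            raise ValueError("gap of %d bytes between offsets %d and %d"
                             % (gap, ofs1[0], ofs2[0]))

check_contiguous([[0], [4], [8]], 4)  # ok: each chunk starts where the last ended
# check_contiguous([[0], [8]], 4)     # raises: gap of 4 bytes
```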
diff --git a/copyparty/web/browser.js b/copyparty/web/browser.js
index 84188b69..50c8da36 100644
--- a/copyparty/web/browser.js
+++ b/copyparty/web/browser.js
@@ -210,6 +210,8 @@ var Ls = {
"cut_datechk": "has no effect unless the turbo button is enabled$N$Nreduces the yolo factor by a tiny amount; checks whether the file timestamps on the server matches yours$N$Nshould theoretically catch most unfinished / corrupted uploads, but is not a substitute for doing a verification pass with turbo disabled afterwards\">date-chk",
+ "cut_u2sz": "size (in MiB) of each upload chunk; big values fly better across the atlantic. Try low values on very unreliable connections",
+
"cut_flag": "ensure only one tab is uploading at a time $N -- other tabs must have this enabled too $N -- only affects tabs on the same domain",
"cut_az": "upload files in alphabetical order, rather than smallest-file-first$N$Nalphabetical order can make it easier to eyeball if something went wrong on the server, but it makes uploading slightly slower on fiber / LAN",
@@ -478,6 +480,7 @@ var Ls = {
"u_ehsinit": "server rejected the request to initiate upload; retrying...",
"u_eneths": "network error while performing upload handshake; retrying...",
"u_enethd": "network error while testing target existence; retrying...",
+ "u_cbusy": "waiting for server to trust us again after a network glitch...",
"u_ehsdf": "server ran out of disk space!\n\nwill keep retrying, in case someone\nfrees up enough space to continue",
"u_emtleak1": "it looks like your webbrowser may have a memory leak;\nplease",
"u_emtleak2": ' switch to https (recommended) or ',
@@ -721,6 +724,8 @@ var Ls = {
"cut_datechk": "har ingen effekt dersom turbo er avslått$N$Ngjør turbo bittelitt tryggere ved å sjekke datostemplingen på filene (i tillegg til filstørrelse)$N$Nburde oppdage og gjenoppta de fleste ufullstendige opplastninger, men er ikke en fullverdig erstatning for å deaktivere turbo og gjøre en skikkelig sjekk\">date-chk",
+ "cut_u2sz": "størrelse i megabyte for hvert bruddstykke for opplastning. Store verdier flyr bedre over atlanteren. Små verdier kan være bedre på særdeles ustabile forbindelser",
+
"cut_flag": "samkjører nettleserfaner slik at bare én $N kan holde på med befaring / opplastning $N -- andre faner må også ha denne skrudd på $N -- fungerer kun innenfor samme domene",
"cut_az": "last opp filer i alfabetisk rekkefølge, istedenfor minste-fil-først$N$Nalfabetisk kan gjøre det lettere å anslå om alt gikk bra, men er bittelitt tregere på fiber / LAN",
@@ -989,6 +994,7 @@ var Ls = {
"u_ehsinit": "server nektet forespørselen om å begynne en ny opplastning; prøver igjen...",
"u_eneths": "et problem med nettverket gjorde at avtale om opplastning ikke kunne inngås; prøver igjen...",
"u_enethd": "et problem med nettverket gjorde at filsjekk ikke kunne utføres; prøver igjen...",
+ "u_cbusy": "venter på klarering ifra server etter et lite nettverksglipp...",
"u_ehsdf": "serveren er full!\n\nprøver igjen regelmessig,\ni tilfelle noen rydder litt...",
"u_emtleak1": "uff, det er mulig at nettleseren din har en minnelekkasje...\nForeslår",
"u_emtleak2": ' helst at du bytter til https, eller ',
@@ -1251,6 +1257,7 @@ ebi('op_cfg').innerHTML = (
+ ' <input type="text" id="u2szg" value="" tt="' + L.cut_u2sz + '" />\n' +
' <a id="flag_en" class="tgl btn" href="#" tt="' + L.cut_flag + '">💤</a>\n' +
' <a id="u2sort" class="tgl btn" href="#" tt="' + L.cut_az + '">az</a>\n' +
' <a id="upnag" class="tgl btn" href="#" tt="' + L.cut_nag + '">🔔</a>\n' +
diff --git a/copyparty/web/up2k.js b/copyparty/web/up2k.js
index 41a682df..fdca15ce 100644
--- a/copyparty/web/up2k.js
+++ b/copyparty/web/up2k.js
@@ -853,6 +853,7 @@ function up2k_init(subtle) {
setmsg(suggest_up2k, 'msg');
var parallel_uploads = ebi('nthread').value = icfg_get('nthread', u2j),
+ stitch_tgt = ebi('u2szg').value = icfg_get('u2sz', u2sz.split(',')[1]),
uc = {},
fdom_ctr = 0,
biggest_file = 0;
@@ -2374,11 +2375,22 @@ function up2k_init(subtle) {
var arr = st.todo.upload,
sort = arr.length && arr[arr.length - 1].nfile > t.n;
- for (var a = 0; a < t.postlist.length; a++)
+ for (var a = 0; a < t.postlist.length; a++) {
+ var nparts = [], tbytes = 0, stitch = stitch_tgt;
+ if (t.nojoin && t.nojoin - t.postlist.length < 6)
+ stitch = 1; // a recent stitched post got rejected; go one chunk at a time for a bit
+
+ --a; // rewind one step; the inner loop pre-increments
+ for (var b = 0; b < stitch; b++) {
+ nparts.push(t.postlist[++a]);
+ tbytes += chunksize;
+ if (tbytes + chunksize > stitch_tgt * 1024 * 1024 || t.postlist[a + 1] - t.postlist[a] !== 1)
+ break;
+ }
arr.push({
'nfile': t.n,
- 'npart': t.postlist[a]
+ 'nparts': nparts
});
+ }
msg = null;
done = false;
@@ -2387,7 +2399,7 @@ function up2k_init(subtle) {
arr.sort(function (a, b) {
return a.nfile < b.nfile ? -1 :
/* */ a.nfile > b.nfile ? 1 :
- a.npart < b.npart ? -1 : 1;
+ /* */ a.nparts[0] < b.nparts[0] ? -1 : 1;
});
}
@@ -2534,7 +2546,10 @@ function up2k_init(subtle) {
function exec_upload() {
var upt = st.todo.upload.shift(),
t = st.files[upt.nfile],
- npart = upt.npart,
+ nparts = upt.nparts,
+ pcar = nparts[0],
+ pcdr = nparts[nparts.length - 1],
+ snpart = pcar == pcdr ? pcar : ('' + pcar + '~' + pcdr),
tries = 0;
if (t.done)
@@ -2549,8 +2564,8 @@ function up2k_init(subtle) {
pvis.seth(t.n, 1, "🚀 send");
var chunksize = get_chunksize(t.size),
- car = npart * chunksize,
- cdr = car + chunksize;
+ car = pcar * chunksize,
+ cdr = (pcdr + 1) * chunksize;
if (cdr >= t.size)
cdr = t.size;
@@ -2560,14 +2575,19 @@ function up2k_init(subtle) {
var txt = unpre((xhr.response && xhr.response.err) || xhr.responseText);
if (txt.indexOf('upload blocked by x') + 1) {
apop(st.busy.upload, upt);
- apop(t.postlist, npart);
+ for (var a = pcar; a <= pcdr; a++)
+ apop(t.postlist, a);
pvis.seth(t.n, 1, "ERROR");
pvis.seth(t.n, 2, txt.split(/\n/)[0]);
pvis.move(t.n, 'ng');
return;
}
if (xhr.status == 200) {
- pvis.prog(t, npart, cdr - car);
+ var bdone = cdr - car;
+ for (var a = pcar; a <= pcdr; a++) {
+ pvis.prog(t, a, Math.min(bdone, chunksize));
+ bdone -= chunksize;
+ }
st.bytes.finished += cdr - car;
st.bytes.uploaded += cdr - car;
t.bytes_uploaded += cdr - car;
@@ -2576,18 +2596,21 @@ function up2k_init(subtle) {
}
else if (txt.indexOf('already got that') + 1 ||
txt.indexOf('already being written') + 1) {
- console.log("ignoring dupe-segment error", t.name, t);
+ t.nojoin = t.postlist.length;
+ console.log("ignoring dupe-segment with backoff", t.nojoin, t.name, t);
+ if (!toast.visible && st.todo.upload.length < 4)
+ toast.msg(10, L.u_cbusy);
}
else {
- xhrchk(xhr, L.u_cuerr2.format(npart, Math.ceil(t.size / chunksize), t.name), "404, target folder not found (???)", "warn", t);
-
+ xhrchk(xhr, L.u_cuerr2.format(snpart, Math.ceil(t.size / chunksize), t.name), "404, target folder not found (???)", "warn", t);
chill(t);
}
orz2(xhr);
}
var orz2 = function (xhr) {
apop(st.busy.upload, upt);
- apop(t.postlist, npart);
+ for (var a = pcar; a <= pcdr; a++)
+ apop(t.postlist, a);
if (!t.postlist.length) {
t.t_uploaded = Date.now();
pvis.seth(t.n, 1, 'verifying');
@@ -2604,7 +2627,7 @@ function up2k_init(subtle) {
var nb = xev.loaded;
st.bytes.inflight += nb - xhr.bsent;
xhr.bsent = nb;
- pvis.prog(t, npart, nb);
+ pvis.prog(t, pcar, nb);
};
xhr.onload = function (xev) {
try { orz(xhr); } catch (ex) { vis_exh(ex + '', 'up2k.js', '', '', ex); }
@@ -2616,13 +2639,17 @@ function up2k_init(subtle) {
st.bytes.inflight -= (xhr.bsent || 0);
if (!toast.visible)
- toast.warn(9.98, L.u_cuerr.format(npart, Math.ceil(t.size / chunksize), t.name), t);
+ toast.warn(9.98, L.u_cuerr.format(snpart, Math.ceil(t.size / chunksize), t.name), t);
console.log('chunkpit onerror,', ++tries, t.name, t);
orz2(xhr);
};
+ var chashes = [];
+ for (var a = pcar; a <= pcdr; a++)
+ chashes.push(t.hash[a]);
+
xhr.open('POST', t.purl, true);
- xhr.setRequestHeader("X-Up2k-Hash", t.hash[npart]);
+ xhr.setRequestHeader("X-Up2k-Hash", chashes.join(","));
xhr.setRequestHeader("X-Up2k-Wark", t.wark);
xhr.setRequestHeader("X-Up2k-Stat", "{0}/{1}/{2}/{3} {4}/{5} {6}".format(
pvis.ctr.ok, pvis.ctr.ng, pvis.ctr.bz, pvis.ctr.q, btot, btot - bfin,
@@ -2739,6 +2766,21 @@ function up2k_init(subtle) {
bumpthread({ "target": 1 });
}
+ var read_u2sz = function () {
+ var el = ebi('u2szg'), n = parseInt(el.value), dv = u2sz.split(',');
+ n = isNaN(n) ? dv[1] : n < dv[0] ? dv[0] : n > dv[2] ? dv[2] : n;
+ if (n == dv[1]) sdrop('u2sz'); else swrite('u2sz', n);
+ if (el.value != n) el.value = n;
+ };
+ ebi('u2szg').addEventListener('blur', read_u2sz);
+ ebi('u2szg').onkeydown = function (e) {
+ if (anymod(e)) return;
+ var n = e.code == 'ArrowUp' ? 1 : e.code == 'ArrowDown' ? -1 : 0;
+ if (!n) return;
+ this.value = parseInt(this.value) + n;
+ read_u2sz();
+ }
+
function tgl_fsearch() {
set_fsearch(!uc.fsearch);
}
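One subtlety in the 200-OK handler above: a stitched POST reports progress for several parts at once, so the transferred byte count has to be spread back over the individual chunks. The same bookkeeping as a python sketch (`spread_progress` is illustrative, mirroring the `pvis.prog` loop):

```python
def spread_progress(parts, chunksize, bdone):
    """attribute `bdone` transferred bytes to the parts of a stitched post"""
    done = {}
    for p in parts:
        done[p] = max(0, min(bdone, chunksize))
        bdone -= chunksize
    return done

# a 3-chunk post with 1 MiB chunks, fully transferred; the last chunk is short:
print(spread_progress([7, 8, 9], 1048576, 2 * 1048576 + 524288))
# {7: 1048576, 8: 1048576, 9: 524288}
```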
diff --git a/copyparty/web/util.js b/copyparty/web/util.js
index dcfec4b4..97d38c2e 100644
--- a/copyparty/web/util.js
+++ b/copyparty/web/util.js
@@ -1396,10 +1396,10 @@ var tt = (function () {
o = ctr.querySelectorAll('*[tt]');
for (var a = o.length - 1; a >= 0; a--) {
- o[a].onfocus = _cshow;
- o[a].onblur = _hide;
- o[a].onmouseenter = _dshow;
- o[a].onmouseleave = _hide;
+ o[a].addEventListener('focus', _cshow);
+ o[a].addEventListener('blur', _hide);
+ o[a].addEventListener('mouseenter', _dshow);
+ o[a].addEventListener('mouseleave', _hide);
}
r.hide();
}
diff --git a/docs/devnotes.md b/docs/devnotes.md
index 885bcf1d..1bb99bed 100644
--- a/docs/devnotes.md
+++ b/docs/devnotes.md
@@ -55,8 +55,8 @@ quick outline of the up2k protocol, see [uploading](https://github.com/9001/cop
* server creates the `wark`, an identifier for this upload
* `sha512( salt + filesize + chunk_hashes )`
* and a sparse file is created for the chunks to drop into
-* client uploads each chunk
- * header entries for the chunk-hash and wark
+* client sends a series of POSTs, with one or more consecutive chunks in each
+ * header entries for the chunk-hashes (comma-separated) and wark
* server writes chunks into place based on the hash
* client does another handshake with the hashlist; server replies with OK or a list of chunks to reupload
@@ -327,10 +327,6 @@ can be reproduced with `--no-sendfile --s-wr-sz 8192 --s-wr-slp 0.3 --rsp-slp 6`
* remove brokers / multiprocessing stuff; https://github.com/9001/copyparty/tree/no-broker
* reduce the nesting / indirections in `HttpCli` / `httpcli.py`
* nearly zero benefit from stuff like replacing all the `self.conn.hsrv` with a local `hsrv` variable
-* reduce up2k roundtrips
- * start from a chunk index and just go
- * terminate client on bad data
- * not worth the effort, just throw enough connections at it
* single sha512 across all up2k chunks?
* crypto.subtle cannot into streaming, would have to use hashwasm, expensive
* separate sqlite table per tag
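For completeness, what one stitched up2k POST looks like on the wire, per the outline above; the url, wark and hashes are placeholders, and a 1 MiB chunksize is assumed:

```python
import requests  # third-party; any http client works

url = "https://example.com/inc/some-file.bin"  # upload target (placeholder)
chashes = ["hash0", "hash1", "hash2"]  # consecutive chunks from the handshake
chunksize = 1024 * 1024  # assumed; the server derives it from the filesize

with open("some-file.bin", "rb") as f:
    f.seek(0)  # offset of the first chunk in this run
    body = f.read(len(chashes) * chunksize)

r = requests.post(url, data=body, headers={
    "X-Up2k-Hash": ",".join(chashes),  # comma-separated, as in the patch
    "X-Up2k-Wark": "wark-from-the-handshake",
    "Content-Type": "application/octet-stream",
})
r.raise_for_status()  # on success the server replies "thank"
```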
diff --git a/tests/util.py b/tests/util.py
index c1feca8b..7b87a930 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -120,7 +120,7 @@ class Cfg(Namespace):
ex = "ah_cli ah_gen css_browser hist js_browser mime mimes no_forget no_hash no_idx nonsus_urls og_tpl og_ua"
ka.update(**{k: None for k in ex.split()})
- ex = "hash_mt srch_time u2abort u2j"
+ ex = "hash_mt srch_time u2abort u2j u2sz"
ka.update(**{k: 1 for k in ex.split()})
ex = "au_vol mtab_age reg_cap s_thead s_tbody th_convt"