Handle multiple simultaneous uploads of the same file

This commit is contained in:
ed 2021-11-14 15:03:11 +01:00
parent 62c3272351
commit b206c5d64e
3 changed files with 82 additions and 54 deletions

View file

@ -855,6 +855,7 @@ class HttpCli(object):
response = x.get()
chunksize, cstart, path, lastmod = response
try:
if self.args.nw:
path = os.devnull
@ -912,6 +913,9 @@ class HttpCli(object):
else:
with self.mutex:
self.u2fh.put(path, f)
finally:
x = self.conn.hsrv.broker.put(True, "up2k.release_chunk", ptop, wark, chash)
x.get() # block client until released
x = self.conn.hsrv.broker.put(True, "up2k.confirm_chunk", ptop, wark, chash)
x = x.get()

View file

@ -494,6 +494,7 @@ class Up2k(object):
if bos.path.exists(path):
reg[k] = job
job["poke"] = time.time()
job["busy"] = {}
else:
self.log("ign deleted file in snap: [{}]".format(path))
@ -1256,6 +1257,7 @@ class Up2k(object):
"at": at,
"hash": [],
"need": [],
"busy": {},
}
if job and wark in reg:
@ -1338,6 +1340,7 @@ class Up2k(object):
"t0": now,
"hash": deepcopy(cj["hash"]),
"need": [],
"busy": {},
}
# client-provided, sanitized by _get_wark: name, size, lmod
for k in [
@ -1444,6 +1447,11 @@ class Up2k(object):
if not nchunk:
raise Pebkac(400, "unknown chunk")
if wark in job["busy"]:
raise Pebkac(400, "that chunk is already being written to")
job["busy"][wark] = 1
job["poke"] = time.time()
chunksize = up2k_chunksize(job["size"])
@ -1453,6 +1461,14 @@ class Up2k(object):
return [chunksize, ofs, path, job["lmod"]]
def release_chunk(self, ptop, wark, chash):
    """Clear the busy-marker for an upload so a retry may write it again.

    Invoked by the http layer (in its finally-block) once a chunk write
    has completed or failed.  Unknown warks are ignored; always returns
    [True] so the broker call unblocks the client either way.

    NOTE(review): the busy dict is keyed by wark here, matching the
    insert in handle_chunk, so the chash argument goes unused -- confirm
    whether per-chunk keying (chash) was the actual intent, since a
    wark-level key serializes ALL chunks of one file.
    """
    with self.mutex:
        entry = self.registry[ptop].get(wark)
        if entry:
            entry["busy"].pop(wark, None)
        return [True]
def confirm_chunk(self, ptop, wark, chash):
with self.mutex:
try:
@ -1463,6 +1479,8 @@ class Up2k(object):
except Exception as ex:
return "confirm_chunk, wark, " + repr(ex)
job["busy"].pop(wark, None)
try:
job["need"].remove(chash)
except Exception as ex:

View file

@ -1843,7 +1843,8 @@ function up2k_init(subtle) {
st.bytes.uploaded += cdr - car;
t.bytes_uploaded += cdr - car;
}
else if (txt.indexOf('already got that') !== -1) {
else if (txt.indexOf('already got that') + 1 ||
txt.indexOf('already being written') + 1) {
console.log("ignoring dupe-segment error", t);
}
else {
@ -1851,6 +1852,9 @@ function up2k_init(subtle) {
xhr.status, t.name) + (txt || "no further information"));
return;
}
orz2(xhr);
}
function orz2(xhr) {
apop(st.busy.upload, upt);
apop(t.postlist, npart);
if (!t.postlist.length) {
@ -1872,9 +1876,11 @@ function up2k_init(subtle) {
if (crashed)
return;
toast.err(9.98, "failed to upload a chunk,\n" + tries + " retries so far -- retrying in 10sec\n\n" + t.name);
if (!toast.visible)
toast.warn(9.98, "failed to upload a chunk;\nprobably harmless, continuing\n\n" + t.name);
console.log('chunkpit onerror,', ++tries, t);
setTimeout(do_send, 10 * 1000);
orz2(xhr);
};
xhr.open('POST', t.purl, true);
xhr.setRequestHeader("X-Up2k-Hash", t.hash[npart]);