adjust up2k hashlen to match base64 window
commit be055961ae
parent e3031bdeec
@@ -439,7 +439,7 @@ quick summary of more eccentric web-browsers trying to view a directory index:
 
 copyparty returns a truncated sha512sum of your PUT/POST as base64; you can generate the same checksum locally to verify uploads:
 
-    b512(){ printf "$((sha512sum||shasum -a512)|sed -E 's/ .*//;s/(..)/\\x\1/g')"|base64|head -c43;}
+    b512(){ printf "$((sha512sum||shasum -a512)|sed -E 's/ .*//;s/(..)/\\x\1/g')"|base64|head -c44;}
     b512 <movie.mkv
 
 
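note: a python equivalent of the one-liner, as a sketch; it uses the standard base64 alphabet like the `base64` coreutil in the shell version (swap in `urlsafe_b64encode` if you want the alphabet used by `hashcopy` further down):

    import base64, hashlib

    def b512(path):
        h = hashlib.sha512()
        with open(path, "rb") as f:
            for buf in iter(lambda: f.read(512 * 1024), b""):
                h.update(buf)
        # first 44 base64 chars of the digest, same width as the PUT/POST reply
        return base64.b64encode(h.digest()).decode("ascii")[:44]

    print(b512("movie.mkv"))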
@@ -84,14 +84,14 @@ def thumb_path(histpath, rem, mtime, fmt):
     fn = rem
 
     if rd:
-        h = hashlib.sha512(fsenc(rd)).digest()[:24]
+        h = hashlib.sha512(fsenc(rd)).digest()
         b64 = base64.urlsafe_b64encode(h).decode("ascii")[:24]
         rd = "{}/{}/".format(b64[:2], b64[2:4]).lower() + b64
     else:
         rd = "top"
 
     # could keep original filenames but this is safer re pathlen
-    h = hashlib.sha512(fsenc(fn)).digest()[:24]
+    h = hashlib.sha512(fsenc(fn)).digest()
     fn = base64.urlsafe_b64encode(h).decode("ascii")[:24]
 
     return "{}/th/{}/{}.{:x}.{}".format(
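note: dropping the `[:24]` digest truncation does not change any thumbnail paths; 24 base64 chars encode 18 bytes (24 × 6 = 144 bits), so slicing the encoded string to 24 chars gives the same result whether the digest was pre-cut to 24 bytes or left whole. a quick sketch:

    import base64, hashlib

    d = hashlib.sha512(b"some/dir").digest()
    a = base64.urlsafe_b64encode(d).decode("ascii")[:24]
    b = base64.urlsafe_b64encode(d[:24]).decode("ascii")[:24]
    assert a == b  # both depend only on the first 18 bytes of the digest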
@@ -30,6 +30,7 @@ from .util import (
     s3dec,
     statdir,
     s2hms,
+    min_ex,
 )
 from .mtag import MTag, MParser
 
@@ -39,6 +40,8 @@ try:
 except:
     HAVE_SQLITE3 = False
 
+DB_VER = 4
+
 
 class Up2k(object):
     """
@@ -91,7 +94,7 @@ class Up2k(object):
         thr.start()
 
         # static
-        self.r_hash = re.compile("^[0-9a-zA-Z_-]{43}$")
+        self.r_hash = re.compile("^[0-9a-zA-Z_-]{44}$")
 
         if not HAVE_SQLITE3:
             self.log("could not initialize sqlite3, will use in-memory registry only")
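note: this is the "base64 window" from the commit title; hashes are now cut to 33 bytes before encoding, and 33 is a multiple of 3, so the encoded form is exactly 44 chars with no `=` padding -- hence the regex bump from 43 to 44 and the removal of the `rstrip("=")` calls below. quick check:

    import base64

    print(base64.urlsafe_b64encode(b"\x00" * 32))  # 43 payload chars + one "="
    print(base64.urlsafe_b64encode(b"\x00" * 33))  # exactly 44 chars, no padding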
@@ -887,59 +890,31 @@ class Up2k(object):
         if not existed and ver is None:
             return self._create_db(db_path, cur)
 
-        orig_ver = ver
-        if not ver or ver < 3:
-            bak = "{}.bak.{:x}.v{}".format(db_path, int(time.time()), ver)
-            db = cur.connection
-            cur.close()
-            db.close()
-            msg = "creating new DB (old is bad); backup: {}"
-            if ver:
-                msg = "creating backup before upgrade: {}"
-
-            self.log(msg.format(bak))
-            shutil.copy2(db_path, bak)
-            cur = self._orz(db_path)
-
-        if ver == 1:
-            cur = self._upgrade_v1(cur, db_path)
-            if cur:
-                ver = 2
-
-        if ver == 2:
-            cur = self._create_v3(cur)
-            ver = self._read_ver(cur) if cur else None
-
-        if ver == 3:
-            if orig_ver != ver:
-                cur.connection.commit()
-                cur.execute("vacuum")
-                cur.connection.commit()
-
+        if ver == DB_VER:
             try:
                 nfiles = next(cur.execute("select count(w) from up"))[0]
                 self.log("OK: {} |{}|".format(db_path, nfiles))
                 return cur
-            except Exception as ex:
-                self.log("WARN: could not list files, DB corrupt?\n  " + repr(ex))
+            except:
+                self.log("WARN: could not list files; DB corrupt?\n" + min_ex())
 
-        if cur:
-            db = cur.connection
-            cur.close()
-            db.close()
+        elif ver > DB_VER:
+            m = "database is version {}, this copyparty only supports versions <= {}"
+            raise Exception(m.format(ver, DB_VER))
+
+        bak = "{}.bak.{:x}.v{}".format(db_path, int(time.time()), ver)
+        db = cur.connection
+        cur.close()
+        db.close()
+        msg = "creating new DB (old is bad); backup: {}"
+        if ver:
+            msg = "creating new DB (too old to upgrade); backup: {}"
+
+        self.log(msg.format(bak))
+        os.rename(fsenc(db_path), fsenc(bak))
 
         return self._create_db(db_path, None)
 
-    def _create_db(self, db_path, cur):
-        if not cur:
-            cur = self._orz(db_path)
-
-        self._create_v2(cur)
-        self._create_v3(cur)
-        cur.connection.commit()
-        self.log("created DB at {}".format(db_path))
-        return cur
-
     def _read_ver(self, cur):
         for tab in ["ki", "kv"]:
             try:
@@ -951,65 +926,38 @@ class Up2k(object):
                 if rows:
                     return int(rows[0][0])
 
-    def _create_v2(self, cur):
-        for cmd in [
-            r"create table up (w text, mt int, sz int, rd text, fn text)",
-            r"create index up_rd on up(rd)",
-            r"create index up_fn on up(fn)",
-        ]:
-            cur.execute(cmd)
-        return cur
-
-    def _create_v3(self, cur):
+    def _create_db(self, db_path, cur):
         """
         collision in 2^(n/2) files where n = bits (6 bits/ch)
         10*6/2 = 2^30 = 1'073'741'824, 24.1mb idx 1<<(3*10)
         12*6/2 = 2^36 = 68'719'476'736, 24.8mb idx
         16*6/2 = 2^48 = 281'474'976'710'656, 26.1mb idx
         """
-        for c, ks in [["drop table k", "isv"], ["drop index up_", "w"]]:
-            for k in ks:
-                try:
-                    cur.execute(c + k)
-                except:
-                    pass
+        if not cur:
+            cur = self._orz(db_path)
 
         idx = r"create index up_w on up(substr(w,1,16))"
         if self.no_expr_idx:
             idx = r"create index up_w on up(w)"
 
         for cmd in [
+            r"create table up (w text, mt int, sz int, rd text, fn text)",
+            r"create index up_rd on up(rd)",
+            r"create index up_fn on up(fn)",
             idx,
             r"create table mt (w text, k text, v int)",
             r"create index mt_w on mt(w)",
             r"create index mt_k on mt(k)",
             r"create index mt_v on mt(v)",
             r"create table kv (k text, v int)",
-            r"insert into kv values ('sver', 3)",
+            r"insert into kv values ('sver', {})".format(DB_VER),
         ]:
             cur.execute(cmd)
 
+        cur.connection.commit()
+        self.log("created DB at {}".format(db_path))
         return cur
 
-    def _upgrade_v1(self, odb, db_path):
-        npath = db_path + ".next"
-        if os.path.exists(npath):
-            os.unlink(npath)
-
-        ndb = self._orz(npath)
-        self._create_v2(ndb)
-
-        c = odb.execute("select * from up")
-        for wark, ts, sz, rp in c:
-            rd, fn = rp.rsplit("/", 1) if "/" in rp else ["", rp]
-            v = (wark, ts, sz, rd, fn)
-            ndb.execute("insert into up values (?,?,?,?,?)", v)
-
-        ndb.connection.commit()
-        ndb.connection.close()
-        odb.connection.close()
-        atomic_move(npath, db_path)
-        return self._orz(db_path)
-
     def handle_json(self, cj):
         with self.mutex:
             if not self.register_vpath(cj["ptop"], cj["vcfg"]):
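note: the collision table in the `_create_db` docstring is the birthday bound -- with n bits of hash, a collision is expected around 2^(n/2) entries, and each base64 char carries 6 bits, so the `substr(w,1,16)` index covers 96 bits. reproducing the table:

    # birthday bound for the substring-index widths listed in the docstring
    for chars in (10, 12, 16):
        bits = chars * 6
        print("{} chars = {} bits -> ~2^{} = {:,} files".format(
            chars, bits, bits // 2, 2 ** (bits // 2)))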
@@ -1316,9 +1264,9 @@ class Up2k(object):
                 hashobj.update(buf)
                 rem -= len(buf)
 
-            digest = hashobj.digest()[:32]
+            digest = hashobj.digest()[:33]
             digest = base64.urlsafe_b64encode(digest)
-            ret.append(digest.decode("utf-8").rstrip("="))
+            ret.append(digest.decode("utf-8"))
 
         return ret
 
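note: the old 32-byte cut fell in the middle of a base64 char -- the 43rd char encoded 4 real bits plus 2 zero bits -- while 33 bytes end exactly on the 44-char window. a sketch of old vs new chunk hashes:

    import base64, hashlib

    d = hashlib.sha512(b"chunk 0").digest()
    old = base64.urlsafe_b64encode(d[:32]).decode("utf-8").rstrip("=")  # 43 chars
    new = base64.urlsafe_b64encode(d[:33]).decode("utf-8")              # 44 chars
    assert old[:42] == new[:42]
    # old[42] was new[42] with its low 2 bits zeroed by the mid-char cut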
@@ -1518,12 +1466,12 @@ def up2k_wark_from_hashlist(salt, filesize, hashes):
     ident.extend(hashes)
     ident = "\n".join(ident)
 
-    wark = hashlib.sha512(ident.encode("utf-8")).digest()
+    wark = hashlib.sha512(ident.encode("utf-8")).digest()[:33]
     wark = base64.urlsafe_b64encode(wark)
-    return wark.decode("ascii")[:43]
+    return wark.decode("ascii")
 
 
 def up2k_wark_from_metadata(salt, sz, lastmod, rd, fn):
     ret = fsenc("{}\n{}\n{}\n{}\n{}".format(salt, lastmod, sz, rd, fn))
     ret = base64.urlsafe_b64encode(hashlib.sha512(ret).digest())
-    return "#{}".format(ret[:42].decode("ascii"))
+    return "#{}".format(ret.decode("ascii"))[:44]
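note: both wark flavors now come out at 44 chars -- the hashlist wark as a full 44-char base64 window, the metadata wark as "#" plus the first 43 chars (the slice is applied after the "#" is prepended). a hypothetical width check, assuming the two functions as defined above and placeholder arguments:

    from copyparty.up2k import up2k_wark_from_hashlist, up2k_wark_from_metadata

    w1 = up2k_wark_from_hashlist("salt", 1024, ["x" * 44])
    w2 = up2k_wark_from_metadata("salt", 1024, 0, "dir", "file.bin")
    assert len(w1) == len(w2) == 44  # "#" counts toward the metadata wark's width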
@@ -351,7 +351,7 @@ def ren_open(fname, *args, **kwargs):
             if not b64:
                 b64 = (bname + ext).encode("utf-8", "replace")
                 b64 = hashlib.sha512(b64).digest()[:12]
-                b64 = base64.urlsafe_b64encode(b64).decode("utf-8").rstrip("=")
+                b64 = base64.urlsafe_b64encode(b64).decode("utf-8")
 
             badlen = len(fname)
             while len(fname) >= badlen:
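note: here the cut was already aligned -- 12 bytes is a multiple of 3, so the encoded form was always exactly 16 chars and never padded; the `rstrip("=")` was a no-op:

    import base64
    assert base64.urlsafe_b64encode(b"\x00" * 12) == b"AAAAAAAAAAAAAAAA"  # 16 chars, no "="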
@@ -908,8 +908,8 @@ def hashcopy(actor, fin, fout):
         hashobj.update(buf)
         fout.write(buf)
 
-    digest32 = hashobj.digest()[:32]
-    digest_b64 = base64.urlsafe_b64encode(digest32).decode("utf-8").rstrip("=")
+    digest = hashobj.digest()[:33]
+    digest_b64 = base64.urlsafe_b64encode(digest).decode("utf-8")
 
     return tlen, hashobj.hexdigest(), digest_b64
 
@@ -970,8 +970,8 @@ function up2k_init(subtle) {
         while (segm_next());
 
         var hash_done = function (hashbuf) {
-            var hslice = new Uint8Array(hashbuf).subarray(0, 32),
-                b64str = buf2b64(hslice).replace(/=$/, '');
+            var hslice = new Uint8Array(hashbuf).subarray(0, 33),
+                b64str = buf2b64(hslice);
 
             hashtab[nch] = b64str;
             t.hash.push(nch);
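note: the js client has to slice the same 33-byte window, otherwise its chunk hashes would no longer match the server's 44-char `r_hash` pattern. a python sketch of that check (chunk contents are a placeholder):

    import base64, hashlib, re

    r_hash = re.compile("^[0-9a-zA-Z_-]{44}$")  # same pattern as Up2k.r_hash above

    chunk = b"\x00" * 1024
    h = hashlib.sha512(chunk).digest()[:33]
    assert r_hash.match(base64.urlsafe_b64encode(h).decode("ascii"))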