ensure consistency between db tables

This commit is contained in:
ed 2022-12-10 22:13:21 +00:00
parent 56b73dcc8a
commit 02ad4bfab2
2 changed files with 22 additions and 0 deletions

View file

@@ -834,6 +834,7 @@ def run_argparse(
ap2.add_argument("--no-hash", metavar="PTN", type=u, help="regex: disable hashing of matching paths during e2ds folder scans (volflag=nohash)") ap2.add_argument("--no-hash", metavar="PTN", type=u, help="regex: disable hashing of matching paths during e2ds folder scans (volflag=nohash)")
ap2.add_argument("--no-idx", metavar="PTN", type=u, help="regex: disable indexing of matching paths during e2ds folder scans (volflag=noidx)") ap2.add_argument("--no-idx", metavar="PTN", type=u, help="regex: disable indexing of matching paths during e2ds folder scans (volflag=noidx)")
ap2.add_argument("--no-dhash", action="store_true", help="disable rescan acceleration; do full database integrity check -- makes the db ~5%% smaller and bootup/rescans 3~10x slower") ap2.add_argument("--no-dhash", action="store_true", help="disable rescan acceleration; do full database integrity check -- makes the db ~5%% smaller and bootup/rescans 3~10x slower")
ap2.add_argument("--re-dhash", action="store_true", help="rebuild the cache if it gets out of sync (for example crash on startup during metadata scanning)")
ap2.add_argument("--no-forget", action="store_true", help="never forget indexed files, even when deleted from disk -- makes it impossible to ever upload the same file twice (volflag=noforget)") ap2.add_argument("--no-forget", action="store_true", help="never forget indexed files, even when deleted from disk -- makes it impossible to ever upload the same file twice (volflag=noforget)")
ap2.add_argument("--dbd", metavar="PROFILE", default="wal", help="database durability profile; sets the tradeoff between robustness and speed, see --help-dbd (volflag=dbd)") ap2.add_argument("--dbd", metavar="PROFILE", default="wal", help="database durability profile; sets the tradeoff between robustness and speed, see --help-dbd (volflag=dbd)")
ap2.add_argument("--xlink", action="store_true", help="on upload: check all volumes for dupes, not just the target volume (volflag=xlink)") ap2.add_argument("--xlink", action="store_true", help="on upload: check all volumes for dupes, not just the target volume (volflag=xlink)")

View file

@@ -182,9 +182,14 @@ class Up2k(object):
have_e2d = self.init_indexes(all_vols, []) have_e2d = self.init_indexes(all_vols, [])
if self.stop: if self.stop:
# up-mt consistency not guaranteed if init is interrupted;
# drop caches for a full scan on next boot
self._drop_caches()
if self.pp: if self.pp:
self.pp.end = True self.pp.end = True
self.pp = None self.pp = None
return return
if not self.pp and self.args.exit == "idx": if not self.pp and self.args.exit == "idx":
@@ -470,6 +475,10 @@ class Up2k(object):
if next((zv for zv in vols if "e2ds" in zv.flags), None): if next((zv for zv in vols if "e2ds" in zv.flags), None):
self._block("indexing") self._block("indexing")
if self.args.re_dhash:
self.args.re_dhash = False
self._drop_caches()
for vol in vols: for vol in vols:
if self.stop: if self.stop:
break break
@@ -1248,6 +1257,18 @@ class Up2k(object):
return ret return ret
def _drop_caches(self) -> None:
    """Invalidate the rescan-acceleration (dh) cache in every volume's db.

    Marks each volume for a tag rescan and deletes its directory-hash
    rows, forcing a full filesystem scan on the next indexing pass --
    used when the cache may be out of sync with the up/mt tables.
    """
    self.log("dropping caches for a full filesystem scan")
    for vol in self.asrv.vfs.all_vols.values():
        registered = self.register_vpath(vol.realpath, vol.flags)
        if registered:
            cur = registered[0]
            # flag volume as needing a tag rescan, then wipe dir-hashes
            self._set_tagscan(cur, True)
            cur.execute("delete from dh")
            cur.connection.commit()
def _set_tagscan(self, cur: "sqlite3.Cursor", need: bool) -> bool: def _set_tagscan(self, cur: "sqlite3.Cursor", need: bool) -> bool:
if self.args.no_dhash: if self.args.no_dhash:
return False return False