option to show symlink's lastmod instead of deref;
mainly motivated by u2cli's folder syncing in turbo mode,
which would un-turbo on most dupes due to wrong lastmod

* disabled by default for regular http listings (to avoid confusion in most regular usecases); enable per-request with urlparam lt
* enabled by default for single-level webdav listings (because rclone hits the same issue as u2cli); can be disabled with arg --dav-rt or volflag davrt
* impossible to enable for recursive webdav listings
parent d11e02da49
commit 673b4f7e23
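
The distinction at play is just stat() versus lstat() on a symlink: dereferencing reports the target's mtime, while the link's own mtime is whatever was set on the link itself. A standalone sketch of that behavior (stdlib only, not copyparty code; the filenames are made up):

# standalone illustration, not part of the commit:
# stat() follows a symlink and reports the target's mtime,
# lstat() reports the mtime of the link itself
import os
import tempfile

with tempfile.TemporaryDirectory() as td:
    target = os.path.join(td, "original.bin")   # hypothetical names
    link = os.path.join(td, "dupe.bin")

    with open(target, "wb") as f:
        f.write(b"data")
    os.utime(target, (1000000000, 1000000000))  # pretend the target is old
    os.symlink(target, link)                    # the "dupe" created just now

    print(os.stat(link).st_mtime)   # 1000000000.0 (the target)
    print(os.lstat(link).st_mtime)  # roughly time.time() (the link itself)

With the lt urlparam (or the new webdav default), listings report the second value, which is what u2cli's turbo-mode comparison needs to match on.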
@@ -880,7 +880,7 @@ class Ctl(object):
 print(" ls ~{0}".format(srd))
 zb = self.ar.url.encode("utf-8")
 zb += quotep(rd.replace(b"\\", b"/"))
-r = req_ses.get(zb + b"?ls&dots", headers=headers)
+r = req_ses.get(zb + b"?ls&lt&dots", headers=headers)
 if not r:
     raise Exception("HTTP {0}".format(r.status_code))

@@ -783,6 +783,7 @@ def add_webdav(ap):
     ap2.add_argument("--daw", action="store_true", help="enable full write support, even if client may not be webdav. \033[1;31mWARNING:\033[0m This has side-effects -- PUT-operations will now \033[1;31mOVERWRITE\033[0m existing files, rather than inventing new filenames to avoid loss of data. You might want to instead set this as a volflag where needed. By not setting this flag, uploaded files can get written to a filename which the client does not expect (which might be okay, depending on client)")
     ap2.add_argument("--dav-inf", action="store_true", help="allow depth:infinite requests (recursive file listing); extremely server-heavy but required for spec compliance -- luckily few clients rely on this")
     ap2.add_argument("--dav-mac", action="store_true", help="disable apple-garbage filter -- allow macos to create junk files (._* and .DS_Store, .Spotlight-*, .fseventsd, .Trashes, .AppleDouble, __MACOS)")
+    ap2.add_argument("--dav-rt", action="store_true", help="show symlink-destination's lastmodified instead of the link itself; always enabled for recursive listings (volflag=davrt)")


 def add_smb(ap):

@@ -548,6 +548,8 @@ class VFS(object):
 seen = seen[:] + [fsroot]
 rfiles = [x for x in vfs_ls if not stat.S_ISDIR(x[1].st_mode)]
 rdirs = [x for x in vfs_ls if stat.S_ISDIR(x[1].st_mode)]
+# if lstat: ignore folder symlinks since copyparty will never make those
+# (and we definitely don't want to descend into them)

 rfiles.sort()
 rdirs.sort()

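The two comments added above lean on a stdlib detail: when the listing is taken with lstat, a symlinked folder reports S_ISLNK rather than S_ISDIR, so it falls into the file bucket and is never descended into. A standalone sketch of that bucketing (not the actual VFS code; the function name is made up):

# standalone sketch of the rfiles/rdirs split above; with lstat=True,
# a symlink pointing at a folder is NOT S_ISDIR, so it is never recursed into
import os
import stat

def split_entries(path, lstat=True):
    st_fn = os.lstat if lstat else os.stat
    entries = [(name, st_fn(os.path.join(path, name))) for name in os.listdir(path)]
    rfiles = [x for x in entries if not stat.S_ISDIR(x[1].st_mode)]
    rdirs = [x for x in entries if stat.S_ISDIR(x[1].st_mode)]
    return rfiles, rdirs
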
@@ -13,6 +13,7 @@ def vf_bmap() -> dict[str, str]:
     "no_dedup": "copydupes",
     "no_dupe": "nodupe",
     "no_forget": "noforget",
+    "dav_rt": "davrt",
 }
 for k in (
     "dotsrch",
@@ -142,7 +143,8 @@ flagcats = {
         "lg_sbf": "list of *logue-sandbox safeguards to disable",
     },
     "others": {
-        "fk=8": 'generates per-file accesskeys,\nwhich will then be required at the "g" permission'
+        "fk=8": 'generates per-file accesskeys,\nwhich will then be required at the "g" permission',
+        "davrt": "show lastmod time of symlink destination, not the link itself\n(note: this option is always enabled for recursive listings)",
     },
 }

@@ -403,7 +403,11 @@ class HttpCli(object):
 self.get_pwd_cookie(self.pw)

 if self.is_rclone:
+    # dots: always include dotfiles if permitted
+    # lt: probably more important showing the correct timestamps of any dupes it just uploaded rather than the lastmod time of any non-copyparty-managed symlinks
+    # b: basic-browser if it tries to parse the html listing
     uparam["dots"] = ""
+    uparam["lt"] = ""
     uparam["b"] = ""
     cookies["b"] = ""

@@ -865,10 +869,11 @@ class HttpCli(object):

 props = set(props_lst)
 vn, rem = self.asrv.vfs.get(self.vpath, self.uname, True, False, err=401)
+tap = vn.canonical(rem)
 depth = self.headers.get("depth", "infinity").lower()

 try:
-    topdir = {"vp": "", "st": bos.stat(vn.canonical(rem))}
+    topdir = {"vp": "", "st": bos.stat(tap)}
 except OSError as ex:
     if ex.errno != errno.ENOENT:
         raise

@@ -884,6 +889,9 @@ class HttpCli(object):
     self.reply(zb, 403, "application/xml; charset=utf-8")
     return True

+# this will return symlink-target timestamps
+# because lstat=true would not recurse into subfolders
+# and this is a rare case where we actually want that
 fgen = vn.zipgen(
     rem,
     rem,

@@ -897,7 +905,11 @@ class HttpCli(object):

 elif depth == "1":
     _, vfs_ls, vfs_virt = vn.ls(
-        rem, self.uname, not self.args.no_scandir, [[True, False]]
+        rem,
+        self.uname,
+        not self.args.no_scandir,
+        [[True, False]],
+        lstat="davrt" not in vn.flags,
     )
     if not self.args.ed:
         names = set(exclude_dotfiles([x[0] for x in vfs_ls]))

@@ -931,6 +943,13 @@ class HttpCli(object):
 for x in fgen:
     rp = vjoin(vtop, x["vp"])
     st: os.stat_result = x["st"]
+    mtime = st.st_mtime
+    if stat.S_ISLNK(st.st_mode):
+        try:
+            st = bos.stat(os.path.join(tap, x["vp"]))
+        except:
+            continue
+
     isdir = stat.S_ISDIR(st.st_mode)

     t = "<D:response><D:href>/{}{}</D:href><D:propstat><D:prop>"

@@ -938,7 +957,7 @@ class HttpCli(object):

 pvs: dict[str, str] = {
     "displayname": html_escape(rp.split("/")[-1]),
-    "getlastmodified": formatdate(st.st_mtime, usegmt=True),
+    "getlastmodified": formatdate(mtime, usegmt=True),
     "resourcetype": '<D:collection xmlns:D="DAV:"/>' if isdir else "",
     "supportedlock": '<D:lockentry xmlns:D="DAV:"><D:lockscope><D:exclusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:lockentry>',
 }

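For context, the PROPFIND handler above answers requests like the single-level listing rclone issues; a minimal sketch of such a request with python-requests (the server address is a placeholder, not from the commit):

# minimal sketch of a depth-1 webdav listing (the rclone case);
# the URL is a placeholder for wherever copyparty is running
import requests

r = requests.request(
    "PROPFIND",
    "http://127.0.0.1:3923/some/folder/",
    headers={"Depth": "1"},
)
print(r.status_code)  # 207 Multi-Status on success
print(r.text)         # multistatus XML with one getlastmodified per entry

With the default introduced by this commit, getlastmodified for a symlinked dupe reflects the link itself unless --dav-rt or the davrt volflag is set.
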
@@ -3513,7 +3532,11 @@ class HttpCli(object):
     return self.tx_zip(k, v, self.vpath, vn, rem, [], self.args.ed)

 fsroot, vfs_ls, vfs_virt = vn.ls(
-    rem, self.uname, not self.args.no_scandir, [[True, False], [False, True]]
+    rem,
+    self.uname,
+    not self.args.no_scandir,
+    [[True, False], [False, True]],
+    lstat="lt" in self.uparam,
 )
 stats = {k: v for k, v in vfs_ls}
 ls_names = [x[0] for x in vfs_ls]

@@ -3557,7 +3580,8 @@ class HttpCli(object):
 fspath = fsroot + "/" + fn

 try:
-    inf = stats.get(fn) or bos.stat(fspath)
+    linf = stats.get(fn) or bos.lstat(fspath)
+    inf = bos.stat(fspath) if stat.S_ISLNK(linf.st_mode) else linf
 except:
     self.log("broken symlink: {}".format(repr(fspath)))
     continue

@@ -3579,7 +3603,7 @@ class HttpCli(object):
     margin = "-"

 sz = inf.st_size
-zd = datetime.utcfromtimestamp(inf.st_mtime)
+zd = datetime.utcfromtimestamp(linf.st_mtime)
 dt = zd.strftime("%Y-%m-%d %H:%M:%S")

 try:

@@ -3606,7 +3630,7 @@ class HttpCli(object):
     "sz": sz,
     "ext": ext,
     "dt": dt,
-    "ts": int(inf.st_mtime),
+    "ts": int(linf.st_mtime),
 }
 if is_dir:
     dirs.append(item)

@@ -111,6 +111,7 @@ authenticate using header `Cookie: cppwd=foo` or url param `&pw=foo`
 | GET | `?ls&dots` | list files/folders at URL as JSON, including dotfiles |
 | GET | `?ls=t` | list files/folders at URL as plaintext |
 | GET | `?ls=v` | list files/folders at URL, terminal-formatted |
+| GET | `?lt` | in listings, use symlink timestamps rather than targets |
 | GET | `?b` | list files/folders at URL as simplified HTML |
 | GET | `?tree=.` | list one level of subdirectories inside URL |
 | GET | `?tree` | list one level of subdirectories for each level until URL |
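
A small usage sketch for the new ?lt row (again with python-requests; the address is a placeholder, and the JSON field names are an assumption based on the listing code in this commit rather than a documented contract):

# fetch the same folder twice: default (dereferenced) timestamps,
# then with &lt so symlinks report their own lastmod
import requests

base = "http://127.0.0.1:3923/some/folder/"  # placeholder

deref = requests.get(base + "?ls&dots").json()
linked = requests.get(base + "?ls&lt&dots").json()

# NOTE: "files" and "ts" are assumed field names; adjust to the actual JSON
for a, b in zip(deref.get("files", []), linked.get("files", [])):
    if a.get("ts") != b.get("ts"):
        print("symlinked dupe:", a, "->", b.get("ts"))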