summaryrefslogtreecommitdiff
path: root/devel
diff options
context:
space:
mode:
Diffstat (limited to 'devel')
-rw-r--r--devel/Makefile22
-rw-r--r--devel/R-cran-devtools/Makefile2
-rw-r--r--devel/R-cran-gargle/Makefile2
-rw-r--r--devel/R-cran-pkgload/Makefile2
-rw-r--r--devel/R-cran-usethis/Makefile2
-rw-r--r--devel/fossil/Makefile2
-rw-r--r--devel/genromfs/Makefile20
-rw-r--r--devel/genromfs/distinfo3
-rw-r--r--devel/genromfs/files/patch-Makefile36
-rw-r--r--devel/genromfs/pkg-descr6
-rw-r--r--devel/gvfs/Makefile89
-rw-r--r--devel/gvfs/distinfo3
-rw-r--r--devel/gvfs/files/patch-client_gvfsfusedaemon.c12
-rw-r--r--devel/gvfs/files/patch-daemon_gvfswritechannel.c10
-rw-r--r--devel/gvfs/pkg-descr4
-rw-r--r--devel/gvfs/pkg-plist156
-rw-r--r--devel/libbde/Makefile31
-rw-r--r--devel/libbde/distinfo3
-rw-r--r--devel/libbde/pkg-descr2
-rw-r--r--devel/libbde/pkg-plist19
-rw-r--r--devel/libfsapfs/Makefile32
-rw-r--r--devel/libfsapfs/distinfo3
-rw-r--r--devel/libfsapfs/pkg-descr1
-rw-r--r--devel/libfsapfs/pkg-plist19
-rw-r--r--devel/libfsext/Makefile30
-rw-r--r--devel/libfsext/distinfo3
-rw-r--r--devel/libfsext/pkg-descr1
-rw-r--r--devel/libfsext/pkg-plist19
-rw-r--r--devel/libfsfat/Makefile30
-rw-r--r--devel/libfsfat/distinfo3
-rw-r--r--devel/libfsfat/pkg-descr1
-rw-r--r--devel/libfsfat/pkg-plist19
-rw-r--r--devel/libfshfs/Makefile30
-rw-r--r--devel/libfshfs/distinfo3
-rw-r--r--devel/libfshfs/pkg-descr1
-rw-r--r--devel/libfshfs/pkg-plist19
-rw-r--r--devel/libfsntfs/Makefile34
-rw-r--r--devel/libfsntfs/distinfo3
-rw-r--r--devel/libfsntfs/pkg-descr1
-rw-r--r--devel/libfsntfs/pkg-plist18
-rw-r--r--devel/libfsxfs/Makefile30
-rw-r--r--devel/libfsxfs/distinfo3
-rw-r--r--devel/libfsxfs/pkg-descr1
-rw-r--r--devel/libfsxfs/pkg-plist19
-rw-r--r--devel/libmodi/Makefile32
-rw-r--r--devel/libmodi/distinfo3
-rw-r--r--devel/libmodi/pkg-descr1
-rw-r--r--devel/libmodi/pkg-plist18
-rw-r--r--devel/libublio/pkg-descr2
-rw-r--r--devel/py-dask/Makefile2
-rw-r--r--devel/py-etils/Makefile6
-rw-r--r--devel/py-fs/Makefile20
-rw-r--r--devel/py-fs/distinfo2
-rw-r--r--devel/py-fs/files/patch-2to35882
-rw-r--r--devel/py-fs/pkg-descr8
-rw-r--r--devel/py-fs2/Makefile26
-rw-r--r--devel/py-fs2/distinfo3
-rw-r--r--devel/py-fs2/pkg-descr5
-rw-r--r--devel/py-fsspec-xrootd/Makefile26
-rw-r--r--devel/py-fsspec-xrootd/distinfo3
-rw-r--r--devel/py-fsspec-xrootd/pkg-descr3
-rw-r--r--devel/py-fsspec/Makefile28
-rw-r--r--devel/py-fsspec/distinfo3
-rw-r--r--devel/py-fsspec/pkg-descr3
-rw-r--r--devel/py-fusepy/Makefile24
-rw-r--r--devel/py-fusepy/distinfo3
-rw-r--r--devel/py-fusepy/pkg-descr2
-rw-r--r--devel/py-gcsfs/Makefile37
-rw-r--r--devel/py-gcsfs/distinfo3
-rw-r--r--devel/py-gcsfs/pkg-descr1
-rw-r--r--devel/py-libioc/Makefile2
-rw-r--r--devel/py-libzfs/Makefile35
-rw-r--r--devel/py-libzfs/distinfo3
-rw-r--r--devel/py-libzfs/files/extra-zpool-add.patch44
-rw-r--r--devel/py-libzfs/pkg-descr1
-rw-r--r--devel/py-llfuse/Makefile26
-rw-r--r--devel/py-llfuse/distinfo3
-rw-r--r--devel/py-llfuse/pkg-descr3
-rw-r--r--devel/py-pyfakefs/Makefile24
-rw-r--r--devel/py-pyfakefs/distinfo3
-rw-r--r--devel/py-pyfakefs/pkg-descr13
-rw-r--r--devel/py-pyyaml-include/Makefile2
-rw-r--r--devel/py-s3fs/Makefile33
-rw-r--r--devel/py-s3fs/distinfo3
-rw-r--r--devel/py-s3fs/pkg-descr2
-rw-r--r--devel/py-uproot/Makefile6
-rw-r--r--devel/py-zarr/Makefile2
-rw-r--r--devel/rubygem-aws-sdk-efs/Makefile20
-rw-r--r--devel/rubygem-aws-sdk-efs/distinfo3
-rw-r--r--devel/rubygem-aws-sdk-efs/pkg-descr1
-rw-r--r--devel/rubygem-aws-sdk-resources/Makefile2
-rw-r--r--devel/tclvfs/Makefile18
-rw-r--r--devel/tclvfs/distinfo2
-rw-r--r--devel/tclvfs/files/patch-Makefile.in12
-rw-r--r--devel/tclvfs/files/patch-generic_vfs.c14
-rw-r--r--devel/tclvfs/pkg-descr4
-rw-r--r--devel/tclvfs/pkg-plist28
97 files changed, 17 insertions, 7187 deletions
diff --git a/devel/Makefile b/devel/Makefile
index 873d4182ce39..4471d057fb87 100644
--- a/devel/Makefile
+++ b/devel/Makefile
@@ -798,7 +798,6 @@
SUBDIR += gengetopt
SUBDIR += genht
SUBDIR += genie
- SUBDIR += genromfs
SUBDIR += getoptions
SUBDIR += gettext
SUBDIR += gettext-lint
@@ -942,7 +941,6 @@
SUBDIR += guile-lib
SUBDIR += gum
SUBDIR += gumbo
- SUBDIR += gvfs
SUBDIR += gvp
SUBDIR += gwenhywfar
SUBDIR += gwenhywfar-fox16
@@ -1210,7 +1208,6 @@
SUBDIR += libayatana-indicator
SUBDIR += libb2
SUBDIR += libbacktrace
- SUBDIR += libbde
SUBDIR += libbegemot
SUBDIR += libbfd
SUBDIR += libbinio
@@ -1293,12 +1290,6 @@
SUBDIR += libfort
SUBDIR += libfortuna
SUBDIR += libfreefare
- SUBDIR += libfsapfs
- SUBDIR += libfsext
- SUBDIR += libfsfat
- SUBDIR += libfshfs
- SUBDIR += libfsntfs
- SUBDIR += libfsxfs
SUBDIR += libftdi
SUBDIR += libftdi1
SUBDIR += libfwnt
@@ -1360,7 +1351,6 @@
SUBDIR += libmcfp
SUBDIR += libmill
SUBDIR += libmimedir
- SUBDIR += libmodi
SUBDIR += libmowgli
SUBDIR += libmowgli2
SUBDIR += libmpcbdm
@@ -4889,16 +4879,11 @@
SUBDIR += py-frictionless-ckan-mapper
SUBDIR += py-frozendict
SUBDIR += py-frozenlist
- SUBDIR += py-fs
- SUBDIR += py-fs2
- SUBDIR += py-fsspec
- SUBDIR += py-fsspec-xrootd
SUBDIR += py-fudge
SUBDIR += py-funcparserlib
SUBDIR += py-funcsigs
SUBDIR += py-funcy
SUBDIR += py-furl
- SUBDIR += py-fusepy
SUBDIR += py-future
SUBDIR += py-futurist
SUBDIR += py-fuzzywuzzy
@@ -4907,7 +4892,6 @@
SUBDIR += py-game_sdl2
SUBDIR += py-gapic-generator
SUBDIR += py-gast
- SUBDIR += py-gcsfs
SUBDIR += py-gelidum
SUBDIR += py-genson
SUBDIR += py-genty
@@ -5148,7 +5132,6 @@
SUBDIR += py-libusb1
SUBDIR += py-libversion
SUBDIR += py-libvirt
- SUBDIR += py-libzfs
SUBDIR += py-lief
SUBDIR += py-line-profiler
SUBDIR += py-linear-tsv
@@ -5157,7 +5140,6 @@
SUBDIR += py-littleutils
SUBDIR += py-livemark
SUBDIR += py-lizard
- SUBDIR += py-llfuse
SUBDIR += py-llvmcpy
SUBDIR += py-llvmlite
SUBDIR += py-lml
@@ -5535,7 +5517,6 @@
SUBDIR += py-pyee11
SUBDIR += py-pyelftools
SUBDIR += py-pyface
- SUBDIR += py-pyfakefs
SUBDIR += py-pyfcm
SUBDIR += py-pyflakes
SUBDIR += py-pyformance
@@ -5834,7 +5815,6 @@
SUBDIR += py-rush
SUBDIR += py-rx
SUBDIR += py-rx1
- SUBDIR += py-s3fs
SUBDIR += py-saneyaml
SUBDIR += py-sarge
SUBDIR += py-sarif-om
@@ -6564,7 +6544,6 @@
SUBDIR += rubygem-aws-sdk-ecr
SUBDIR += rubygem-aws-sdk-ecrpublic
SUBDIR += rubygem-aws-sdk-ecs
- SUBDIR += rubygem-aws-sdk-efs
SUBDIR += rubygem-aws-sdk-eks
SUBDIR += rubygem-aws-sdk-eksauth
SUBDIR += rubygem-aws-sdk-elasticache
@@ -8177,7 +8156,6 @@
SUBDIR += tclreadline
SUBDIR += tclthread
SUBDIR += tcltls
- SUBDIR += tclvfs
SUBDIR += tclxml
SUBDIR += tdl
SUBDIR += template-glib
diff --git a/devel/R-cran-devtools/Makefile b/devel/R-cran-devtools/Makefile
index 9857d76d4ed1..c6577a925f5b 100644
--- a/devel/R-cran-devtools/Makefile
+++ b/devel/R-cran-devtools/Makefile
@@ -13,7 +13,7 @@ LICENSE_FILE= ${WRKSRC}/LICENSE
CRAN_DEPENDS= R-cran-cli>=3.3.0:devel/R-cran-cli \
R-cran-desc>=1.4.1:devel/R-cran-desc \
R-cran-ellipsis>=0.3.2:devel/R-cran-ellipsis \
- R-cran-fs>=1.5.2:sysutils/R-cran-fs \
+ R-cran-fs>=1.5.2:filesystems/R-cran-fs \
R-cran-httr>=1.4.2:www/R-cran-httr \
R-cran-lifecycle>=1.0.1:devel/R-cran-lifecycle \
R-cran-memoise>=2.0.1:devel/R-cran-memoise \
diff --git a/devel/R-cran-gargle/Makefile b/devel/R-cran-gargle/Makefile
index 7be4d7afbbdf..bdad5d073ab9 100644
--- a/devel/R-cran-gargle/Makefile
+++ b/devel/R-cran-gargle/Makefile
@@ -10,7 +10,7 @@ WWW= https://gargle.r-lib.org/
LICENSE= MIT
CRAN_DEPENDS= R-cran-cli>=3.0.1:devel/R-cran-cli \
- R-cran-fs>=1.3.1:sysutils/R-cran-fs \
+ R-cran-fs>=1.3.1:filesystems/R-cran-fs \
R-cran-glue>=1.3.0:devel/R-cran-glue \
R-cran-httr>=1.4.5:www/R-cran-httr \
R-cran-jsonlite>0:converters/R-cran-jsonlite \
diff --git a/devel/R-cran-pkgload/Makefile b/devel/R-cran-pkgload/Makefile
index 15b5bada5a6d..05d40c99ef32 100644
--- a/devel/R-cran-pkgload/Makefile
+++ b/devel/R-cran-pkgload/Makefile
@@ -11,7 +11,7 @@ LICENSE= GPLv3
RUN_DEPENDS= R-cran-cli>=3.3.0:devel/R-cran-cli \
R-cran-desc>0:devel/R-cran-desc \
- R-cran-fs>0:sysutils/R-cran-fs \
+ R-cran-fs>0:filesystems/R-cran-fs \
R-cran-glue>0:devel/R-cran-glue \
R-cran-lifecycle>0:devel/R-cran-lifecycle \
R-cran-pkgbuild>0:devel/R-cran-pkgbuild \
diff --git a/devel/R-cran-usethis/Makefile b/devel/R-cran-usethis/Makefile
index c928160e266b..a3c6d6bbfa88 100644
--- a/devel/R-cran-usethis/Makefile
+++ b/devel/R-cran-usethis/Makefile
@@ -14,7 +14,7 @@ CRAN_DEPENDS= R-cran-cli>=3.0.1:devel/R-cran-cli \
R-cran-crayon>0:devel/R-cran-crayon \
R-cran-curl>=2.7:ftp/R-cran-curl \
R-cran-desc>=1.4.2:devel/R-cran-desc \
- R-cran-fs>=1.3.0:sysutils/R-cran-fs \
+ R-cran-fs>=1.3.0:filesystems/R-cran-fs \
R-cran-gert>=1.4.1:devel/R-cran-gert \
R-cran-gh>=1.2.1:www/R-cran-gh \
R-cran-glue>=1.3.0:devel/R-cran-glue \
diff --git a/devel/fossil/Makefile b/devel/fossil/Makefile
index 93ca6ae93ce1..69b6ce89eb18 100644
--- a/devel/fossil/Makefile
+++ b/devel/fossil/Makefile
@@ -41,7 +41,7 @@ TCL_CONFIGURE_ON= --with-tcl-stubs --with-tcl=${TCL_LIBDIR}
TCL_USES= tcl
TCL_CFLAGS= -DTCL_LIBRARY_NAME=\\\"libtcl${TCL_SHLIB_VER}.so\\\" \
-DTCL_MINOR_OFFSET=7
-FUSE_LIB_DEPENDS= libfuse.so:sysutils/fusefs-libs
+FUSE_LIB_DEPENDS= libfuse.so:filesystems/fusefs-libs
FUSE_USES= localbase:ldflags
post-patch-FUSE-on:
diff --git a/devel/genromfs/Makefile b/devel/genromfs/Makefile
deleted file mode 100644
index 3a5848bbdf9f..000000000000
--- a/devel/genromfs/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PORTNAME= genromfs
-PORTVERSION= 0.5.7
-PORTREVISION= 1
-CATEGORIES= devel
-
-MAINTAINER= tomek@cedro.info
-COMMENT= Mkfs equivalent for romfs filesystem
-WWW= https://github.com/chexum/genromfs
-
-LICENSE= GPLv2
-
-USE_GITHUB= yes
-GH_ACCOUNT= chexum
-
-MAKE_ARGS= PREFIX=${STAGEDIR}${PREFIX}
-
-PLIST_FILES= bin/genromfs \
- share/man/man8/genromfs.8.gz
-
-.include <bsd.port.mk>
diff --git a/devel/genromfs/distinfo b/devel/genromfs/distinfo
deleted file mode 100644
index d56cda096967..000000000000
--- a/devel/genromfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1634516173
-SHA256 (chexum-genromfs-0.5.7_GH0.tar.gz) = 2d16d217b11a28809454ddab0cd7c1c0865af8ea79ac0e86af03ab82320f02ab
-SIZE (chexum-genromfs-0.5.7_GH0.tar.gz) = 27929
diff --git a/devel/genromfs/files/patch-Makefile b/devel/genromfs/files/patch-Makefile
deleted file mode 100644
index 1b9e509d6c3e..000000000000
--- a/devel/genromfs/files/patch-Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
---- Makefile.orig 2009-02-15 10:48:15 UTC
-+++ Makefile
-@@ -5,7 +5,7 @@ all: genromfs
-
- PACKAGE = genromfs
- VERSION = 0.5.7
--CC = gcc
-+CC = cc
- CFLAGS = -O2 -Wall -DVERSION=\"$(VERSION)\"#-g#
- LDFLAGS = -s#-g
-
-@@ -17,9 +17,9 @@ FILES = COPYING NEWS ChangeLog Makefile \
- checkdist \
- .gitignore selftest
-
--prefix = /usr
--bindir = $(prefix)/bin
--mandir = $(prefix)/man
-+PREFIX ?= /usr
-+bindir = /bin
-+mandir = /share/man
-
- genromfs: genromfs.o
- $(CC) $(LDFLAGS) genromfs.o -o genromfs
-@@ -53,11 +53,6 @@ install-bin:
- install -m 755 genromfs $(PREFIX)$(bindir)/
-
- install-man:
-- # genromfs 0.5 installed the man page in this file,
-- # remove it before someone notices :)
-- if [ -f $(PREFIX)$(bindir)/man8 ]; then \
-- rm -f $(PREFIX)$(bindir)/man8; \
-- fi
- mkdir -p $(PREFIX)$(mandir)/man8
- install -m 644 genromfs.8 $(PREFIX)$(mandir)/man8/
-
diff --git a/devel/genromfs/pkg-descr b/devel/genromfs/pkg-descr
deleted file mode 100644
index af0611e1fed2..000000000000
--- a/devel/genromfs/pkg-descr
+++ /dev/null
@@ -1,6 +0,0 @@
-The mkfs equivalent for romfs filesystem. Port created for NuttX RTOS.
-ROM FileSystem (ROMFS), read only filesystem, mainly for initial RAM
-disks of installation disks. Using this filesystem, you get a very
-similar feature, and even the possibility of a small kernel, with a
-file system which doesn't take up useful memory from the router
-functions in the basement of your office.
diff --git a/devel/gvfs/Makefile b/devel/gvfs/Makefile
deleted file mode 100644
index b303965b2c7f..000000000000
--- a/devel/gvfs/Makefile
+++ /dev/null
@@ -1,89 +0,0 @@
-PORTNAME= gvfs
-PORTVERSION= 1.50.2
-PORTREVISION= 3
-CATEGORIES= devel gnome
-MASTER_SITES= GNOME
-DIST_SUBDIR= gnome
-
-MAINTAINER= gnome@FreeBSD.org
-COMMENT= GNOME virtual file system
-WWW= https://www.gnome.org/
-
-LICENSE= GPLv2
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-BUILD_DEPENDS= docbook-xsl>=0:textproc/docbook-xsl
-LIB_DEPENDS= libgcrypt.so:security/libgcrypt \
- libdbus-1.so:devel/dbus \
- libgcr-base-3.so:security/gcr3 \
- libsoup-3.0.so:devel/libsoup3 \
- libgudev-1.0.so:devel/libgudev \
- libudisks2.so:sysutils/libudisks \
- libsecret-1.so:security/libsecret
-RUN_DEPENDS= bsdisks>0:sysutils/bsdisks \
- lsof:sysutils/lsof
-PORTSCOUT= limitw:1,even
-
-USES= cpe gettext gnome libarchive localbase meson \
- pkgconfig python:build shebangfix tar:xz
-USE_GNOME= glib20 libxml2 libxslt:build
-SHEBANG_FILES= meson_post_install.py
-MESON_ARGS= -Dsystemduserunitdir=no \
- -Dtmpfilesdir=no \
- -Dadmin=false \
- -Dlogind=false \
- -Dlibusb=false \
- -Dman=true
-USE_LDCONFIG= yes
-CPE_VENDOR= gnome
-
-GLIB_SCHEMAS= org.gnome.system.gvfs.enums.xml
-
-OPTIONS_SUB= yes
-OPTIONS_DEFINE= AVAHI FUSE AFC GOA SMB CDDA GOOGLE GPHOTO MTP NFS BLURAY
-OPTIONS_DEFAULT=AVAHI SMB CDDA GPHOTO MTP NFS BLURAY
-
-AFC_DESC= Apple device support
-GOA_DESC= GNOME Online Accounts volume monitor support
-GOOGLE_DESC= Google backend
-
-AVAHI_LIB_DEPENDS= libavahi-client.so:net/avahi-app
-AVAHI_MESON_TRUE= dnssd
-AVAHI_VARS= GLIB_SCHEMAS+=org.gnome.system.dns_sd.gschema.xml
-
-BLURAY_LIB_DEPENDS= libbluray.so:multimedia/libbluray
-BLURAY_MESON_TRUE= bluray
-BLURAY_DESC= Blu-ray metadata support
-
-FUSE_USES= fuse:3
-FUSE_MESON_TRUE= fuse
-
-AFC_LIB_DEPENDS= libimobiledevice-1.0.so:comms/libimobiledevice \
- libplist-2.0.so:devel/libplist
-AFC_MESON_TRUE= afc
-
-GOA_LIB_DEPENDS= libgoa-1.0.so:net/gnome-online-accounts
-GOA_MESON_TRUE= goa
-
-SMB_USES= samba:lib
-SMB_MESON_TRUE= smb
-SMB_VARS= GLIB_SCHEMAS+=org.gnome.system.smb.gschema.xml
-
-CDDA_LIB_DEPENDS= libcdio_paranoia.so:sysutils/libcdio-paranoia \
- libcdio.so:sysutils/libcdio
-CDDA_MESON_TRUE= cdda
-
-GOOGLE_IMPLIES= GOA
-GOOGLE_LIB_DEPENDS= libgdata.so:devel/libgdata
-GOOGLE_MESON_TRUE= google
-
-GPHOTO_LIB_DEPENDS= libgphoto2.so:graphics/libgphoto2
-GPHOTO_MESON_TRUE= gphoto2
-
-MTP_LIB_DEPENDS= libmtp.so:multimedia/libmtp
-MTP_MESON_TRUE= mtp
-
-NFS_LIB_DEPENDS= libnfs.so:net/libnfs
-NFS_MESON_TRUE= nfs
-
-.include <bsd.port.mk>
diff --git a/devel/gvfs/distinfo b/devel/gvfs/distinfo
deleted file mode 100644
index 57983f695276..000000000000
--- a/devel/gvfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1658680082
-SHA256 (gnome/gvfs-1.50.2.tar.xz) = 03d72b8c15ef438110f0cf457b5655266c8b515d0412b30f4d55cfa0da06ac5e
-SIZE (gnome/gvfs-1.50.2.tar.xz) = 1214464
diff --git a/devel/gvfs/files/patch-client_gvfsfusedaemon.c b/devel/gvfs/files/patch-client_gvfsfusedaemon.c
deleted file mode 100644
index 0d9c69189b48..000000000000
--- a/devel/gvfs/files/patch-client_gvfsfusedaemon.c
+++ /dev/null
@@ -1,12 +0,0 @@
---- client/gvfsfusedaemon.c.orig 2012-05-15 20:01:49.000000000 +0200
-+++ client/gvfsfusedaemon.c 2012-05-31 14:00:26.000000000 +0200
-@@ -2361,7 +2361,9 @@
- subthread = g_thread_new ("gvfs-fuse-sub", (GThreadFunc) subthread_main, NULL);
-
- /* Indicate O_TRUNC support for open() */
-+#ifdef FUSE_CAP_ATOMIC_O_TRUNC
- conn->want |= FUSE_CAP_ATOMIC_O_TRUNC;
-+#endif
-
- return NULL;
- }
diff --git a/devel/gvfs/files/patch-daemon_gvfswritechannel.c b/devel/gvfs/files/patch-daemon_gvfswritechannel.c
deleted file mode 100644
index 54c4216873e4..000000000000
--- a/devel/gvfs/files/patch-daemon_gvfswritechannel.c
+++ /dev/null
@@ -1,10 +0,0 @@
---- daemon/gvfswritechannel.c.orig 2007-12-22 18:50:10.000000000 -0500
-+++ daemon/gvfswritechannel.c 2007-12-22 18:50:20.000000000 -0500
-@@ -30,6 +30,7 @@
- #include <sys/un.h>
- #include <unistd.h>
- #include <fcntl.h>
-+#include <string.h>
-
- #include <glib.h>
- #include <glib-object.h>
diff --git a/devel/gvfs/pkg-descr b/devel/gvfs/pkg-descr
deleted file mode 100644
index 5ea65916b9a7..000000000000
--- a/devel/gvfs/pkg-descr
+++ /dev/null
@@ -1,4 +0,0 @@
-GVFS allows applications and users to treat any number of file system
-concepts as a part of the local filesystem. With GVFS, filesystems across
-the internet, on connected devices, and in multiple formats are as simple
-to access (and write code for) as any directory on the local machine.
diff --git a/devel/gvfs/pkg-plist b/devel/gvfs/pkg-plist
deleted file mode 100644
index b4919f0bf62e..000000000000
--- a/devel/gvfs/pkg-plist
+++ /dev/null
@@ -1,156 +0,0 @@
-include/gvfs-client/gvfs/gvfsurimapper.h
-include/gvfs-client/gvfs/gvfsuriutils.h
-lib/gio/modules/libgioremote-volume-monitor.so
-lib/gio/modules/libgvfsdbus.so
-lib/gvfs/libgvfscommon.so
-lib/gvfs/libgvfsdaemon.so
-%%AFC%%libexec/gvfs-afc-volume-monitor
-%%GOA%%libexec/gvfs-goa-volume-monitor
-%%GPHOTO%%libexec/gvfs-gphoto2-volume-monitor
-%%MTP%%libexec/gvfs-mtp-volume-monitor
-libexec/gvfs-udisks2-volume-monitor
-libexec/gvfsd
-%%AFC%%libexec/gvfsd-afc
-libexec/gvfsd-afp
-libexec/gvfsd-afp-browse
-libexec/gvfsd-archive
-libexec/gvfsd-burn
-%%CDDA%%libexec/gvfsd-cdda
-libexec/gvfsd-computer
-libexec/gvfsd-dav
-%%AVAHI%%libexec/gvfsd-dnssd
-libexec/gvfsd-ftp
-%%FUSE%%libexec/gvfsd-fuse
-%%GOOGLE%%libexec/gvfsd-google
-%%GPHOTO%%libexec/gvfsd-gphoto2
-libexec/gvfsd-http
-libexec/gvfsd-localtest
-libexec/gvfsd-metadata
-%%MTP%%libexec/gvfsd-mtp
-libexec/gvfsd-network
-%%NFS%%libexec/gvfsd-nfs
-libexec/gvfsd-recent
-libexec/gvfsd-sftp
-%%SMB%%libexec/gvfsd-smb
-%%SMB%%libexec/gvfsd-smb-browse
-libexec/gvfsd-trash
-%%FUSE%%share/man/man1/gvfsd-fuse.1.gz
-share/man/man1/gvfsd-metadata.1.gz
-share/man/man1/gvfsd.1.gz
-share/man/man7/gvfs.7.gz
-%%AVAHI%%share/GConf/gsettings/gvfs-dns-sd.convert
-%%SMB%%share/GConf/gsettings/gvfs-smb.convert
-share/dbus-1/services/org.gtk.vfs.Daemon.service
-%%AFC%%share/dbus-1/services/org.gtk.vfs.AfcVolumeMonitor.service
-%%GPHOTO%%share/dbus-1/services/org.gtk.vfs.GPhoto2VolumeMonitor.service
-%%GOA%%share/dbus-1/services/org.gtk.vfs.GoaVolumeMonitor.service
-%%MTP%%share/dbus-1/services/org.gtk.vfs.MTPVolumeMonitor.service
-share/dbus-1/services/org.gtk.vfs.Metadata.service
-share/dbus-1/services/org.gtk.vfs.UDisks2VolumeMonitor.service
-%%AFC%%%%DATADIR%%/mounts/afc.mount
-%%DATADIR%%/mounts/afp-browse.mount
-%%DATADIR%%/mounts/afp.mount
-%%DATADIR%%/mounts/archive.mount
-%%DATADIR%%/mounts/burn.mount
-%%CDDA%%%%DATADIR%%/mounts/cdda.mount
-%%DATADIR%%/mounts/computer.mount
-%%AVAHI%%%%DATADIR%%/mounts/dav+sd.mount
-%%DATADIR%%/mounts/dav.mount
-%%AVAHI%%%%DATADIR%%/mounts/dns-sd.mount
-%%DATADIR%%/mounts/ftp.mount
-%%DATADIR%%/mounts/ftpis.mount
-%%DATADIR%%/mounts/ftps.mount
-%%GOOGLE%%%%DATADIR%%/mounts/google.mount
-%%GPHOTO%%%%DATADIR%%/mounts/gphoto2.mount
-%%DATADIR%%/mounts/http.mount
-%%DATADIR%%/mounts/localtest.mount
-%%MTP%%%%DATADIR%%/mounts/mtp.mount
-%%DATADIR%%/mounts/network.mount
-%%NFS%%%%DATADIR%%/mounts/nfs.mount
-%%DATADIR%%/mounts/recent.mount
-%%DATADIR%%/mounts/sftp.mount
-%%SMB%%%%DATADIR%%/mounts/smb-browse.mount
-%%SMB%%%%DATADIR%%/mounts/smb.mount
-%%DATADIR%%/mounts/trash.mount
-%%AFC%%%%DATADIR%%/remote-volume-monitors/afc.monitor
-%%GOA%%%%DATADIR%%/remote-volume-monitors/goa.monitor
-%%GPHOTO%%%%DATADIR%%/remote-volume-monitors/gphoto2.monitor
-%%MTP%%%%DATADIR%%/remote-volume-monitors/mtp.monitor
-%%DATADIR%%/remote-volume-monitors/udisks2.monitor
-share/locale/af/LC_MESSAGES/gvfs.mo
-share/locale/ar/LC_MESSAGES/gvfs.mo
-share/locale/as/LC_MESSAGES/gvfs.mo
-share/locale/ast/LC_MESSAGES/gvfs.mo
-share/locale/be/LC_MESSAGES/gvfs.mo
-share/locale/be@latin/LC_MESSAGES/gvfs.mo
-share/locale/bg/LC_MESSAGES/gvfs.mo
-share/locale/bn/LC_MESSAGES/gvfs.mo
-share/locale/bn_IN/LC_MESSAGES/gvfs.mo
-share/locale/bs/LC_MESSAGES/gvfs.mo
-share/locale/ca/LC_MESSAGES/gvfs.mo
-share/locale/ca@valencia/LC_MESSAGES/gvfs.mo
-share/locale/cs/LC_MESSAGES/gvfs.mo
-share/locale/da/LC_MESSAGES/gvfs.mo
-share/locale/de/LC_MESSAGES/gvfs.mo
-share/locale/el/LC_MESSAGES/gvfs.mo
-share/locale/en@shaw/LC_MESSAGES/gvfs.mo
-share/locale/en_GB/LC_MESSAGES/gvfs.mo
-share/locale/eo/LC_MESSAGES/gvfs.mo
-share/locale/es/LC_MESSAGES/gvfs.mo
-share/locale/et/LC_MESSAGES/gvfs.mo
-share/locale/eu/LC_MESSAGES/gvfs.mo
-share/locale/fa/LC_MESSAGES/gvfs.mo
-share/locale/fi/LC_MESSAGES/gvfs.mo
-share/locale/fr/LC_MESSAGES/gvfs.mo
-share/locale/fur/LC_MESSAGES/gvfs.mo
-share/locale/ga/LC_MESSAGES/gvfs.mo
-share/locale/gl/LC_MESSAGES/gvfs.mo
-share/locale/gu/LC_MESSAGES/gvfs.mo
-share/locale/he/LC_MESSAGES/gvfs.mo
-share/locale/hi/LC_MESSAGES/gvfs.mo
-share/locale/hr/LC_MESSAGES/gvfs.mo
-share/locale/hu/LC_MESSAGES/gvfs.mo
-share/locale/id/LC_MESSAGES/gvfs.mo
-share/locale/it/LC_MESSAGES/gvfs.mo
-share/locale/ja/LC_MESSAGES/gvfs.mo
-share/locale/kk/LC_MESSAGES/gvfs.mo
-share/locale/kn/LC_MESSAGES/gvfs.mo
-share/locale/ko/LC_MESSAGES/gvfs.mo
-share/locale/ku/LC_MESSAGES/gvfs.mo
-share/locale/lt/LC_MESSAGES/gvfs.mo
-share/locale/lv/LC_MESSAGES/gvfs.mo
-share/locale/mai/LC_MESSAGES/gvfs.mo
-share/locale/mk/LC_MESSAGES/gvfs.mo
-share/locale/ml/LC_MESSAGES/gvfs.mo
-share/locale/mr/LC_MESSAGES/gvfs.mo
-share/locale/ms/LC_MESSAGES/gvfs.mo
-share/locale/nb/LC_MESSAGES/gvfs.mo
-share/locale/nds/LC_MESSAGES/gvfs.mo
-share/locale/ne/LC_MESSAGES/gvfs.mo
-share/locale/nl/LC_MESSAGES/gvfs.mo
-share/locale/nn/LC_MESSAGES/gvfs.mo
-share/locale/oc/LC_MESSAGES/gvfs.mo
-share/locale/or/LC_MESSAGES/gvfs.mo
-share/locale/pa/LC_MESSAGES/gvfs.mo
-share/locale/pl/LC_MESSAGES/gvfs.mo
-share/locale/pt/LC_MESSAGES/gvfs.mo
-share/locale/pt_BR/LC_MESSAGES/gvfs.mo
-share/locale/ro/LC_MESSAGES/gvfs.mo
-share/locale/ru/LC_MESSAGES/gvfs.mo
-share/locale/sk/LC_MESSAGES/gvfs.mo
-share/locale/sl/LC_MESSAGES/gvfs.mo
-share/locale/sq/LC_MESSAGES/gvfs.mo
-share/locale/sr/LC_MESSAGES/gvfs.mo
-share/locale/sr@latin/LC_MESSAGES/gvfs.mo
-share/locale/sv/LC_MESSAGES/gvfs.mo
-share/locale/ta/LC_MESSAGES/gvfs.mo
-share/locale/te/LC_MESSAGES/gvfs.mo
-share/locale/tg/LC_MESSAGES/gvfs.mo
-share/locale/th/LC_MESSAGES/gvfs.mo
-share/locale/tr/LC_MESSAGES/gvfs.mo
-share/locale/ug/LC_MESSAGES/gvfs.mo
-share/locale/uk/LC_MESSAGES/gvfs.mo
-share/locale/vi/LC_MESSAGES/gvfs.mo
-share/locale/zh_CN/LC_MESSAGES/gvfs.mo
-share/locale/zh_HK/LC_MESSAGES/gvfs.mo
-share/locale/zh_TW/LC_MESSAGES/gvfs.mo
diff --git a/devel/libbde/Makefile b/devel/libbde/Makefile
deleted file mode 100644
index fd5d266e24cb..000000000000
--- a/devel/libbde/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-PORTNAME= libbde
-DISTVERSION= alpha-20231220
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libbde/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the BitLocker Drive Encryption volumes
-WWW= https://github.com/libyal/libbde
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-LIB_DEPENDS= libcaes.so:security/libcaes
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-libcaes=${LOCALBASE} --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libbde/distinfo b/devel/libbde/distinfo
deleted file mode 100644
index 7d8c38ce375a..000000000000
--- a/devel/libbde/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707835740
-SHA256 (libbde-alpha-20231220.tar.gz) = 21fa33dd40edcea07d50b16ef926db459a6f2db551232dd8c1597484f1367241
-SIZE (libbde-alpha-20231220.tar.gz) = 1739799
diff --git a/devel/libbde/pkg-descr b/devel/libbde/pkg-descr
deleted file mode 100644
index 9acfb6bca20f..000000000000
--- a/devel/libbde/pkg-descr
+++ /dev/null
@@ -1,2 +0,0 @@
-Library and tools to access the BitLocker Drive Encryption (BDE) encrypted
-volumes.
diff --git a/devel/libbde/pkg-plist b/devel/libbde/pkg-plist
deleted file mode 100644
index d9f871ed4a9a..000000000000
--- a/devel/libbde/pkg-plist
+++ /dev/null
@@ -1,19 +0,0 @@
-bin/bdeinfo
-bin/bdemount
-include/libbde.h
-include/libbde/codepage.h
-include/libbde/definitions.h
-include/libbde/error.h
-include/libbde/extern.h
-include/libbde/features.h
-include/libbde/types.h
-lib/libbde.a
-lib/libbde.so
-lib/libbde.so.1
-lib/libbde.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pybde.a
-%%PYTHON_SITELIBDIR%%/pybde.so
-libdata/pkgconfig/libbde.pc
-share/man/man1/bdeinfo.1.gz
-share/man/man1/bdemount.1.gz
-share/man/man3/libbde.3.gz
diff --git a/devel/libfsapfs/Makefile b/devel/libfsapfs/Makefile
deleted file mode 100644
index 4b9c7e8b80d3..000000000000
--- a/devel/libfsapfs/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-PORTNAME= libfsapfs
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231122
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libfsapfs/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the Apple File System (APFS)
-WWW= https://github.com/libyal/libfsapfs
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-LIB_DEPENDS= libcaes.so:security/libcaes
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-libcaes=${LOCALBASE} --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libfsapfs/distinfo b/devel/libfsapfs/distinfo
deleted file mode 100644
index c5ac079f5d26..000000000000
--- a/devel/libfsapfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707836234
-SHA256 (libfsapfs-experimental-20231122.tar.gz) = 0a324de50aaf30c7a685cc9b296f4e9ed101a2558aae95334410fbf5c108cc30
-SIZE (libfsapfs-experimental-20231122.tar.gz) = 2164984
diff --git a/devel/libfsapfs/pkg-descr b/devel/libfsapfs/pkg-descr
deleted file mode 100644
index 61a4927c5eb0..000000000000
--- a/devel/libfsapfs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the Apple File System (APFS)
diff --git a/devel/libfsapfs/pkg-plist b/devel/libfsapfs/pkg-plist
deleted file mode 100644
index 939747d660de..000000000000
--- a/devel/libfsapfs/pkg-plist
+++ /dev/null
@@ -1,19 +0,0 @@
-bin/fsapfsinfo
-bin/fsapfsmount
-include/libfsapfs.h
-include/libfsapfs/codepage.h
-include/libfsapfs/definitions.h
-include/libfsapfs/error.h
-include/libfsapfs/extern.h
-include/libfsapfs/features.h
-include/libfsapfs/types.h
-lib/libfsapfs.a
-lib/libfsapfs.so
-lib/libfsapfs.so.1
-lib/libfsapfs.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pyfsapfs.a
-%%PYTHON_SITELIBDIR%%/pyfsapfs.so
-libdata/pkgconfig/libfsapfs.pc
-share/man/man1/fsapfsinfo.1.gz
-share/man/man1/fsapfsmount.1.gz
-share/man/man3/libfsapfs.3.gz
diff --git a/devel/libfsext/Makefile b/devel/libfsext/Makefile
deleted file mode 100644
index c35d4e101549..000000000000
--- a/devel/libfsext/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-PORTNAME= libfsext
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231129
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libfsext/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the Extended File System
-WWW= https://github.com/libyal/libfsext
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libfsext/distinfo b/devel/libfsext/distinfo
deleted file mode 100644
index af5acaab6f00..000000000000
--- a/devel/libfsext/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707836309
-SHA256 (libfsext-experimental-20231129.tar.gz) = e5faf530e370c0bac1d584d236150577d0ca1b4692fbf0321d6568999323fcf1
-SIZE (libfsext-experimental-20231129.tar.gz) = 1719851
diff --git a/devel/libfsext/pkg-descr b/devel/libfsext/pkg-descr
deleted file mode 100644
index 285d40feb04f..000000000000
--- a/devel/libfsext/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the Extended File System
diff --git a/devel/libfsext/pkg-plist b/devel/libfsext/pkg-plist
deleted file mode 100644
index 81ea09be91c1..000000000000
--- a/devel/libfsext/pkg-plist
+++ /dev/null
@@ -1,19 +0,0 @@
-bin/fsextinfo
-bin/fsextmount
-include/libfsext.h
-include/libfsext/codepage.h
-include/libfsext/definitions.h
-include/libfsext/error.h
-include/libfsext/extern.h
-include/libfsext/features.h
-include/libfsext/types.h
-lib/libfsext.a
-lib/libfsext.so
-lib/libfsext.so.1
-lib/libfsext.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pyfsext.a
-%%PYTHON_SITELIBDIR%%/pyfsext.so
-libdata/pkgconfig/libfsext.pc
-share/man/man1/fsextinfo.1.gz
-share/man/man1/fsextmount.1.gz
-share/man/man3/libfsext.3.gz
diff --git a/devel/libfsfat/Makefile b/devel/libfsfat/Makefile
deleted file mode 100644
index 4d016544f03a..000000000000
--- a/devel/libfsfat/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-PORTNAME= libfsfat
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231122
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libfsfat/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the File Allocation Table File System
-WWW= https://github.com/libyal/libfsfat
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libfsfat/distinfo b/devel/libfsfat/distinfo
deleted file mode 100644
index 0f2626e7770f..000000000000
--- a/devel/libfsfat/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707836386
-SHA256 (libfsfat-experimental-20231122.tar.gz) = bc74cd55a1f3a1102dd9c2cde7958d017e6455d82ef387249b12ad58c62df7c5
-SIZE (libfsfat-experimental-20231122.tar.gz) = 1665538
diff --git a/devel/libfsfat/pkg-descr b/devel/libfsfat/pkg-descr
deleted file mode 100644
index f303433616cc..000000000000
--- a/devel/libfsfat/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the File Allocation Table (FAT) file system
diff --git a/devel/libfsfat/pkg-plist b/devel/libfsfat/pkg-plist
deleted file mode 100644
index c657a6fc2311..000000000000
--- a/devel/libfsfat/pkg-plist
+++ /dev/null
@@ -1,19 +0,0 @@
-bin/fsfatinfo
-bin/fsfatmount
-include/libfsfat.h
-include/libfsfat/codepage.h
-include/libfsfat/definitions.h
-include/libfsfat/error.h
-include/libfsfat/extern.h
-include/libfsfat/features.h
-include/libfsfat/types.h
-lib/libfsfat.a
-lib/libfsfat.so
-lib/libfsfat.so.1
-lib/libfsfat.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pyfsfat.a
-%%PYTHON_SITELIBDIR%%/pyfsfat.so
-libdata/pkgconfig/libfsfat.pc
-share/man/man1/fsfatinfo.1.gz
-share/man/man1/fsfatmount.1.gz
-share/man/man3/libfsfat.3.gz
diff --git a/devel/libfshfs/Makefile b/devel/libfshfs/Makefile
deleted file mode 100644
index 44ab8090cb05..000000000000
--- a/devel/libfshfs/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-PORTNAME= libfshfs
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231125
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libfshfs/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the Mac OS Hierarchical File System (HFS)
-WWW= https://github.com/libyal/libfshfs
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libfshfs/distinfo b/devel/libfshfs/distinfo
deleted file mode 100644
index d97397cb447c..000000000000
--- a/devel/libfshfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707836459
-SHA256 (libfshfs-experimental-20231125.tar.gz) = af9063a07814265cd79ed823ea3a44c08de3d4defa3c95750866f8572ad8bf8c
-SIZE (libfshfs-experimental-20231125.tar.gz) = 2027202
diff --git a/devel/libfshfs/pkg-descr b/devel/libfshfs/pkg-descr
deleted file mode 100644
index b7634c525b9c..000000000000
--- a/devel/libfshfs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the Mac OS Hierarchical File System (HFS)
diff --git a/devel/libfshfs/pkg-plist b/devel/libfshfs/pkg-plist
deleted file mode 100644
index d5c0ac017d37..000000000000
--- a/devel/libfshfs/pkg-plist
+++ /dev/null
@@ -1,19 +0,0 @@
-bin/fshfsinfo
-bin/fshfsmount
-include/libfshfs.h
-include/libfshfs/codepage.h
-include/libfshfs/definitions.h
-include/libfshfs/error.h
-include/libfshfs/extern.h
-include/libfshfs/features.h
-include/libfshfs/types.h
-lib/libfshfs.a
-lib/libfshfs.so
-lib/libfshfs.so.1
-lib/libfshfs.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pyfshfs.a
-%%PYTHON_SITELIBDIR%%/pyfshfs.so
-libdata/pkgconfig/libfshfs.pc
-share/man/man1/fshfsinfo.1.gz
-share/man/man1/fshfsmount.1.gz
-share/man/man3/libfshfs.3.gz
diff --git a/devel/libfsntfs/Makefile b/devel/libfsntfs/Makefile
deleted file mode 100644
index b9f207317355..000000000000
--- a/devel/libfsntfs/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
-PORTNAME= libfsntfs
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231125
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libfsntfs/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the New Technology File System (NTFS)
-WWW= https://github.com/libyal/libfsntfs
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-LIB_DEPENDS= libfwnt.so:devel/libfwnt
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-openssl=no \
- --with-libfwnt=${LOCALBASE}
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= cpe libtool fuse pathfix pkgconfig python
-CPE_VENDOR= ${PORTNAME}_project
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libfsntfs/distinfo b/devel/libfsntfs/distinfo
deleted file mode 100644
index 82438122469a..000000000000
--- a/devel/libfsntfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707836581
-SHA256 (libfsntfs-experimental-20231125.tar.gz) = c180863cc7aaacf950251e4758a0c4c8a30c100b6b96b63884b9d6717f3975a6
-SIZE (libfsntfs-experimental-20231125.tar.gz) = 2104151
diff --git a/devel/libfsntfs/pkg-descr b/devel/libfsntfs/pkg-descr
deleted file mode 100644
index 8aa270b801b3..000000000000
--- a/devel/libfsntfs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the New Technology File System (NTFS)
diff --git a/devel/libfsntfs/pkg-plist b/devel/libfsntfs/pkg-plist
deleted file mode 100644
index 35bb09d1e8d8..000000000000
--- a/devel/libfsntfs/pkg-plist
+++ /dev/null
@@ -1,18 +0,0 @@
-bin/fsntfsinfo
-bin/fsntfsmount
-include/libfsntfs.h
-include/libfsntfs/codepage.h
-include/libfsntfs/definitions.h
-include/libfsntfs/error.h
-include/libfsntfs/extern.h
-include/libfsntfs/features.h
-include/libfsntfs/types.h
-lib/libfsntfs.a
-lib/libfsntfs.so
-lib/libfsntfs.so.1
-lib/libfsntfs.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pyfsntfs.a
-%%PYTHON_SITELIBDIR%%/pyfsntfs.so
-libdata/pkgconfig/libfsntfs.pc
-share/man/man1/fsntfsinfo.1.gz
-share/man/man3/libfsntfs.3.gz
diff --git a/devel/libfsxfs/Makefile b/devel/libfsxfs/Makefile
deleted file mode 100644
index c9a36dd78526..000000000000
--- a/devel/libfsxfs/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-PORTNAME= libfsxfs
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231124
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libfsxfs/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the SGI X File System
-WWW= https://github.com/libyal/libfsxfs
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libfsxfs/distinfo b/devel/libfsxfs/distinfo
deleted file mode 100644
index a141316a4050..000000000000
--- a/devel/libfsxfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707836670
-SHA256 (libfsxfs-experimental-20231124.tar.gz) = f4817abfa9e10d11b506422e13b596a41dd278443a39278a13f220b9683d7c9b
-SIZE (libfsxfs-experimental-20231124.tar.gz) = 1709527
diff --git a/devel/libfsxfs/pkg-descr b/devel/libfsxfs/pkg-descr
deleted file mode 100644
index 644af6e193f2..000000000000
--- a/devel/libfsxfs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the SGI X File System
diff --git a/devel/libfsxfs/pkg-plist b/devel/libfsxfs/pkg-plist
deleted file mode 100644
index 1c0c6102a1a8..000000000000
--- a/devel/libfsxfs/pkg-plist
+++ /dev/null
@@ -1,19 +0,0 @@
-bin/fsxfsinfo
-bin/fsxfsmount
-include/libfsxfs.h
-include/libfsxfs/codepage.h
-include/libfsxfs/definitions.h
-include/libfsxfs/error.h
-include/libfsxfs/extern.h
-include/libfsxfs/features.h
-include/libfsxfs/types.h
-lib/libfsxfs.a
-lib/libfsxfs.so
-lib/libfsxfs.so.1
-lib/libfsxfs.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pyfsxfs.a
-%%PYTHON_SITELIBDIR%%/pyfsxfs.so
-libdata/pkgconfig/libfsxfs.pc
-share/man/man1/fsxfsinfo.1.gz
-share/man/man1/fsxfsmount.1.gz
-share/man/man3/libfsxfs.3.gz
diff --git a/devel/libmodi/Makefile b/devel/libmodi/Makefile
deleted file mode 100644
index 77bca8473f60..000000000000
--- a/devel/libmodi/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-PORTNAME= libmodi
-# If/When moving from experimental to alpha, switch to DISTVERSIONPREFIX to prevent PORTEPOCH
-DISTVERSION= experimental-20231123
-CATEGORIES= devel
-MASTER_SITES= https://github.com/libyal/libmodi/releases/download/${PORTVERSION:E}/ \
- LOCAL/antoine
-
-MAINTAINER= antoine@FreeBSD.org
-COMMENT= Library and tools to access the Mac OS disk image formats
-WWW= https://github.com/libyal/libmodi
-
-LICENSE= LGPL3+
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-LIB_DEPENDS= libcaes.so:security/libcaes
-
-GNU_CONFIGURE= yes
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-CONFIGURE_ARGS= --enable-python --with-libcaes=${LOCALBASE} --with-openssl=no
-CONFIGURE_ENV= PYTHON_CONFIG=${PYTHON_CMD}-config
-USES= fuse libtool pathfix pkgconfig python
-USE_LDCONFIG= yes
-INSTALL_TARGET= install-strip
-WRKSRC= ${WRKDIR}/${PORTNAME}-${PORTVERSION:E}
-
-OPTIONS_DEFINE= NLS
-
-NLS_USES= gettext
-NLS_CONFIGURE_ON= --with-libintl-prefix=${LOCALBASE}
-NLS_CONFIGURE_OFF= --disable-nls
-
-.include <bsd.port.mk>
diff --git a/devel/libmodi/distinfo b/devel/libmodi/distinfo
deleted file mode 100644
index 5895684e3a45..000000000000
--- a/devel/libmodi/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1707837223
-SHA256 (libmodi-experimental-20231123.tar.gz) = 01aa4f2ac8a0e2673a37952cdb98a21d0572ac40771cc83fc2170f78533fb8bc
-SIZE (libmodi-experimental-20231123.tar.gz) = 2075035
diff --git a/devel/libmodi/pkg-descr b/devel/libmodi/pkg-descr
deleted file mode 100644
index b94a8a7477fa..000000000000
--- a/devel/libmodi/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Library and tools to access the Mac OS disk image formats
diff --git a/devel/libmodi/pkg-plist b/devel/libmodi/pkg-plist
deleted file mode 100644
index 406b08b3b485..000000000000
--- a/devel/libmodi/pkg-plist
+++ /dev/null
@@ -1,18 +0,0 @@
-bin/modiinfo
-bin/modimount
-include/libmodi.h
-include/libmodi/codepage.h
-include/libmodi/definitions.h
-include/libmodi/error.h
-include/libmodi/extern.h
-include/libmodi/features.h
-include/libmodi/types.h
-lib/libmodi.a
-lib/libmodi.so
-lib/libmodi.so.1
-lib/libmodi.so.1.0.0
-%%PYTHON_SITELIBDIR%%/pymodi.a
-%%PYTHON_SITELIBDIR%%/pymodi.so
-libdata/pkgconfig/libmodi.pc
-share/man/man1/modiinfo.1.gz
-share/man/man3/libmodi.3.gz
diff --git a/devel/libublio/pkg-descr b/devel/libublio/pkg-descr
index e89d18797670..04fde723712c 100644
--- a/devel/libublio/pkg-descr
+++ b/devel/libublio/pkg-descr
@@ -1,5 +1,5 @@
This is a user space caching library, currently used by FreeBSD and Mac OS X
-to improve performance of NTFS-3G (sysutils/fusefs-ntfs port), because these
+to improve performance of NTFS-3G (filesystems/ntfs port), because these
systems don't have a block device cache, giving a very slow read/write rate.
Note that libublio is _not_ thread safe.
diff --git a/devel/py-dask/Makefile b/devel/py-dask/Makefile
index 4b17cd2a62f5..856c925150f6 100644
--- a/devel/py-dask/Makefile
+++ b/devel/py-dask/Makefile
@@ -17,7 +17,7 @@ BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=62.6:devel/py-setuptools@${PY_
${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}click>=8.1:devel/py-click@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}cloudpickle>=3.0.0:devel/py-cloudpickle@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}fsspec>=2021.09.0:devel/py-fsspec@${PY_FLAVOR} \
+ ${PYTHON_PKGNAMEPREFIX}fsspec>=2021.09.0:filesystems/py-fsspec@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}packaging>=20.0:devel/py-packaging@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}partd>=1.4.0:databases/py-partd@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}pyyaml>=5.3.1:devel/py-pyyaml@${PY_FLAVOR} \
diff --git a/devel/py-etils/Makefile b/devel/py-etils/Makefile
index 21017461a1b6..639208edad6f 100644
--- a/devel/py-etils/Makefile
+++ b/devel/py-etils/Makefile
@@ -51,12 +51,12 @@ ECOLAB_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}jupyter>=0:devel/py-jupyter@${PY_FLAV
${PYTHON_PKGNAMEPREFIX}packaging>=0:devel/py-packaging@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}protobuf>=0,1:devel/py-protobuf@${PY_FLAVOR}
ENP_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}numpy>=0,1:math/py-numpy@${PY_FLAVOR}
-EPATH_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec>=0:devel/py-fsspec@${PY_FLAVOR} \
+EPATH_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec>=0:filesystems/py-fsspec@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}importlib-resources>=0:devel/py-importlib-resources@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}typing-extensions>=0:devel/py-typing-extensions@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}zipp>=0:devel/py-zipp@${PY_FLAVOR}
-EPATH_GCS_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}gcsfs>=0:devel/py-gcsfs@${PY_FLAVOR}
-EPATH_S3_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}s3fs>=0:devel/py-s3fs@${PY_FLAVOR}
+EPATH_GCS_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}gcsfs>=0:filesystems/py-gcsfs@${PY_FLAVOR}
+EPATH_S3_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}s3fs>=0:filesystems/py-s3fs@${PY_FLAVOR}
EPY_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}typing-extensions>=0:devel/py-typing-extensions@${PY_FLAVOR}
ETQDM_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}absl-py>=0:devel/py-absl-py@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}tqdm>=0:misc/py-tqdm@${PY_FLAVOR}
diff --git a/devel/py-fs/Makefile b/devel/py-fs/Makefile
deleted file mode 100644
index 11780bfbde3a..000000000000
--- a/devel/py-fs/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PORTNAME= fs
-PORTVERSION= 0.5.4
-PORTREVISION= 1
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= douglas@douglasthrift.net
-COMMENT= Python filesystem abstraction
-WWW= https://pyfilesystem.org/
-
-LICENSE= BSD3CLAUSE
-LICENSE_FILE= ${WRKSRC}/LICENSE.txt
-
-USES= dos2unix python
-USE_PYTHON= autoplist concurrent distutils
-
-NO_ARCH= yes
-
-.include <bsd.port.mk>
diff --git a/devel/py-fs/distinfo b/devel/py-fs/distinfo
deleted file mode 100644
index 58372636118c..000000000000
--- a/devel/py-fs/distinfo
+++ /dev/null
@@ -1,2 +0,0 @@
-SHA256 (fs-0.5.4.tar.gz) = ba2cca8773435a7c86059d57cb4b8ea30fda40f8610941f7822d1ce3ffd36197
-SIZE (fs-0.5.4.tar.gz) = 231333
diff --git a/devel/py-fs/files/patch-2to3 b/devel/py-fs/files/patch-2to3
deleted file mode 100644
index 5e5874a2a21b..000000000000
--- a/devel/py-fs/files/patch-2to3
+++ /dev/null
@@ -1,5882 +0,0 @@
---- fs/appdirfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/appdirfs.py
-@@ -84,6 +84,6 @@ class UserLogFS(OSFS):
-
- if __name__ == "__main__":
- udfs = UserDataFS('exampleapp', appauthor='pyfs')
-- print udfs
-+ print(udfs)
- udfs2 = UserDataFS('exampleapp2', appauthor='pyfs', create=False)
-- print udfs2
-+ print(udfs2)
---- fs/appdirs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/appdirs.py
-@@ -21,7 +21,7 @@ import os
- PY3 = sys.version_info[0] == 3
-
- if PY3:
-- unicode = str
-+ str = str
-
- class AppDirsError(Exception):
- pass
-@@ -248,7 +248,7 @@ def _get_win_folder_from_registry(csidl_name):
- registry for this guarantees us the correct answer for all CSIDL_*
- names.
- """
-- import _winreg
-+ import winreg
-
- shell_folder_name = {
- "CSIDL_APPDATA": "AppData",
-@@ -256,9 +256,9 @@ def _get_win_folder_from_registry(csidl_name):
- "CSIDL_LOCAL_APPDATA": "Local AppData",
- }[csidl_name]
-
-- key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
-+ key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
- r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
-- dir, type = _winreg.QueryValueEx(key, shell_folder_name)
-+ dir, type = winreg.QueryValueEx(key, shell_folder_name)
- return dir
-
- def _get_win_folder_with_pywin32(csidl_name):
-@@ -268,7 +268,7 @@ def _get_win_folder_with_pywin32(csidl_name):
- # not return unicode strings when there is unicode data in the
- # path.
- try:
-- dir = unicode(dir)
-+ dir = str(dir)
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-@@ -337,9 +337,9 @@ if __name__ == "__main__":
- print("-- app dirs (without optional 'version')")
- dirs = AppDirs(appname, appauthor, version="1.0")
- for prop in props:
-- print("%s: %s" % (prop, getattr(dirs, prop)))
-+ print(("%s: %s" % (prop, getattr(dirs, prop))))
-
- print("\n-- app dirs (with optional 'version')")
- dirs = AppDirs(appname, appauthor)
- for prop in props:
-- print("%s: %s" % (prop, getattr(dirs, prop)))
-+ print(("%s: %s" % (prop, getattr(dirs, prop))))
---- fs/base.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/base.py
-@@ -12,8 +12,8 @@ For more information regarding implementing a working
-
- """
-
--from __future__ import with_statement
-
-+
- __all__ = ['DummyLock',
- 'silence_fserrors',
- 'NullFile',
-@@ -109,7 +109,7 @@ class NullFile(object):
- def flush(self):
- pass
-
-- def next(self):
-+ def __next__(self):
- raise StopIteration
-
- def readline(self, *args, **kwargs):
-@@ -900,7 +900,7 @@ class FS(object):
- chunk_size=1024 * 64,
- progress_callback=progress_callback,
- finished_callback=finished_callback)
-- except Exception, e:
-+ except Exception as e:
- if error_callback is not None:
- error_callback(e)
- finally:
-@@ -1156,7 +1156,7 @@ class FS(object):
- def _shutil_copyfile(cls, src_syspath, dst_syspath):
- try:
- shutil.copyfile(src_syspath, dst_syspath)
-- except IOError, e:
-+ except IOError as e:
- # shutil reports ENOENT when a parent directory is missing
- if getattr(e, "errno", None) == errno.ENOENT:
- if not os.path.exists(dirname(dst_syspath)):
---- fs/browsewin.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/browsewin.py
-@@ -24,7 +24,7 @@ class InfoFrame(wx.Frame):
-
- self.SetTitle("FS Object info - %s (%s)" % (path, desc))
-
-- keys = info.keys()
-+ keys = list(info.keys())
- keys.sort()
-
- self.list_ctrl = wx.ListCtrl(self, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
-@@ -36,7 +36,7 @@ class InfoFrame(wx.Frame):
- self.list_ctrl.SetColumnWidth(1, 300)
-
- for key in sorted(keys, key=lambda k:k.lower()):
-- self.list_ctrl.Append((key, unicode(info.get(key))))
-+ self.list_ctrl.Append((key, str(info.get(key))))
-
- self.Center()
-
-@@ -50,7 +50,7 @@ class BrowseFrame(wx.Frame):
-
- self.fs = fs
- self.hide_dotfiles = hide_dotfiles
-- self.SetTitle("FS Browser - " + unicode(fs))
-+ self.SetTitle("FS Browser - " + str(fs))
-
- self.tree = wx.gizmos.TreeListCtrl(self, -1, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT)
-
-@@ -105,7 +105,7 @@ class BrowseFrame(wx.Frame):
- try:
- paths = ( [(True, p) for p in self.fs.listdir(path, absolute=True, dirs_only=True)] +
- [(False, p) for p in self.fs.listdir(path, absolute=True, files_only=True)] )
-- except FSError, e:
-+ except FSError as e:
- msg = "Failed to get directory listing for %s\n\nThe following error was reported:\n\n%s" % (path, e)
- wx.MessageDialog(self, msg, "Error listing directory", wx.OK).ShowModal()
- paths = []
-@@ -194,6 +194,6 @@ def browse(fs, hide_dotfiles=False):
-
-
- if __name__ == "__main__":
-- from osfs import OSFS
-+ from .osfs import OSFS
- home_fs = OSFS("~/")
- browse(home_fs, True)
---- fs/commands/fscp.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/commands/fscp.py
-@@ -3,7 +3,7 @@ from fs.utils import copyfile, copyfile_non_atomic
- from fs.path import pathjoin, iswildcard
- from fs.commands.runner import Command
- import sys
--import Queue as queue
-+import queue as queue
- import time
- import threading
-
-@@ -31,7 +31,7 @@ class FileOpThread(threading.Thread):
- self.dest_fs.makedir(path, recursive=True, allow_recreate=True)
- else:
- self.action(fs, path, self.dest_fs, dest_path, overwrite=True)
-- except Exception, e:
-+ except Exception as e:
- self.on_error(e)
- self.queue.task_done()
- break
-@@ -147,7 +147,7 @@ Copy SOURCE to DESTINATION"""
- file_queue,
- self.on_done,
- self.on_error)
-- for i in xrange(options.threads)]
-+ for i in range(options.threads)]
-
- for thread in threads:
- thread.start()
-@@ -188,7 +188,7 @@ Copy SOURCE to DESTINATION"""
-
- if self.action_errors:
- for error in self.action_errors:
-- self.error(self.wrap_error(unicode(error)) + '\n')
-+ self.error(self.wrap_error(str(error)) + '\n')
- sys.stdout.flush()
- else:
- if complete and options.progress:
-@@ -204,9 +204,9 @@ Copy SOURCE to DESTINATION"""
- try:
- if self.options.verbose:
- if path_type == self.DIR:
-- print "mkdir %s" % dst_fs.desc(dst_path)
-+ print("mkdir %s" % dst_fs.desc(dst_path))
- else:
-- print "%s -> %s" % (src_fs.desc(src_path), dst_fs.desc(dst_path))
-+ print("%s -> %s" % (src_fs.desc(src_path), dst_fs.desc(dst_path)))
- elif self.options.progress:
- self.done_files += 1
- sys.stdout.write(self.progress_bar(self.total_files, self.done_files, self.get_verb()))
---- fs/commands/fsinfo.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/commands/fsinfo.py
-@@ -31,12 +31,12 @@ Display information regarding an FS resource"""
- return val
-
- def make_printable(text):
-- if not isinstance(text, basestring):
-+ if not isinstance(text, str):
- try:
- text = str(text)
- except:
- try:
-- text = unicode(text)
-+ text = str(text)
- except:
- text = repr(text)
- return text
-@@ -48,16 +48,16 @@ Display information regarding an FS resource"""
- dirs_only=options.dirsonly):
- if not options.omit:
- if options.simple:
-- file_line = u'%s\n' % self.wrap_filename(path)
-+ file_line = '%s\n' % self.wrap_filename(path)
- else:
-- file_line = u'[%s] %s\n' % (self.wrap_filename(path), self.wrap_faded(fs.desc(path)))
-+ file_line = '[%s] %s\n' % (self.wrap_filename(path), self.wrap_faded(fs.desc(path)))
- self.output(file_line)
- info = fs.getinfo(path)
-
-- for k, v in info.items():
-+ for k, v in list(info.items()):
- if k.startswith('_'):
- del info[k]
-- elif not isinstance(v, (basestring, int, long, float, bool, datetime)):
-+ elif not isinstance(v, (str, int, float, bool, datetime)):
- del info[k]
-
- if keys:
---- fs/commands/fsls.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/commands/fsls.py
-@@ -37,7 +37,7 @@ List contents of [PATH]"""
- output = self.output
-
- if not args:
-- args = [u'.']
-+ args = ['.']
-
- dir_paths = []
- file_paths = []
-@@ -75,13 +75,13 @@ List contents of [PATH]"""
-
- if options.syspath:
- # Path without a syspath, just won't be displayed
-- dir_paths = filter(None, [fs.getsyspath(path, allow_none=True) for path in dir_paths])
-- file_paths = filter(None, [fs.getsyspath(path, allow_none=True) for path in file_paths])
-+ dir_paths = [_f for _f in [fs.getsyspath(path, allow_none=True) for path in dir_paths] if _f]
-+ file_paths = [_f for _f in [fs.getsyspath(path, allow_none=True) for path in file_paths] if _f]
-
- if options.url:
- # Path without a syspath, just won't be displayed
-- dir_paths = filter(None, [fs.getpathurl(path, allow_none=True) for path in dir_paths])
-- file_paths = filter(None, [fs.getpathurl(path, allow_none=True) for path in file_paths])
-+ dir_paths = [_f for _f in [fs.getpathurl(path, allow_none=True) for path in dir_paths] if _f]
-+ file_paths = [_f for _f in [fs.getpathurl(path, allow_none=True) for path in file_paths] if _f]
-
- dirs = frozenset(dir_paths)
- paths = sorted(dir_paths + file_paths, key=lambda p: p.lower())
-@@ -95,7 +95,7 @@ List contents of [PATH]"""
- def columnize(paths, num_columns):
-
- col_height = (len(paths) + num_columns - 1) / num_columns
-- columns = [[] for _ in xrange(num_columns)]
-+ columns = [[] for _ in range(num_columns)]
- col_no = 0
- col_pos = 0
- for path in paths:
-@@ -128,11 +128,11 @@ List contents of [PATH]"""
-
- def condense_columns(columns):
- max_column_height = max([len(col) for col in columns])
-- lines = [[] for _ in xrange(max_column_height)]
-+ lines = [[] for _ in range(max_column_height)]
- for column in columns:
- for line, path in zip(lines, column):
- line.append(path)
-- return '\n'.join(u' '.join(line) for line in lines)
-+ return '\n'.join(' '.join(line) for line in lines)
-
- if options.long:
- for path in paths:
-@@ -151,7 +151,7 @@ List contents of [PATH]"""
- while num_cols:
- col_height = (num_paths + num_cols - 1) // num_cols
- line_width = 0
-- for col_no in xrange(num_cols):
-+ for col_no in range(num_cols):
- try:
- col_width = max(path_widths[col_no * col_height: (col_no + 1) * col_height])
- except ValueError:
---- fs/commands/fsserve.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/commands/fsserve.py
-@@ -82,7 +82,7 @@ Serves the contents of PATH with one of a number of me
- try:
- self.output("Starting sftp server on %s:%i\n" % (options.addr, port), verbose=True)
- server.serve_forever()
-- except Exception, e:
-+ except Exception as e:
- pass
- finally:
- server.server_close()
-@@ -90,7 +90,7 @@ Serves the contents of PATH with one of a number of me
- else:
- self.error("Server type '%s' not recognised\n" % options.type)
-
-- except IOError, e:
-+ except IOError as e:
- if e.errno == errno.EACCES:
- self.error('Permission denied\n')
- return 1
---- fs/commands/fstree.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/commands/fstree.py
-@@ -34,7 +34,7 @@ Recursively display the contents of PATH in an ascii t
-
- for fs, path, is_dir in self.get_resources(args, single=True):
- if not is_dir:
-- self.error(u"'%s' is not a dir\n" % path)
-+ self.error("'%s' is not a dir\n" % path)
- return 1
- fs.cache_hint(True)
- if options.gui:
---- fs/commands/runner.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/commands/runner.py
-@@ -68,7 +68,7 @@ else:
-
-
- def _unicode(text):
-- if not isinstance(text, unicode):
-+ if not isinstance(text, str):
- return text.decode('ascii', 'replace')
- return text
-
-@@ -128,17 +128,17 @@ class Command(object):
- text = _unicode(text)
- if not self.terminal_colors:
- return text
-- return u'\x1b[2m%s\x1b[0m' % text
-+ return '\x1b[2m%s\x1b[0m' % text
-
- def wrap_link(self, text):
- if not self.terminal_colors:
- return text
-- return u'\x1b[1;33m%s\x1b[0m' % text
-+ return '\x1b[1;33m%s\x1b[0m' % text
-
- def wrap_strong(self, text):
- if not self.terminal_colors:
- return text
-- return u'\x1b[1m%s\x1b[0m' % text
-+ return '\x1b[1m%s\x1b[0m' % text
-
- def wrap_table_header(self, name):
- if not self.terminal_colors:
-@@ -215,10 +215,10 @@ class Command(object):
- return resources
-
- def ask(self, msg):
-- return raw_input('%s: %s ' % (self.name, msg))
-+ return input('%s: %s ' % (self.name, msg))
-
- def text_encode(self, text):
-- if not isinstance(text, unicode):
-+ if not isinstance(text, str):
- text = text.decode('ascii', 'replace')
- text = text.encode(self.encoding, 'replace')
- return text
-@@ -226,7 +226,7 @@ class Command(object):
- def output(self, msgs, verbose=False):
- if verbose and not self.options.verbose:
- return
-- if isinstance(msgs, basestring):
-+ if isinstance(msgs, str):
- msgs = (msgs,)
- for msg in msgs:
- self.output_file.write(self.text_encode(msg))
-@@ -276,7 +276,7 @@ class Command(object):
-
- opener_table = []
-
-- for fs_opener in opener.openers.itervalues():
-+ for fs_opener in opener.openers.values():
- names = fs_opener.names
- desc = getattr(fs_opener, 'desc', '')
- opener_table.append((names, desc))
-@@ -346,12 +346,12 @@ class Command(object):
- opener.add(new_opener)
-
- if not six.PY3:
-- args = [unicode(arg, sys.getfilesystemencoding()) for arg in args]
-+ args = [str(arg, sys.getfilesystemencoding()) for arg in args]
- self.verbose = options.verbose
- try:
- return self.do_run(options, args) or 0
-- except FSError, e:
-- self.error(self.wrap_error(unicode(e)) + '\n')
-+ except FSError as e:
-+ self.error(self.wrap_error(str(e)) + '\n')
- if options.debug:
- raise
- return 1
-@@ -361,8 +361,8 @@ class Command(object):
- return 0
- except SystemExit:
- return 0
-- except Exception, e:
-- self.error(self.wrap_error('Error - %s\n' % unicode(e)))
-+ except Exception as e:
-+ self.error(self.wrap_error('Error - %s\n' % str(e)))
- if options.debug:
- raise
- return 1
---- fs/contrib/archivefs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/archivefs.py
-@@ -62,7 +62,7 @@ class ArchiveFS(FS):
- :param thread_synchronize: set to True (default) to enable thread-safety
- """
- super(ArchiveFS, self).__init__(thread_synchronize=thread_synchronize)
-- if isinstance(f, basestring):
-+ if isinstance(f, str):
- self.fileobj = None
- self.root_path = f
- else:
-@@ -83,7 +83,7 @@ class ArchiveFS(FS):
- return "<ArchiveFS: %s>" % self.root_path
-
- def __unicode__(self):
-- return u"<ArchiveFS: %s>" % self.root_path
-+ return "<ArchiveFS: %s>" % self.root_path
-
- def getmeta(self, meta_name, default=NoDefaultMeta):
- if meta_name == 'read_only':
-@@ -446,7 +446,7 @@ class ArchiveMountFS(mountfs.MountFS):
- else:
- listing = self.listdir(path, *args, **kwargs)
- if dirs_only:
-- listing = filter(isdir, listing)
-+ listing = list(filter(isdir, listing))
- return listing
-
- if wildcard is None:
---- fs/contrib/bigfs/__init__.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/bigfs/__init__.py
-@@ -149,7 +149,7 @@ class _ExceptionProxy(object):
- def __setattr__(self, name, value):
- raise ValueError("File has been closed")
-
-- def __nonzero__(self):
-+ def __bool__(self):
- return False
-
-
-@@ -193,7 +193,7 @@ class BigFS(FS):
- return "<BigFS: %s>" % self.big_path
-
- def __unicode__(self):
-- return unicode(self.__str__())
-+ return str(self.__str__())
-
-
- def _parse_resource_list(self, g):
---- fs/contrib/bigfs/subrangefile.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/bigfs/subrangefile.py
-@@ -33,7 +33,7 @@ class SubrangeFile:
- return "<SubrangeFile: %s@%d size=%d>" % (self.name, self.startOffset, self.fileSize)
-
- def __unicode__(self):
-- return unicode(self.__str__())
-+ return str(self.__str__())
-
- def size(self):
- return self.fileSize
---- fs/contrib/davfs/__init__.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/contrib/davfs/__init__.py
-@@ -16,21 +16,21 @@ Requires the dexml module:
- # Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
- # All rights reserved; available under the terms of the MIT License.
-
--from __future__ import with_statement
-
-+
- import os
- import sys
--import httplib
-+import http.client
- import socket
--from urlparse import urlparse
-+from urllib.parse import urlparse
- import stat as statinfo
--from urllib import quote as urlquote
--from urllib import unquote as urlunquote
-+from urllib.parse import quote as urlquote
-+from urllib.parse import unquote as urlunquote
- import base64
- import re
- import time
- import datetime
--import cookielib
-+import http.cookiejar
- import fnmatch
- import xml.dom.pulldom
- import threading
-@@ -78,8 +78,8 @@ class DAVFS(FS):
- """
-
- connection_classes = {
-- "http": httplib.HTTPConnection,
-- "https": httplib.HTTPSConnection,
-+ "http": http.client.HTTPConnection,
-+ "https": http.client.HTTPSConnection,
- }
-
- _DEFAULT_PORT_NUMBERS = {
-@@ -116,7 +116,7 @@ class DAVFS(FS):
- self._connections = []
- self._free_connections = {}
- self._connection_lock = threading.Lock()
-- self._cookiejar = cookielib.CookieJar()
-+ self._cookiejar = http.cookiejar.CookieJar()
- super(DAVFS,self).__init__(thread_synchronize=thread_synchronize)
- # Check that the server speaks WebDAV, and normalize the URL
- # after any redirects have been followed.
-@@ -221,14 +221,14 @@ class DAVFS(FS):
- self._free_connections = {}
- self._connection_lock = threading.Lock()
- self._url_p = urlparse(self.url)
-- self._cookiejar = cookielib.CookieJar()
-+ self._cookiejar = http.cookiejar.CookieJar()
-
- def getpathurl(self, path, allow_none=False):
- """Convert a client-side path into a server-side URL."""
- path = relpath(normpath(path))
- if path.endswith("/"):
- path = path[:-1]
-- if isinstance(path,unicode):
-+ if isinstance(path,str):
- path = path.encode("utf8")
- return self.url + urlquote(path)
-
-@@ -291,7 +291,7 @@ class DAVFS(FS):
- """Perform a single HTTP request, without any error handling."""
- if self.closed:
- raise RemoteConnectionError("",msg="FS is closed")
-- if isinstance(url,basestring):
-+ if isinstance(url,str):
- url = urlparse(url)
- if self.credentials is not None:
- username = self.credentials.get("username","")
-@@ -310,7 +310,7 @@ class DAVFS(FS):
- if hasattr(body,"md5"):
- md5 = body.md5.decode("hex").encode("base64")
- con.putheader("Content-MD5",md5)
-- for hdr,val in headers.iteritems():
-+ for hdr,val in headers.items():
- con.putheader(hdr,val)
- self._cookiejar.add_cookie_header(FakeReq(con,url.scheme,url.path))
- con.endheaders()
-@@ -332,7 +332,7 @@ class DAVFS(FS):
- self._give_connection(url,con)
- resp.close = new_close
- return resp
-- except socket.error, e:
-+ except socket.error as e:
- if not fresh:
- return self._raw_request(url,method,body,headers,num_tries)
- if e.args[0] in _RETRYABLE_ERRORS:
-@@ -479,7 +479,7 @@ class DAVFS(FS):
- if not entry_ok:
- continue
- if wildcard is not None:
-- if isinstance(wildcard,basestring):
-+ if isinstance(wildcard,str):
- if not fnmatch.fnmatch(nm,wildcard):
- continue
- else:
-@@ -530,7 +530,7 @@ class DAVFS(FS):
- if not entry_ok:
- continue
- if wildcard is not None:
-- if isinstance(wildcard,basestring):
-+ if isinstance(wildcard,str):
- if not fnmatch.fnmatch(nm,wildcard):
- continue
- else:
-@@ -610,7 +610,7 @@ class DAVFS(FS):
- if self._isurl(path,res.href):
- info.update(self._info_from_propfind(res))
- if "st_mode" not in info:
-- info["st_mode"] = 0700 | statinfo.S_IFREG
-+ info["st_mode"] = 0o700 | statinfo.S_IFREG
- return info
- finally:
- response.close()
-@@ -647,7 +647,7 @@ class DAVFS(FS):
- # TODO: should check for status of the propfind first...
- # check for directory indicator
- if findElements("DAV:","collection"):
-- info["st_mode"] = 0700 | statinfo.S_IFDIR
-+ info["st_mode"] = 0o700 | statinfo.S_IFDIR
- # check for content length
- cl = findElements("DAV:","getcontentlength")
- if cl:
-@@ -674,7 +674,7 @@ class DAVFS(FS):
- if etag:
- info["etag"] = etag
- if "st_mode" not in info:
-- info["st_mode"] = 0700 | statinfo.S_IFREG
-+ info["st_mode"] = 0o700 | statinfo.S_IFREG
- return info
-
-
---- fs/contrib/davfs/util.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/davfs/util.py
-@@ -8,7 +8,7 @@
-
- import os
- import re
--import cookielib
-+import http.cookiejar
-
-
- def get_fileno(file):
-@@ -130,7 +130,7 @@ class FakeResp:
- # is a tweaked version of the cookielib function of the same name.
- #
- _test_cookie = "sessionid=e9c9b002befa93bd865ce155270307ef; Domain=.cloud.me; expires=Wed, 10-Feb-2010 03:27:20 GMT; httponly; Max-Age=1209600; Path=/, sessionid_https=None; Domain=.cloud.me; expires=Wed, 10-Feb-2010 03:27:20 GMT; httponly; Max-Age=1209600; Path=/; secure"
--if len(cookielib.parse_ns_headers([_test_cookie])) != 2:
-+if len(http.cookiejar.parse_ns_headers([_test_cookie])) != 2:
- def parse_ns_headers(ns_headers):
- """Improved parser for netscape-style cookies.
-
-@@ -170,13 +170,13 @@ if len(cookielib.parse_ns_headers([_test_cookie])) !=
- # convert expires date to seconds since epoch
- if v.startswith('"'): v = v[1:]
- if v.endswith('"'): v = v[:-1]
-- v = cookielib.http2time(v) # None if invalid
-+ v = http.cookiejar.http2time(v) # None if invalid
- pairs.append((k, v))
- if pairs:
- if not version_set:
- pairs.append(("version", "0"))
- result.append(pairs)
- return result
-- cookielib.parse_ns_headers = parse_ns_headers
-- assert len(cookielib.parse_ns_headers([_test_cookie])) == 2
-+ http.cookiejar.parse_ns_headers = parse_ns_headers
-+ assert len(http.cookiejar.parse_ns_headers([_test_cookie])) == 2
-
---- fs/contrib/davfs/xmlobj.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/davfs/xmlobj.py
-@@ -9,9 +9,9 @@ of dexml.Model subclasses.
-
- """
-
--from urlparse import urlparse, urlunparse
-+from urllib.parse import urlparse, urlunparse
-
--from httplib import responses as STATUS_CODE_TEXT
-+from http.client import responses as STATUS_CODE_TEXT
- STATUS_CODE_TEXT[207] = "Multi-Status"
-
- import dexml
-@@ -86,7 +86,7 @@ class StatusField(fields.Value):
- return val
-
- def __set__(self,instance,value):
-- if isinstance(value,basestring):
-+ if isinstance(value,str):
- # sanity check it
- bits = value.split(" ")
- if len(bits) < 3 or bits[0] != "HTTP/1.1":
---- fs/contrib/sqlitefs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/contrib/sqlitefs.py
-@@ -24,7 +24,7 @@ def fetchone(cursor):
- '''
- row = None
- try:
-- row = cursor.next()
-+ row = next(cursor)
- except:
- pass
- return(row)
-@@ -62,7 +62,7 @@ class SqliteFsFileBase(object):
- __repr__ = __str__
-
- def __unicode__(self):
-- return u"<SqliteFS File in %s %s>" % (self.fs, self.path)
-+ return "<SqliteFS File in %s %s>" % (self.fs, self.path)
-
- def __del__(self):
- if not self.closed:
-@@ -74,7 +74,7 @@ class SqliteFsFileBase(object):
- def __iter__(self):
- raise OperationFailedError('__iter__', self.path)
-
-- def next(self):
-+ def __next__(self):
- raise OperationFailedError('next', self.path)
-
- def readline(self, *args, **kwargs):
-@@ -139,8 +139,8 @@ class SqliteReadableFile(SqliteFsFileBase):
- def __iter__(self):
- return iter(self.real_stream)
-
-- def next(self):
-- return self.real_stream.next()
-+ def __next__(self):
-+ return next(self.real_stream)
-
- def readline(self, *args, **kwargs):
- return self.real_stream.readline(*args, **kwargs)
-@@ -438,7 +438,7 @@ class SqliteFS(FS):
- get the directory information dictionary.
- '''
- info = dict()
-- info['st_mode'] = 0755
-+ info['st_mode'] = 0o755
- return info
-
- def _get_file_info(self, path):
-@@ -460,7 +460,7 @@ class SqliteFS(FS):
- info['created'] = row[2]
- info['last_modified'] = row[3]
- info['last_accessed'] = row[4]
-- info['st_mode'] = 0666
-+ info['st_mode'] = 0o666
- return(info)
-
- def _isfile(self,path):
-@@ -551,7 +551,7 @@ class SqliteFS(FS):
- pass
-
- if( absolute == False):
-- pathlist = map(lambda dpath:frombase(path,dpath), pathlist)
-+ pathlist = [frombase(path,dpath) for dpath in pathlist]
-
- return(pathlist)
-
---- fs/contrib/tahoelafs/__init__.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/tahoelafs/__init__.py
-@@ -70,8 +70,8 @@ from fs import _thread_synchronize_default, SEEK_END
- from fs.remote import CacheFSMixin, RemoteFileBuffer
- from fs.base import fnmatch, NoDefaultMeta
-
--from util import TahoeUtil
--from connection import Connection
-+from .util import TahoeUtil
-+from .connection import Connection
-
- from six import b
-
-@@ -240,7 +240,7 @@ class _TahoeLAFS(FS):
- continue
-
- if wildcard is not None:
-- if isinstance(wildcard,basestring):
-+ if isinstance(wildcard,str):
- if not fnmatch.fnmatch(item['name'], wildcard):
- continue
- else:
-@@ -269,7 +269,7 @@ class _TahoeLAFS(FS):
-
- try:
- self.tahoeutil.unlink(self.dircap, path)
-- except Exception, e:
-+ except Exception as e:
- raise errors.ResourceInvalidError(path)
-
- @_fix_path
-@@ -341,8 +341,8 @@ class _TahoeLAFS(FS):
-
- def _log(self, level, message):
- if not logger.isEnabledFor(level): return
-- logger.log(level, u'(%d) %s' % (id(self),
-- unicode(message).encode('ASCII', 'replace')))
-+ logger.log(level, '(%d) %s' % (id(self),
-+ str(message).encode('ASCII', 'replace')))
-
- @_fix_path
- def getpathurl(self, path, allow_none=False, webapi=None):
-@@ -353,11 +353,11 @@ class _TahoeLAFS(FS):
- webapi = self.connection.webapi
- self._log(DEBUG, "Retrieving URL for %s over %s" % (path, webapi))
- path = self.tahoeutil.fixwinpath(path, False)
-- return u"%s/uri/%s%s" % (webapi, self.dircap, path)
-+ return "%s/uri/%s%s" % (webapi, self.dircap, path)
-
- @_fix_path
- def getrange(self, path, offset, length=None):
-- return self.connection.get(u'/uri/%s%s' % (self.dircap, path),
-+ return self.connection.get('/uri/%s%s' % (self.dircap, path),
- offset=offset, length=length)
-
- @_fix_path
-@@ -379,10 +379,10 @@ class _TahoeLAFS(FS):
- file.seek(0)
-
- if size > self.largefilesize:
-- self.connection.put(u'/uri/%s%s' % (self.dircap, path),
-+ self.connection.put('/uri/%s%s' % (self.dircap, path),
- "PyFilesystem.TahoeLAFS: Upload started, final size %d" % size)
-
-- self.connection.put(u'/uri/%s%s' % (self.dircap, path), file, size=size)
-+ self.connection.put('/uri/%s%s' % (self.dircap, path), file, size=size)
-
- @_fix_path
- def getinfo(self, path):
---- fs/contrib/tahoelafs/connection.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/contrib/tahoelafs/connection.py
-@@ -10,17 +10,19 @@ if python3:
- from urllib.parse import urlencode, pathname2url, quote
- from urllib.request import Request, urlopen
- else:
-- from urllib import urlencode, pathname2url
-- from urllib2 import Request, urlopen, quote
-+ from urllib.parse import urlencode
-+ from urllib.request import pathname2url
-+ from urllib.request import Request, urlopen
-+ from urllib.parse import quote
-
- class PutRequest(Request):
- def __init__(self, *args, **kwargs):
-- self.get_method = lambda: u'PUT'
-+ self.get_method = lambda: 'PUT'
- Request.__init__(self, *args, **kwargs)
-
- class DeleteRequest(Request):
- def __init__(self, *args, **kwargs):
-- self.get_method = lambda: u'DELETE'
-+ self.get_method = lambda: 'DELETE'
- Request.__init__(self, *args, **kwargs)
-
- class Connection:
-@@ -32,7 +34,7 @@ class Connection:
- '''
- Retrieve length of string or file object and prepare HTTP headers.
- '''
-- if isinstance(f, basestring):
-+ if isinstance(f, str):
- # Just set up content length
- size = len(f)
- elif getattr(f, 'read', None):
-@@ -50,20 +52,20 @@ class Connection:
-
- def _urlencode(self, data):
- _data = {}
-- for k, v in data.items():
-+ for k, v in list(data.items()):
- _data[k.encode('utf-8')] = v.encode('utf-8')
- return urlencode(_data)
-
- def _quotepath(self, path, params={}):
- q = quote(path.encode('utf-8'), safe='/')
- if params:
-- return u"%s?%s" % (q, self._urlencode(params))
-+ return "%s?%s" % (q, self._urlencode(params))
- return q
-
- def _urlopen(self, req):
- try:
- return urlopen(req)
-- except Exception, e:
-+ except Exception as e:
- if not getattr(e, 'getcode', None):
- raise errors.RemoteConnectionError(str(e))
- code = e.getcode()
-@@ -85,7 +87,7 @@ class Connection:
- data = self._urlencode(data)
- path = self._quotepath(path)
- if data:
-- path = u'?'.join([path, data])
-+ path = '?'.join([path, data])
-
- headers = {}
- headers.update(self.headers)
---- fs/contrib/tahoelafs/test_tahoelafs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/tahoelafs/test_tahoelafs.py
-@@ -35,7 +35,7 @@ class TestTahoeLAFS(unittest.TestCase,FSTestCases):#,T
-
- def test_dircap(self):
- # Is dircap in correct format?
-- self.assert_(self.dircap.startswith('URI:DIR2:') and len(self.dircap) > 50)
-+ self.assertTrue(self.dircap.startswith('URI:DIR2:') and len(self.dircap) > 50)
-
- def test_concurrent_copydir(self):
- # makedir() on TahoeLAFS is currently not atomic
---- fs/contrib/tahoelafs/util.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/contrib/tahoelafs/util.py
-@@ -19,7 +19,7 @@ except ImportError:
- try:
- import json
- except ImportError:
-- print "simplejson (http://pypi.python.org/pypi/simplejson/) required"
-+ print("simplejson (http://pypi.python.org/pypi/simplejson/) required")
- raise
-
- from .connection import Connection
-@@ -29,22 +29,22 @@ python3 = int(platform.python_version_tuple()[0]) > 2
- if python3:
- from urllib.error import HTTPError
- else:
-- from urllib2 import HTTPError
-+ from urllib.error import HTTPError
-
- class TahoeUtil:
- def __init__(self, webapi):
- self.connection = Connection(webapi)
-
- def createdircap(self):
-- return self.connection.post(u'/uri', params={u't': u'mkdir'}).read()
-+ return self.connection.post('/uri', params={'t': 'mkdir'}).read()
-
- def unlink(self, dircap, path=None):
- path = self.fixwinpath(path, False)
-- self.connection.delete(u'/uri/%s%s' % (dircap, path))
-+ self.connection.delete('/uri/%s%s' % (dircap, path))
-
- def info(self, dircap, path):
- path = self.fixwinpath(path, False)
-- meta = json.load(self.connection.get(u'/uri/%s%s' % (dircap, path), {u't': u'json'}))
-+ meta = json.load(self.connection.get('/uri/%s%s' % (dircap, path), {'t': 'json'}))
- return self._info(path, meta)
-
- def fixwinpath(self, path, direction=True):
-@@ -74,7 +74,7 @@ class TahoeUtil:
- if type == 'unknown':
- raise errors.ResourceNotFoundError(path)
-
-- info = {'name': unicode(self.fixwinpath(path, True)),
-+ info = {'name': str(self.fixwinpath(path, True)),
- 'type': type,
- 'size': data.get('size', 0),
- 'ctime': None,
-@@ -83,22 +83,22 @@ class TahoeUtil:
- info['ctime'] = data['metadata'].get('ctime')
-
- if info['type'] == 'dirnode':
-- info['st_mode'] = 0777 | statinfo.S_IFDIR
-+ info['st_mode'] = 0o777 | statinfo.S_IFDIR
- else:
-- info['st_mode'] = 0644
-+ info['st_mode'] = 0o644
-
- return info
-
- def list(self, dircap, path=None):
- path = self.fixwinpath(path, False)
-
-- data = json.load(self.connection.get(u'/uri/%s%s' % (dircap, path), {u't': u'json'}))
-+ data = json.load(self.connection.get('/uri/%s%s' % (dircap, path), {'t': 'json'}))
-
- if len(data) < 2 or data[0] != 'dirnode':
- raise errors.ResourceInvalidError('Metadata in unknown format!')
-
- data = data[1]['children']
-- for i in data.keys():
-+ for i in list(data.keys()):
- x = self._info(i, data[i])
- yield x
-
-@@ -106,7 +106,7 @@ class TahoeUtil:
- path = self.fixwinpath(path, False)
- path = pathsplit(path)
-
-- self.connection.post(u"/uri/%s%s" % (dircap, path[0]), data={u't': u'mkdir', u'name': path[1]})
-+ self.connection.post("/uri/%s%s" % (dircap, path[0]), data={'t': 'mkdir', 'name': path[1]})
-
- def move(self, dircap, src, dst):
- if src == '/' or dst == '/':
-@@ -120,8 +120,8 @@ class TahoeUtil:
-
- if src_tuple[0] == dst_tuple[0]:
- # Move inside one directory
-- self.connection.post(u"/uri/%s%s" % (dircap, src_tuple[0]), data={u't': u'rename',
-- u'from_name': src_tuple[1], u'to_name': dst_tuple[1]})
-+ self.connection.post("/uri/%s%s" % (dircap, src_tuple[0]), data={'t': 'rename',
-+ 'from_name': src_tuple[1], 'to_name': dst_tuple[1]})
- return
-
- # Move to different directory. Firstly create link on dst, then remove from src
-@@ -133,7 +133,7 @@ class TahoeUtil:
- self.unlink(dircap, dst)
-
- uri = self.info(dircap, src)['uri']
-- self.connection.put(u"/uri/%s%s" % (dircap, dst), data=uri, params={u't': u'uri'})
-+ self.connection.put("/uri/%s%s" % (dircap, dst), data=uri, params={'t': 'uri'})
- if uri != self.info(dircap, dst)['uri']:
- raise errors.OperationFailedError('Move failed')
-
---- fs/errors.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/errors.py
-@@ -57,19 +57,19 @@ class FSError(Exception):
-
- def __str__(self):
- keys = {}
-- for k,v in self.__dict__.iteritems():
-- if isinstance(v,unicode):
-+ for k,v in self.__dict__.items():
-+ if isinstance(v,str):
- v = v.encode(sys.getfilesystemencoding())
- keys[k] = v
- return str(self.msg % keys)
-
- def __unicode__(self):
- keys = {}
-- for k,v in self.__dict__.iteritems():
-+ for k,v in self.__dict__.items():
- if isinstance(v, six.binary_type):
- v = v.decode(sys.getfilesystemencoding(), 'replace')
- keys[k] = v
-- return unicode(self.msg, encoding=sys.getfilesystemencoding(), errors='replace') % keys
-+ return str(self.msg, encoding=sys.getfilesystemencoding(), errors='replace') % keys
-
- def __reduce__(self):
- return (self.__class__,(),self.__dict__.copy(),)
-@@ -217,33 +217,33 @@ def convert_fs_errors(func):
- def wrapper(*args,**kwds):
- try:
- return func(*args,**kwds)
-- except ResourceNotFoundError, e:
-+ except ResourceNotFoundError as e:
- raise OSError(errno.ENOENT,str(e))
-- except ParentDirectoryMissingError, e:
-+ except ParentDirectoryMissingError as e:
- if sys.platform == "win32":
- raise OSError(errno.ESRCH,str(e))
- else:
- raise OSError(errno.ENOENT,str(e))
-- except ResourceInvalidError, e:
-+ except ResourceInvalidError as e:
- raise OSError(errno.EINVAL,str(e))
-- except PermissionDeniedError, e:
-+ except PermissionDeniedError as e:
- raise OSError(errno.EACCES,str(e))
-- except ResourceLockedError, e:
-+ except ResourceLockedError as e:
- if sys.platform == "win32":
- raise WindowsError(32,str(e))
- else:
- raise OSError(errno.EACCES,str(e))
-- except DirectoryNotEmptyError, e:
-+ except DirectoryNotEmptyError as e:
- raise OSError(errno.ENOTEMPTY,str(e))
-- except DestinationExistsError, e:
-+ except DestinationExistsError as e:
- raise OSError(errno.EEXIST,str(e))
-- except StorageSpaceError, e:
-+ except StorageSpaceError as e:
- raise OSError(errno.ENOSPC,str(e))
-- except RemoteConnectionError, e:
-+ except RemoteConnectionError as e:
- raise OSError(errno.ENETDOWN,str(e))
-- except UnsupportedError, e:
-+ except UnsupportedError as e:
- raise OSError(errno.ENOSYS,str(e))
-- except FSError, e:
-+ except FSError as e:
- raise OSError(errno.EFAULT,str(e))
- return wrapper
-
-@@ -255,7 +255,7 @@ def convert_os_errors(func):
- def wrapper(self,*args,**kwds):
- try:
- return func(self,*args,**kwds)
-- except (OSError,IOError), e:
-+ except (OSError,IOError) as e:
- (exc_type,exc_inst,tb) = sys.exc_info()
- path = getattr(e,"filename",None)
- if path and path[0] == "/" and hasattr(self,"root_path"):
-@@ -263,53 +263,53 @@ def convert_os_errors(func):
- if isprefix(self.root_path,path):
- path = path[len(self.root_path):]
- if not hasattr(e,"errno") or not e.errno:
-- raise OperationFailedError(opname,details=e),None,tb
-+ raise OperationFailedError(opname,details=e).with_traceback(tb)
- if e.errno == errno.ENOENT:
-- raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
-+ raise ResourceNotFoundError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.EFAULT:
- # This can happen when listdir a directory that is deleted by another thread
- # Best to interpret it as a resource not found
-- raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
-+ raise ResourceNotFoundError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.ESRCH:
-- raise ResourceNotFoundError(path,opname=opname,details=e),None,tb
-+ raise ResourceNotFoundError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.ENOTEMPTY:
-- raise DirectoryNotEmptyError(path,opname=opname,details=e),None,tb
-+ raise DirectoryNotEmptyError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.EEXIST:
-- raise DestinationExistsError(path,opname=opname,details=e),None,tb
-+ raise DestinationExistsError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == 183: # some sort of win32 equivalent to EEXIST
-- raise DestinationExistsError(path,opname=opname,details=e),None,tb
-+ raise DestinationExistsError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.ENOTDIR:
-- raise ResourceInvalidError(path,opname=opname,details=e),None,tb
-+ raise ResourceInvalidError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.EISDIR:
-- raise ResourceInvalidError(path,opname=opname,details=e),None,tb
-+ raise ResourceInvalidError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.EINVAL:
-- raise ResourceInvalidError(path,opname=opname,details=e),None,tb
-+ raise ResourceInvalidError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.ENOSPC:
-- raise StorageSpaceError(opname,path=path,details=e),None,tb
-+ raise StorageSpaceError(opname,path=path,details=e).with_traceback(tb)
- if e.errno == errno.EPERM:
-- raise PermissionDeniedError(opname,path=path,details=e),None,tb
-+ raise PermissionDeniedError(opname,path=path,details=e).with_traceback(tb)
- if hasattr(errno,"ENONET") and e.errno == errno.ENONET:
-- raise RemoteConnectionError(opname,path=path,details=e),None,tb
-+ raise RemoteConnectionError(opname,path=path,details=e).with_traceback(tb)
- if e.errno == errno.ENETDOWN:
-- raise RemoteConnectionError(opname,path=path,details=e),None,tb
-+ raise RemoteConnectionError(opname,path=path,details=e).with_traceback(tb)
- if e.errno == errno.ECONNRESET:
-- raise RemoteConnectionError(opname,path=path,details=e),None,tb
-+ raise RemoteConnectionError(opname,path=path,details=e).with_traceback(tb)
- if e.errno == errno.EACCES:
- if sys.platform == "win32":
- if e.args[0] and e.args[0] == 32:
-- raise ResourceLockedError(path,opname=opname,details=e),None,tb
-- raise PermissionDeniedError(opname,details=e),None,tb
-+ raise ResourceLockedError(path,opname=opname,details=e).with_traceback(tb)
-+ raise PermissionDeniedError(opname,details=e).with_traceback(tb)
- # Sometimes windows gives some random errors...
- if sys.platform == "win32":
- if e.errno in (13,):
-- raise ResourceInvalidError(path,opname=opname,details=e),None,tb
-+ raise ResourceInvalidError(path,opname=opname,details=e).with_traceback(tb)
- if e.errno == errno.ENAMETOOLONG:
-- raise PathError(path,details=e),None,tb
-+ raise PathError(path,details=e).with_traceback(tb)
- if e.errno == errno.EOPNOTSUPP:
-- raise UnsupportedError(opname,details=e),None,tb
-+ raise UnsupportedError(opname,details=e).with_traceback(tb)
- if e.errno == errno.ENOSYS:
-- raise UnsupportedError(opname,details=e),None,tb
-- raise OperationFailedError(opname,details=e),None,tb
-+ raise UnsupportedError(opname,details=e).with_traceback(tb)
-+ raise OperationFailedError(opname,details=e).with_traceback(tb)
- return wrapper
-
-
---- fs/expose/dokan/__init__.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/dokan/__init__.py
-@@ -54,8 +54,8 @@ systems with Dokan installed.
- # Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
- # All rights reserved; available under the terms of the MIT License.
-
--from __future__ import with_statement
-
-+
- import sys
-
- import os
-@@ -64,7 +64,7 @@ import errno
- import time
- import stat as statinfo
- import subprocess
--import cPickle
-+import pickle
- import datetime
- import ctypes
- from collections import deque
-@@ -76,7 +76,7 @@ from fs.local_functools import wraps
- from fs.wrapfs import WrapFS
-
- try:
-- import libdokan
-+ from . import libdokan
- except (NotImplementedError, EnvironmentError, ImportError, NameError,):
- is_available = False
- sys.modules.pop("fs.expose.dokan.libdokan", None)
-@@ -171,12 +171,12 @@ def handle_fs_errors(func):
- def wrapper(*args,**kwds):
- try:
- res = func(*args,**kwds)
-- except OSError, e:
-+ except OSError as e:
- if e.errno:
- res = -1 * _errno2syserrcode(e.errno)
- else:
- res = -1
-- except Exception, e:
-+ except Exception as e:
- raise
- else:
- if res is None:
-@@ -424,7 +424,7 @@ class FSOperations(object):
- info.contents.Context = 1
- try:
- f = self.fs.open(path, mode)
-- print path, mode, repr(f)
-+ print(path, mode, repr(f))
- except ResourceInvalidError:
- info.contents.IsDirectory = True
- except FSError:
-@@ -896,10 +896,10 @@ def mount(fs, drive, foreground=False, ready_callback=
- def check_ready(mp=None):
- if ready_callback is not False:
- check_alive(mp)
-- for _ in xrange(100):
-+ for _ in range(100):
- try:
- os.stat(drive+":\\")
-- except EnvironmentError, e:
-+ except EnvironmentError as e:
- check_alive(mp)
- time.sleep(0.05)
- else:
-@@ -989,7 +989,7 @@ class MountProcess(subprocess.Popen):
- cmd = cmd + "data = cPickle.loads(%s); "
- cmd = cmd + "from fs.expose.dokan import MountProcess; "
- cmd = cmd + "MountProcess._do_mount(data)"
-- cmd = cmd % (repr(cPickle.dumps((fs,drive,dokan_opts,nowait),-1)),)
-+ cmd = cmd % (repr(pickle.dumps((fs,drive,dokan_opts,nowait),-1)),)
- cmd = [sys.executable,"-c",cmd]
- super(MountProcess,self).__init__(cmd,**kwds)
-
---- fs/expose/ftp.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/ftp.py
-@@ -28,7 +28,7 @@ from fs.osfs import OSFS
- from fs.errors import convert_fs_errors
- from fs import iotools
-
--from six import text_type as unicode
-+from six import text_type as str
-
-
- # Get these once so we can reuse them:
-@@ -107,9 +107,9 @@ class FTPFS(ftpserver.AbstractedFS):
- def chdir(self, path):
- # We dont' use the decorator here, we actually decode a version of the
- # path for use with pyfs, but keep the original for use with pyftpdlib.
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- # pyftpdlib 0.7.x
-- unipath = unicode(path, self.encoding)
-+ unipath = str(path, self.encoding)
- else:
- # pyftpdlib 1.x
- unipath = path
-@@ -134,7 +134,7 @@ class FTPFS(ftpserver.AbstractedFS):
- @convert_fs_errors
- @decode_args
- def listdir(self, path):
-- return map(lambda x: x.encode(self.encoding), self.fs.listdir(path))
-+ return [x.encode(self.encoding) for x in self.fs.listdir(path)]
-
- @convert_fs_errors
- @decode_args
-@@ -190,7 +190,7 @@ class FTPFS(ftpserver.AbstractedFS):
- kwargs['st_mode'] = info['mode']
- else:
- # Otherwise, build one. Not executable by default.
-- mode = 0660
-+ mode = 0o660
- # Merge in the type (dir or file). File is tested first, some file systems
- # such as ArchiveMountFS treat archive files as directories too. By checking
- # file first, any such files will be only files (not directories).
-@@ -198,7 +198,7 @@ class FTPFS(ftpserver.AbstractedFS):
- mode |= stat.S_IFREG
- elif self.fs.isdir(path):
- mode |= stat.S_IFDIR
-- mode |= 0110 # Merge in exec bit to signal dir is listable
-+ mode |= 0o110 # Merge in exec bit to signal dir is listable
- kwargs['st_mode'] = mode
- return FakeStat(**kwargs)
-
---- fs/expose/fuse/__init__.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/fuse/__init__.py
-@@ -56,7 +56,7 @@ import errno
- import time
- import stat as statinfo
- import subprocess
--import cPickle
-+import pickle
-
- import logging
- logger = logging.getLogger("fs.expose.fuse")
-@@ -404,9 +404,9 @@ class FSOperations(Operations):
- # The interesting stuff
- if 'st_mode' not in info:
- if self.fs.isdir(path):
-- info['st_mode'] = 0755
-+ info['st_mode'] = 0o755
- else:
-- info['st_mode'] = 0666
-+ info['st_mode'] = 0o666
- mode = info['st_mode']
- if not statinfo.S_ISDIR(mode) and not statinfo.S_ISREG(mode):
- if self.fs.isdir(path):
-@@ -432,7 +432,7 @@ class FSOperations(Operations):
- except KeyError:
- pass
- else:
-- info["st_size"] = max(written_sizes.values() + [info["st_size"]])
-+ info["st_size"] = max(list(written_sizes.values()) + [info["st_size"]])
- return info
-
-
-@@ -491,7 +491,7 @@ def unmount(path):
- else:
- args = ["fusermount", "-u", path]
-
-- for num_tries in xrange(3):
-+ for num_tries in range(3):
- p = subprocess.Popen(args,
- stderr=subprocess.PIPE,
- stdout=subprocess.PIPE)
-@@ -554,7 +554,7 @@ class MountProcess(subprocess.Popen):
- cmd = cmd + 'data = loads(%s); '
- cmd = cmd + 'from fs.expose.fuse import MountProcess; '
- cmd = cmd + 'MountProcess._do_mount_nowait(data)'
-- cmd = cmd % (repr(cPickle.dumps((fs, path, fuse_opts), -1)),)
-+ cmd = cmd % (repr(pickle.dumps((fs, path, fuse_opts), -1)),)
- cmd = [sys.executable, "-c", cmd]
- super(MountProcess, self).__init__(cmd, **kwds)
- else:
-@@ -567,7 +567,7 @@ class MountProcess(subprocess.Popen):
- cmd = cmd + 'data = loads(%s); '
- cmd = cmd + 'from fs.expose.fuse import MountProcess; '
- cmd = cmd + 'MountProcess._do_mount_wait(data)'
-- cmd = cmd % (repr(cPickle.dumps((fs, path, fuse_opts, r, w), -1)),)
-+ cmd = cmd % (repr(pickle.dumps((fs, path, fuse_opts, r, w), -1)),)
- cmd = [sys.executable, "-c", cmd]
- super(MountProcess, self).__init__(cmd, **kwds)
- os.close(w)
-@@ -635,8 +635,8 @@ class MountProcess(subprocess.Popen):
- opts["unmount_callback"] = unmount_callback
- try:
- mount(fs, path, **opts)
-- except Exception, e:
-- os.write(w, b("E") + unicode(e).encode('ascii', errors='replace'))
-+ except Exception as e:
-+ os.write(w, b("E") + str(e).encode('ascii', errors='replace'))
- os.close(w)
-
- if not successful:
-@@ -653,5 +653,5 @@ if __name__ == "__main__":
- os.makedirs(mount_point)
-
- def ready_callback():
-- print "READY"
-+ print("READY")
- mount(TempFS(), mount_point, foreground=True, ready_callback=ready_callback)
---- fs/expose/fuse/fuse_ctypes.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/fuse/fuse_ctypes.py
-@@ -12,8 +12,8 @@
- # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
--from __future__ import division
-
-+
- from ctypes import *
- from ctypes.util import find_library
- from errno import *
-@@ -273,7 +273,7 @@ def time_of_timespec(ts):
- return ts.tv_sec + ts.tv_nsec / 10 ** 9
-
- def set_st_attrs(st, attrs):
-- for key, val in attrs.items():
-+ for key, val in list(attrs.items()):
- if key in ('st_atime', 'st_mtime', 'st_ctime'):
- timespec = getattr(st, key + 'spec')
- timespec.tv_sec = int(val)
-@@ -314,7 +314,7 @@ class FUSE(object):
- kwargs.setdefault('fsname', operations.__class__.__name__)
- args.append('-o')
- args.append(','.join(key if val == True else '%s=%s' % (key, val)
-- for key, val in kwargs.items()))
-+ for key, val in list(kwargs.items())))
- args.append(mountpoint)
- argv = (c_char_p * len(args))(*args)
-
-@@ -331,7 +331,7 @@ class FUSE(object):
- """Decorator for the methods that follow"""
- try:
- return func(*args, **kwargs) or 0
-- except OSError, e:
-+ except OSError as e:
- return -(e.errno or EFAULT)
- except:
- print_exc()
-@@ -400,7 +400,7 @@ class FUSE(object):
- def statfs(self, path, buf):
- stv = buf.contents
- attrs = self.operations('statfs', path)
-- for key, val in attrs.items():
-+ for key, val in list(attrs.items()):
- if hasattr(stv, key):
- setattr(stv, key, val)
- return 0
-@@ -576,7 +576,7 @@ class Operations(object):
-
- if path != '/':
- raise OSError(ENOENT, '')
-- return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
-+ return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
-
- def getxattr(self, path, name, position=0):
- raise OSError(ENOTSUP, '')
-@@ -667,13 +667,13 @@ class Operations(object):
-
- class LoggingMixIn:
- def __call__(self, op, path, *args):
-- print '->', op, path, repr(args)
-+ print('->', op, path, repr(args))
- ret = '[Unknown Error]'
- try:
- ret = getattr(self, op)(path, *args)
- return ret
-- except OSError, e:
-+ except OSError as e:
- ret = str(e)
- raise
- finally:
-- print '<-', op, repr(ret)
-+ print('<-', op, repr(ret))
---- fs/expose/fuse/fuse.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/fuse/fuse.py
-@@ -12,8 +12,8 @@
- # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
--from __future__ import division
-
-+
- from ctypes import *
- from ctypes.util import find_library
- from errno import *
-@@ -269,7 +269,7 @@ def time_of_timespec(ts):
- return ts.tv_sec + ts.tv_nsec / 10 ** 9
-
- def set_st_attrs(st, attrs):
-- for key, val in attrs.items():
-+ for key, val in list(attrs.items()):
- if key in ('st_atime', 'st_mtime', 'st_ctime'):
- timespec = getattr(st, key + 'spec')
- timespec.tv_sec = int(val)
-@@ -312,7 +312,7 @@ class FUSE(object):
- kwargs.setdefault('fsname', operations.__class__.__name__)
- args.append('-o')
- args.append(','.join(key if val == True else '%s=%s' % (key, val)
-- for key, val in kwargs.items()))
-+ for key, val in list(kwargs.items())))
- args.append(mountpoint)
- argv = (c_char_p * len(args))(*args)
-
-@@ -331,7 +331,7 @@ class FUSE(object):
- """Decorator for the methods that follow"""
- try:
- return func(*args, **kwargs) or 0
-- except OSError, e:
-+ except OSError as e:
- return -(e.errno or EFAULT)
- except:
- print_exc()
-@@ -406,7 +406,7 @@ class FUSE(object):
- def statfs(self, path, buf):
- stv = buf.contents
- attrs = self.operations('statfs', path)
-- for key, val in attrs.items():
-+ for key, val in list(attrs.items()):
- if hasattr(stv, key):
- setattr(stv, key, val)
- return 0
-@@ -579,7 +579,7 @@ class Operations(object):
-
- if path != '/':
- raise FuseOSError(ENOENT)
-- return dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
-+ return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
-
- def getxattr(self, path, name, position=0):
- raise FuseOSError(ENOTSUP)
-@@ -670,13 +670,13 @@ class Operations(object):
-
- class LoggingMixIn:
- def __call__(self, op, path, *args):
-- print '->', op, path, repr(args)
-+ print('->', op, path, repr(args))
- ret = '[Unhandled Exception]'
- try:
- ret = getattr(self, op)(path, *args)
- return ret
-- except OSError, e:
-+ except OSError as e:
- ret = str(e)
- raise
- finally:
-- print '<-', op, repr(ret)
-+ print('<-', op, repr(ret))
---- fs/expose/fuse/fuse3.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/expose/fuse/fuse3.py
-@@ -229,7 +229,7 @@ def time_of_timespec(ts):
- return ts.tv_sec + ts.tv_nsec / 10 ** 9
-
- def set_st_attrs(st, attrs):
-- for key, val in attrs.items():
-+ for key, val in list(attrs.items()):
- if key in ('st_atime', 'st_mtime', 'st_ctime'):
- timespec = getattr(st, key + 'spec')
- timespec.tv_sec = int(val)
-@@ -274,7 +274,7 @@ class FUSE(object):
- kwargs.setdefault('fsname', operations.__class__.__name__)
- args.append('-o')
- args.append(','.join(key if val == True else '%s=%s' % (key, val)
-- for key, val in kwargs.items()))
-+ for key, val in list(kwargs.items())))
- args.append(mountpoint)
- argv = (c_char_p * len(args))(*args)
-
-@@ -361,7 +361,7 @@ class FUSE(object):
- def statfs(self, path, buf):
- stv = buf.contents
- attrs = self.operations('statfs', path)
-- for key, val in attrs.items():
-+ for key, val in list(attrs.items()):
- if hasattr(stv, key):
- setattr(stv, key, val)
- return 0
---- fs/expose/http.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/http.py
-@@ -1,13 +1,13 @@
- __all__ = ["serve_fs"]
-
--import SimpleHTTPServer
--import SocketServer
-+import http.server
-+import socketserver
- from fs.path import pathjoin, dirname
- from fs.errors import FSError
- from time import mktime
--from cStringIO import StringIO
-+from io import StringIO
- import cgi
--import urllib
-+import urllib.request, urllib.parse, urllib.error
- import posixpath
- import time
- import threading
-@@ -16,13 +16,13 @@ import socket
- def _datetime_to_epoch(d):
- return mktime(d.timetuple())
-
--class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
-+class FSHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
-
- """A hacked together version of SimpleHTTPRequestHandler"""
-
- def __init__(self, fs, request, client_address, server):
- self._fs = fs
-- SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
-+ http.server.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
-
- def do_GET(self):
- """Serve a GET request."""
-@@ -69,7 +69,7 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTP
- try:
- info = self._fs.getinfo(path)
- f = self._fs.open(path, 'rb')
-- except FSError, e:
-+ except FSError as e:
- self.send_error(404, str(e))
- return None
- self.send_response(200)
-@@ -98,7 +98,7 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTP
- paths = [p+'/' for p in sorted(dir_paths, key=lambda p:p.lower())] + sorted(file_paths, key=lambda p:p.lower())
- #list.sort(key=lambda a: a.lower())
- f = StringIO()
-- displaypath = cgi.escape(urllib.unquote(self.path))
-+ displaypath = cgi.escape(urllib.parse.unquote(self.path))
- f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
- f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
- f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
-@@ -106,11 +106,11 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTP
-
- parent = dirname(path)
- if path != parent:
-- f.write('<li><a href="%s">../</a></li>' % urllib.quote(parent.rstrip('/') + '/'))
-+ f.write('<li><a href="%s">../</a></li>' % urllib.parse.quote(parent.rstrip('/') + '/'))
-
- for path in paths:
- f.write('<li><a href="%s">%s</a>\n'
-- % (urllib.quote(path), cgi.escape(path)))
-+ % (urllib.parse.quote(path), cgi.escape(path)))
- f.write("</ul>\n<hr>\n</body>\n</html>\n")
- length = f.tell()
- f.seek(0)
-@@ -124,7 +124,7 @@ class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTP
- # abandon query parameters
- path = path.split('?',1)[0]
- path = path.split('#',1)[0]
-- path = posixpath.normpath(urllib.unquote(path))
-+ path = posixpath.normpath(urllib.parse.unquote(path))
- return path
-
-
-@@ -143,7 +143,7 @@ def serve_fs(fs, address='', port=8000):
-
- #class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
- # pass
-- httpd = SocketServer.TCPServer((address, port), Handler, bind_and_activate=False)
-+ httpd = socketserver.TCPServer((address, port), Handler, bind_and_activate=False)
- #httpd = ThreadedTCPServer((address, port), Handler, bind_and_activate=False)
- httpd.allow_reuse_address = True
- httpd.server_bind()
---- fs/expose/importhook.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/importhook.py
-@@ -60,7 +60,7 @@ class FSImportHook(object):
- def __init__(self,fs_or_url):
- # If given a string, try to open it as an FS url.
- # Don't open things on the local filesystem though.
-- if isinstance(fs_or_url,basestring):
-+ if isinstance(fs_or_url,str):
- if ":/" not in fs_or_url:
- raise ImportError
- try:
-@@ -182,7 +182,7 @@ class FSImportHook(object):
- mod.__loader__ = self
- sys.modules[fullname] = mod
- try:
-- exec code in mod.__dict__
-+ exec(code, mod.__dict__)
- mod.__file__ = self.get_filename(fullname,info)
- if self.is_package(fullname,info):
- if self.path is None:
-@@ -231,7 +231,7 @@ class FSImportHook(object):
- """Read the specified data file."""
- try:
- return self.fs.getcontents(path, 'rb')
-- except FSError, e:
-+ except FSError as e:
- raise IOError(str(e))
-
- def get_filename(self,fullname,info=None):
---- fs/expose/sftp.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/sftp.py
-@@ -24,12 +24,12 @@ is, you probably don't want to use it.
-
- """
-
--from __future__ import with_statement
-
-+
- import os
- import stat as statinfo
- import time
--import SocketServer
-+import socketserver
- import threading
-
- import paramiko
-@@ -73,11 +73,11 @@ def report_sftp_errors(func):
- def wrapper(*args,**kwds):
- try:
- return func(*args, **kwds)
-- except ResourceNotFoundError, e:
-+ except ResourceNotFoundError as e:
- return paramiko.SFTP_NO_SUCH_FILE
-- except UnsupportedError, e:
-+ except UnsupportedError as e:
- return paramiko.SFTP_OP_UNSUPPORTED
-- except FSError, e:
-+ except FSError as e:
- return paramiko.SFTP_FAILURE
- return wrapper
-
-@@ -114,7 +114,7 @@ class SFTPServerInterface(paramiko.SFTPServerInterface
-
- @report_sftp_errors
- def list_folder(self, path):
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.encoding)
- stats = []
- for entry in self.fs.listdir(path, absolute=True):
-@@ -125,7 +125,7 @@ class SFTPServerInterface(paramiko.SFTPServerInterface
-
- @report_sftp_errors
- def stat(self, path):
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.encoding)
-
- info = self.fs.getinfo(path)
-@@ -146,9 +146,9 @@ class SFTPServerInterface(paramiko.SFTPServerInterface
- stat.st_mtime = time.mktime(info.get("modified_time").timetuple())
-
- if isdir(self.fs, path, info):
-- stat.st_mode = 0777 | statinfo.S_IFDIR
-+ stat.st_mode = 0o777 | statinfo.S_IFDIR
- else:
-- stat.st_mode = 0777 | statinfo.S_IFREG
-+ stat.st_mode = 0o777 | statinfo.S_IFREG
- return stat
-
- def lstat(self, path):
-@@ -156,16 +156,16 @@ class SFTPServerInterface(paramiko.SFTPServerInterface
-
- @report_sftp_errors
- def remove(self, path):
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.encoding)
- self.fs.remove(path)
- return paramiko.SFTP_OK
-
- @report_sftp_errors
- def rename(self, oldpath, newpath):
-- if not isinstance(oldpath, unicode):
-+ if not isinstance(oldpath, str):
- oldpath = oldpath.decode(self.encoding)
-- if not isinstance(newpath, unicode):
-+ if not isinstance(newpath, str):
- newpath = newpath.decode(self.encoding)
- if self.fs.isfile(oldpath):
- self.fs.move(oldpath, newpath)
-@@ -175,14 +175,14 @@ class SFTPServerInterface(paramiko.SFTPServerInterface
-
- @report_sftp_errors
- def mkdir(self, path, attr):
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.encoding)
- self.fs.makedir(path)
- return paramiko.SFTP_OK
-
- @report_sftp_errors
- def rmdir(self, path):
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.encoding)
- self.fs.removedir(path)
- return paramiko.SFTP_OK
-@@ -224,7 +224,7 @@ class SFTPHandle(paramiko.SFTPHandle):
- super(SFTPHandle, self).__init__(flags)
- mode = flags_to_mode(flags)
- self.owner = owner
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.owner.encoding)
- self.path = path
- self._file = owner.fs.open(path, mode)
-@@ -263,7 +263,7 @@ class SFTPServer(paramiko.SFTPServer):
- super(SFTPServer, self).finish_subsystem()
-
-
--class SFTPRequestHandler(SocketServer.BaseRequestHandler):
-+class SFTPRequestHandler(socketserver.BaseRequestHandler):
- """SocketServer RequestHandler subclass for BaseSFTPServer.
-
- This RequestHandler subclass creates a paramiko Transport, sets up the
-@@ -305,7 +305,7 @@ class SFTPRequestHandler(SocketServer.BaseRequestHandl
-
-
-
--class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
-+class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
- pass
-
-
-@@ -334,7 +334,7 @@ class BaseSFTPServer(ThreadedTCPServer):
- self.host_key = host_key
- if RequestHandlerClass is None:
- RequestHandlerClass = SFTPRequestHandler
-- SocketServer.TCPServer.__init__(self, address, RequestHandlerClass)
-+ socketserver.TCPServer.__init__(self, address, RequestHandlerClass)
-
- def shutdown_request(self, request):
- # Prevent TCPServer from closing the connection prematurely
---- fs/expose/wsgi/__init__.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/expose/wsgi/__init__.py
-@@ -1 +1 @@
--from wsgi import serve_fs
-+from .wsgi import serve_fs
---- fs/expose/wsgi/serve_home.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/wsgi/serve_home.py
-@@ -1,10 +1,10 @@
- from wsgiref.simple_server import make_server
-
- from fs.osfs import OSFS
--from wsgi import serve_fs
-+from .wsgi import serve_fs
- osfs = OSFS('~/')
- application = serve_fs(osfs)
-
- httpd = make_server('', 8000, application)
--print "Serving on http://127.0.0.1:8000"
-+print("Serving on http://127.0.0.1:8000")
- httpd.serve_forever()
---- fs/expose/wsgi/wsgi.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/expose/wsgi/wsgi.py
-@@ -1,5 +1,5 @@
-
--import urlparse
-+import urllib.parse
- import mimetypes
-
- from fs.errors import FSError
-@@ -10,7 +10,7 @@ from datetime import datetime
- try:
- from mako.template import Template
- except ImportError:
-- print "Requires mako templates http://www.makotemplates.org/"
-+ print("Requires mako templates http://www.makotemplates.org/")
- raise
-
-
-@@ -28,7 +28,7 @@ class WSGIServer(object):
- def __init__(self, serve_fs, indexes=True, dir_template=None, chunk_size=16*1024*1024):
-
- if dir_template is None:
-- from dirtemplate import template as dir_template
-+ from .dirtemplate import template as dir_template
-
- self.serve_fs = serve_fs
- self.indexes = indexes
-@@ -57,7 +57,7 @@ class WSGIServer(object):
- serving_file = None
- try:
- serving_file = self.serve_fs.open(path, 'rb')
-- except Exception, e:
-+ except Exception as e:
- if serving_file is not None:
- serving_file.close()
- return self.serve_500(request, str(e))
---- fs/expose/xmlrpc.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/expose/xmlrpc.py
-@@ -15,8 +15,8 @@ an FS object, which can then be exposed using whatever
-
- """
-
--import xmlrpclib
--from SimpleXMLRPCServer import SimpleXMLRPCServer
-+import xmlrpc.client
-+from xmlrpc.server import SimpleXMLRPCServer
- from datetime import datetime
- import base64
-
-@@ -61,13 +61,13 @@ class RPCFSInterface(object):
-
- def getmeta(self, meta_name):
- meta = self.fs.getmeta(meta_name)
-- if isinstance(meta, basestring):
-+ if isinstance(meta, str):
- meta = self.decode_path(meta)
- return meta
-
- def getmeta_default(self, meta_name, default):
- meta = self.fs.getmeta(meta_name, default)
-- if isinstance(meta, basestring):
-+ if isinstance(meta, str):
- meta = self.decode_path(meta)
- return meta
-
-@@ -77,7 +77,7 @@ class RPCFSInterface(object):
- def get_contents(self, path, mode="rb"):
- path = self.decode_path(path)
- data = self.fs.getcontents(path, mode)
-- return xmlrpclib.Binary(data)
-+ return xmlrpc.client.Binary(data)
-
- def set_contents(self, path, data):
- path = self.decode_path(path)
-@@ -119,16 +119,16 @@ class RPCFSInterface(object):
-
- def settimes(self, path, accessed_time, modified_time):
- path = self.decode_path(path)
-- if isinstance(accessed_time, xmlrpclib.DateTime):
-+ if isinstance(accessed_time, xmlrpc.client.DateTime):
- accessed_time = datetime.strptime(accessed_time.value, "%Y%m%dT%H:%M:%S")
-- if isinstance(modified_time, xmlrpclib.DateTime):
-+ if isinstance(modified_time, xmlrpc.client.DateTime):
- modified_time = datetime.strptime(modified_time.value, "%Y%m%dT%H:%M:%S")
- return self.fs.settimes(path, accessed_time, modified_time)
-
- def getinfo(self, path):
- path = self.decode_path(path)
- info = self.fs.getinfo(path)
-- info = dict((k, v) for k, v in info.iteritems()
-+ info = dict((k, v) for k, v in info.items()
- if k in self._allowed_info)
- return info
-
---- fs/filelike.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/filelike.py
-@@ -52,9 +52,9 @@ if PY3:
- from six import BytesIO as _StringIO
- else:
- try:
-- from cStringIO import StringIO as _StringIO
-+ from io import StringIO as _StringIO
- except ImportError:
-- from StringIO import StringIO as _StringIO
-+ from io import StringIO as _StringIO
-
-
- class FileLikeBase(object):
-@@ -305,7 +305,7 @@ class FileLikeBase(object):
- self.close()
- return False
-
-- def next(self):
-+ def __next__(self):
- """next() method complying with the iterator protocol.
-
- File-like objects are their own iterators, with each call to
---- fs/ftpfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/ftpfs.py
-@@ -37,9 +37,9 @@ if PY3:
- from six import BytesIO as StringIO
- else:
- try:
-- from cStringIO import StringIO
-+ from io import StringIO
- except ImportError:
-- from StringIO import StringIO
-+ from io import StringIO
-
- import time
-
-@@ -221,10 +221,10 @@ class FTPListDataParser(object):
- elif c == 'r':
- result.try_retr = True
- elif c == 's':
-- result.size = long(buf[i+1:j])
-+ result.size = int(buf[i+1:j])
- elif c == 'm':
- result.mtime_type = MTIME_TYPE.LOCAL
-- result.mtime = long(buf[i+1:j])
-+ result.mtime = int(buf[i+1:j])
- elif c == 'i':
- result.id_type = ID_TYPE.FULL
- result.id = buf[i+1:j-i-1]
-@@ -285,7 +285,7 @@ class FTPListDataParser(object):
-
- elif state == 4: # getting tentative size
- try:
-- size = long(buf[i:j])
-+ size = int(buf[i:j])
- except ValueError:
- pass
- state = 5
-@@ -295,25 +295,25 @@ class FTPListDataParser(object):
- if month >= 0:
- state = 6
- else:
-- size = long(buf[i:j])
-+ size = int(buf[i:j])
-
- elif state == 6: # have size and month
-- mday = long(buf[i:j])
-+ mday = int(buf[i:j])
- state = 7
-
- elif state == 7: # have size, month, mday
- if (j - i == 4) and (buf[i+1] == ':'):
-- hour = long(buf[i])
-- minute = long(buf[i+2:i+4])
-+ hour = int(buf[i])
-+ minute = int(buf[i+2:i+4])
- result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
- result.mtime = self._guess_time(month, mday, hour, minute)
- elif (j - i == 5) and (buf[i+2] == ':'):
-- hour = long(buf[i:i+2])
-- minute = long(buf[i+3:i+5])
-+ hour = int(buf[i:i+2])
-+ minute = int(buf[i+3:i+5])
- result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
- result.mtime = self._guess_time(month, mday, hour, minute)
- elif j - i >= 4:
-- year = long(buf[i:j])
-+ year = int(buf[i:j])
- result.mtime_type = MTIME_TYPE.REMOTE_DAY
- result.mtime = self._get_mtime(year, month, mday)
- else:
-@@ -379,7 +379,7 @@ class FTPListDataParser(object):
- j = i
-
- j = buf.index('-', j)
-- mday = long(buf[i:j])
-+ mday = int(buf[i:j])
-
- j = _skip(buf, j, '-')
- i = j
-@@ -391,13 +391,13 @@ class FTPListDataParser(object):
- j = _skip(buf, j, '-')
- i = j
- j = buf.index(' ', j)
-- year = long(buf[i:j])
-+ year = int(buf[i:j])
-
- j = _skip(buf, j, ' ')
- i = j
-
- j = buf.index(':', j)
-- hour = long(buf[i:j])
-+ hour = int(buf[i:j])
- j = _skip(buf, j, ':')
- i = j
-
-@@ -406,7 +406,7 @@ class FTPListDataParser(object):
- if j == buflen:
- raise IndexError # abort, abort!
-
-- minute = long(buf[i:j])
-+ minute = int(buf[i:j])
-
- result.mtime_type = MTIME_TYPE.REMOTE_MINUTE
- result.mtime = self._get_mtime(year, month, mday, hour, minute)
-@@ -430,17 +430,17 @@ class FTPListDataParser(object):
- result = FTPListData(buf)
-
- j = buf.index('-', j)
-- month = long(buf[i:j])
-+ month = int(buf[i:j])
-
- j = _skip(buf, j, '-')
- i = j
- j = buf.index('-', j)
-- mday = long(buf[i:j])
-+ mday = int(buf[i:j])
-
- j = _skip(buf, j, '-')
- i = j
- j = buf.index(' ', j)
-- year = long(buf[i:j])
-+ year = int(buf[i:j])
- if year < 50:
- year += 2000
- if year < 1000:
-@@ -449,14 +449,14 @@ class FTPListDataParser(object):
- j = _skip(buf, j, ' ')
- i = j
- j = buf.index(':', j)
-- hour = long(buf[i:j])
-+ hour = int(buf[i:j])
- j = _skip(buf, j, ':')
- i = j
- while not (buf[j] in 'AP'):
- j += 1
- if j == buflen:
- raise IndexError
-- minute = long(buf[i:j])
-+ minute = int(buf[i:j])
-
- if buf[j] == 'A':
- j += 1
-@@ -482,7 +482,7 @@ class FTPListDataParser(object):
- i = j
- j = buf.index(' ', j)
-
-- result.size = long(buf[i:j])
-+ result.size = int(buf[i:j])
- result.try_retr = True
-
- j = _skip(buf, j, ' ')
-@@ -546,10 +546,10 @@ class FTPMlstDataParser(object):
- int(factvalue[12:14]),
- 0, 0, 0))
- elif factname == 'size':
-- result.size = long(factvalue)
-+ result.size = int(factvalue)
- elif factname == 'sizd':
- # some FTP servers report directory size with sizd
-- result.size = long(factvalue)
-+ result.size = int(factvalue)
- elif factname == 'type':
- if factvalue.lower() == 'file':
- result.try_retr = True
-@@ -605,7 +605,7 @@ def fileftperrors(f):
- try:
- try:
- ret = f(self, *args, **kwargs)
-- except Exception, e:
-+ except Exception as e:
- self.ftpfs._translate_exception(args[0] if args else '', e)
- finally:
- self._lock.release()
-@@ -795,16 +795,16 @@ class _FTPFile(object):
- self.conn.close()
- self.conn = None
- self.ftp.voidresp()
-- except error_temp, error_perm:
-+ except error_temp as error_perm:
- pass
- if self.ftp is not None:
- try:
- self.ftp.close()
-- except error_temp, error_perm:
-+ except error_temp as error_perm:
- pass
- self.closed = True
-
-- def next(self):
-+ def __next__(self):
- return self.readline()
-
- def readline(self, size=None):
-@@ -823,7 +823,7 @@ def ftperrors(f):
- try:
- try:
- ret = f(self, *args, **kwargs)
-- except Exception, e:
-+ except Exception as e:
- self._translate_exception(args[0] if args else '', e)
- finally:
- self._leave_dircache()
-@@ -834,7 +834,7 @@ def ftperrors(f):
-
-
- def _encode(s):
-- if isinstance(s, unicode):
-+ if isinstance(s, str):
- return s.encode('utf-8')
- return s
-
-@@ -956,7 +956,7 @@ class FTPFS(FS):
- return features
-
- def on_line(line):
-- if not isinstance(line, unicode):
-+ if not isinstance(line, str):
- line = line.decode('utf-8')
- info = parse_ftp_list_line(line, self.use_mlst)
- if info:
-@@ -986,7 +986,7 @@ class FTPFS(FS):
- else: # Matrix FTP server has bug
- on_line(list_line)
- # if it's a dir, then we can send a MLSD
-- if dirlist[dirlist.keys()[0]]['try_cwd']:
-+ if dirlist[list(dirlist.keys())[0]]['try_cwd']:
- dirlist = {}
- self.ftp.retrlines("MLSD " + encoded_path, on_line)
- else:
-@@ -996,11 +996,11 @@ class FTPFS(FS):
- self.dircache[path] = dirlist
-
- def is_symlink(info):
-- return info['try_retr'] and info['try_cwd'] and info.has_key('target')
-+ return info['try_retr'] and info['try_cwd'] and 'target' in info
-
- def resolve_symlink(linkpath):
- linkinfo = self.getinfo(linkpath)
-- if not linkinfo.has_key('resolved'):
-+ if 'resolved' not in linkinfo:
- linkinfo['resolved'] = linkpath
- if is_symlink(linkinfo):
- target = linkinfo['target']
-@@ -1036,7 +1036,7 @@ class FTPFS(FS):
- else:
- dircache = self.dircache
- paths = [normpath(abspath(path)) for path in paths]
-- for cached_path in dircache.keys():
-+ for cached_path in list(dircache.keys()):
- for path in paths:
- if isbase(cached_path, path):
- dircache.pop(cached_path, None)
-@@ -1083,7 +1083,7 @@ class FTPFS(FS):
- else:
- ftp.connect(self.host, self.port, self.timeout)
- ftp.login(self.user, self.passwd, self.acct)
-- except socket_error, e:
-+ except socket_error as e:
- raise RemoteConnectionError(str(e), details=e)
- return ftp
-
-@@ -1104,7 +1104,7 @@ class FTPFS(FS):
- return '<FTPFS %s>' % self.host
-
- def __unicode__(self):
-- return u'<FTPFS %s>' % self.host
-+ return '<FTPFS %s>' % self.host
-
- @convert_os_errors
- def _translate_exception(self, path, exception):
-@@ -1225,7 +1225,7 @@ class FTPFS(FS):
- raise ResourceNotFoundError(path)
- if not self.isdir(path):
- raise ResourceInvalidError(path)
-- paths = self._readdir(path).keys()
-+ paths = list(self._readdir(path).keys())
-
- return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
-
-@@ -1266,7 +1266,7 @@ class FTPFS(FS):
- self.ftp.mkd(_encode(path))
- except error_reply:
- return
-- except error_perm, e:
-+ except error_perm as e:
- if recursive or allow_recreate:
- return
- if str(e).split(' ', 1)[0]=='550':
-@@ -1337,7 +1337,7 @@ class FTPFS(FS):
- try:
- self.refresh_dircache(dirname(src), dirname(dst))
- self.ftp.rename(_encode(src), _encode(dst))
-- except error_perm, exception:
-+ except error_perm as exception:
- code, message = str(exception).split(' ', 1)
- if code == "550":
- if not self.exists(dirname(dst)):
---- fs/httpfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/httpfs.py
-@@ -10,8 +10,8 @@ from fs.path import normpath
- from fs.errors import ResourceNotFoundError, UnsupportedError
- from fs.filelike import FileWrapper
- from fs import iotools
--
--from urllib2 import urlopen, URLError
-+from urllib.request import urlopen
-+from urllib.error import URLError
- from datetime import datetime
-
-
-@@ -50,9 +50,9 @@ class HTTPFS(FS):
- url = self._make_url(path)
- try:
- f = urlopen(url)
-- except URLError, e:
-+ except URLError as e:
- raise ResourceNotFoundError(path, details=e)
-- except OSError, e:
-+ except OSError as e:
- raise ResourceNotFoundError(path, details=e)
-
- return FileWrapper(f)
---- fs/iotools.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/iotools.py
-@@ -1,6 +1,6 @@
--from __future__ import unicode_literals
--from __future__ import print_function
-
-+
-+
- from fs import SEEK_SET, SEEK_CUR, SEEK_END
-
- import io
-@@ -178,7 +178,7 @@ def make_bytes_io(data, encoding=None, errors=None):
- if hasattr(data, 'mode') and 'b' in data.mode:
- # It's already a binary file
- return data
-- if not isinstance(data, basestring):
-+ if not isinstance(data, str):
- # It's a file, but we don't know if its binary
- # TODO: Is there a better way than reading the entire file?
- data = data.read() or b''
---- fs/memoryfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/memoryfs.py
-@@ -78,10 +78,10 @@ class MemoryFile(object):
- return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-
- def __repr__(self):
-- return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-+ return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-
- def __unicode__(self):
-- return u"<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-+ return "<MemoryFile in %s %s>" % (self.memory_fs, self.path)
-
- def __del__(self):
- if not self.closed:
-@@ -101,7 +101,7 @@ class MemoryFile(object):
- def next(self):
- if 'r' not in self.mode and '+' not in self.mode:
- raise IOError("File not open for reading")
-- return self.mem_file.next()
-+ return next(self.mem_file)
-
- @seek_and_lock
- def readline(self, *args, **kwargs):
-@@ -218,7 +218,7 @@ class DirEntry(object):
- if self.isfile():
- return "<file %s>" % self.name
- elif self.isdir():
-- return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())
-+ return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.items())
-
- def isdir(self):
- return self.type == "dir"
-@@ -559,10 +559,10 @@ class MemoryFS(FS):
- raise ResourceNotFoundError(path)
- if dir_entry.isfile():
- raise ResourceInvalidError(path, msg="not a directory: %(path)s")
-- paths = dir_entry.contents.keys()
-+ paths = list(dir_entry.contents.keys())
- for (i,p) in enumerate(paths):
-- if not isinstance(p,unicode):
-- paths[i] = unicode(p)
-+ if not isinstance(p,str):
-+ paths[i] = str(p)
- return self._listdir_helper(path, paths, wildcard, full, absolute, dirs_only, files_only)
-
- @synchronize
-@@ -578,10 +578,10 @@ class MemoryFS(FS):
- info['accessed_time'] = dir_entry.accessed_time
-
- if dir_entry.isdir():
-- info['st_mode'] = 0755 | stat.S_IFDIR
-+ info['st_mode'] = 0o755 | stat.S_IFDIR
- else:
- info['size'] = len(dir_entry.data or b(''))
-- info['st_mode'] = 0666 | stat.S_IFREG
-+ info['st_mode'] = 0o666 | stat.S_IFREG
-
- return info
-
-@@ -671,12 +671,12 @@ class MemoryFS(FS):
- @synchronize
- def setxattr(self, path, key, value):
- dir_entry = self._dir_entry(path)
-- key = unicode(key)
-+ key = str(key)
- dir_entry.xattrs[key] = value
-
- @synchronize
- def getxattr(self, path, key, default=None):
-- key = unicode(key)
-+ key = str(key)
- dir_entry = self._dir_entry(path)
- return dir_entry.xattrs.get(key, default)
-
-@@ -691,4 +691,4 @@ class MemoryFS(FS):
- @synchronize
- def listxattrs(self, path):
- dir_entry = self._dir_entry(path)
-- return dir_entry.xattrs.keys()
-+ return list(dir_entry.xattrs.keys())
---- fs/mountfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/mountfs.py
-@@ -61,7 +61,7 @@ class DirMount(object):
- return "<DirMount %s, %s>" % (self.path, self.fs)
-
- def __unicode__(self):
-- return u"<DirMount %s, %s>" % (self.path, self.fs)
-+ return "<DirMount %s, %s>" % (self.path, self.fs)
-
-
- class FileMount(object):
-@@ -90,12 +90,12 @@ class MountFS(FS):
- self.mount_tree = PathMap()
-
- def __str__(self):
-- return "<%s [%s]>" % (self.__class__.__name__,self.mount_tree.items(),)
-+ return "<%s [%s]>" % (self.__class__.__name__,list(self.mount_tree.items()),)
-
- __repr__ = __str__
-
- def __unicode__(self):
-- return u"<%s [%s]>" % (self.__class__.__name__,self.mount_tree.items(),)
-+ return "<%s [%s]>" % (self.__class__.__name__,list(self.mount_tree.items()),)
-
- def _delegate(self, path):
- path = abspath(normpath(path))
-@@ -119,7 +119,7 @@ class MountFS(FS):
- return self, "/", path
-
- try:
-- self.mount_tree.iternames(path).next()
-+ next(self.mount_tree.iternames(path))
- except StopIteration:
- return None, None, None
- else:
-@@ -129,7 +129,7 @@ class MountFS(FS):
- def close(self):
- # Explicitly closes children if requested
- if self.auto_close:
-- for mount in self.mount_tree.itervalues():
-+ for mount in self.mount_tree.values():
- mount.fs.close()
- # Free references (which may incidently call the close method of the child filesystems)
- self.mount_tree.clear()
---- fs/multifs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/multifs.py
-@@ -106,7 +106,7 @@ class MultiFS(FS):
-
- @synchronize
- def __unicode__(self):
-- return u"<MultiFS: %s>" % ", ".join(unicode(fs) for fs in self.fs_sequence)
-+ return "<MultiFS: %s>" % ", ".join(str(fs) for fs in self.fs_sequence)
-
- def _get_priority(self, name):
- return self.fs_priorities[name]
-@@ -128,7 +128,7 @@ class MultiFS(FS):
-
- def _priority_sort(self):
- """Sort filesystems by priority order"""
-- priority_order = sorted(self.fs_lookup.keys(), key=lambda n: self.fs_priorities[n], reverse=True)
-+ priority_order = sorted(list(self.fs_lookup.keys()), key=lambda n: self.fs_priorities[n], reverse=True)
- self.fs_sequence = [self.fs_lookup[name] for name in priority_order]
-
- @synchronize
-@@ -214,7 +214,7 @@ class MultiFS(FS):
- return self.writefs
- for fs in self:
- if fs.exists(path):
-- for fs_name, fs_object in self.fs_lookup.iteritems():
-+ for fs_name, fs_object in self.fs_lookup.items():
- if fs is fs_object:
- return fs_name, fs
- raise ResourceNotFoundError(path, msg="Path does not map to any filesystem: %(path)s")
---- fs/opener.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/opener.py
-@@ -72,7 +72,7 @@ from fs.filelike import FileWrapper
- from os import getcwd
- import os.path
- import re
--from urlparse import urlparse
-+from urllib.parse import urlparse
-
- class OpenerError(Exception):
- """The base exception thrown by openers"""
-@@ -794,7 +794,7 @@ example:
- def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
-
- from fs.mountfs import MountFS
-- from ConfigParser import ConfigParser
-+ from configparser import ConfigParser
- cfg = ConfigParser()
-
- if '#' in fs_path:
-@@ -830,7 +830,7 @@ example:
- def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
-
- from fs.multifs import MultiFS
-- from ConfigParser import ConfigParser
-+ from configparser import ConfigParser
- cfg = ConfigParser()
-
- if '#' in fs_path:
---- fs/osfs/__init__.py.orig 2015-11-13 22:18:37 UTC
-+++ fs/osfs/__init__.py
-@@ -48,13 +48,13 @@ def _os_stat(path):
-
-
- @convert_os_errors
--def _os_mkdir(name, mode=0777):
-+def _os_mkdir(name, mode=0o777):
- """Replacement for os.mkdir that raises FSError subclasses."""
- return os.mkdir(name, mode)
-
-
- @convert_os_errors
--def _os_makedirs(name, mode=0777):
-+def _os_makedirs(name, mode=0o777):
- """Replacement for os.makdirs that raises FSError subclasses.
-
- This implementation also correctly handles win32 long filenames (those
-@@ -71,7 +71,7 @@ def _os_makedirs(name, mode=0777):
- if head and tail and not os.path.exists(head):
- try:
- _os_makedirs(head, mode)
-- except OSError, e:
-+ except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- if tail == os.curdir:
-@@ -98,11 +98,11 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
- 'atomic.setcontents': False}
-
- if platform.system() == 'Windows':
-- _meta["invalid_path_chars"] = ''.join(chr(n) for n in xrange(31)) + '\\:*?"<>|'
-+ _meta["invalid_path_chars"] = ''.join(chr(n) for n in range(31)) + '\\:*?"<>|'
- else:
- _meta["invalid_path_chars"] = '\0'
-
-- def __init__(self, root_path, thread_synchronize=_thread_synchronize_default, encoding=None, create=False, dir_mode=0700, use_long_paths=True):
-+ def __init__(self, root_path, thread_synchronize=_thread_synchronize_default, encoding=None, create=False, dir_mode=0o700, use_long_paths=True):
- """
- Creates an FS object that represents the OS Filesystem under a given root path
-
-@@ -124,13 +124,13 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
- if sys.platform == "win32":
- if use_long_paths and not root_path.startswith("\\\\?\\"):
- if not root_path.startswith("\\"):
-- root_path = u"\\\\?\\" + root_path
-+ root_path = "\\\\?\\" + root_path
- else:
- # Explicitly mark UNC paths, seems to work better.
- if root_path.startswith("\\\\"):
-- root_path = u"\\\\?\\UNC\\" + root_path[2:]
-+ root_path = "\\\\?\\UNC\\" + root_path[2:]
- else:
-- root_path = u"\\\\?" + root_path
-+ root_path = "\\\\?" + root_path
- # If it points at the root of a drive, it needs a trailing slash.
- if len(root_path) == 6 and not root_path.endswith("\\"):
- root_path = root_path + "\\"
-@@ -155,16 +155,16 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
- return "<OSFS: %r>" % self.root_path
-
- def __unicode__(self):
-- return u"<OSFS: %s>" % self.root_path
-+ return "<OSFS: %s>" % self.root_path
-
- def _decode_path(self, p):
-- if isinstance(p, unicode):
-+ if isinstance(p, str):
- return p
- return p.decode(self.encoding, 'replace')
-
- def getsyspath(self, path, allow_none=False):
- self.validatepath(path)
-- path = relpath(normpath(path)).replace(u"/", os.sep)
-+ path = relpath(normpath(path)).replace("/", os.sep)
- path = os.path.join(self.root_path, path)
- if not path.startswith(self.root_path):
- raise PathError(path, msg="OSFS given path outside root: %(path)s")
-@@ -234,7 +234,7 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
- encoding = encoding or 'utf-8'
- try:
- return io.open(sys_path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline)
-- except EnvironmentError, e:
-+ except EnvironmentError as e:
- # Win32 gives EACCES when opening a directory.
- if sys.platform == "win32" and e.errno in (errno.EACCES,):
- if self.isdir(path):
-@@ -301,7 +301,7 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
- sys_path = self.getsyspath(path)
- try:
- os.remove(sys_path)
-- except OSError, e:
-+ except OSError as e:
- if e.errno == errno.EACCES and sys.platform == "win32":
- # sometimes windows says this for attempts to remove a dir
- if os.path.isdir(sys_path):
-@@ -338,7 +338,7 @@ class OSFS(OSFSXAttrMixin, OSFSWatchMixin, FS):
- path_dst = self.getsyspath(dst)
- try:
- os.rename(path_src, path_dst)
-- except OSError, e:
-+ except OSError as e:
- if e.errno:
- # POSIX rename() can rename over an empty directory but gives
- # ENOTEMPTY if the dir has contents. Raise UnsupportedError
---- fs/osfs/watch_inotify.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/osfs/watch_inotify.py
-@@ -18,7 +18,7 @@ from fs.watch import *
-
- try:
- import pyinotify
--except Exception, e:
-+except Exception as e:
- # pyinotify sometimes raises its own custom errors on import.
- # How on earth are we supposed to catch them when we can't import them?
- if isinstance(e,ImportError):
-@@ -39,7 +39,7 @@ class OSFSWatchMixin(WatchableFSMixin):
- def close(self):
- super(OSFSWatchMixin,self).close()
- self.notify_watchers(CLOSED)
-- for watcher_list in self._watchers.values():
-+ for watcher_list in list(self._watchers.values()):
- for watcher in watcher_list:
- self.del_watcher(watcher)
- self.__watch_lock.acquire()
-@@ -58,7 +58,7 @@ class OSFSWatchMixin(WatchableFSMixin):
- w = super_add_watcher(callback,path,events,recursive)
- w._pyinotify_id = None
- syspath = self.getsyspath(path)
-- if isinstance(syspath,unicode):
-+ if isinstance(syspath,str):
- syspath = syspath.encode(sys.getfilesystemencoding())
- # Each watch gets its own WatchManager, since it's tricky to make
- # a single WatchManager handle multiple callbacks with different
-@@ -73,7 +73,7 @@ class OSFSWatchMixin(WatchableFSMixin):
- kwds = dict(rec=recursive,auto_add=recursive,quiet=False)
- try:
- wids = wm.add_watch(syspath,evtmask,process_events,**kwds)
-- except pyinotify.WatchManagerError, e:
-+ except pyinotify.WatchManagerError as e:
- raise OperationFailedError("add_watcher",details=e)
- w._pyinotify_id = wids[syspath]
- self.__watch_lock.acquire()
-@@ -239,7 +239,7 @@ class SharedThreadedNotifier(threading.Thread):
- while self.running:
- try:
- ready_fds = self._poller.poll()
-- except _select_error, e:
-+ except _select_error as e:
- if e[0] != errno.EINTR:
- raise
- else:
---- fs/osfs/watch_win32.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/osfs/watch_win32.py
-@@ -10,7 +10,7 @@ import os
- import sys
- import errno
- import threading
--import Queue
-+import queue
- import stat
- import struct
- import ctypes
-@@ -226,7 +226,7 @@ class WatchedDirectory(object):
- ctypes.byref(self.result),len(self.result),
- self.recursive,self.flags,None,
- overlapped,None)
-- except WindowsError, e:
-+ except WindowsError as e:
- self.error = e
- self.close()
-
-@@ -262,7 +262,7 @@ class WatchThread(threading.Thread):
- self.watched_directories = {}
- self.ready = threading.Event()
- self._iocp = None
-- self._new_watches = Queue.Queue()
-+ self._new_watches = queue.Queue()
-
- def close(self):
- if not self.closed:
-@@ -383,11 +383,11 @@ class WatchThread(threading.Thread):
- hash(w),0)
- w.post()
- w.ready.set()
-- except Queue.Empty:
-+ except queue.Empty:
- pass
- finally:
- self.ready.set()
-- for w in self.watched_directories.itervalues():
-+ for w in self.watched_directories.values():
- w.close()
- if self._iocp:
- CloseHandle(self._iocp)
---- fs/osfs/xattrs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/osfs/xattrs.py
-@@ -49,7 +49,7 @@ if xattr is not None:
-
- @convert_os_errors
- def listxattrs(self, path):
-- return xattr.xattr(self.getsyspath(path)).keys()
-+ return list(xattr.xattr(self.getsyspath(path)).keys())
-
- else:
-
---- fs/path.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/path.py
-@@ -44,7 +44,7 @@ def normpath(path):
- if not _requires_normalization(path):
- return path.rstrip('/')
-
-- prefix = u'/' if path.startswith('/') else u''
-+ prefix = '/' if path.startswith('/') else ''
- components = []
- append = components.append
- special = ('..', '.', '').__contains__
-@@ -60,7 +60,7 @@ def normpath(path):
- # causing a circular import.
- from fs.errors import BackReferenceError
- raise BackReferenceError('Too many backrefs in \'%s\'' % path)
-- return prefix + u'/'.join(components)
-+ return prefix + '/'.join(components)
-
-
- if os.sep != '/':
-@@ -100,11 +100,11 @@ def recursepath(path, reverse=False):
- """
-
- if path in ('', '/'):
-- return [u'/']
-+ return ['/']
-
- path = abspath(normpath(path)) + '/'
-
-- paths = [u'/']
-+ paths = ['/']
- find = path.find
- append = paths.append
- pos = 1
-@@ -133,7 +133,7 @@ def abspath(path):
-
- """
- if not path.startswith('/'):
-- return u'/' + path
-+ return '/' + path
- return path
-
-
-@@ -176,7 +176,7 @@ def pathjoin(*paths):
- absolute = True
- relpaths.append(p)
-
-- path = normpath(u"/".join(relpaths))
-+ path = normpath("/".join(relpaths))
- if absolute:
- path = abspath(path)
- return path
-@@ -419,7 +419,7 @@ def relativefrom(base, path):
- break
- common += 1
-
-- return u'/'.join([u'..'] * (len(base) - common) + path[common:])
-+ return '/'.join(['..'] * (len(base) - common) + path[common:])
-
-
- class PathMap(object):
-@@ -559,7 +559,7 @@ class PathMap(object):
- m = m[name]
- except KeyError:
- return
-- for (nm, subm) in m.iteritems():
-+ for (nm, subm) in m.items():
- if not nm:
- yield abspath(root)
- else:
-@@ -568,7 +568,7 @@ class PathMap(object):
- yield subk
-
- def __iter__(self):
-- return self.iterkeys()
-+ return iter(self.keys())
-
- def keys(self,root="/"):
- return list(self.iterkeys(root))
-@@ -583,7 +583,7 @@ class PathMap(object):
- m = m[name]
- except KeyError:
- return
-- for (nm, subm) in m.iteritems():
-+ for (nm, subm) in m.items():
- if not nm:
- yield subm
- else:
-@@ -604,7 +604,7 @@ class PathMap(object):
- m = m[name]
- except KeyError:
- return
-- for (nm, subm) in m.iteritems():
-+ for (nm, subm) in m.items():
- if not nm:
- yield (abspath(normpath(root)), subm)
- else:
-@@ -627,7 +627,7 @@ class PathMap(object):
- m = m[name]
- except KeyError:
- return
-- for (nm, subm) in m.iteritems():
-+ for (nm, subm) in m.items():
- if nm and subm:
- yield nm
-
-@@ -651,9 +651,9 @@ def iswildcard(path):
- return not _wild_chars.isdisjoint(path)
-
- if __name__ == "__main__":
-- print recursepath('a/b/c')
-+ print(recursepath('a/b/c'))
-
-- print relativefrom('/', '/foo')
-- print relativefrom('/foo/bar', '/foo/baz')
-- print relativefrom('/foo/bar/baz', '/foo/egg')
-- print relativefrom('/foo/bar/baz/egg', '/foo/egg')
-+ print(relativefrom('/', '/foo'))
-+ print(relativefrom('/foo/bar', '/foo/baz'))
-+ print(relativefrom('/foo/bar/baz', '/foo/egg'))
-+ print(relativefrom('/foo/bar/baz/egg', '/foo/egg'))
---- fs/remote.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/remote.py
-@@ -20,8 +20,8 @@ FS subclasses interfacing with a remote filesystem. T
-
- """
-
--from __future__ import with_statement
-
-+
- import time
- import stat as statinfo
- from errno import EINVAL
-@@ -422,11 +422,11 @@ class CachedInfo(object):
- self.has_full_children = other.has_full_children
- @classmethod
- def new_file_stub(cls):
-- info = {"info" : 0700 | statinfo.S_IFREG}
-+ info = {"info" : 0o700 | statinfo.S_IFREG}
- return cls(info,has_full_info=False)
- @classmethod
- def new_dir_stub(cls):
-- info = {"info" : 0700 | statinfo.S_IFDIR}
-+ info = {"info" : 0o700 | statinfo.S_IFDIR}
- return cls(info,has_full_info=False)
-
-
-@@ -512,7 +512,7 @@ class CacheFSMixin(FS):
- if self.max_cache_size is not None and old_ci is None:
- while self.__cache_size >= self.max_cache_size:
- try:
-- to_del = iter(self.__cache).next()
-+ to_del = next(iter(self.__cache))
- except StopIteration:
- break
- else:
-@@ -592,7 +592,7 @@ class CacheFSMixin(FS):
-
- def isdir(self, path):
- try:
-- self.__cache.iternames(path).next()
-+ next(self.__cache.iternames(path))
- return True
- except StopIteration:
- pass
-@@ -607,7 +607,7 @@ class CacheFSMixin(FS):
-
- def isfile(self, path):
- try:
-- self.__cache.iternames(path).next()
-+ next(self.__cache.iternames(path))
- return False
- except StopIteration:
- pass
---- fs/remotefs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/remotefs.py
-@@ -1,5 +1,5 @@
- # Work in Progress - Do not use
--from __future__ import with_statement
-+
- from fs.base import FS
- from fs.expose.serve import packetstream
-
-@@ -7,7 +7,7 @@ from collections import defaultdict
- import threading
- from threading import Lock, RLock
- from json import dumps
--import Queue as queue
-+import queue as queue
- import socket
-
- from six import b
-@@ -35,12 +35,12 @@ class PacketHandler(threading.Thread):
- while True:
- data = read(1024*16)
- if not data:
-- print "No data"
-+ print("No data")
- break
-- print "data", repr(data)
-+ print("data", repr(data))
- for header, payload in decoder.feed(data):
-- print repr(header)
-- print repr(payload)
-+ print(repr(header))
-+ print(repr(payload))
- on_packet(header, payload)
-
- def _new_call_id(self):
-@@ -77,8 +77,8 @@ class PacketHandler(threading.Thread):
-
- while True:
- header, payload = queue.get()
-- print repr(header)
-- print repr(payload)
-+ print(repr(header))
-+ print(repr(payload))
- if client_ref is not None and header.get('client_ref') != client_ref:
- continue
- break
-@@ -167,9 +167,9 @@ class RemoteFS(FS):
- def ping(self, msg):
- call_id = self.packet_handler.send_packet({'type':'rpc', 'method':'ping'}, msg)
- header, payload = self.packet_handler.get_packet(call_id)
-- print "PING"
-- print header
-- print payload
-+ print("PING")
-+ print(header)
-+ print(payload)
-
- def close(self):
- self.transport.close()
---- fs/rpcfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/rpcfs.py
-@@ -8,7 +8,7 @@ class from the :mod:`fs.expose.xmlrpc` module.
-
- """
-
--import xmlrpclib
-+import xmlrpc.client
- import socket
- import base64
-
-@@ -28,11 +28,11 @@ def re_raise_faults(func):
- def wrapper(*args, **kwds):
- try:
- return func(*args, **kwds)
-- except (xmlrpclib.Fault), f:
-+ except (xmlrpc.client.Fault) as f:
- #raise
- # Make sure it's in a form we can handle
-
-- print f.faultString
-+ print(f.faultString)
- bits = f.faultString.split(" ")
- if bits[0] not in ["<type", "<class"]:
- raise f
-@@ -41,7 +41,7 @@ def re_raise_faults(func):
- cls = bits[0]
- msg = ">:".join(bits[1:])
- cls = cls.strip('\'')
-- print "-" + cls
-+ print("-" + cls)
- cls = _object_by_name(cls)
- # Re-raise using the remainder of the fault code as message
- if cls:
-@@ -50,7 +50,7 @@ def re_raise_faults(func):
- else:
- raise cls(msg)
- raise f
-- except socket.error, e:
-+ except socket.error as e:
- raise RemoteConnectionError(str(e), details=e)
- return wrapper
-
-@@ -126,9 +126,9 @@ class RPCFS(FS):
- kwds = dict(allow_none=True, use_datetime=True)
-
- if self._transport is not None:
-- proxy = xmlrpclib.ServerProxy(self.uri, self._transport, **kwds)
-+ proxy = xmlrpc.client.ServerProxy(self.uri, self._transport, **kwds)
- else:
-- proxy = xmlrpclib.ServerProxy(self.uri, **kwds)
-+ proxy = xmlrpc.client.ServerProxy(self.uri, **kwds)
-
- return ReRaiseFaults(proxy)
-
-@@ -170,7 +170,7 @@ class RPCFS(FS):
- meta = self.proxy.getmeta(meta_name)
- else:
- meta = self.proxy.getmeta_default(meta_name, default)
-- if isinstance(meta, basestring):
-+ if isinstance(meta, str):
- # To allow transport of meta with invalid xml chars (like null)
- meta = self.encode_path(meta)
- return meta
-@@ -185,7 +185,7 @@ class RPCFS(FS):
- # TODO: chunked transport of large files
- epath = self.encode_path(path)
- if "w" in mode:
-- self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
-+ self.proxy.set_contents(epath, xmlrpc.client.Binary(b("")))
- if "r" in mode or "a" in mode or "+" in mode:
- try:
- data = self.proxy.get_contents(epath, "rb").data
-@@ -194,7 +194,7 @@ class RPCFS(FS):
- raise ResourceNotFoundError(path)
- if not self.isdir(dirname(path)):
- raise ParentDirectoryMissingError(path)
-- self.proxy.set_contents(path, xmlrpclib.Binary(b("")))
-+ self.proxy.set_contents(path, xmlrpc.client.Binary(b("")))
- else:
- data = b("")
- f = StringIO(data)
-@@ -210,7 +210,7 @@ class RPCFS(FS):
- self._lock.acquire()
- try:
- oldflush()
-- self.proxy.set_contents(epath, xmlrpclib.Binary(f.getvalue()))
-+ self.proxy.set_contents(epath, xmlrpc.client.Binary(f.getvalue()))
- finally:
- self._lock.release()
-
---- fs/s3fs.py.orig 2015-11-13 16:37:26 UTC
-+++ fs/s3fs.py
-@@ -41,7 +41,7 @@ else:
- try:
- return self._map[(threading.currentThread(),attr)]
- except KeyError:
-- raise AttributeError, attr
-+ raise AttributeError(attr)
- def __setattr__(self,attr,value):
- self._map[(threading.currentThread(),attr)] = value
-
-@@ -106,7 +106,7 @@ class S3FS(FS):
- prefix = prefix[1:]
- if not prefix.endswith(separator) and prefix != "":
- prefix = prefix + separator
-- if isinstance(prefix,unicode):
-+ if isinstance(prefix,str):
- prefix = prefix.encode("utf8")
- if aws_access_key is None:
- if "AWS_ACCESS_KEY_ID" not in os.environ:
-@@ -149,7 +149,7 @@ class S3FS(FS):
- b.get_key(self._prefix)
- else:
- b = self._s3conn.get_bucket(self._bucket_name, validate=1)
-- except S3ResponseError, e:
-+ except S3ResponseError as e:
- if "404 Not Found" not in str(e):
- raise
- b = self._s3conn.create_bucket(self._bucket_name)
-@@ -179,7 +179,7 @@ class S3FS(FS):
- s3path = self._prefix + path
- if s3path and s3path[-1] == self._separator:
- s3path = s3path[:-1]
-- if isinstance(s3path,unicode):
-+ if isinstance(s3path,str):
- s3path = s3path.encode("utf8")
- return s3path
-
-@@ -220,9 +220,9 @@ class S3FS(FS):
-
- def _sync_set_contents(self,key,contents):
- """Synchronously set the contents of a key."""
-- if isinstance(key,basestring):
-+ if isinstance(key,str):
- key = self._s3bukt.new_key(key)
-- if isinstance(contents,basestring):
-+ if isinstance(contents,str):
- key.set_contents_from_string(contents)
- elif hasattr(contents,"md5"):
- hexmd5 = contents.md5
-@@ -338,7 +338,7 @@ class S3FS(FS):
- # the directory itself, which other tools may not create.
- ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
- try:
-- iter(ks).next()
-+ next(iter(ks))
- except StopIteration:
- return False
- else:
-@@ -398,7 +398,7 @@ class S3FS(FS):
- # Skip over the entry for the directory itself, if it exists
- name = self._uns3path(k.name,s3path)
- if name != "":
-- if not isinstance(name,unicode):
-+ if not isinstance(name,str):
- name = name.decode("utf8")
- if name.endswith(self._separator):
- name = name[:-1]
-@@ -572,14 +572,14 @@ class S3FS(FS):
- else:
- info["name"] = basename(self._uns3key(k.name))
- if self._key_is_dir(key):
-- info["st_mode"] = 0700 | statinfo.S_IFDIR
-+ info["st_mode"] = 0o700 | statinfo.S_IFDIR
- else:
-- info["st_mode"] = 0700 | statinfo.S_IFREG
-+ info["st_mode"] = 0o700 | statinfo.S_IFREG
- if hasattr(key,"size"):
- info['size'] = int(key.size)
- etag = getattr(key,"etag",None)
- if etag is not None:
-- if isinstance(etag,unicode):
-+ if isinstance(etag,str):
- etag = etag.encode("utf8")
- info['etag'] = etag.strip('"').strip("'")
- if hasattr(key,"last_modified"):
-@@ -632,7 +632,7 @@ class S3FS(FS):
- s3path_src = self._s3path(src)
- try:
- self._s3bukt.copy_key(s3path_dst,self._bucket_name,s3path_src)
-- except S3ResponseError, e:
-+ except S3ResponseError as e:
- if "404 Not Found" in str(e):
- msg = "Source is not a file: %(path)s"
- raise ResourceInvalidError(src, msg=msg)
-@@ -663,7 +663,7 @@ class S3FS(FS):
- for k in self._s3bukt.list(prefix=prefix):
- name = relpath(self._uns3path(k.name,prefix))
- if name != "":
-- if not isinstance(name,unicode):
-+ if not isinstance(name,str):
- name = name.decode("utf8")
- if not k.name.endswith(self._separator):
- if wildcard is not None:
-@@ -691,7 +691,7 @@ class S3FS(FS):
- for k in self._s3bukt.list(prefix=prefix):
- name = relpath(self._uns3path(k.name,prefix))
- if name != "":
-- if not isinstance(name,unicode):
-+ if not isinstance(name,str):
- name = name.decode("utf8")
- if wildcard is not None:
- if callable(wildcard):
-@@ -718,7 +718,7 @@ class S3FS(FS):
- for k in self._s3bukt.list(prefix=prefix):
- name = relpath(self._uns3path(k.name,prefix))
- if name != "":
-- if not isinstance(name,unicode):
-+ if not isinstance(name,str):
- name = name.decode("utf8")
- if not k.name.endswith(self._separator):
- if wildcard is not None:
-@@ -733,16 +733,16 @@ class S3FS(FS):
-
-
- def _eq_utf8(name1,name2):
-- if isinstance(name1,unicode):
-+ if isinstance(name1,str):
- name1 = name1.encode("utf8")
-- if isinstance(name2,unicode):
-+ if isinstance(name2,str):
- name2 = name2.encode("utf8")
- return name1 == name2
-
- def _startswith_utf8(name1,name2):
-- if isinstance(name1,unicode):
-+ if isinstance(name1,str):
- name1 = name1.encode("utf8")
-- if isinstance(name2,unicode):
-+ if isinstance(name2,str):
- name2 = name2.encode("utf8")
- return name1.startswith(name2)
-
---- fs/sftpfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/sftpfs.py
-@@ -131,7 +131,7 @@ class SFTPFS(FS):
- self._client = None
-
- self.hostname = None
-- if isinstance(connection, basestring):
-+ if isinstance(connection, str):
- self.hostname = connection
- elif isinstance(connection, tuple):
- self.hostname = '%s:%s' % connection
-@@ -183,7 +183,7 @@ class SFTPFS(FS):
- if not connection.is_authenticated():
- try:
- connection.auth_none(username)
-- except paramiko.BadAuthenticationType, e:
-+ except paramiko.BadAuthenticationType as e:
- self.close()
- allowed = ', '.join(e.allowed_types)
- raise RemoteConnectionError(msg='no auth - server requires one of the following: %s' % allowed, details=e)
-@@ -192,14 +192,14 @@ class SFTPFS(FS):
- self.close()
- raise RemoteConnectionError(msg='no auth')
-
-- except paramiko.SSHException, e:
-+ except paramiko.SSHException as e:
- self.close()
- raise RemoteConnectionError(msg='SSH exception (%s)' % str(e), details=e)
-
- self._transport = connection
-
- def __unicode__(self):
-- return u'<SFTPFS: %s>' % self.desc('/')
-+ return '<SFTPFS: %s>' % self.desc('/')
-
- @classmethod
- def _agent_auth(cls, transport, username):
-@@ -307,7 +307,7 @@ class SFTPFS(FS):
- self.closed = True
-
- def _normpath(self, path):
-- if not isinstance(path, unicode):
-+ if not isinstance(path, str):
- path = path.decode(self.encoding)
- npath = pathjoin(self.root_path, relpath(normpath(path)))
- if not isprefix(self.root_path, npath):
-@@ -355,10 +355,10 @@ class SFTPFS(FS):
- def desc(self, path):
- npath = self._normpath(path)
- if self.hostname:
-- return u'sftp://%s%s' % (self.hostname, path)
-+ return 'sftp://%s%s' % (self.hostname, path)
- else:
- addr, port = self._transport.getpeername()
-- return u'sftp://%s:%i%s' % (addr, port, self.client.normalize(npath))
-+ return 'sftp://%s:%i%s' % (addr, port, self.client.normalize(npath))
-
- @synchronize
- @convert_os_errors
-@@ -368,7 +368,7 @@ class SFTPFS(FS):
- npath = self._normpath(path)
- try:
- self.client.stat(npath)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- return False
- raise
-@@ -382,7 +382,7 @@ class SFTPFS(FS):
- npath = self._normpath(path)
- try:
- stat = self.client.stat(npath)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- return False
- raise
-@@ -394,7 +394,7 @@ class SFTPFS(FS):
- npath = self._normpath(path)
- try:
- stat = self.client.stat(npath)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- return False
- raise
-@@ -409,10 +409,10 @@ class SFTPFS(FS):
- if dirs_only or files_only:
- attrs = self.client.listdir_attr(npath)
- attrs_map = dict((a.filename, a) for a in attrs)
-- paths = list(attrs_map.iterkeys())
-+ paths = list(attrs_map.keys())
- else:
- paths = self.client.listdir(npath)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- if self.isfile(path):
- raise ResourceInvalidError(path,msg="Can't list directory contents of a file: %(path)s")
-@@ -424,19 +424,19 @@ class SFTPFS(FS):
- if attrs_map:
- if dirs_only:
- filter_paths = []
-- for apath, attr in attrs_map.iteritems():
-+ for apath, attr in attrs_map.items():
- if isdir(self, path, attr.__dict__):
- filter_paths.append(apath)
- paths = filter_paths
- elif files_only:
- filter_paths = []
-- for apath, attr in attrs_map.iteritems():
-+ for apath, attr in attrs_map.items():
- if isfile(self, apath, attr.__dict__):
- filter_paths.append(apath)
- paths = filter_paths
-
- for (i,p) in enumerate(paths):
-- if not isinstance(p,unicode):
-+ if not isinstance(p,str):
- paths[i] = p.decode(self.encoding)
-
- return self._listdir_helper(path, paths, wildcard, full, absolute, False, False)
-@@ -448,8 +448,8 @@ class SFTPFS(FS):
- try:
- attrs = self.client.listdir_attr(npath)
- attrs_map = dict((a.filename, a) for a in attrs)
-- paths = attrs_map.keys()
-- except IOError, e:
-+ paths = list(attrs_map.keys())
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- if self.isfile(path):
- raise ResourceInvalidError(path,msg="Can't list directory contents of a file: %(path)s")
-@@ -460,19 +460,19 @@ class SFTPFS(FS):
-
- if dirs_only:
- filter_paths = []
-- for path, attr in attrs_map.iteritems():
-+ for path, attr in attrs_map.items():
- if isdir(self, path, attr.__dict__):
- filter_paths.append(path)
- paths = filter_paths
- elif files_only:
- filter_paths = []
-- for path, attr in attrs_map.iteritems():
-+ for path, attr in attrs_map.items():
- if isfile(self, path, attr.__dict__):
- filter_paths.append(path)
- paths = filter_paths
-
- for (i, p) in enumerate(paths):
-- if not isinstance(p, unicode):
-+ if not isinstance(p, str):
- paths[i] = p.decode(self.encoding)
-
- def getinfo(p):
-@@ -491,7 +491,7 @@ class SFTPFS(FS):
- npath = self._normpath(path)
- try:
- self.client.mkdir(npath)
-- except IOError, _e:
-+ except IOError as _e:
- # Error code is unreliable, try to figure out what went wrong
- try:
- stat = self.client.stat(npath)
-@@ -519,7 +519,7 @@ class SFTPFS(FS):
- npath = self._normpath(path)
- try:
- self.client.remove(npath)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- raise ResourceNotFoundError(path)
- elif self.isdir(path):
-@@ -542,7 +542,7 @@ class SFTPFS(FS):
- raise ResourceNotFoundError(path)
- try:
- self.client.rmdir(npath)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- if self.isfile(path):
- raise ResourceInvalidError(path,msg="Can't use removedir() on a file: %(path)s")
-@@ -565,7 +565,7 @@ class SFTPFS(FS):
- ndst = self._normpath(dst)
- try:
- self.client.rename(nsrc,ndst)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- raise ResourceNotFoundError(src)
- if not self.isdir(dirname(dst)):
-@@ -581,7 +581,7 @@ class SFTPFS(FS):
- self.remove(dst)
- try:
- self.client.rename(nsrc,ndst)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- raise ResourceNotFoundError(src)
- if self.exists(dst):
-@@ -599,7 +599,7 @@ class SFTPFS(FS):
- self.removedir(dst)
- try:
- self.client.rename(nsrc,ndst)
-- except IOError, e:
-+ except IOError as e:
- if getattr(e,"errno",None) == ENOENT:
- raise ResourceNotFoundError(src)
- if self.exists(dst):
-@@ -612,7 +612,7 @@ class SFTPFS(FS):
- @classmethod
- def _extract_info(cls, stats):
- fromtimestamp = datetime.datetime.fromtimestamp
-- info = dict((k, v) for k, v in stats.iteritems() if k in cls._info_vars and not k.startswith('_'))
-+ info = dict((k, v) for k, v in stats.items() if k in cls._info_vars and not k.startswith('_'))
- info['size'] = info['st_size']
- ct = info.get('st_ctime')
- if ct is not None:
---- fs/tempfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/tempfs.py
-@@ -29,7 +29,7 @@ class TempFS(OSFS):
- _meta['atomic.move'] = True
- _meta['atomic.copy'] = True
-
-- def __init__(self, identifier=None, temp_dir=None, dir_mode=0700, thread_synchronize=_thread_synchronize_default):
-+ def __init__(self, identifier=None, temp_dir=None, dir_mode=0o700, thread_synchronize=_thread_synchronize_default):
- """Creates a temporary Filesystem
-
- identifier -- A string that is included in the name of the temporary directory,
-@@ -49,7 +49,7 @@ class TempFS(OSFS):
- __str__ = __repr__
-
- def __unicode__(self):
-- return u'<TempFS: %s>' % self._temp_dir
-+ return '<TempFS: %s>' % self._temp_dir
-
- def __getstate__(self):
- # If we are picking a TempFS, we want to preserve its contents,
---- fs/tests/__init__.py.orig 2015-11-13 22:33:26 UTC
-+++ fs/tests/__init__.py
-@@ -5,8 +5,8 @@
-
- """
-
--from __future__ import with_statement
-
-+
- # Send any output from the logging module to stdout, so it will
- # be captured by nose and reported appropriately
- import sys
-@@ -61,7 +61,7 @@ class FSTestCases(object):
- self.assertEqual(self.fs.validatepath('.foo'), None)
- self.assertEqual(self.fs.validatepath('foo'), None)
- self.assertEqual(self.fs.validatepath('foo/bar'), None)
-- self.assert_(self.fs.isvalidpath('foo/bar'))
-+ self.assertTrue(self.fs.isvalidpath('foo/bar'))
-
- def test_tree(self):
- """Test tree print"""
-@@ -79,8 +79,8 @@ class FSTestCases(object):
- stupid_meta = 'thismetashouldnotexist!"r$$%^&&*()_+'
- self.assertRaises(NoMetaError, self.fs.getmeta, stupid_meta)
- self.assertFalse(self.fs.hasmeta(stupid_meta))
-- self.assertEquals(None, self.fs.getmeta(stupid_meta, None))
-- self.assertEquals(3.14, self.fs.getmeta(stupid_meta, 3.14))
-+ self.assertEqual(None, self.fs.getmeta(stupid_meta, None))
-+ self.assertEqual(3.14, self.fs.getmeta(stupid_meta, 3.14))
- for meta_name in meta_names:
- try:
- meta = self.fs.getmeta(meta_name)
-@@ -101,15 +101,15 @@ class FSTestCases(object):
- except NoSysPathError:
- pass
- else:
-- self.assertTrue(isinstance(syspath, unicode))
-+ self.assertTrue(isinstance(syspath, str))
- syspath = self.fs.getsyspath("/", allow_none=True)
- if syspath is not None:
-- self.assertTrue(isinstance(syspath, unicode))
-+ self.assertTrue(isinstance(syspath, str))
-
- def test_debug(self):
- str(self.fs)
- repr(self.fs)
-- self.assert_(hasattr(self.fs, 'desc'))
-+ self.assertTrue(hasattr(self.fs, 'desc'))
-
- def test_open_on_directory(self):
- self.fs.makedir("testdir")
-@@ -132,20 +132,20 @@ class FSTestCases(object):
- f.close()
- self.assertTrue(self.check("test1.txt"))
- f = self.fs.open("test1.txt", "rb")
-- self.assertEquals(f.read(), b("testing"))
-+ self.assertEqual(f.read(), b("testing"))
- f.close()
- f = self.fs.open("test1.txt", "wb")
- f.write(b("test file overwrite"))
- f.close()
- self.assertTrue(self.check("test1.txt"))
- f = self.fs.open("test1.txt", "rb")
-- self.assertEquals(f.read(), b("test file overwrite"))
-+ self.assertEqual(f.read(), b("test file overwrite"))
- f.close()
-
- def test_createfile(self):
- test = b('now with content')
- self.fs.createfile("test.txt")
-- self.assert_(self.fs.exists("test.txt"))
-+ self.assertTrue(self.fs.exists("test.txt"))
- self.assertEqual(self.fs.getcontents("test.txt", "rb"), b(''))
- self.fs.setcontents("test.txt", test)
- self.fs.createfile("test.txt")
-@@ -163,36 +163,36 @@ class FSTestCases(object):
- def test_setcontents(self):
- # setcontents() should accept both a string...
- self.fs.setcontents("hello", b("world"))
-- self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
-+ self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
- # ...and a file-like object
- self.fs.setcontents("hello", StringIO(b("to you, good sir!")))
-- self.assertEquals(self.fs.getcontents(
-+ self.assertEqual(self.fs.getcontents(
- "hello", "rb"), b("to you, good sir!"))
- # setcontents() should accept both a string...
- self.fs.setcontents("hello", b("world"), chunk_size=2)
-- self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
-+ self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
- # ...and a file-like object
- self.fs.setcontents("hello", StringIO(
- b("to you, good sir!")), chunk_size=2)
-- self.assertEquals(self.fs.getcontents(
-+ self.assertEqual(self.fs.getcontents(
- "hello", "rb"), b("to you, good sir!"))
- self.fs.setcontents("hello", b(""))
-- self.assertEquals(self.fs.getcontents("hello", "rb"), b(""))
-+ self.assertEqual(self.fs.getcontents("hello", "rb"), b(""))
-
- def test_setcontents_async(self):
- # setcontents() should accept both a string...
- self.fs.setcontents_async("hello", b("world")).wait()
-- self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
-+ self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
- # ...and a file-like object
- self.fs.setcontents_async("hello", StringIO(
- b("to you, good sir!"))).wait()
-- self.assertEquals(self.fs.getcontents("hello"), b("to you, good sir!"))
-+ self.assertEqual(self.fs.getcontents("hello"), b("to you, good sir!"))
- self.fs.setcontents_async("hello", b("world"), chunk_size=2).wait()
-- self.assertEquals(self.fs.getcontents("hello", "rb"), b("world"))
-+ self.assertEqual(self.fs.getcontents("hello", "rb"), b("world"))
- # ...and a file-like object
- self.fs.setcontents_async("hello", StringIO(
- b("to you, good sir!")), chunk_size=2).wait()
-- self.assertEquals(self.fs.getcontents(
-+ self.assertEqual(self.fs.getcontents(
- "hello", "rb"), b("to you, good sir!"))
-
- def test_isdir_isfile(self):
-@@ -214,19 +214,19 @@ class FSTestCases(object):
- def test_listdir(self):
- def check_unicode(items):
- for item in items:
-- self.assertTrue(isinstance(item, unicode))
-- self.fs.setcontents(u"a", b(''))
-+ self.assertTrue(isinstance(item, str))
-+ self.fs.setcontents("a", b(''))
- self.fs.setcontents("b", b(''))
- self.fs.setcontents("foo", b(''))
- self.fs.setcontents("bar", b(''))
- # Test listing of the root directory
- d1 = self.fs.listdir()
- self.assertEqual(len(d1), 4)
-- self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
-+ self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
- check_unicode(d1)
- d1 = self.fs.listdir("")
- self.assertEqual(len(d1), 4)
-- self.assertEqual(sorted(d1), [u"a", u"b", u"bar", u"foo"])
-+ self.assertEqual(sorted(d1), ["a", "b", "bar", "foo"])
- check_unicode(d1)
- d1 = self.fs.listdir("/")
- self.assertEqual(len(d1), 4)
-@@ -234,7 +234,7 @@ class FSTestCases(object):
- # Test listing absolute paths
- d2 = self.fs.listdir(absolute=True)
- self.assertEqual(len(d2), 4)
-- self.assertEqual(sorted(d2), [u"/a", u"/b", u"/bar", u"/foo"])
-+ self.assertEqual(sorted(d2), ["/a", "/b", "/bar", "/foo"])
- check_unicode(d2)
- # Create some deeper subdirectories, to make sure their
- # contents are not inadvertantly included
-@@ -248,25 +248,25 @@ class FSTestCases(object):
- dirs_only = self.fs.listdir(dirs_only=True)
- files_only = self.fs.listdir(files_only=True)
- contains_a = self.fs.listdir(wildcard="*a*")
-- self.assertEqual(sorted(dirs_only), [u"p", u"q"])
-- self.assertEqual(sorted(files_only), [u"a", u"b", u"bar", u"foo"])
-- self.assertEqual(sorted(contains_a), [u"a", u"bar"])
-+ self.assertEqual(sorted(dirs_only), ["p", "q"])
-+ self.assertEqual(sorted(files_only), ["a", "b", "bar", "foo"])
-+ self.assertEqual(sorted(contains_a), ["a", "bar"])
- check_unicode(dirs_only)
- check_unicode(files_only)
- check_unicode(contains_a)
- # Test listing a subdirectory
- d3 = self.fs.listdir("p/1/2/3")
- self.assertEqual(len(d3), 4)
-- self.assertEqual(sorted(d3), [u"a", u"b", u"bar", u"foo"])
-+ self.assertEqual(sorted(d3), ["a", "b", "bar", "foo"])
- check_unicode(d3)
- # Test listing a subdirectory with absoliute and full paths
- d4 = self.fs.listdir("p/1/2/3", absolute=True)
- self.assertEqual(len(d4), 4)
-- self.assertEqual(sorted(d4), [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"])
-+ self.assertEqual(sorted(d4), ["/p/1/2/3/a", "/p/1/2/3/b", "/p/1/2/3/bar", "/p/1/2/3/foo"])
- check_unicode(d4)
- d4 = self.fs.listdir("p/1/2/3", full=True)
- self.assertEqual(len(d4), 4)
-- self.assertEqual(sorted(d4), [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"])
-+ self.assertEqual(sorted(d4), ["p/1/2/3/a", "p/1/2/3/b", "p/1/2/3/bar", "p/1/2/3/foo"])
- check_unicode(d4)
- # Test that appropriate errors are raised
- self.assertRaises(ResourceNotFoundError, self.fs.listdir, "zebra")
-@@ -275,32 +275,32 @@ class FSTestCases(object):
- def test_listdirinfo(self):
- def check_unicode(items):
- for (nm, info) in items:
-- self.assertTrue(isinstance(nm, unicode))
-+ self.assertTrue(isinstance(nm, str))
-
- def check_equal(items, target):
- names = [nm for (nm, info) in items]
- self.assertEqual(sorted(names), sorted(target))
-- self.fs.setcontents(u"a", b(''))
-+ self.fs.setcontents("a", b(''))
- self.fs.setcontents("b", b(''))
- self.fs.setcontents("foo", b(''))
- self.fs.setcontents("bar", b(''))
- # Test listing of the root directory
- d1 = self.fs.listdirinfo()
- self.assertEqual(len(d1), 4)
-- check_equal(d1, [u"a", u"b", u"bar", u"foo"])
-+ check_equal(d1, ["a", "b", "bar", "foo"])
- check_unicode(d1)
- d1 = self.fs.listdirinfo("")
- self.assertEqual(len(d1), 4)
-- check_equal(d1, [u"a", u"b", u"bar", u"foo"])
-+ check_equal(d1, ["a", "b", "bar", "foo"])
- check_unicode(d1)
- d1 = self.fs.listdirinfo("/")
- self.assertEqual(len(d1), 4)
-- check_equal(d1, [u"a", u"b", u"bar", u"foo"])
-+ check_equal(d1, ["a", "b", "bar", "foo"])
- check_unicode(d1)
- # Test listing absolute paths
- d2 = self.fs.listdirinfo(absolute=True)
- self.assertEqual(len(d2), 4)
-- check_equal(d2, [u"/a", u"/b", u"/bar", u"/foo"])
-+ check_equal(d2, ["/a", "/b", "/bar", "/foo"])
- check_unicode(d2)
- # Create some deeper subdirectories, to make sure their
- # contents are not inadvertantly included
-@@ -314,25 +314,25 @@ class FSTestCases(object):
- dirs_only = self.fs.listdirinfo(dirs_only=True)
- files_only = self.fs.listdirinfo(files_only=True)
- contains_a = self.fs.listdirinfo(wildcard="*a*")
-- check_equal(dirs_only, [u"p", u"q"])
-- check_equal(files_only, [u"a", u"b", u"bar", u"foo"])
-- check_equal(contains_a, [u"a", u"bar"])
-+ check_equal(dirs_only, ["p", "q"])
-+ check_equal(files_only, ["a", "b", "bar", "foo"])
-+ check_equal(contains_a, ["a", "bar"])
- check_unicode(dirs_only)
- check_unicode(files_only)
- check_unicode(contains_a)
- # Test listing a subdirectory
- d3 = self.fs.listdirinfo("p/1/2/3")
- self.assertEqual(len(d3), 4)
-- check_equal(d3, [u"a", u"b", u"bar", u"foo"])
-+ check_equal(d3, ["a", "b", "bar", "foo"])
- check_unicode(d3)
- # Test listing a subdirectory with absoliute and full paths
- d4 = self.fs.listdirinfo("p/1/2/3", absolute=True)
- self.assertEqual(len(d4), 4)
-- check_equal(d4, [u"/p/1/2/3/a", u"/p/1/2/3/b", u"/p/1/2/3/bar", u"/p/1/2/3/foo"])
-+ check_equal(d4, ["/p/1/2/3/a", "/p/1/2/3/b", "/p/1/2/3/bar", "/p/1/2/3/foo"])
- check_unicode(d4)
- d4 = self.fs.listdirinfo("p/1/2/3", full=True)
- self.assertEqual(len(d4), 4)
-- check_equal(d4, [u"p/1/2/3/a", u"p/1/2/3/b", u"p/1/2/3/bar", u"p/1/2/3/foo"])
-+ check_equal(d4, ["p/1/2/3/a", "p/1/2/3/b", "p/1/2/3/bar", "p/1/2/3/foo"])
- check_unicode(d4)
- # Test that appropriate errors are raised
- self.assertRaises(ResourceNotFoundError, self.fs.listdirinfo, "zebra")
-@@ -343,7 +343,7 @@ class FSTestCases(object):
- self.fs.setcontents('b.txt', b('world'))
- self.fs.makeopendir('foo').setcontents('c', b('123'))
- sorted_walk = sorted([(d, sorted(fs)) for (d, fs) in self.fs.walk()])
-- self.assertEquals(sorted_walk,
-+ self.assertEqual(sorted_walk,
- [("/", ["a.txt", "b.txt"]),
- ("/foo", ["c"])])
- # When searching breadth-first, shallow entries come first
-@@ -371,10 +371,10 @@ class FSTestCases(object):
- self.fs.makeopendir('.svn').setcontents('ignored', b(''))
- for dir_path, paths in self.fs.walk(wildcard='*.txt'):
- for path in paths:
-- self.assert_(path.endswith('.txt'))
-+ self.assertTrue(path.endswith('.txt'))
- for dir_path, paths in self.fs.walk(wildcard=lambda fn: fn.endswith('.txt')):
- for path in paths:
-- self.assert_(path.endswith('.txt'))
-+ self.assertTrue(path.endswith('.txt'))
-
- def test_walk_dir_wildcard(self):
- self.fs.setcontents('a.txt', b('hello'))
-@@ -383,35 +383,35 @@ class FSTestCases(object):
- self.fs.makeopendir('.svn').setcontents('ignored', b(''))
- for dir_path, paths in self.fs.walk(dir_wildcard=lambda fn: not fn.endswith('.svn')):
- for path in paths:
-- self.assert_('.svn' not in path)
-+ self.assertTrue('.svn' not in path)
-
- def test_walkfiles(self):
- self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
- self.fs.makeopendir('foo').setcontents('b', b('123'))
-- self.assertEquals(sorted(
-+ self.assertEqual(sorted(
- self.fs.walkfiles()), ["/bar/a.txt", "/foo/b"])
-- self.assertEquals(sorted(self.fs.walkfiles(
-+ self.assertEqual(sorted(self.fs.walkfiles(
- dir_wildcard="*foo*")), ["/foo/b"])
-- self.assertEquals(sorted(self.fs.walkfiles(
-+ self.assertEqual(sorted(self.fs.walkfiles(
- wildcard="*.txt")), ["/bar/a.txt"])
-
- def test_walkdirs(self):
- self.fs.makeopendir('bar').setcontents('a.txt', b('123'))
- self.fs.makeopendir('foo').makeopendir(
- "baz").setcontents('b', b('123'))
-- self.assertEquals(sorted(self.fs.walkdirs()), [
-+ self.assertEqual(sorted(self.fs.walkdirs()), [
- "/", "/bar", "/foo", "/foo/baz"])
-- self.assertEquals(sorted(self.fs.walkdirs(
-+ self.assertEqual(sorted(self.fs.walkdirs(
- wildcard="*foo*")), ["/", "/foo", "/foo/baz"])
-
- def test_unicode(self):
-- alpha = u"\N{GREEK SMALL LETTER ALPHA}"
-- beta = u"\N{GREEK SMALL LETTER BETA}"
-+ alpha = "\N{GREEK SMALL LETTER ALPHA}"
-+ beta = "\N{GREEK SMALL LETTER BETA}"
- self.fs.makedir(alpha)
- self.fs.setcontents(alpha + "/a", b(''))
- self.fs.setcontents(alpha + "/" + beta, b(''))
- self.assertTrue(self.check(alpha))
-- self.assertEquals(sorted(self.fs.listdir(alpha)), ["a", beta])
-+ self.assertEqual(sorted(self.fs.listdir(alpha)), ["a", beta])
-
- def test_makedir(self):
- check = self.check
-@@ -420,11 +420,11 @@ class FSTestCases(object):
- self.assertRaises(
- ParentDirectoryMissingError, self.fs.makedir, "a/b/c")
- self.fs.makedir("a/b/c", recursive=True)
-- self.assert_(check("a/b/c"))
-+ self.assertTrue(check("a/b/c"))
- self.fs.makedir("foo/bar/baz", recursive=True)
-- self.assert_(check("foo/bar/baz"))
-+ self.assertTrue(check("foo/bar/baz"))
- self.fs.makedir("a/b/child")
-- self.assert_(check("a/b/child"))
-+ self.assertTrue(check("a/b/child"))
- self.assertRaises(DestinationExistsError, self.fs.makedir, "/a/b")
- self.fs.makedir("/a/b", allow_recreate=True)
- self.fs.setcontents("/a/file", b(''))
-@@ -446,30 +446,30 @@ class FSTestCases(object):
- def test_removedir(self):
- check = self.check
- self.fs.makedir("a")
-- self.assert_(check("a"))
-+ self.assertTrue(check("a"))
- self.fs.removedir("a")
- self.assertRaises(ResourceNotFoundError, self.fs.removedir, "a")
-- self.assert_(not check("a"))
-+ self.assertTrue(not check("a"))
- self.fs.makedir("a/b/c/d", recursive=True)
- self.assertRaises(DirectoryNotEmptyError, self.fs.removedir, "a/b")
- self.fs.removedir("a/b/c/d")
-- self.assert_(not check("a/b/c/d"))
-+ self.assertTrue(not check("a/b/c/d"))
- self.fs.removedir("a/b/c")
-- self.assert_(not check("a/b/c"))
-+ self.assertTrue(not check("a/b/c"))
- self.fs.removedir("a/b")
-- self.assert_(not check("a/b"))
-+ self.assertTrue(not check("a/b"))
- # Test recursive removal of empty parent dirs
- self.fs.makedir("foo/bar/baz", recursive=True)
- self.fs.removedir("foo/bar/baz", recursive=True)
-- self.assert_(not check("foo/bar/baz"))
-- self.assert_(not check("foo/bar"))
-- self.assert_(not check("foo"))
-+ self.assertTrue(not check("foo/bar/baz"))
-+ self.assertTrue(not check("foo/bar"))
-+ self.assertTrue(not check("foo"))
- self.fs.makedir("foo/bar/baz", recursive=True)
- self.fs.setcontents("foo/file.txt", b("please don't delete me"))
- self.fs.removedir("foo/bar/baz", recursive=True)
-- self.assert_(not check("foo/bar/baz"))
-- self.assert_(not check("foo/bar"))
-- self.assert_(check("foo/file.txt"))
-+ self.assertTrue(not check("foo/bar/baz"))
-+ self.assertTrue(not check("foo/bar"))
-+ self.assertTrue(check("foo/file.txt"))
- # Ensure that force=True works as expected
- self.fs.makedir("frollic/waggle", recursive=True)
- self.fs.setcontents("frollic/waddle.txt", b("waddlewaddlewaddle"))
-@@ -477,41 +477,41 @@ class FSTestCases(object):
- self.assertRaises(
- ResourceInvalidError, self.fs.removedir, "frollic/waddle.txt")
- self.fs.removedir("frollic", force=True)
-- self.assert_(not check("frollic"))
-+ self.assertTrue(not check("frollic"))
- # Test removing unicode dirs
-- kappa = u"\N{GREEK CAPITAL LETTER KAPPA}"
-+ kappa = "\N{GREEK CAPITAL LETTER KAPPA}"
- self.fs.makedir(kappa)
-- self.assert_(self.fs.isdir(kappa))
-+ self.assertTrue(self.fs.isdir(kappa))
- self.fs.removedir(kappa)
- self.assertRaises(ResourceNotFoundError, self.fs.removedir, kappa)
-- self.assert_(not self.fs.isdir(kappa))
-+ self.assertTrue(not self.fs.isdir(kappa))
- self.fs.makedir(pathjoin("test", kappa), recursive=True)
-- self.assert_(check(pathjoin("test", kappa)))
-+ self.assertTrue(check(pathjoin("test", kappa)))
- self.fs.removedir("test", force=True)
-- self.assert_(not check("test"))
-+ self.assertTrue(not check("test"))
-
- def test_rename(self):
- check = self.check
- # test renaming a file in the same directory
- self.fs.setcontents("foo.txt", b("Hello, World!"))
-- self.assert_(check("foo.txt"))
-+ self.assertTrue(check("foo.txt"))
- self.fs.rename("foo.txt", "bar.txt")
-- self.assert_(check("bar.txt"))
-- self.assert_(not check("foo.txt"))
-+ self.assertTrue(check("bar.txt"))
-+ self.assertTrue(not check("foo.txt"))
- # test renaming a directory in the same directory
- self.fs.makedir("dir_a")
- self.fs.setcontents("dir_a/test.txt", b("testerific"))
-- self.assert_(check("dir_a"))
-+ self.assertTrue(check("dir_a"))
- self.fs.rename("dir_a", "dir_b")
-- self.assert_(check("dir_b"))
-- self.assert_(check("dir_b/test.txt"))
-- self.assert_(not check("dir_a/test.txt"))
-- self.assert_(not check("dir_a"))
-+ self.assertTrue(check("dir_b"))
-+ self.assertTrue(check("dir_b/test.txt"))
-+ self.assertTrue(not check("dir_a/test.txt"))
-+ self.assertTrue(not check("dir_a"))
- # test renaming a file into a different directory
- self.fs.makedir("dir_a")
- self.fs.rename("dir_b/test.txt", "dir_a/test.txt")
-- self.assert_(not check("dir_b/test.txt"))
-- self.assert_(check("dir_a/test.txt"))
-+ self.assertTrue(not check("dir_b/test.txt"))
-+ self.assertTrue(check("dir_a/test.txt"))
- # test renaming a file into a non-existent directory
- self.assertRaises(ParentDirectoryMissingError,
- self.fs.rename, "dir_a/test.txt", "nonexistent/test.txt")
-@@ -530,7 +530,7 @@ class FSTestCases(object):
- test_str = b("Hello, World!")
- self.fs.setcontents("info.txt", test_str)
- info = self.fs.getinfo("info.txt")
-- for k, v in info.iteritems():
-+ for k, v in info.items():
- self.assertEqual(self.fs.getinfokeys('info.txt', k), {k: v})
-
- test_info = {}
-@@ -562,26 +562,26 @@ class FSTestCases(object):
-
- self.fs.makedir("foo/bar", recursive=True)
- makefile("foo/bar/a.txt")
-- self.assert_(check("foo/bar/a.txt"))
-- self.assert_(checkcontents("foo/bar/a.txt"))
-+ self.assertTrue(check("foo/bar/a.txt"))
-+ self.assertTrue(checkcontents("foo/bar/a.txt"))
- self.fs.move("foo/bar/a.txt", "foo/b.txt")
-- self.assert_(not check("foo/bar/a.txt"))
-- self.assert_(check("foo/b.txt"))
-- self.assert_(checkcontents("foo/b.txt"))
-+ self.assertTrue(not check("foo/bar/a.txt"))
-+ self.assertTrue(check("foo/b.txt"))
-+ self.assertTrue(checkcontents("foo/b.txt"))
-
- self.fs.move("foo/b.txt", "c.txt")
-- self.assert_(not check("foo/b.txt"))
-- self.assert_(check("/c.txt"))
-- self.assert_(checkcontents("/c.txt"))
-+ self.assertTrue(not check("foo/b.txt"))
-+ self.assertTrue(check("/c.txt"))
-+ self.assertTrue(checkcontents("/c.txt"))
-
- makefile("foo/bar/a.txt")
- self.assertRaises(
- DestinationExistsError, self.fs.move, "foo/bar/a.txt", "/c.txt")
-- self.assert_(check("foo/bar/a.txt"))
-- self.assert_(check("/c.txt"))
-+ self.assertTrue(check("foo/bar/a.txt"))
-+ self.assertTrue(check("/c.txt"))
- self.fs.move("foo/bar/a.txt", "/c.txt", overwrite=True)
-- self.assert_(not check("foo/bar/a.txt"))
-- self.assert_(check("/c.txt"))
-+ self.assertTrue(not check("foo/bar/a.txt"))
-+ self.assertTrue(check("/c.txt"))
-
- def test_movedir(self):
- check = self.check
-@@ -602,29 +602,29 @@ class FSTestCases(object):
-
- self.fs.movedir("a", "copy of a")
-
-- self.assert_(self.fs.isdir("copy of a"))
-- self.assert_(check("copy of a/1.txt"))
-- self.assert_(check("copy of a/2.txt"))
-- self.assert_(check("copy of a/3.txt"))
-- self.assert_(check("copy of a/foo/bar/baz.txt"))
-+ self.assertTrue(self.fs.isdir("copy of a"))
-+ self.assertTrue(check("copy of a/1.txt"))
-+ self.assertTrue(check("copy of a/2.txt"))
-+ self.assertTrue(check("copy of a/3.txt"))
-+ self.assertTrue(check("copy of a/foo/bar/baz.txt"))
-
-- self.assert_(not check("a/1.txt"))
-- self.assert_(not check("a/2.txt"))
-- self.assert_(not check("a/3.txt"))
-- self.assert_(not check("a/foo/bar/baz.txt"))
-- self.assert_(not check("a/foo/bar"))
-- self.assert_(not check("a/foo"))
-- self.assert_(not check("a"))
-+ self.assertTrue(not check("a/1.txt"))
-+ self.assertTrue(not check("a/2.txt"))
-+ self.assertTrue(not check("a/3.txt"))
-+ self.assertTrue(not check("a/foo/bar/baz.txt"))
-+ self.assertTrue(not check("a/foo/bar"))
-+ self.assertTrue(not check("a/foo"))
-+ self.assertTrue(not check("a"))
-
- self.fs.makedir("a")
- self.assertRaises(
- DestinationExistsError, self.fs.movedir, "copy of a", "a")
- self.fs.movedir("copy of a", "a", overwrite=True)
-- self.assert_(not check("copy of a"))
-- self.assert_(check("a/1.txt"))
-- self.assert_(check("a/2.txt"))
-- self.assert_(check("a/3.txt"))
-- self.assert_(check("a/foo/bar/baz.txt"))
-+ self.assertTrue(not check("copy of a"))
-+ self.assertTrue(check("a/1.txt"))
-+ self.assertTrue(check("a/2.txt"))
-+ self.assertTrue(check("a/3.txt"))
-+ self.assertTrue(check("a/foo/bar/baz.txt"))
-
- def test_cant_copy_from_os(self):
- sys_executable = os.path.abspath(os.path.realpath(sys.executable))
-@@ -645,28 +645,28 @@ class FSTestCases(object):
-
- self.fs.makedir("foo/bar", recursive=True)
- makefile("foo/bar/a.txt")
-- self.assert_(check("foo/bar/a.txt"))
-- self.assert_(checkcontents("foo/bar/a.txt"))
-+ self.assertTrue(check("foo/bar/a.txt"))
-+ self.assertTrue(checkcontents("foo/bar/a.txt"))
- # import rpdb2; rpdb2.start_embedded_debugger('password');
- self.fs.copy("foo/bar/a.txt", "foo/b.txt")
-- self.assert_(check("foo/bar/a.txt"))
-- self.assert_(check("foo/b.txt"))
-- self.assert_(checkcontents("foo/bar/a.txt"))
-- self.assert_(checkcontents("foo/b.txt"))
-+ self.assertTrue(check("foo/bar/a.txt"))
-+ self.assertTrue(check("foo/b.txt"))
-+ self.assertTrue(checkcontents("foo/bar/a.txt"))
-+ self.assertTrue(checkcontents("foo/b.txt"))
-
- self.fs.copy("foo/b.txt", "c.txt")
-- self.assert_(check("foo/b.txt"))
-- self.assert_(check("/c.txt"))
-- self.assert_(checkcontents("/c.txt"))
-+ self.assertTrue(check("foo/b.txt"))
-+ self.assertTrue(check("/c.txt"))
-+ self.assertTrue(checkcontents("/c.txt"))
-
- makefile("foo/bar/a.txt", b("different contents"))
-- self.assert_(checkcontents("foo/bar/a.txt", b("different contents")))
-+ self.assertTrue(checkcontents("foo/bar/a.txt", b("different contents")))
- self.assertRaises(
- DestinationExistsError, self.fs.copy, "foo/bar/a.txt", "/c.txt")
-- self.assert_(checkcontents("/c.txt"))
-+ self.assertTrue(checkcontents("/c.txt"))
- self.fs.copy("foo/bar/a.txt", "/c.txt", overwrite=True)
-- self.assert_(checkcontents("foo/bar/a.txt", b("different contents")))
-- self.assert_(checkcontents("/c.txt", b("different contents")))
-+ self.assertTrue(checkcontents("foo/bar/a.txt", b("different contents")))
-+ self.assertTrue(checkcontents("/c.txt", b("different contents")))
-
- def test_copydir(self):
- check = self.check
-@@ -690,24 +690,24 @@ class FSTestCases(object):
- makefile("a/foo/bar/baz.txt")
-
- self.fs.copydir("a", "copy of a")
-- self.assert_(check("copy of a/1.txt"))
-- self.assert_(check("copy of a/2.txt"))
-- self.assert_(check("copy of a/3.txt"))
-- self.assert_(check("copy of a/foo/bar/baz.txt"))
-+ self.assertTrue(check("copy of a/1.txt"))
-+ self.assertTrue(check("copy of a/2.txt"))
-+ self.assertTrue(check("copy of a/3.txt"))
-+ self.assertTrue(check("copy of a/foo/bar/baz.txt"))
- checkcontents("copy of a/1.txt")
-
-- self.assert_(check("a/1.txt"))
-- self.assert_(check("a/2.txt"))
-- self.assert_(check("a/3.txt"))
-- self.assert_(check("a/foo/bar/baz.txt"))
-+ self.assertTrue(check("a/1.txt"))
-+ self.assertTrue(check("a/2.txt"))
-+ self.assertTrue(check("a/3.txt"))
-+ self.assertTrue(check("a/foo/bar/baz.txt"))
- checkcontents("a/1.txt")
-
- self.assertRaises(DestinationExistsError, self.fs.copydir, "a", "b")
- self.fs.copydir("a", "b", overwrite=True)
-- self.assert_(check("b/1.txt"))
-- self.assert_(check("b/2.txt"))
-- self.assert_(check("b/3.txt"))
-- self.assert_(check("b/foo/bar/baz.txt"))
-+ self.assertTrue(check("b/1.txt"))
-+ self.assertTrue(check("b/2.txt"))
-+ self.assertTrue(check("b/3.txt"))
-+ self.assertTrue(check("b/foo/bar/baz.txt"))
- checkcontents("b/1.txt")
-
- def test_copydir_with_dotfile(self):
-@@ -724,13 +724,13 @@ class FSTestCases(object):
- makefile("a/.hidden.txt")
-
- self.fs.copydir("a", "copy of a")
-- self.assert_(check("copy of a/1.txt"))
-- self.assert_(check("copy of a/2.txt"))
-- self.assert_(check("copy of a/.hidden.txt"))
-+ self.assertTrue(check("copy of a/1.txt"))
-+ self.assertTrue(check("copy of a/2.txt"))
-+ self.assertTrue(check("copy of a/.hidden.txt"))
-
-- self.assert_(check("a/1.txt"))
-- self.assert_(check("a/2.txt"))
-- self.assert_(check("a/.hidden.txt"))
-+ self.assertTrue(check("a/1.txt"))
-+ self.assertTrue(check("a/2.txt"))
-+ self.assertTrue(check("a/.hidden.txt"))
-
- def test_readwriteappendseek(self):
- def checkcontents(path, check_contents):
-@@ -743,7 +743,7 @@ class FSTestCases(object):
- all_strings = b("").join(test_strings)
-
- self.assertRaises(ResourceNotFoundError, self.fs.open, "a.txt", "r")
-- self.assert_(not self.fs.exists("a.txt"))
-+ self.assertTrue(not self.fs.exists("a.txt"))
- f1 = self.fs.open("a.txt", "wb")
- pos = 0
- for s in test_strings:
-@@ -751,26 +751,26 @@ class FSTestCases(object):
- pos += len(s)
- self.assertEqual(pos, f1.tell())
- f1.close()
-- self.assert_(self.fs.exists("a.txt"))
-- self.assert_(checkcontents("a.txt", all_strings))
-+ self.assertTrue(self.fs.exists("a.txt"))
-+ self.assertTrue(checkcontents("a.txt", all_strings))
-
- f2 = self.fs.open("b.txt", "wb")
- f2.write(test_strings[0])
- f2.close()
-- self.assert_(checkcontents("b.txt", test_strings[0]))
-+ self.assertTrue(checkcontents("b.txt", test_strings[0]))
- f3 = self.fs.open("b.txt", "ab")
- # On win32, tell() gives zero until you actually write to the file
- # self.assertEquals(f3.tell(),len(test_strings[0]))
- f3.write(test_strings[1])
-- self.assertEquals(f3.tell(), len(test_strings[0])+len(test_strings[1]))
-+ self.assertEqual(f3.tell(), len(test_strings[0])+len(test_strings[1]))
- f3.write(test_strings[2])
-- self.assertEquals(f3.tell(), len(all_strings))
-+ self.assertEqual(f3.tell(), len(all_strings))
- f3.close()
-- self.assert_(checkcontents("b.txt", all_strings))
-+ self.assertTrue(checkcontents("b.txt", all_strings))
- f4 = self.fs.open("b.txt", "wb")
- f4.write(test_strings[2])
- f4.close()
-- self.assert_(checkcontents("b.txt", test_strings[2]))
-+ self.assertTrue(checkcontents("b.txt", test_strings[2]))
- f5 = self.fs.open("c.txt", "wb")
- for s in test_strings:
- f5.write(s+b("\n"))
-@@ -815,7 +815,7 @@ class FSTestCases(object):
- with self.fs.open("hello", "wb") as f:
- f.truncate(30)
-
-- self.assertEquals(self.fs.getsize("hello"), 30)
-+ self.assertEqual(self.fs.getsize("hello"), 30)
-
- # Some file systems (FTPFS) don't support both reading and writing
- if self.fs.getmeta('file.read_and_write', True):
-@@ -825,7 +825,7 @@ class FSTestCases(object):
-
- with self.fs.open("hello", "rb") as f:
- f.seek(25)
-- self.assertEquals(f.read(), b("123456"))
-+ self.assertEqual(f.read(), b("123456"))
-
- def test_write_past_end_of_file(self):
- if self.fs.getmeta('file.read_and_write', True):
-@@ -833,7 +833,7 @@ class FSTestCases(object):
- f.seek(25)
- f.write(b("EOF"))
- with self.fs.open("write_at_end", "rb") as f:
-- self.assertEquals(f.read(), b("\x00")*25 + b("EOF"))
-+ self.assertEqual(f.read(), b("\x00")*25 + b("EOF"))
-
- def test_with_statement(self):
- # This is a little tricky since 'with' is actually new syntax.
-@@ -856,15 +856,15 @@ class FSTestCases(object):
- code += " raise ValueError\n"
- code = compile(code, "<string>", 'exec')
- self.assertRaises(ValueError, eval, code, globals(), locals())
-- self.assertEquals(self.fs.getcontents('f.txt', 'rb'), contents)
-+ self.assertEqual(self.fs.getcontents('f.txt', 'rb'), contents)
-
- def test_pickling(self):
- if self.fs.getmeta('pickle_contents', True):
- self.fs.setcontents("test1", b("hello world"))
- fs2 = pickle.loads(pickle.dumps(self.fs))
-- self.assert_(fs2.isfile("test1"))
-+ self.assertTrue(fs2.isfile("test1"))
- fs3 = pickle.loads(pickle.dumps(self.fs, -1))
-- self.assert_(fs3.isfile("test1"))
-+ self.assertTrue(fs3.isfile("test1"))
- else:
- # Just make sure it doesn't throw an exception
- fs2 = pickle.loads(pickle.dumps(self.fs))
-@@ -879,9 +879,9 @@ class FSTestCases(object):
- r = random.Random(0)
- randint = r.randint
- int2byte = six.int2byte
-- for _i in xrange(num_chunks):
-+ for _i in range(num_chunks):
- c = b("").join(int2byte(randint(
-- 0, 255)) for _j in xrange(chunk_size//8))
-+ 0, 255)) for _j in range(chunk_size//8))
- yield c * 8
- f = self.fs.open("bigfile", "wb")
- try:
-@@ -894,7 +894,7 @@ class FSTestCases(object):
- try:
- try:
- while True:
-- if chunks.next() != f.read(chunk_size):
-+ if next(chunks) != f.read(chunk_size):
- assert False, "bigfile was corrupted"
- except StopIteration:
- if f.read() != b(""):
-@@ -929,9 +929,9 @@ class FSTestCases(object):
- """Test read(0) returns empty string"""
- self.fs.setcontents('foo.txt', b('Hello, World'))
- with self.fs.open('foo.txt', 'rb') as f:
-- self.assert_(len(f.read(0)) == 0)
-+ self.assertTrue(len(f.read(0)) == 0)
- with self.fs.open('foo.txt', 'rt') as f:
-- self.assert_(len(f.read(0)) == 0)
-+ self.assertTrue(len(f.read(0)) == 0)
-
- # May be disabled - see end of file
-
-@@ -977,7 +977,7 @@ class ThreadingTestCases(object):
- for t in threads:
- t.join()
- for (c, e, t) in errors:
-- raise e, None, t
-+ raise e.with_traceback(t)
- finally:
- sys.setcheckinterval(check_interval)
-
-@@ -994,12 +994,12 @@ class ThreadingTestCases(object):
- def thread1():
- c = b("thread1 was 'ere")
- setcontents("thread1.txt", c)
-- self.assertEquals(self.fs.getcontents("thread1.txt", 'rb'), c)
-+ self.assertEqual(self.fs.getcontents("thread1.txt", 'rb'), c)
-
- def thread2():
- c = b("thread2 was 'ere")
- setcontents("thread2.txt", c)
-- self.assertEquals(self.fs.getcontents("thread2.txt", 'rb'), c)
-+ self.assertEqual(self.fs.getcontents("thread2.txt", 'rb'), c)
- self._runThreads(thread1, thread2)
-
- def test_setcontents_threaded_samefile(self):
-@@ -1016,19 +1016,19 @@ class ThreadingTestCases(object):
- c = b("thread1 was 'ere")
- setcontents("threads.txt", c)
- self._yield()
-- self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
-+ self.assertEqual(self.fs.listdir("/"), ["threads.txt"])
-
- def thread2():
- c = b("thread2 was 'ere")
- setcontents("threads.txt", c)
- self._yield()
-- self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
-+ self.assertEqual(self.fs.listdir("/"), ["threads.txt"])
-
- def thread3():
- c = b("thread3 was 'ere")
- setcontents("threads.txt", c)
- self._yield()
-- self.assertEquals(self.fs.listdir("/"), ["threads.txt"])
-+ self.assertEqual(self.fs.listdir("/"), ["threads.txt"])
- try:
- self._runThreads(thread1, thread2, thread3)
- except ResourceLockedError:
-@@ -1079,23 +1079,23 @@ class ThreadingTestCases(object):
- def makedir():
- try:
- self.fs.makedir("testdir")
-- except DestinationExistsError, e:
-+ except DestinationExistsError as e:
- errors.append(e)
-
- def makedir_noerror():
- try:
- self.fs.makedir("testdir", allow_recreate=True)
-- except DestinationExistsError, e:
-+ except DestinationExistsError as e:
- errors.append(e)
-
- def removedir():
- try:
- self.fs.removedir("testdir")
-- except (ResourceNotFoundError, ResourceLockedError), e:
-+ except (ResourceNotFoundError, ResourceLockedError) as e:
- errors.append(e)
- # One thread should succeed, one should error
- self._runThreads(makedir, makedir)
-- self.assertEquals(len(errors), 1)
-+ self.assertEqual(len(errors), 1)
- self.fs.removedir("testdir")
- # One thread should succeed, two should error
- errors = []
-@@ -1106,18 +1106,18 @@ class ThreadingTestCases(object):
- # All threads should succeed
- errors = []
- self._runThreads(makedir_noerror, makedir_noerror, makedir_noerror)
-- self.assertEquals(len(errors), 0)
-+ self.assertEqual(len(errors), 0)
- self.assertTrue(self.fs.isdir("testdir"))
- self.fs.removedir("testdir")
- # makedir() can beat removedir() and vice-versa
- errors = []
- self._runThreads(makedir, removedir)
- if self.fs.isdir("testdir"):
-- self.assertEquals(len(errors), 1)
-+ self.assertEqual(len(errors), 1)
- self.assertFalse(isinstance(errors[0], DestinationExistsError))
- self.fs.removedir("testdir")
- else:
-- self.assertEquals(len(errors), 0)
-+ self.assertEqual(len(errors), 0)
-
- def test_concurrent_copydir(self):
- self.fs.makedir("a")
-@@ -1136,10 +1136,10 @@ class ThreadingTestCases(object):
- # This should error out since we're not overwriting
- self.assertRaises(
- DestinationExistsError, self._runThreads, copydir, copydir)
-- self.assert_(self.fs.isdir('a'))
-- self.assert_(self.fs.isdir('a'))
-+ self.assertTrue(self.fs.isdir('a'))
-+ self.assertTrue(self.fs.isdir('a'))
- copydir_overwrite()
-- self.assert_(self.fs.isdir('a'))
-+ self.assertTrue(self.fs.isdir('a'))
- # This should run to completion and give a valid state, unless
- # files get locked when written to.
- try:
-@@ -1160,19 +1160,19 @@ class ThreadingTestCases(object):
- "contents the second"), b("number three")]
-
- def thread1():
-- for i in xrange(30):
-+ for i in range(30):
- for c in contents:
- self.fs.setcontents("thread1.txt", c)
-- self.assertEquals(self.fs.getsize("thread1.txt"), len(c))
-- self.assertEquals(self.fs.getcontents(
-+ self.assertEqual(self.fs.getsize("thread1.txt"), len(c))
-+ self.assertEqual(self.fs.getcontents(
- "thread1.txt", 'rb'), c)
-
- def thread2():
-- for i in xrange(30):
-+ for i in range(30):
- for c in contents:
- self.fs.setcontents("thread2.txt", c)
-- self.assertEquals(self.fs.getsize("thread2.txt"), len(c))
-- self.assertEquals(self.fs.getcontents(
-+ self.assertEqual(self.fs.getsize("thread2.txt"), len(c))
-+ self.assertEqual(self.fs.getcontents(
- "thread2.txt", 'rb'), c)
- self._runThreads(thread1, thread2)
-
---- fs/tests/test_archivefs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_archivefs.py
-@@ -58,7 +58,7 @@ class TestReadArchiveFS(unittest.TestCase):
- contents = f.read()
- return contents
- def check_contents(path, expected):
-- self.assert_(read_contents(path)==expected)
-+ self.assertTrue(read_contents(path)==expected)
- check_contents("a.txt", b("Hello, World!"))
- check_contents("1.txt", b("1"))
- check_contents("foo/bar/baz.txt", b("baz"))
-@@ -67,29 +67,29 @@ class TestReadArchiveFS(unittest.TestCase):
- def read_contents(path):
- return self.fs.getcontents(path)
- def check_contents(path, expected):
-- self.assert_(read_contents(path)==expected)
-+ self.assertTrue(read_contents(path)==expected)
- check_contents("a.txt", b("Hello, World!"))
- check_contents("1.txt", b("1"))
- check_contents("foo/bar/baz.txt", b("baz"))
-
- def test_is(self):
-- self.assert_(self.fs.isfile('a.txt'))
-- self.assert_(self.fs.isfile('1.txt'))
-- self.assert_(self.fs.isfile('foo/bar/baz.txt'))
-- self.assert_(self.fs.isdir('foo'))
-- self.assert_(self.fs.isdir('foo/bar'))
-- self.assert_(self.fs.exists('a.txt'))
-- self.assert_(self.fs.exists('1.txt'))
-- self.assert_(self.fs.exists('foo/bar/baz.txt'))
-- self.assert_(self.fs.exists('foo'))
-- self.assert_(self.fs.exists('foo/bar'))
-+ self.assertTrue(self.fs.isfile('a.txt'))
-+ self.assertTrue(self.fs.isfile('1.txt'))
-+ self.assertTrue(self.fs.isfile('foo/bar/baz.txt'))
-+ self.assertTrue(self.fs.isdir('foo'))
-+ self.assertTrue(self.fs.isdir('foo/bar'))
-+ self.assertTrue(self.fs.exists('a.txt'))
-+ self.assertTrue(self.fs.exists('1.txt'))
-+ self.assertTrue(self.fs.exists('foo/bar/baz.txt'))
-+ self.assertTrue(self.fs.exists('foo'))
-+ self.assertTrue(self.fs.exists('foo/bar'))
-
- def test_listdir(self):
- def check_listing(path, expected):
- dir_list = self.fs.listdir(path)
-- self.assert_(sorted(dir_list) == sorted(expected))
-+ self.assertTrue(sorted(dir_list) == sorted(expected))
- for item in dir_list:
-- self.assert_(isinstance(item,unicode))
-+ self.assertTrue(isinstance(item,str))
- check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
- check_listing('foo', ['second.txt', 'bar'])
- check_listing('foo/bar', ['baz.txt'])
-@@ -114,7 +114,7 @@ class TestWriteArchiveFS(unittest.TestCase):
-
- makefile("a.txt", b("Hello, World!"))
- makefile("b.txt", b("b"))
-- makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-+ makefile("\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
- makefile("foo/bar/baz.txt", b("baz"))
- makefile("foo/second.txt", b("hai"))
-
-@@ -125,7 +125,7 @@ class TestWriteArchiveFS(unittest.TestCase):
-
- def test_valid(self):
- zf = zipfile.ZipFile(self.temp_filename, "r")
-- self.assert_(zf.testzip() is None)
-+ self.assertTrue(zf.testzip() is None)
- zf.close()
-
- def test_creation(self):
-@@ -140,7 +140,7 @@ class TestWriteArchiveFS(unittest.TestCase):
- check_contents("b.txt", b("b"))
- check_contents("foo/bar/baz.txt", b("baz"))
- check_contents("foo/second.txt", b("hai"))
-- check_contents(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-+ check_contents("\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-
-
- #~ class TestAppendArchiveFS(TestWriteArchiveFS):
---- fs/tests/test_errors.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_errors.py
-@@ -29,4 +29,4 @@ class TestFSError(unittest.TestCase):
-
- def test_unicode_representation_of_error_with_non_ascii_characters(self):
- path_error = PathError('/Shïrê/Frødø')
-- _ = unicode(path_error)
-\ No newline at end of file
-+ _ = str(path_error)
---- fs/tests/test_expose.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_expose.py
-@@ -149,7 +149,7 @@ if dokan.is_available:
-
- def tearDown(self):
- self.mount_proc.unmount()
-- for _ in xrange(10):
-+ for _ in range(10):
- try:
- if self.mount_proc.poll() is None:
- self.mount_proc.terminate()
---- fs/tests/test_fs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_fs.py
-@@ -20,7 +20,7 @@ from fs import osfs
- class TestOSFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
-
- def setUp(self):
-- self.temp_dir = tempfile.mkdtemp(u"fstest")
-+ self.temp_dir = tempfile.mkdtemp("fstest")
- self.fs = osfs.OSFS(self.temp_dir)
-
- def tearDown(self):
-@@ -35,14 +35,14 @@ class TestOSFS(unittest.TestCase,FSTestCases,Threading
-
- self.assertRaises(errors.InvalidCharsInPathError, self.fs.open, 'invalid\0file', 'wb')
- self.assertFalse(self.fs.isvalidpath('invalid\0file'))
-- self.assert_(self.fs.isvalidpath('validfile'))
-- self.assert_(self.fs.isvalidpath('completely_valid/path/foo.bar'))
-+ self.assertTrue(self.fs.isvalidpath('validfile'))
-+ self.assertTrue(self.fs.isvalidpath('completely_valid/path/foo.bar'))
-
-
- class TestSubFS(unittest.TestCase,FSTestCases,ThreadingTestCases):
-
- def setUp(self):
-- self.temp_dir = tempfile.mkdtemp(u"fstest")
-+ self.temp_dir = tempfile.mkdtemp("fstest")
- self.parent_fs = osfs.OSFS(self.temp_dir)
- self.parent_fs.makedir("foo/bar", recursive=True)
- self.fs = self.parent_fs.opendir("foo/bar")
-@@ -118,7 +118,7 @@ class TestTempFS(unittest.TestCase,FSTestCases,Threadi
- def tearDown(self):
- td = self.fs._temp_dir
- self.fs.close()
-- self.assert_(not os.path.exists(td))
-+ self.assertTrue(not os.path.exists(td))
-
- def check(self, p):
- td = self.fs._temp_dir
-@@ -129,5 +129,5 @@ class TestTempFS(unittest.TestCase,FSTestCases,Threadi
-
- self.assertRaises(errors.InvalidCharsInPathError, self.fs.open, 'invalid\0file', 'wb')
- self.assertFalse(self.fs.isvalidpath('invalid\0file'))
-- self.assert_(self.fs.isvalidpath('validfile'))
-- self.assert_(self.fs.isvalidpath('completely_valid/path/foo.bar'))
-+ self.assertTrue(self.fs.isvalidpath('validfile'))
-+ self.assertTrue(self.fs.isvalidpath('completely_valid/path/foo.bar'))
---- fs/tests/test_ftpfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_ftpfs.py
-@@ -10,7 +10,7 @@ import tempfile
- import subprocess
- import time
- from os.path import abspath
--import urllib
-+import urllib.request, urllib.parse, urllib.error
-
- from six import PY3
-
-@@ -37,7 +37,7 @@ class TestFTPFS(unittest.TestCase, FSTestCases, Thread
- ftp_port += 1
- use_port = str(ftp_port)
- #ftp_port = 10000
-- self.temp_dir = tempfile.mkdtemp(u"ftpfstests")
-+ self.temp_dir = tempfile.mkdtemp("ftpfstests")
-
- file_path = __file__
- if ':' not in file_path:
-@@ -58,7 +58,7 @@ class TestFTPFS(unittest.TestCase, FSTestCases, Thread
- start_time = time.time()
- while time.time() - start_time < 5:
- try:
-- ftpurl = urllib.urlopen('ftp://127.0.0.1:%s' % use_port)
-+ ftpurl = urllib.request.urlopen('ftp://127.0.0.1:%s' % use_port)
- except IOError:
- time.sleep(0)
- else:
---- fs/tests/test_importhook.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_importhook.py
-@@ -25,7 +25,7 @@ class TestFSImportHook(unittest.TestCase):
- for ph in list(sys.path_hooks):
- if issubclass(ph,FSImportHook):
- sys.path_hooks.remove(mph)
-- for (k,v) in sys.modules.items():
-+ for (k,v) in list(sys.modules.items()):
- if k.startswith("fsih_"):
- del sys.modules[k]
- elif hasattr(v,"__loader__"):
-@@ -64,22 +64,22 @@ class TestFSImportHook(unittest.TestCase):
- ih = FSImportHook(t)
- sys.meta_path.append(ih)
- try:
-- self.assertEquals(ih.find_module("fsih_hello"),ih)
-- self.assertEquals(ih.find_module("fsih_helo"),None)
-- self.assertEquals(ih.find_module("fsih_pkg"),ih)
-- self.assertEquals(ih.find_module("fsih_pkg.sub1"),ih)
-- self.assertEquals(ih.find_module("fsih_pkg.sub2"),ih)
-- self.assertEquals(ih.find_module("fsih_pkg.sub3"),None)
-+ self.assertEqual(ih.find_module("fsih_hello"),ih)
-+ self.assertEqual(ih.find_module("fsih_helo"),None)
-+ self.assertEqual(ih.find_module("fsih_pkg"),ih)
-+ self.assertEqual(ih.find_module("fsih_pkg.sub1"),ih)
-+ self.assertEqual(ih.find_module("fsih_pkg.sub2"),ih)
-+ self.assertEqual(ih.find_module("fsih_pkg.sub3"),None)
- m = ih.load_module("fsih_hello")
-- self.assertEquals(m.message,"hello world!")
-+ self.assertEqual(m.message,"hello world!")
- self.assertRaises(ImportError,ih.load_module,"fsih_helo")
- ih.load_module("fsih_pkg")
- m = ih.load_module("fsih_pkg.sub1")
-- self.assertEquals(m.message,"hello world!")
-- self.assertEquals(m.a,42)
-+ self.assertEqual(m.message,"hello world!")
-+ self.assertEqual(m.a,42)
- m = ih.load_module("fsih_pkg.sub2")
-- self.assertEquals(m.message,"hello world!")
-- self.assertEquals(m.a,42 * 2)
-+ self.assertEqual(m.message,"hello world!")
-+ self.assertEqual(m.a,42 * 2)
- self.assertRaises(ImportError,ih.load_module,"fsih_pkg.sub3")
- finally:
- sys.meta_path.remove(ih)
-@@ -88,7 +88,7 @@ class TestFSImportHook(unittest.TestCase):
- def _check_imports_are_working(self):
- try:
- import fsih_hello
-- self.assertEquals(fsih_hello.message,"hello world!")
-+ self.assertEqual(fsih_hello.message,"hello world!")
- try:
- import fsih_helo
- except ImportError:
-@@ -97,11 +97,11 @@ class TestFSImportHook(unittest.TestCase):
- assert False, "ImportError not raised"
- import fsih_pkg
- import fsih_pkg.sub1
-- self.assertEquals(fsih_pkg.sub1.message,"hello world!")
-- self.assertEquals(fsih_pkg.sub1.a,42)
-+ self.assertEqual(fsih_pkg.sub1.message,"hello world!")
-+ self.assertEqual(fsih_pkg.sub1.a,42)
- import fsih_pkg.sub2
-- self.assertEquals(fsih_pkg.sub2.message,"hello world!")
-- self.assertEquals(fsih_pkg.sub2.a,42 * 2)
-+ self.assertEqual(fsih_pkg.sub2.message,"hello world!")
-+ self.assertEqual(fsih_pkg.sub2.a,42 * 2)
- try:
- import fsih_pkg.sub3
- except ImportError:
-@@ -109,7 +109,7 @@ class TestFSImportHook(unittest.TestCase):
- else:
- assert False, "ImportError not raised"
- finally:
-- for k in sys.modules.keys():
-+ for k in list(sys.modules.keys()):
- if k.startswith("fsih_"):
- del sys.modules[k]
-
---- fs/tests/test_iotools.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_iotools.py
-@@ -1,5 +1,5 @@
--from __future__ import unicode_literals
-
-+
- from fs import iotools
-
- import io
-@@ -7,9 +7,9 @@ import unittest
- from os.path import dirname, join, abspath
-
- try:
-- unicode
-+ str
- except NameError:
-- unicode = str
-+ str = str
-
-
- class OpenFilelike(object):
-@@ -37,20 +37,20 @@ class TestIOTools(unittest.TestCase):
- """Test make_stream"""
- with self.get_bin_file() as f:
- text = f.read()
-- self.assert_(isinstance(text, bytes))
-+ self.assertTrue(isinstance(text, bytes))
-
- with self.get_bin_file() as f:
- with iotools.make_stream("data/UTF-8-demo.txt", f, 'rt') as f2:
- text = f2.read()
-- self.assert_(isinstance(text, unicode))
-+ self.assertTrue(isinstance(text, str))
-
- def test_decorator(self):
- """Test filelike_to_stream decorator"""
- o = OpenFilelike(self.get_bin_file)
- with o.open('file', 'rb') as f:
- text = f.read()
-- self.assert_(isinstance(text, bytes))
-+ self.assertTrue(isinstance(text, bytes))
-
- with o.open('file', 'rt') as f:
- text = f.read()
-- self.assert_(isinstance(text, unicode))
-+ self.assertTrue(isinstance(text, str))
---- fs/tests/test_mountfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_mountfs.py
-@@ -12,11 +12,11 @@ class TestMountFS(unittest.TestCase):
- m2 = MemoryFS()
- multi_fs.mount('/m1', m1)
- multi_fs.mount('/m2', m2)
-- self.assert_(not m1.closed)
-- self.assert_(not m2.closed)
-+ self.assertTrue(not m1.closed)
-+ self.assertTrue(not m2.closed)
- multi_fs.close()
-- self.assert_(m1.closed)
-- self.assert_(m2.closed)
-+ self.assertTrue(m1.closed)
-+ self.assertTrue(m2.closed)
-
- def test_no_auto_close(self):
- """Test MountFS auto close can be disabled"""
-@@ -25,11 +25,11 @@ class TestMountFS(unittest.TestCase):
- m2 = MemoryFS()
- multi_fs.mount('/m1', m1)
- multi_fs.mount('/m2', m2)
-- self.assert_(not m1.closed)
-- self.assert_(not m2.closed)
-+ self.assertTrue(not m1.closed)
-+ self.assertTrue(not m2.closed)
- multi_fs.close()
-- self.assert_(not m1.closed)
-- self.assert_(not m2.closed)
-+ self.assertTrue(not m1.closed)
-+ self.assertTrue(not m2.closed)
-
- def test_mountfile(self):
- """Test mounting a file"""
-@@ -42,16 +42,16 @@ class TestMountFS(unittest.TestCase):
- mount_fs = MountFS()
- mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)
-
-- self.assert_(mount_fs.isdir('/'))
-- self.assert_(mount_fs.isdir('./'))
-- self.assert_(mount_fs.isdir(''))
-+ self.assertTrue(mount_fs.isdir('/'))
-+ self.assertTrue(mount_fs.isdir('./'))
-+ self.assertTrue(mount_fs.isdir(''))
-
- # Check we can see the mounted file in the dir list
- self.assertEqual(mount_fs.listdir(), ["bar.txt"])
-- self.assert_(not mount_fs.exists('nobodyhere.txt'))
-- self.assert_(mount_fs.exists('bar.txt'))
-- self.assert_(mount_fs.isfile('bar.txt'))
-- self.assert_(not mount_fs.isdir('bar.txt'))
-+ self.assertTrue(not mount_fs.exists('nobodyhere.txt'))
-+ self.assertTrue(mount_fs.exists('bar.txt'))
-+ self.assertTrue(mount_fs.isfile('bar.txt'))
-+ self.assertTrue(not mount_fs.isdir('bar.txt'))
-
- # Check open and getinfo callables
- self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
-@@ -67,9 +67,9 @@ class TestMountFS(unittest.TestCase):
- self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))
-
- # Check unmount
-- self.assert_(mount_fs.unmount("bar.txt"))
-+ self.assertTrue(mount_fs.unmount("bar.txt"))
- self.assertEqual(mount_fs.listdir(), [])
-- self.assert_(not mount_fs.exists('bar.txt'))
-+ self.assertTrue(not mount_fs.exists('bar.txt'))
-
- # Check unount a second time is a null op, and returns False
- self.assertFalse(mount_fs.unmount("bar.txt"))
---- fs/tests/test_multifs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_multifs.py
-@@ -13,11 +13,11 @@ class TestMultiFS(unittest.TestCase):
- m2 = MemoryFS()
- multi_fs.addfs('m1', m1)
- multi_fs.addfs('m2', m2)
-- self.assert_(not m1.closed)
-- self.assert_(not m2.closed)
-+ self.assertTrue(not m1.closed)
-+ self.assertTrue(not m2.closed)
- multi_fs.close()
-- self.assert_(m1.closed)
-- self.assert_(m2.closed)
-+ self.assertTrue(m1.closed)
-+ self.assertTrue(m2.closed)
-
- def test_no_auto_close(self):
- """Test MultiFS auto close can be disables"""
-@@ -26,11 +26,11 @@ class TestMultiFS(unittest.TestCase):
- m2 = MemoryFS()
- multi_fs.addfs('m1', m1)
- multi_fs.addfs('m2', m2)
-- self.assert_(not m1.closed)
-- self.assert_(not m2.closed)
-+ self.assertTrue(not m1.closed)
-+ self.assertTrue(not m2.closed)
- multi_fs.close()
-- self.assert_(not m1.closed)
-- self.assert_(not m2.closed)
-+ self.assertTrue(not m1.closed)
-+ self.assertTrue(not m2.closed)
-
-
- def test_priority(self):
-@@ -45,7 +45,7 @@ class TestMultiFS(unittest.TestCase):
- multi_fs.addfs("m1", m1)
- multi_fs.addfs("m2", m2)
- multi_fs.addfs("m3", m3)
-- self.assert_(multi_fs.getcontents("name") == b("m3"))
-+ self.assertTrue(multi_fs.getcontents("name") == b("m3"))
-
- m1 = MemoryFS()
- m2 = MemoryFS()
-@@ -57,7 +57,7 @@ class TestMultiFS(unittest.TestCase):
- multi_fs.addfs("m1", m1)
- multi_fs.addfs("m2", m2, priority=10)
- multi_fs.addfs("m3", m3)
-- self.assert_(multi_fs.getcontents("name") == b("m2"))
-+ self.assertTrue(multi_fs.getcontents("name") == b("m2"))
-
- m1 = MemoryFS()
- m2 = MemoryFS()
-@@ -69,7 +69,7 @@ class TestMultiFS(unittest.TestCase):
- multi_fs.addfs("m1", m1)
- multi_fs.addfs("m2", m2, priority=10)
- multi_fs.addfs("m3", m3, priority=10)
-- self.assert_(multi_fs.getcontents("name") == b("m3"))
-+ self.assertTrue(multi_fs.getcontents("name") == b("m3"))
-
- m1 = MemoryFS()
- m2 = MemoryFS()
-@@ -81,5 +81,5 @@ class TestMultiFS(unittest.TestCase):
- multi_fs.addfs("m1", m1, priority=11)
- multi_fs.addfs("m2", m2, priority=10)
- multi_fs.addfs("m3", m3, priority=10)
-- self.assert_(multi_fs.getcontents("name") == b("m1"))
-+ self.assertTrue(multi_fs.getcontents("name") == b("m1"))
-
---- fs/tests/test_opener.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_opener.py
-@@ -14,7 +14,7 @@ from fs import path
- class TestOpener(unittest.TestCase):
-
- def setUp(self):
-- self.temp_dir = tempfile.mkdtemp(u"fstest_opener")
-+ self.temp_dir = tempfile.mkdtemp("fstest_opener")
-
- def tearDown(self):
- shutil.rmtree(self.temp_dir)
---- fs/tests/test_path.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_path.py
-@@ -23,7 +23,7 @@ class TestPathFunctions(unittest.TestCase):
- ("a/b/c", "a/b/c"),
- ("a/b/../c/", "a/c"),
- ("/","/"),
-- (u"a/\N{GREEK SMALL LETTER BETA}/c",u"a/\N{GREEK SMALL LETTER BETA}/c"),
-+ ("a/\N{GREEK SMALL LETTER BETA}/c","a/\N{GREEK SMALL LETTER BETA}/c"),
- ]
- for path, result in tests:
- self.assertEqual(normpath(path), result)
-@@ -44,7 +44,7 @@ class TestPathFunctions(unittest.TestCase):
- ("a/b", "./d", "e", "a/b/d/e"),
- ("/", "/", "/"),
- ("/", "", "/"),
-- (u"a/\N{GREEK SMALL LETTER BETA}","c",u"a/\N{GREEK SMALL LETTER BETA}/c"),
-+ ("a/\N{GREEK SMALL LETTER BETA}","c","a/\N{GREEK SMALL LETTER BETA}/c"),
- ]
- for testpaths in tests:
- paths = testpaths[:-1]
-@@ -101,12 +101,12 @@ class TestPathFunctions(unittest.TestCase):
- self.assertEqual(pathsplit(path), result)
-
- def test_recursepath(self):
-- self.assertEquals(recursepath("/"),["/"])
-- self.assertEquals(recursepath("hello"),["/","/hello"])
-- self.assertEquals(recursepath("/hello/world/"),["/","/hello","/hello/world"])
-- self.assertEquals(recursepath("/hello/world/",reverse=True),["/hello/world","/hello","/"])
-- self.assertEquals(recursepath("hello",reverse=True),["/hello","/"])
-- self.assertEquals(recursepath("",reverse=True),["/"])
-+ self.assertEqual(recursepath("/"),["/"])
-+ self.assertEqual(recursepath("hello"),["/","/hello"])
-+ self.assertEqual(recursepath("/hello/world/"),["/","/hello","/hello/world"])
-+ self.assertEqual(recursepath("/hello/world/",reverse=True),["/hello/world","/hello","/"])
-+ self.assertEqual(recursepath("hello",reverse=True),["/hello","/"])
-+ self.assertEqual(recursepath("",reverse=True),["/"])
-
- def test_isdotfile(self):
- for path in ['.foo',
-@@ -114,7 +114,7 @@ class TestPathFunctions(unittest.TestCase):
- 'foo/.svn',
- 'foo/bar/.svn',
- '/foo/.bar']:
-- self.assert_(isdotfile(path))
-+ self.assertTrue(isdotfile(path))
-
- for path in ['asfoo',
- 'df.svn',
-@@ -142,10 +142,10 @@ class TestPathFunctions(unittest.TestCase):
- self.assertEqual(basename(path), test_basename)
-
- def test_iswildcard(self):
-- self.assert_(iswildcard('*'))
-- self.assert_(iswildcard('*.jpg'))
-- self.assert_(iswildcard('foo/*'))
-- self.assert_(iswildcard('foo/{}'))
-+ self.assertTrue(iswildcard('*'))
-+ self.assertTrue(iswildcard('*.jpg'))
-+ self.assertTrue(iswildcard('foo/*'))
-+ self.assertTrue(iswildcard('foo/{}'))
- self.assertFalse(iswildcard('foo'))
- self.assertFalse(iswildcard('img.jpg'))
- self.assertFalse(iswildcard('foo/bar'))
-@@ -171,9 +171,9 @@ class Test_PathMap(unittest.TestCase):
- def test_basics(self):
- map = PathMap()
- map["hello"] = "world"
-- self.assertEquals(map["/hello"],"world")
-- self.assertEquals(map["/hello/"],"world")
-- self.assertEquals(map.get("hello"),"world")
-+ self.assertEqual(map["/hello"],"world")
-+ self.assertEqual(map["/hello/"],"world")
-+ self.assertEqual(map.get("hello"),"world")
-
- def test_iteration(self):
- map = PathMap()
-@@ -183,17 +183,17 @@ class Test_PathMap(unittest.TestCase):
- map["hello/kitty"] = 4
- map["hello/kitty/islame"] = 5
- map["batman/isawesome"] = 6
-- self.assertEquals(set(map.iterkeys()),set(("/hello/world","/hello/world/howareya","/hello/world/iamfine","/hello/kitty","/hello/kitty/islame","/batman/isawesome")))
-- self.assertEquals(sorted(map.values()),range(1,7))
-- self.assertEquals(sorted(map.items("/hello/world/")),[("/hello/world",1),("/hello/world/howareya",2),("/hello/world/iamfine",3)])
-- self.assertEquals(zip(map.keys(),map.values()),map.items())
-- self.assertEquals(zip(map.keys("batman"),map.values("batman")),map.items("batman"))
-- self.assertEquals(set(map.iternames("hello")),set(("world","kitty")))
-- self.assertEquals(set(map.iternames("/hello/kitty")),set(("islame",)))
-+ self.assertEqual(set(map.keys()),set(("/hello/world","/hello/world/howareya","/hello/world/iamfine","/hello/kitty","/hello/kitty/islame","/batman/isawesome")))
-+ self.assertEqual(sorted(map.values()),list(range(1,7)))
-+ self.assertEqual(sorted(map.items("/hello/world/")),[("/hello/world",1),("/hello/world/howareya",2),("/hello/world/iamfine",3)])
-+ self.assertEqual(list(zip(list(map.keys()),list(map.values()))),list(map.items()))
-+ self.assertEqual(list(zip(map.keys("batman"),map.values("batman"))),map.items("batman"))
-+ self.assertEqual(set(map.iternames("hello")),set(("world","kitty")))
-+ self.assertEqual(set(map.iternames("/hello/kitty")),set(("islame",)))
-
- del map["hello/kitty/islame"]
-- self.assertEquals(set(map.iternames("/hello/kitty")),set())
-- self.assertEquals(set(map.iterkeys()),set(("/hello/world","/hello/world/howareya","/hello/world/iamfine","/hello/kitty","/batman/isawesome")))
-- self.assertEquals(set(map.values()),set(range(1,7)) - set((5,)))
-+ self.assertEqual(set(map.iternames("/hello/kitty")),set())
-+ self.assertEqual(set(map.keys()),set(("/hello/world","/hello/world/howareya","/hello/world/iamfine","/hello/kitty","/batman/isawesome")))
-+ self.assertEqual(set(map.values()),set(range(1,7)) - set((5,)))
-
-
---- fs/tests/test_remote.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_remote.py
-@@ -116,37 +116,37 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCa
- self.fakeOn()
-
- f = self.fs.open('test.txt', 'rb')
-- self.assertEquals(f.read(10), contents[:10])
-+ self.assertEqual(f.read(10), contents[:10])
- f.wrapped_file.seek(0, SEEK_END)
-- self.assertEquals(f._rfile.tell(), 10)
-+ self.assertEqual(f._rfile.tell(), 10)
- f.seek(20)
-- self.assertEquals(f.tell(), 20)
-- self.assertEquals(f._rfile.tell(), 20)
-+ self.assertEqual(f.tell(), 20)
-+ self.assertEqual(f._rfile.tell(), 20)
- f.seek(0, SEEK_END)
-- self.assertEquals(f._rfile.tell(), len(contents))
-+ self.assertEqual(f._rfile.tell(), len(contents))
- f.close()
-
- f = self.fs.open('test.txt', 'ab')
-- self.assertEquals(f.tell(), len(contents))
-+ self.assertEqual(f.tell(), len(contents))
- f.close()
-
- self.fakeOff()
-
- # Writing over the rfile edge
- f = self.fs.open('test.txt', 'wb+')
-- self.assertEquals(f.tell(), 0)
-+ self.assertEqual(f.tell(), 0)
- f.seek(len(contents) - 5)
- # Last 5 characters not loaded from remote file
-- self.assertEquals(f._rfile.tell(), len(contents) - 5)
-+ self.assertEqual(f._rfile.tell(), len(contents) - 5)
- # Confirm that last 5 characters are still in rfile buffer
-- self.assertEquals(f._rfile.read(), contents[-5:])
-+ self.assertEqual(f._rfile.read(), contents[-5:])
- # Rollback position 5 characters before eof
- f._rfile.seek(len(contents[:-5]))
- # Write 10 new characters (will make contents longer for 5 chars)
- f.write(b('1234567890'))
- f.flush()
- # We are on the end of file (and buffer not serve anything anymore)
-- self.assertEquals(f.read(), b(''))
-+ self.assertEqual(f.read(), b(''))
- f.close()
-
- self.fakeOn()
-@@ -154,7 +154,7 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCa
- # Check if we wrote everything OK from
- # previous writing over the remote buffer edge
- f = self.fs.open('test.txt', 'rb')
-- self.assertEquals(f.read(), contents[:-5] + b('1234567890'))
-+ self.assertEqual(f.read(), contents[:-5] + b('1234567890'))
- f.close()
-
- self.fakeOff()
-@@ -199,36 +199,36 @@ class TestRemoteFileBuffer(unittest.TestCase, FSTestCa
-
- f = self.fs.open('test.txt', 'rb+')
- # Check if we read just 10 characters
-- self.assertEquals(f.read(10), contents[:10])
-- self.assertEquals(f._rfile.tell(), 10)
-+ self.assertEqual(f.read(10), contents[:10])
-+ self.assertEqual(f._rfile.tell(), 10)
- # Write garbage to file to mark it as _changed
- f.write(b('x'))
- # This should read the rest of file and store file back to again.
- f.flush()
- f.seek(0)
- # Try if we have unocrrupted file locally...
-- self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
-+ self.assertEqual(f.read(), contents[:10] + b('x') + contents[11:])
- f.close()
-
- # And if we have uncorrupted file also on storage
- f = self.fs.open('test.txt', 'rb')
-- self.assertEquals(f.read(), contents[:10] + b('x') + contents[11:])
-+ self.assertEqual(f.read(), contents[:10] + b('x') + contents[11:])
- f.close()
-
- # Now try it again, but write garbage behind edge of remote file
- f = self.fs.open('test.txt', 'rb+')
-- self.assertEquals(f.read(10), contents[:10])
-+ self.assertEqual(f.read(10), contents[:10])
- # Write garbage to file to mark it as _changed
- f.write(contents2)
- f.flush()
- f.seek(0)
- # Try if we have unocrrupted file locally...
-- self.assertEquals(f.read(), contents[:10] + contents2)
-+ self.assertEqual(f.read(), contents[:10] + contents2)
- f.close()
-
- # And if we have uncorrupted file also on storage
- f = self.fs.open('test.txt', 'rb')
-- self.assertEquals(f.read(), contents[:10] + contents2)
-+ self.assertEqual(f.read(), contents[:10] + contents2)
- f.close()
-
-
---- fs/tests/test_rpcfs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/tests/test_rpcfs.py
-@@ -48,7 +48,7 @@ class TestRPCFS(unittest.TestCase, FSTestCases, Thread
- while not self.server:
- try:
- self.server = self.makeServer(self.temp_fs,("127.0.0.1",port))
-- except socket.error, e:
-+ except socket.error as e:
- if e.args[1] == "Address already in use":
- port += 1
- else:
-@@ -63,7 +63,7 @@ class TestRPCFS(unittest.TestCase, FSTestCases, Thread
- #self.server.serve_forever()
- while self.serve_more_requests:
- self.server.handle_request()
-- except Exception, e:
-+ except Exception as e:
- pass
-
- self.end_event.set()
-@@ -93,7 +93,7 @@ class TestRPCFS(unittest.TestCase, FSTestCases, Thread
- sock.settimeout(.1)
- sock.connect(sa)
- sock.send(b("\n"))
-- except socket.error, e:
-+ except socket.error as e:
- pass
- finally:
- if sock is not None:
---- fs/tests/test_utils.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_utils.py
-@@ -16,11 +16,11 @@ class TestUtils(unittest.TestCase):
- fs.setcontents("foo/bar/fruit", b("apple"))
-
- def _check_fs(self, fs):
-- self.assert_(fs.isfile("f1"))
-- self.assert_(fs.isfile("f2"))
-- self.assert_(fs.isfile("f3"))
-- self.assert_(fs.isdir("foo/bar"))
-- self.assert_(fs.isfile("foo/bar/fruit"))
-+ self.assertTrue(fs.isfile("f1"))
-+ self.assertTrue(fs.isfile("f2"))
-+ self.assertTrue(fs.isfile("f3"))
-+ self.assertTrue(fs.isdir("foo/bar"))
-+ self.assertTrue(fs.isfile("foo/bar/fruit"))
- self.assertEqual(fs.getcontents("f1", "rb"), b("file 1"))
- self.assertEqual(fs.getcontents("f2", "rb"), b("file 2"))
- self.assertEqual(fs.getcontents("f3", "rb"), b("file 3"))
-@@ -61,7 +61,7 @@ class TestUtils(unittest.TestCase):
- fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
- utils.movedir((fs1, "from"), (fs2, "copy"))
-- self.assert_(not fs1.exists("from"))
-+ self.assertTrue(not fs1.exists("from"))
- self._check_fs(fs2.opendir("copy"))
-
- fs1 = TempFS()
-@@ -69,7 +69,7 @@ class TestUtils(unittest.TestCase):
- fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
- utils.movedir((fs1, "from"), (fs2, "copy"))
-- self.assert_(not fs1.exists("from"))
-+ self.assertTrue(not fs1.exists("from"))
- self._check_fs(fs2.opendir("copy"))
-
- def test_movedir_root(self):
-@@ -79,7 +79,7 @@ class TestUtils(unittest.TestCase):
- fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
- utils.movedir((fs1, "from"), fs2)
-- self.assert_(not fs1.exists("from"))
-+ self.assertTrue(not fs1.exists("from"))
- self._check_fs(fs2)
-
- fs1 = TempFS()
-@@ -87,7 +87,7 @@ class TestUtils(unittest.TestCase):
- fs1sub = fs1.makeopendir("from")
- self._make_fs(fs1sub)
- utils.movedir((fs1, "from"), fs2)
-- self.assert_(not fs1.exists("from"))
-+ self.assertTrue(not fs1.exists("from"))
- self._check_fs(fs2)
-
- def test_remove_all(self):
-@@ -101,15 +101,15 @@ class TestUtils(unittest.TestCase):
- fs.setcontents("foo/baz", b("baz"))
-
- utils.remove_all(fs, "foo/bar")
-- self.assert_(not fs.exists("foo/bar/fruit"))
-- self.assert_(fs.exists("foo/bar"))
-- self.assert_(fs.exists("foo/baz"))
-+ self.assertTrue(not fs.exists("foo/bar/fruit"))
-+ self.assertTrue(fs.exists("foo/bar"))
-+ self.assertTrue(fs.exists("foo/baz"))
- utils.remove_all(fs, "")
-- self.assert_(not fs.exists("foo/bar/fruit"))
-- self.assert_(not fs.exists("foo/bar/baz"))
-- self.assert_(not fs.exists("foo/baz"))
-- self.assert_(not fs.exists("foo"))
-- self.assert_(not fs.exists("f1"))
-- self.assert_(fs.isdirempty('/'))
-+ self.assertTrue(not fs.exists("foo/bar/fruit"))
-+ self.assertTrue(not fs.exists("foo/bar/baz"))
-+ self.assertTrue(not fs.exists("foo/baz"))
-+ self.assertTrue(not fs.exists("foo"))
-+ self.assertTrue(not fs.exists("f1"))
-+ self.assertTrue(fs.isdirempty('/'))
-
-
---- fs/tests/test_watch.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_watch.py
-@@ -71,7 +71,7 @@ class WatcherTestCases:
- for event in event_list:
- if isinstance(event,cls):
- if path is None or event.path == path:
-- for (k,v) in attrs.iteritems():
-+ for (k,v) in attrs.items():
- if getattr(event,k) != v:
- break
- else:
-@@ -98,7 +98,7 @@ class WatcherTestCases:
- self.assertEventOccurred(CREATED,"/hello")
- self.clearCapturedEvents()
- old_atime = self.fs.getinfo("hello").get("accessed_time")
-- self.assertEquals(self.fs.getcontents("hello"), b("hello world"))
-+ self.assertEqual(self.fs.getcontents("hello"), b("hello world"))
- if not isinstance(self.watchfs,PollingWatchableFS):
- # Help it along by updting the atime.
- # TODO: why is this necessary?
-@@ -113,7 +113,7 @@ class WatcherTestCases:
- # update it if it's too old, or don't update it at all!
- # Try to force the issue, wait for it to change, but eventually
- # give up and bail out.
-- for i in xrange(10):
-+ for i in range(10):
- if self.fs.getinfo("hello").get("accessed_time") != old_atime:
- if not self.checkEventOccurred(MODIFIED,"/hello"):
- self.assertEventOccurred(ACCESSED,"/hello")
-@@ -142,7 +142,7 @@ class WatcherTestCases:
- self.waitForEvents()
- for evt in events:
- assert isinstance(evt,MODIFIED)
-- self.assertEquals(evt.path,"/hello")
-+ self.assertEqual(evt.path,"/hello")
-
- def test_watch_single_file_remove(self):
- self.fs.makedir("testing")
-@@ -153,9 +153,9 @@ class WatcherTestCases:
- self.waitForEvents()
- self.fs.remove("testing/hello")
- self.waitForEvents()
-- self.assertEquals(len(events),1)
-+ self.assertEqual(len(events),1)
- assert isinstance(events[0],REMOVED)
-- self.assertEquals(events[0].path,"/testing/hello")
-+ self.assertEqual(events[0].path,"/testing/hello")
-
- def test_watch_iter_changes(self):
- changes = iter_changes(self.watchfs)
-@@ -195,9 +195,9 @@ class TestWatchers_TempFS(unittest.TestCase,FSTestCase
- watchfs = osfs.OSFS(self.fs.root_path)
- self.watchfs = ensure_watchable(watchfs,poll_interval=0.1)
- if watch_inotify is not None:
-- self.assertEquals(watchfs,self.watchfs)
-+ self.assertEqual(watchfs,self.watchfs)
- if watch_win32 is not None:
-- self.assertEquals(watchfs,self.watchfs)
-+ self.assertEqual(watchfs,self.watchfs)
-
- def tearDown(self):
- self.watchfs.close()
---- fs/tests/test_wrapfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_wrapfs.py
-@@ -26,7 +26,7 @@ class TestWrapFS(unittest.TestCase, FSTestCases, Threa
- #__test__ = False
-
- def setUp(self):
-- self.temp_dir = tempfile.mkdtemp(u"fstest")
-+ self.temp_dir = tempfile.mkdtemp("fstest")
- self.fs = wrapfs.WrapFS(osfs.OSFS(self.temp_dir))
-
- def tearDown(self):
-@@ -41,7 +41,7 @@ from fs.wrapfs.lazyfs import LazyFS
- class TestLazyFS(unittest.TestCase, FSTestCases, ThreadingTestCases):
-
- def setUp(self):
-- self.temp_dir = tempfile.mkdtemp(u"fstest")
-+ self.temp_dir = tempfile.mkdtemp("fstest")
- self.fs = LazyFS((osfs.OSFS,(self.temp_dir,)))
-
- def tearDown(self):
-@@ -63,13 +63,13 @@ class TestLimitSizeFS(TestWrapFS):
-
- def tearDown(self):
- remove_all(self.fs, "/")
-- self.assertEquals(self.fs.cur_size,0)
-+ self.assertEqual(self.fs.cur_size,0)
- super(TestLimitSizeFS,self).tearDown()
- self.fs.close()
-
- def test_storage_error(self):
- total_written = 0
-- for i in xrange(1024*2):
-+ for i in range(1024*2):
- try:
- total_written += 1030
- self.fs.setcontents("file %i" % i, b("C")*1030)
-@@ -85,11 +85,11 @@ from fs.wrapfs.hidedotfilesfs import HideDotFilesFS
- class TestHideDotFilesFS(unittest.TestCase):
-
- def setUp(self):
-- self.temp_dir = tempfile.mkdtemp(u"fstest")
-- open(os.path.join(self.temp_dir, u".dotfile"), 'w').close()
-- open(os.path.join(self.temp_dir, u"regularfile"), 'w').close()
-- os.mkdir(os.path.join(self.temp_dir, u".dotdir"))
-- os.mkdir(os.path.join(self.temp_dir, u"regulardir"))
-+ self.temp_dir = tempfile.mkdtemp("fstest")
-+ open(os.path.join(self.temp_dir, ".dotfile"), 'w').close()
-+ open(os.path.join(self.temp_dir, "regularfile"), 'w').close()
-+ os.mkdir(os.path.join(self.temp_dir, ".dotdir"))
-+ os.mkdir(os.path.join(self.temp_dir, "regulardir"))
- self.fs = HideDotFilesFS(osfs.OSFS(self.temp_dir))
-
- def tearDown(self):
-@@ -97,15 +97,15 @@ class TestHideDotFilesFS(unittest.TestCase):
- self.fs.close()
-
- def test_hidden(self):
-- self.assertEquals(len(self.fs.listdir(hidden=False)), 2)
-- self.assertEquals(len(list(self.fs.ilistdir(hidden=False))), 2)
-+ self.assertEqual(len(self.fs.listdir(hidden=False)), 2)
-+ self.assertEqual(len(list(self.fs.ilistdir(hidden=False))), 2)
-
- def test_nonhidden(self):
-- self.assertEquals(len(self.fs.listdir(hidden=True)), 4)
-- self.assertEquals(len(list(self.fs.ilistdir(hidden=True))), 4)
-+ self.assertEqual(len(self.fs.listdir(hidden=True)), 4)
-+ self.assertEqual(len(list(self.fs.ilistdir(hidden=True))), 4)
-
- def test_default(self):
-- self.assertEquals(len(self.fs.listdir()), 2)
-- self.assertEquals(len(list(self.fs.ilistdir())), 2)
-+ self.assertEqual(len(self.fs.listdir()), 2)
-+ self.assertEqual(len(list(self.fs.ilistdir())), 2)
-
-
---- fs/tests/test_xattr.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_xattr.py
-@@ -37,19 +37,19 @@ class XAttrTestCases:
-
- def test_list_xattrs(self):
- def do_list(p):
-- self.assertEquals(sorted(self.fs.listxattrs(p)),[])
-+ self.assertEqual(sorted(self.fs.listxattrs(p)),[])
- self.fs.setxattr(p,"xattr1","value1")
-- self.assertEquals(self.fs.getxattr(p,"xattr1"),"value1")
-- self.assertEquals(sorted(self.fs.listxattrs(p)),["xattr1"])
-- self.assertTrue(isinstance(self.fs.listxattrs(p)[0],unicode))
-+ self.assertEqual(self.fs.getxattr(p,"xattr1"),"value1")
-+ self.assertEqual(sorted(self.fs.listxattrs(p)),["xattr1"])
-+ self.assertTrue(isinstance(self.fs.listxattrs(p)[0],str))
- self.fs.setxattr(p,"attr2","value2")
-- self.assertEquals(sorted(self.fs.listxattrs(p)),["attr2","xattr1"])
-- self.assertTrue(isinstance(self.fs.listxattrs(p)[0],unicode))
-- self.assertTrue(isinstance(self.fs.listxattrs(p)[1],unicode))
-+ self.assertEqual(sorted(self.fs.listxattrs(p)),["attr2","xattr1"])
-+ self.assertTrue(isinstance(self.fs.listxattrs(p)[0],str))
-+ self.assertTrue(isinstance(self.fs.listxattrs(p)[1],str))
- self.fs.delxattr(p,"xattr1")
-- self.assertEquals(sorted(self.fs.listxattrs(p)),["attr2"])
-+ self.assertEqual(sorted(self.fs.listxattrs(p)),["attr2"])
- self.fs.delxattr(p,"attr2")
-- self.assertEquals(sorted(self.fs.listxattrs(p)),[])
-+ self.assertEqual(sorted(self.fs.listxattrs(p)),[])
- self.fs.setcontents("test.txt",b("hello"))
- do_list("test.txt")
- self.fs.makedir("mystuff")
-@@ -64,16 +64,16 @@ class XAttrTestCases:
- self.fs.makedir("stuff")
- self.fs.copy("a.txt","stuff/a.txt")
- self.assertTrue(self.fs.exists("stuff/a.txt"))
-- self.assertEquals(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue")
-- self.assertEquals(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue")
-- self.assertEquals(self.fs.getxattr("a.txt","myattr"),"myvalue")
-- self.assertEquals(self.fs.getxattr("a.txt","testattr"),"testvalue")
-+ self.assertEqual(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue")
-+ self.assertEqual(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue")
-+ self.assertEqual(self.fs.getxattr("a.txt","myattr"),"myvalue")
-+ self.assertEqual(self.fs.getxattr("a.txt","testattr"),"testvalue")
- self.fs.setxattr("stuff","dirattr","a directory")
- self.fs.copydir("stuff","stuff2")
-- self.assertEquals(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue")
-- self.assertEquals(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue")
-- self.assertEquals(self.fs.getxattr("stuff2","dirattr"),"a directory")
-- self.assertEquals(self.fs.getxattr("stuff","dirattr"),"a directory")
-+ self.assertEqual(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue")
-+ self.assertEqual(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue")
-+ self.assertEqual(self.fs.getxattr("stuff2","dirattr"),"a directory")
-+ self.assertEqual(self.fs.getxattr("stuff","dirattr"),"a directory")
-
- def test_move_xattrs(self):
- self.fs.setcontents("a.txt",b("content"))
-@@ -82,29 +82,29 @@ class XAttrTestCases:
- self.fs.makedir("stuff")
- self.fs.move("a.txt","stuff/a.txt")
- self.assertTrue(self.fs.exists("stuff/a.txt"))
-- self.assertEquals(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue")
-- self.assertEquals(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue")
-+ self.assertEqual(self.fs.getxattr("stuff/a.txt","myattr"),"myvalue")
-+ self.assertEqual(self.fs.getxattr("stuff/a.txt","testattr"),"testvalue")
- self.fs.setxattr("stuff","dirattr","a directory")
- self.fs.movedir("stuff","stuff2")
-- self.assertEquals(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue")
-- self.assertEquals(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue")
-- self.assertEquals(self.fs.getxattr("stuff2","dirattr"),"a directory")
-+ self.assertEqual(self.fs.getxattr("stuff2/a.txt","myattr"),"myvalue")
-+ self.assertEqual(self.fs.getxattr("stuff2/a.txt","testattr"),"testvalue")
-+ self.assertEqual(self.fs.getxattr("stuff2","dirattr"),"a directory")
-
- def test_remove_file(self):
- def listxattrs(path):
- return list(self.fs.listxattrs(path))
- # Check that xattrs aren't preserved after a file is removed
- self.fs.createfile("myfile")
-- self.assertEquals(listxattrs("myfile"),[])
-+ self.assertEqual(listxattrs("myfile"),[])
- self.fs.setxattr("myfile","testattr","testvalue")
-- self.assertEquals(listxattrs("myfile"),["testattr"])
-+ self.assertEqual(listxattrs("myfile"),["testattr"])
- self.fs.remove("myfile")
- self.assertRaises(ResourceNotFoundError,listxattrs,"myfile")
- self.fs.createfile("myfile")
-- self.assertEquals(listxattrs("myfile"),[])
-+ self.assertEqual(listxattrs("myfile"),[])
- self.fs.setxattr("myfile","testattr2","testvalue2")
-- self.assertEquals(listxattrs("myfile"),["testattr2"])
-- self.assertEquals(self.fs.getxattr("myfile","testattr2"),"testvalue2")
-+ self.assertEqual(listxattrs("myfile"),["testattr2"])
-+ self.assertEqual(self.fs.getxattr("myfile","testattr2"),"testvalue2")
- # Check that removing a file without xattrs still works
- self.fs.createfile("myfile2")
- self.fs.remove("myfile2")
-@@ -114,16 +114,16 @@ class XAttrTestCases:
- return list(self.fs.listxattrs(path))
- # Check that xattrs aren't preserved after a dir is removed
- self.fs.makedir("mydir")
-- self.assertEquals(listxattrs("mydir"),[])
-+ self.assertEqual(listxattrs("mydir"),[])
- self.fs.setxattr("mydir","testattr","testvalue")
-- self.assertEquals(listxattrs("mydir"),["testattr"])
-+ self.assertEqual(listxattrs("mydir"),["testattr"])
- self.fs.removedir("mydir")
- self.assertRaises(ResourceNotFoundError,listxattrs,"mydir")
- self.fs.makedir("mydir")
-- self.assertEquals(listxattrs("mydir"),[])
-+ self.assertEqual(listxattrs("mydir"),[])
- self.fs.setxattr("mydir","testattr2","testvalue2")
-- self.assertEquals(listxattrs("mydir"),["testattr2"])
-- self.assertEquals(self.fs.getxattr("mydir","testattr2"),"testvalue2")
-+ self.assertEqual(listxattrs("mydir"),["testattr2"])
-+ self.assertEqual(self.fs.getxattr("mydir","testattr2"),"testvalue2")
- # Check that removing a dir without xattrs still works
- self.fs.makedir("mydir2")
- self.fs.removedir("mydir2")
-@@ -149,7 +149,7 @@ class TestXAttr_TempFS(unittest.TestCase,FSTestCases,X
- except AttributeError:
- td = self.fs.wrapped_fs._temp_dir
- self.fs.close()
-- self.assert_(not os.path.exists(td))
-+ self.assertTrue(not os.path.exists(td))
-
- def check(self, p):
- try:
---- fs/tests/test_zipfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/tests/test_zipfs.py
-@@ -52,7 +52,7 @@ class TestReadZipFS(unittest.TestCase):
- return contents
-
- def check_contents(path, expected):
-- self.assert_(read_contents(path) == expected)
-+ self.assertTrue(read_contents(path) == expected)
- check_contents("a.txt", b("Hello, World!"))
- check_contents("1.txt", b("1"))
- check_contents("foo/bar/baz.txt", b("baz"))
-@@ -62,30 +62,30 @@ class TestReadZipFS(unittest.TestCase):
- return self.fs.getcontents(path, 'rb')
-
- def check_contents(path, expected):
-- self.assert_(read_contents(path) == expected)
-+ self.assertTrue(read_contents(path) == expected)
- check_contents("a.txt", b("Hello, World!"))
- check_contents("1.txt", b("1"))
- check_contents("foo/bar/baz.txt", b("baz"))
-
- def test_is(self):
-- self.assert_(self.fs.isfile('a.txt'))
-- self.assert_(self.fs.isfile('1.txt'))
-- self.assert_(self.fs.isfile('foo/bar/baz.txt'))
-- self.assert_(self.fs.isdir('foo'))
-- self.assert_(self.fs.isdir('foo/bar'))
-- self.assert_(self.fs.exists('a.txt'))
-- self.assert_(self.fs.exists('1.txt'))
-- self.assert_(self.fs.exists('foo/bar/baz.txt'))
-- self.assert_(self.fs.exists('foo'))
-- self.assert_(self.fs.exists('foo/bar'))
-+ self.assertTrue(self.fs.isfile('a.txt'))
-+ self.assertTrue(self.fs.isfile('1.txt'))
-+ self.assertTrue(self.fs.isfile('foo/bar/baz.txt'))
-+ self.assertTrue(self.fs.isdir('foo'))
-+ self.assertTrue(self.fs.isdir('foo/bar'))
-+ self.assertTrue(self.fs.exists('a.txt'))
-+ self.assertTrue(self.fs.exists('1.txt'))
-+ self.assertTrue(self.fs.exists('foo/bar/baz.txt'))
-+ self.assertTrue(self.fs.exists('foo'))
-+ self.assertTrue(self.fs.exists('foo/bar'))
-
- def test_listdir(self):
-
- def check_listing(path, expected):
- dir_list = self.fs.listdir(path)
-- self.assert_(sorted(dir_list) == sorted(expected))
-+ self.assertTrue(sorted(dir_list) == sorted(expected))
- for item in dir_list:
-- self.assert_(isinstance(item, unicode))
-+ self.assertTrue(isinstance(item, str))
- check_listing('/', ['a.txt', '1.txt', 'foo', 'b.txt'])
- check_listing('foo', ['second.txt', 'bar'])
- check_listing('foo/bar', ['baz.txt'])
-@@ -108,7 +108,7 @@ class TestWriteZipFS(unittest.TestCase):
-
- makefile("a.txt", b("Hello, World!"))
- makefile("b.txt", b("b"))
-- makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-+ makefile("\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
- makefile("foo/bar/baz.txt", b("baz"))
- makefile("foo/second.txt", b("hai"))
-
-@@ -119,7 +119,7 @@ class TestWriteZipFS(unittest.TestCase):
-
- def test_valid(self):
- zf = zipfile.ZipFile(self.temp_filename, "r")
-- self.assert_(zf.testzip() is None)
-+ self.assertTrue(zf.testzip() is None)
- zf.close()
-
- def test_creation(self):
-@@ -134,7 +134,7 @@ class TestWriteZipFS(unittest.TestCase):
- check_contents("b.txt", b("b"))
- check_contents("foo/bar/baz.txt", b("baz"))
- check_contents("foo/second.txt", b("hai"))
-- check_contents(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-+ check_contents("\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-
-
- class TestAppendZipFS(TestWriteZipFS):
-@@ -159,7 +159,7 @@ class TestAppendZipFS(TestWriteZipFS):
- zip_fs = zipfs.ZipFS(self.temp_filename, 'a')
-
- makefile("foo/bar/baz.txt", b("baz"))
-- makefile(u"\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
-+ makefile("\N{GREEK SMALL LETTER ALPHA}/\N{GREEK CAPITAL LETTER OMEGA}.txt", b("this is the alpha and the omega"))
- makefile("foo/second.txt", b("hai"))
-
- zip_fs.close()
---- fs/utils.py.orig 2015-11-13 23:12:33 UTC
-+++ fs/utils.py
-@@ -384,7 +384,7 @@ def isfile(fs,path,info=None):
- def contains_files(fs, path='/'):
- """Check if there are any files in the filesystem"""
- try:
-- iter(fs.walkfiles(path)).next()
-+ next(iter(fs.walkfiles(path)))
- except StopIteration:
- return False
- return True
-@@ -426,7 +426,7 @@ def find_duplicates(fs,
- for path in compare_paths:
- file_sizes[fs.getsize(path)].append(path)
-
-- size_duplicates = [paths for paths in file_sizes.itervalues() if len(paths) > 1]
-+ size_duplicates = [paths for paths in file_sizes.values() if len(paths) > 1]
-
- signatures = defaultdict(list)
-
-@@ -453,7 +453,7 @@ def find_duplicates(fs,
- # If 'quick' is True then the signature comparison is adequate (although
- # it may result in false positives)
- if quick:
-- for paths in signatures.itervalues():
-+ for paths in signatures.values():
- if len(paths) > 1:
- yield paths
- return
-@@ -482,7 +482,7 @@ def find_duplicates(fs,
- # byte by byte.
- # All path groups in this loop have the same size and same signature, so are
- # highly likely to be identical.
-- for paths in signatures.itervalues():
-+ for paths in signatures.values():
-
- while len(paths) > 1:
-
-@@ -535,7 +535,7 @@ def print_fs(fs,
- if file_out is None:
- file_out = sys.stdout
-
-- file_encoding = getattr(file_out, 'encoding', u'utf-8') or u'utf-8'
-+ file_encoding = getattr(file_out, 'encoding', 'utf-8') or 'utf-8'
- file_encoding = file_encoding.upper()
-
- if terminal_colors is None:
-@@ -546,44 +546,44 @@ def print_fs(fs,
-
- def write(line):
- if PY3:
-- file_out.write((line + u'\n'))
-+ file_out.write((line + '\n'))
- else:
-- file_out.write((line + u'\n').encode(file_encoding, 'replace'))
-+ file_out.write((line + '\n').encode(file_encoding, 'replace'))
-
- def wrap_prefix(prefix):
- if not terminal_colors:
- return prefix
-- return u'\x1b[32m%s\x1b[0m' % prefix
-+ return '\x1b[32m%s\x1b[0m' % prefix
-
- def wrap_dirname(dirname):
- if not terminal_colors:
- return dirname
-- return u'\x1b[1;34m%s\x1b[0m' % dirname
-+ return '\x1b[1;34m%s\x1b[0m' % dirname
-
- def wrap_error(msg):
- if not terminal_colors:
- return msg
-- return u'\x1b[31m%s\x1b[0m' % msg
-+ return '\x1b[31m%s\x1b[0m' % msg
-
- def wrap_filename(fname):
- if not terminal_colors:
- return fname
-- if fname.startswith(u'.'):
-- fname = u'\x1b[33m%s\x1b[0m' % fname
-+ if fname.startswith('.'):
-+ fname = '\x1b[33m%s\x1b[0m' % fname
- return fname
- dircount = [0]
- filecount = [0]
- def print_dir(fs, path, levels=[]):
- if file_encoding == 'UTF-8' and terminal_colors:
--            char_vertline = u'│'
--            char_newnode = u'├'
--            char_line = u'──'
--            char_corner = u'╰'
-+            char_vertline = '│'
-+            char_newnode = '├'
-+            char_line = '──'
-+            char_corner = '╰'
- else:
-- char_vertline = u'|'
-- char_newnode = u'|'
-- char_line = u'--'
-- char_corner = u'`'
-+ char_vertline = '|'
-+ char_newnode = '|'
-+ char_line = '--'
-+ char_corner = '`'
-
- try:
- dirs = fs.listdir(path, dirs_only=True)
-@@ -593,18 +593,18 @@ def print_fs(fs,
- files = fs.listdir(path, files_only=True, wildcard=files_wildcard)
- dir_listing = ( [(True, p) for p in dirs] +
- [(False, p) for p in files] )
-- except Exception, e:
-+ except Exception as e:
- prefix = ''.join([(char_vertline + ' ', ' ')[last] for last in levels]) + ' '
-- write(wrap_prefix(prefix[:-1] + ' ') + wrap_error(u"unable to retrieve directory list (%s) ..." % str(e)))
-+ write(wrap_prefix(prefix[:-1] + ' ') + wrap_error("unable to retrieve directory list (%s) ..." % str(e)))
- return 0
-
- if hide_dotfiles:
- dir_listing = [(isdir, p) for isdir, p in dir_listing if not p.startswith('.')]
-
- if dirs_first:
-- dir_listing.sort(key = lambda (isdir, p):(not isdir, p.lower()))
-+ dir_listing.sort(key = lambda isdir_p:(not isdir_p[0], isdir_p[1].lower()))
- else:
-- dir_listing.sort(key = lambda (isdir, p):p.lower())
-+ dir_listing.sort(key = lambda isdir_p1:isdir_p1[1].lower())
-
- for i, (is_dir, item) in enumerate(dir_listing):
- if is_dir:
-@@ -685,9 +685,9 @@ if __name__ == "__main__":
- t1.tree()
-
- t2 = TempFS()
-- print t2.listdir()
-+ print(t2.listdir())
- movedir(t1, t2)
-
-- print t2.listdir()
-+ print(t2.listdir())
- t1.tree()
- t2.tree()
---- fs/watch.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/watch.py
-@@ -32,7 +32,7 @@ an iterator over the change events.
- import sys
- import weakref
- import threading
--import Queue
-+import queue
- import traceback
-
- from fs.path import *
-@@ -54,10 +54,10 @@ class EVENT(object):
- self.path = path
-
- def __str__(self):
-- return unicode(self).encode("utf8")
-+ return str(self).encode("utf8")
-
- def __unicode__(self):
-- return u"<fs.watch.%s object (path='%s') at %s>" % (self.__class__.__name__,self.path,hex(id(self)))
-+ return "<fs.watch.%s object (path='%s') at %s>" % (self.__class__.__name__,self.path,hex(id(self)))
-
- def clone(self,fs=None,path=None):
- if fs is None:
-@@ -102,7 +102,7 @@ class MOVED_DST(EVENT):
- self.source = source
-
- def __unicode__(self):
-- return u"<fs.watch.%s object (path=%r,src=%r) at %s>" % (self.__class__.__name__,self.path,self.source,hex(id(self)))
-+ return "<fs.watch.%s object (path=%r,src=%r) at %s>" % (self.__class__.__name__,self.path,self.source,hex(id(self)))
-
- def clone(self,fs=None,path=None,source=None):
- evt = super(MOVED_DST,self).clone(fs,path)
-@@ -120,7 +120,7 @@ class MOVED_SRC(EVENT):
- self.destination = destination
-
- def __unicode__(self):
-- return u"<fs.watch.%s object (path=%r,dst=%r) at %s>" % (self.__class__.__name__,self.path,self.destination,hex(id(self)))
-+ return "<fs.watch.%s object (path=%r,dst=%r) at %s>" % (self.__class__.__name__,self.path,self.destination,hex(id(self)))
-
- def clone(self,fs=None,path=None,destination=None):
- evt = super(MOVED_SRC,self).clone(fs,path)
-@@ -182,7 +182,7 @@ class Watcher(object):
- try:
- self.callback(event)
- except Exception:
-- print >>sys.stderr, "error in FS watcher callback", self.callback
-+ print("error in FS watcher callback", self.callback, file=sys.stderr)
- traceback.print_exc()
-
-
-@@ -213,7 +213,7 @@ class WatchableFSMixin(FS):
- if isinstance(watcher_or_callback,Watcher):
- self._watchers[watcher_or_callback.path].remove(watcher_or_callback)
- else:
-- for watchers in self._watchers.itervalues():
-+ for watchers in self._watchers.values():
- for i,watcher in enumerate(watchers):
- if watcher.callback is watcher_or_callback:
- del watchers[i]
-@@ -221,7 +221,7 @@ class WatchableFSMixin(FS):
-
- def _find_watchers(self,callback):
- """Find watchers registered with the given callback."""
-- for watchers in self._watchers.itervalues():
-+ for watchers in self._watchers.values():
- for watcher in watchers:
- if watcher.callback is callback:
- yield watcher
-@@ -235,7 +235,7 @@ class WatchableFSMixin(FS):
- if path is None:
- path = event.path
- if path is None:
-- for watchers in self._watchers.itervalues():
-+ for watchers in self._watchers.values():
- for watcher in watchers:
- watcher.handle_event(event)
- else:
-@@ -443,7 +443,7 @@ class WatchableFS(WatchableFSMixin,WrapFS):
-
- def _post_move(self,src,dst,data):
- (src_paths,dst_paths) = data
-- for src_path,isdir in sorted(src_paths.items(),reverse=True):
-+ for src_path,isdir in sorted(list(src_paths.items()),reverse=True):
- path = pathjoin(src,src_path)
- self.notify_watchers(REMOVED,path)
-
-@@ -554,7 +554,7 @@ class PollingWatchableFS(WatchableFS):
- else:
- was_accessed = False
- was_modified = False
-- for (k,v) in new_info.iteritems():
-+ for (k,v) in new_info.items():
- if k not in old_info:
- was_modified = True
- break
-@@ -612,7 +612,7 @@ class iter_changes(object):
-
- def __init__(self,fs=None,path="/",events=None,**kwds):
- self.closed = False
-- self._queue = Queue.Queue()
-+ self._queue = queue.Queue()
- self._watching = set()
- if fs is not None:
- self.add_watcher(fs,path,events,**kwds)
-@@ -628,7 +628,7 @@ class iter_changes(object):
- raise StopIteration
- try:
- event = self._queue.get(timeout=timeout)
-- except Queue.Empty:
-+ except queue.Empty:
- raise StopIteration
- if event is None:
- raise StopIteration
---- fs/wrapfs/__init__.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/wrapfs/__init__.py
-@@ -32,12 +32,12 @@ def rewrite_errors(func):
- def wrapper(self,*args,**kwds):
- try:
- return func(self,*args,**kwds)
-- except ResourceError, e:
-+ except ResourceError as e:
- (exc_type,exc_inst,tb) = sys.exc_info()
- try:
- e.path = self._decode(e.path)
- except (AttributeError, ValueError, TypeError):
-- raise e, None, tb
-+ raise e.with_traceback(tb)
- raise
- return wrapper
-
-@@ -119,7 +119,7 @@ class WrapFS(FS):
- return (mode, mode)
-
- def __unicode__(self):
-- return u"<%s: %s>" % (self.__class__.__name__,self.wrapped_fs,)
-+ return "<%s: %s>" % (self.__class__.__name__,self.wrapped_fs,)
-
- #def __str__(self):
- # return unicode(self).encode(sys.getdefaultencoding(),"replace")
---- fs/wrapfs/debugfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/wrapfs/debugfs.py
-@@ -66,7 +66,7 @@ class DebugFS(object):
- logger.log(level, message)
-
- def __parse_param(self, value):
-- if isinstance(value, basestring):
-+ if isinstance(value, str):
- if len(value) > 60:
- value = "%s ... (length %d)" % (repr(value[:60]), len(value))
- else:
-@@ -75,7 +75,7 @@ class DebugFS(object):
- value = "%s (%d items)" % (repr(value[:3]), len(value))
- elif isinstance(value, dict):
- items = {}
-- for k, v in value.items()[:3]:
-+ for k, v in list(value.items())[:3]:
- items[k] = v
- value = "%s (%d items)" % (repr(items), len(value))
- else:
-@@ -84,7 +84,7 @@ class DebugFS(object):
-
- def __parse_args(self, *arguments, **kwargs):
- args = [self.__parse_param(a) for a in arguments]
-- for k, v in kwargs.items():
-+ for k, v in list(kwargs.items()):
- args.append("%s=%s" % (k, self.__parse_param(v)))
-
- args = ','.join(args)
-@@ -105,10 +105,10 @@ class DebugFS(object):
-
- try:
- attr = getattr(self.__wrapped_fs, key)
-- except AttributeError, e:
-+ except AttributeError as e:
- self.__log(DEBUG, "Asking for not implemented method %s" % key)
- raise e
-- except Exception, e:
-+ except Exception as e:
- self.__log(CRITICAL, "Exception %s: %s" % \
- (e.__class__.__name__, str(e)))
- raise e
-@@ -122,19 +122,19 @@ class DebugFS(object):
- try:
- value = attr(*args, **kwargs)
- self.__report("Call method", key, value, *args, **kwargs)
-- except FSError, e:
-+ except FSError as e:
- self.__log(ERROR, "Call method %s%s -> Exception %s: %s" % \
- (key, self.__parse_args(*args, **kwargs), \
- e.__class__.__name__, str(e)))
- (exc_type,exc_inst,tb) = sys.exc_info()
-- raise e, None, tb
-- except Exception, e:
-+ raise e.with_traceback(tb)
-+ except Exception as e:
- self.__log(CRITICAL,
- "Call method %s%s -> Non-FS exception %s: %s" %\
- (key, self.__parse_args(*args, **kwargs), \
- e.__class__.__name__, str(e)))
- (exc_type,exc_inst,tb) = sys.exc_info()
-- raise e, None, tb
-+ raise e.with_traceback(tb)
- return value
-
- if self.__verbose:
---- fs/wrapfs/hidedotfilesfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/wrapfs/hidedotfilesfs.py
-@@ -87,7 +87,7 @@ class HideDotFilesFS(WrapFS):
- path = normpath(path)
- iter_dir = iter(self.listdir(path,hidden=True))
- try:
-- iter_dir.next()
-+ next(iter_dir)
- except StopIteration:
- return True
- return False
---- fs/wrapfs/lazyfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/wrapfs/lazyfs.py
-@@ -39,14 +39,14 @@ class LazyFS(WrapFS):
- # It appears that python2.5 has trouble printing out
- # classes that define a __unicode__ method.
- try:
-- return u"<LazyFS: %s>" % (self._fsclass,)
-+ return "<LazyFS: %s>" % (self._fsclass,)
- except TypeError:
- try:
-- return u"<LazyFS: %s>" % (self._fsclass.__name__,)
-+ return "<LazyFS: %s>" % (self._fsclass.__name__,)
- except AttributeError:
-- return u"<LazyFS: <unprintable>>"
-+ return "<LazyFS: <unprintable>>"
- else:
-- return u"<LazyFS: %s>" % (wrapped_fs,)
-+ return "<LazyFS: %s>" % (wrapped_fs,)
-
- def __getstate__(self):
- state = super(LazyFS,self).__getstate__()
---- fs/wrapfs/limitsizefs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/wrapfs/limitsizefs.py
-@@ -9,7 +9,7 @@ total size of files stored in the wrapped FS.
-
- """
-
--from __future__ import with_statement
-+
-
- from fs.errors import *
- from fs.path import *
---- fs/wrapfs/subfs.py.orig 2022-03-04 17:14:43 UTC
-+++ fs/wrapfs/subfs.py
-@@ -34,7 +34,7 @@ class SubFS(WrapFS):
- return '<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/'))
-
- def __unicode__(self):
-- return u'<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/'))
-+ return '<SubFS: %s/%s>' % (self.wrapped_fs, self.sub_dir.lstrip('/'))
-
- def __repr__(self):
- return "SubFS(%r, %r)" % (self.wrapped_fs, self.sub_dir)
---- fs/xattrs.py.orig 2015-04-12 17:24:29 UTC
-+++ fs/xattrs.py
-@@ -23,7 +23,7 @@ if it has native xattr support, and return a wrapped v
-
- import sys
- try:
-- import cPickle as pickle
-+ import pickle as pickle
- except ImportError:
- import pickle
-
-@@ -104,7 +104,7 @@ class SimulateXAttr(WrapFS):
- """Set an extended attribute on the given path."""
- if not self.exists(path):
- raise ResourceNotFoundError(path)
-- key = unicode(key)
-+ key = str(key)
- attrs = self._get_attr_dict(path)
- attrs[key] = str(value)
- self._set_attr_dict(path, attrs)
-@@ -133,7 +133,7 @@ class SimulateXAttr(WrapFS):
- """List all the extended attribute keys set on the given path."""
- if not self.exists(path):
- raise ResourceNotFoundError(path)
-- return self._get_attr_dict(path).keys()
-+ return list(self._get_attr_dict(path).keys())
-
- def _encode(self,path):
- """Prevent requests for operations on .xattr files."""
-@@ -189,7 +189,7 @@ class SimulateXAttr(WrapFS):
- d_attr_file = self._get_attr_path(dst)
- try:
- self.wrapped_fs.copy(s_attr_file,d_attr_file,overwrite=True)
-- except ResourceNotFoundError,e:
-+ except ResourceNotFoundError as e:
- pass
-
- def move(self,src,dst,**kwds):
---- fs/zipfs.py.orig 2015-04-12 17:25:37 UTC
-+++ fs/zipfs.py
-@@ -16,9 +16,9 @@ from fs.filelike import StringIO
- from fs import iotools
-
- from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED, BadZipfile, LargeZipFile
--from memoryfs import MemoryFS
-+from .memoryfs import MemoryFS
-
--import tempfs
-+from . import tempfs
-
- from six import PY3
-
-@@ -74,7 +74,7 @@ class _ExceptionProxy(object):
- def __setattr__(self, name, value):
- raise ValueError("Zip file has been closed")
-
-- def __nonzero__(self):
-+ def __bool__(self):
- return False
-
-
-@@ -117,7 +117,7 @@ class ZipFS(FS):
- self.zip_mode = mode
- self.encoding = encoding
-
-- if isinstance(zip_file, basestring):
-+ if isinstance(zip_file, str):
- zip_file = os.path.expanduser(os.path.expandvars(zip_file))
- zip_file = os.path.normpath(os.path.abspath(zip_file))
- self._zip_file_string = True
-@@ -126,10 +126,10 @@ class ZipFS(FS):
-
- try:
- self.zf = ZipFile(zip_file, mode, compression_type, allow_zip_64)
-- except BadZipfile, bzf:
-+ except BadZipfile as bzf:
- raise ZipOpenError("Not a zip file or corrupt (%s)" % str(zip_file),
- details=bzf)
-- except IOError, ioe:
-+ except IOError as ioe:
- if str(ioe).startswith('[Errno 22] Invalid argument'):
- raise ZipOpenError("Not a zip file or corrupt (%s)" % str(zip_file),
- details=ioe)
-@@ -151,7 +151,7 @@ class ZipFS(FS):
- return "<ZipFS: %s>" % self.zip_path
-
- def __unicode__(self):
-- return u"<ZipFS: %s>" % self.zip_path
-+ return "<ZipFS: %s>" % self.zip_path
-
- def _decode_path(self, path):
- if PY3:
-@@ -280,7 +280,7 @@ class ZipFS(FS):
- try:
- zi = self.zf.getinfo(self._encode_path(path))
- zinfo = dict((attrib, getattr(zi, attrib)) for attrib in dir(zi) if not attrib.startswith('_'))
-- for k, v in zinfo.iteritems():
-+ for k, v in zinfo.items():
- if callable(v):
- zinfo[k] = v()
- except KeyError:
---- setup.py.orig 2015-11-14 11:44:01 UTC
-+++ setup.py
-@@ -38,8 +38,6 @@ with open('README.txt', 'r') as f:
-
-
- extra = {}
--if PY3:
-- extra["use_2to3"] = True
-
- setup(install_requires=['setuptools', 'six'],
- name='fs',
diff --git a/devel/py-fs/pkg-descr b/devel/py-fs/pkg-descr
deleted file mode 100644
index 712dede347d6..000000000000
--- a/devel/py-fs/pkg-descr
+++ /dev/null
@@ -1,8 +0,0 @@
-Pyfilesystem is a Python module that provides a simplified common interface to
-many types of filesystem. Filesystems exposed via Pyfilesystem can also be
-served over the network, or 'mounted' on the native filesystem.
-
-Pyfilesystem simplifies working directories and paths, even if you only intend
-to work with local files. Differences in path formats between platforms are
-abstracted away, and you can write code that sand-boxes any changes to a given
-directory.
diff --git a/devel/py-fs2/Makefile b/devel/py-fs2/Makefile
deleted file mode 100644
index 352ddbf36525..000000000000
--- a/devel/py-fs2/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-PORTNAME= fs
-PORTVERSION= 2.4.16
-CATEGORIES= devel
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-PKGNAMESUFFIX= 2
-
-MAINTAINER= bofh@FreeBSD.org
-COMMENT= Python Filesystem abstraction layer, version 2.x
-WWW= https://www.pyfilesystem.org/
-
-LICENSE= MIT
-LICENSE_FILE= ${WRKSRC}/LICENSE
-
-RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}appdirs>=1.4.3:devel/py-appdirs@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}pytz>0:devel/py-pytz@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}six>=1.10:devel/py-six@${PY_FLAVOR}
-
-USES= python
-USE_PYTHON= autoplist concurrent distutils
-
-NO_ARCH= yes
-
-PORTSCOUT= limit:^2\.
-
-.include <bsd.port.mk>
diff --git a/devel/py-fs2/distinfo b/devel/py-fs2/distinfo
deleted file mode 100644
index 0aee290aa7cd..000000000000
--- a/devel/py-fs2/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1681495093
-SHA256 (fs-2.4.16.tar.gz) = ae97c7d51213f4b70b6a958292530289090de3a7e15841e108fbe144f069d313
-SIZE (fs-2.4.16.tar.gz) = 187441
diff --git a/devel/py-fs2/pkg-descr b/devel/py-fs2/pkg-descr
deleted file mode 100644
index 6807f41b753b..000000000000
--- a/devel/py-fs2/pkg-descr
+++ /dev/null
@@ -1,5 +0,0 @@
-Filesystem Abstraction for Python. Work with files and directories in archives,
-memory, the cloud etc. as easily as your local drive. Write code now, decide
-later where the data will be stored; unit test without writing real files;
-upload files to the cloud without learning a new API; sandbox your file writing
-code; etc.
diff --git a/devel/py-fsspec-xrootd/Makefile b/devel/py-fsspec-xrootd/Makefile
deleted file mode 100644
index d48487387b00..000000000000
--- a/devel/py-fsspec-xrootd/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-PORTNAME= fsspec-xrootd
-PORTVERSION= 0.4.0
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-DISTNAME= fsspec_xrootd-${PORTVERSION}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= XRootD implementation for fsspec
-WWW= https://coffeateam.github.io/fsspec-xrootd/ \
- https://github.com/CoffeaTeam/fsspec-xrootd
-
-LICENSE= BSD3CLAUSE
-LICENSE_FILE= ${WRKSRC}/LICENSE
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=42:devel/py-setuptools@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}setuptools-scm>=3.4:devel/py-setuptools-scm@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec>=0:devel/py-fsspec@${PY_FLAVOR}
-
-USES= python
-USE_PYTHON= autoplist concurrent pep517
-
-NO_ARCH= yes
-
-.include <bsd.port.mk>
diff --git a/devel/py-fsspec-xrootd/distinfo b/devel/py-fsspec-xrootd/distinfo
deleted file mode 100644
index 2d8ab0f7585e..000000000000
--- a/devel/py-fsspec-xrootd/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1725559717
-SHA256 (fsspec_xrootd-0.4.0.tar.gz) = d7f124430d26ab9139d33bc50fa8abfde3624db5dcaa5c18f56af9bf17f16f13
-SIZE (fsspec_xrootd-0.4.0.tar.gz) = 23442
diff --git a/devel/py-fsspec-xrootd/pkg-descr b/devel/py-fsspec-xrootd/pkg-descr
deleted file mode 100644
index cd72b8510731..000000000000
--- a/devel/py-fsspec-xrootd/pkg-descr
+++ /dev/null
@@ -1,3 +0,0 @@
-To allow fsspec to use XRootD accessible storage systems, install fsspec-xrootd
-alongside fsspec and have easy access to files stored on XRootD servers. Once
-installed, fsspec will be able to work with urls with the 'root' protocol.
diff --git a/devel/py-fsspec/Makefile b/devel/py-fsspec/Makefile
deleted file mode 100644
index cbf695c4e698..000000000000
--- a/devel/py-fsspec/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-PORTNAME= fsspec
-PORTVERSION= 2024.10.0
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= File-system specification
-WWW= https://filesystem-spec.readthedocs.io/en/latest/ \
- https://github.com/fsspec/filesystem_spec
-
-LICENSE= BSD3CLAUSE
-LICENSE_FILE= ${WRKSRC}/LICENSE
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}hatch-vcs>=0:devel/py-hatch-vcs@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}hatchling>=0:devel/py-hatchling@${PY_FLAVOR}
-
-USES= python
-USE_PYTHON= autoplist concurrent pep517
-
-NO_ARCH= yes
-
-OPTIONS_DEFINE= GITHUB
-GITHUB_DESC= GitHub file system
-
-GITHUB_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}requests>=0:www/py-requests@${PY_FLAVOR}
-
-.include <bsd.port.mk>
diff --git a/devel/py-fsspec/distinfo b/devel/py-fsspec/distinfo
deleted file mode 100644
index 09aee0a26075..000000000000
--- a/devel/py-fsspec/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1729653348
-SHA256 (fsspec-2024.10.0.tar.gz) = eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493
-SIZE (fsspec-2024.10.0.tar.gz) = 286853
diff --git a/devel/py-fsspec/pkg-descr b/devel/py-fsspec/pkg-descr
deleted file mode 100644
index 7d23085ccff9..000000000000
--- a/devel/py-fsspec/pkg-descr
+++ /dev/null
@@ -1,3 +0,0 @@
-Filesystem Spec is a project to unify various projects and classes to work with
-remote filesystems and file-system-like abstractions using a standard pythonic
-interface.
diff --git a/devel/py-fusepy/Makefile b/devel/py-fusepy/Makefile
deleted file mode 100644
index 23f039b8e04f..000000000000
--- a/devel/py-fusepy/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-PORTNAME= fusepy
-PORTVERSION= 3.0.1
-PORTREVISION= 1
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= Simple ctypes bindings for FUSE
-WWW= https://github.com/fusepy/fusepy
-
-LICENSE= ISCL
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=0:devel/py-setuptools@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-
-USES= fuse python
-USE_PYTHON= autoplist concurrent pep517
-
-NO_ARCH= yes
-
-CONFLICTS_INSTALL= ${PYTHON_PKGNAMEPREFIX}fusefs # fuse.py
-
-.include <bsd.port.mk>
diff --git a/devel/py-fusepy/distinfo b/devel/py-fusepy/distinfo
deleted file mode 100644
index ef6c8fc3299e..000000000000
--- a/devel/py-fusepy/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1537143232
-SHA256 (fusepy-3.0.1.tar.gz) = 72ff783ec2f43de3ab394e3f7457605bf04c8cf288a2f4068b4cde141d4ee6bd
-SIZE (fusepy-3.0.1.tar.gz) = 11519
diff --git a/devel/py-fusepy/pkg-descr b/devel/py-fusepy/pkg-descr
deleted file mode 100644
index 898b2d3e9be3..000000000000
--- a/devel/py-fusepy/pkg-descr
+++ /dev/null
@@ -1,2 +0,0 @@
-fusepy is a Python module that provides a simple interface to FUSE and MacFUSE.
-It's just one file and is implemented using ctypes.
diff --git a/devel/py-gcsfs/Makefile b/devel/py-gcsfs/Makefile
deleted file mode 100644
index 2bb0c79f14ed..000000000000
--- a/devel/py-gcsfs/Makefile
+++ /dev/null
@@ -1,37 +0,0 @@
-PORTNAME= gcsfs
-PORTVERSION= 2024.10.0
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= Convenient Filesystem interface over GCS
-WWW= https://gcsfs.readthedocs.io/en/latest/ \
- https://github.com/fsspec/gcsfs
-
-LICENSE= BSD3CLAUSE
-LICENSE_FILE= ${WRKSRC}/LICENSE.txt
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=0:devel/py-setuptools@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}aiohttp>=0:www/py-aiohttp@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}decorator>=4.1.2:devel/py-decorator@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}fsspec>=${PORTVERSION}<${PORTVERSION}_99:devel/py-fsspec@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}google-auth-oauthlib>=0:security/py-google-auth-oauthlib@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}google-auth>=1.2:security/py-google-auth@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}google-cloud-storage>=0:www/py-google-cloud-storage@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}requests>=0:www/py-requests@${PY_FLAVOR}
-
-USES= python
-USE_PYTHON= autoplist concurrent pep517
-
-NO_ARCH= yes
-
-OPTIONS_DEFINE= GCSFUSE CRC
-GCSFUSE_DESC= Fuse support
-CRC_DESC= CRC support
-
-GCSFUSE_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}crcmod>=0:devel/py-crcmod@${PY_FLAVOR}
-CRC_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fusepy>=0:devel/py-fusepy@${PY_FLAVOR}
-
-.include <bsd.port.mk>
diff --git a/devel/py-gcsfs/distinfo b/devel/py-gcsfs/distinfo
deleted file mode 100644
index e0a97da9c6a0..000000000000
--- a/devel/py-gcsfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1729653350
-SHA256 (gcsfs-2024.10.0.tar.gz) = 5df54cfe568e8fdeea5aafa7fed695cdc69a9a674e991ca8c1ce634f5df1d314
-SIZE (gcsfs-2024.10.0.tar.gz) = 79588
diff --git a/devel/py-gcsfs/pkg-descr b/devel/py-gcsfs/pkg-descr
deleted file mode 100644
index 781572993acc..000000000000
--- a/devel/py-gcsfs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Pythonic file-system for Google Cloud Storage
diff --git a/devel/py-libioc/Makefile b/devel/py-libioc/Makefile
index e7e50ee7eac8..45c68d867b61 100644
--- a/devel/py-libioc/Makefile
+++ b/devel/py-libioc/Makefile
@@ -16,7 +16,7 @@ LICENSE_FILE= ${WRKSRC}/LICENSE.txt
BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}pip>0:devel/py-pip@${PY_FLAVOR}
RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}gitpython>0:devel/py-gitpython@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}libzfs>0:devel/py-libzfs@${PY_FLAVOR} \
+ ${PYTHON_PKGNAMEPREFIX}libzfs>0:filesystems/py-libzfs@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}sysctl>0:devel/py-sysctl@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}ucl>0:textproc/py-ucl@${PY_FLAVOR} \
ca_root_nss>0:security/ca_root_nss \
diff --git a/devel/py-libzfs/Makefile b/devel/py-libzfs/Makefile
deleted file mode 100644
index 2db1495ec1c6..000000000000
--- a/devel/py-libzfs/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-PORTNAME= libzfs
-PORTVERSION= 1.1.2023020700
-CATEGORIES= devel python
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= freqlabs@FreeBSD.org
-COMMENT= Python libzfs bindings
-WWW= https://github.com/freenas/py-libzfs
-
-LICENSE= BSD2CLAUSE
-LICENSE_FILE= ${WRKSRC}/LICENSE
-
-USE_GITHUB= yes
-GH_ACCOUNT= truenas
-GH_PROJECT= py-libzfs
-GH_TAGNAME= c1bd4a0
-
-HAS_CONFIGURE= yes
-USES= compiler:c11 python
-USE_PYTHON= autoplist distutils cython
-
-CONFIGURE_ENV= freebsd_src=${SRC_BASE}
-MAKE_ENV= freebsd_src=${SRC_BASE}
-
-.include <bsd.port.options.mk>
-
-.if ${OPSYS} == FreeBSD && (${OSVERSION} >= 1500018 || (${OSVERSION} >= 1401000 && ${OSVERSION} < 1500000))
-EXTRA_PATCHES+= ${FILESDIR}/extra-zpool-add.patch
-.endif
-
-.if !exists(${SRC_BASE}/sys/Makefile)
-IGNORE= requires kernel source files in ${SRC_BASE}
-.endif
-
-.include <bsd.port.mk>
diff --git a/devel/py-libzfs/distinfo b/devel/py-libzfs/distinfo
deleted file mode 100644
index 3ae8f5fa9a4d..000000000000
--- a/devel/py-libzfs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1675808317
-SHA256 (truenas-py-libzfs-1.1.2023020700-c1bd4a0_GH0.tar.gz) = 23b2d6e1b6ed78be2d12068f9b1b0b01270afaaf0f017817a5fb109d358aa818
-SIZE (truenas-py-libzfs-1.1.2023020700-c1bd4a0_GH0.tar.gz) = 99656
diff --git a/devel/py-libzfs/files/extra-zpool-add.patch b/devel/py-libzfs/files/extra-zpool-add.patch
deleted file mode 100644
index 7d0688ca4f13..000000000000
--- a/devel/py-libzfs/files/extra-zpool-add.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From b5ffe1f1d6097df6e2f5cc6dd3c968872ec60804 Mon Sep 17 00:00:00 2001
-From: Ameer Hamza <ahamza@ixsystems.com>
-Date: Tue, 2 Apr 2024 23:56:55 +0500
-Subject: [PATCH] zpool_add API changed in upstream zfs master
-
----
- libzfs.pyx | 5 +++--
- pxd/libzfs.pxd | 2 +-
- 2 files changed, 4 insertions(+), 3 deletions(-)
-
-diff --git a/libzfs.pyx b/libzfs.pyx
-index a59fca8..71efa96 100644
---- a/libzfs.pyx
-+++ ./libzfs.pyx
-@@ -3175,13 +3175,14 @@ cdef class ZFSPool(object):
- hopts = self.root.generate_history_opts(fsopts, '-o')
- self.root.write_history('zfs create', hopts, name)
-
-- def attach_vdevs(self, vdevs_tree):
-+ def attach_vdevs(self, vdevs_tree, check_ashift=0):
- cdef const char *command = 'zpool add'
- cdef ZFSVdev vd = self.root.make_vdev_tree(vdevs_tree, {'ashift': self.properties['ashift'].parsed})
- cdef int ret
-+ cdef boolean_t ashift = check_ashift
-
- with nogil:
-- ret = libzfs.zpool_add(self.handle, vd.nvlist.handle)
-+ ret = libzfs.zpool_add(self.handle, vd.nvlist.handle, ashift)
-
- if ret != 0:
- raise self.root.get_error()
-diff --git a/pxd/libzfs.pxd b/pxd/libzfs.pxd
-index 3ab9374..6afa275 100644
---- a/pxd/libzfs.pxd
-+++ ./pxd/libzfs.pxd
-@@ -228,7 +228,7 @@ cdef extern from "libzfs.h" nogil:
- extern int zpool_create(libzfs_handle_t *, const char *, nvpair.nvlist_t *,
- nvpair.nvlist_t *, nvpair.nvlist_t *)
- extern int zpool_destroy(zpool_handle_t *, const char *)
-- extern int zpool_add(zpool_handle_t *, nvpair.nvlist_t *)
-+ extern int zpool_add(zpool_handle_t *, nvpair.nvlist_t *, boolean_t)
-
- IF HAVE_ZPOOL_SCAN == 3:
- extern int zpool_scan(zpool_handle_t *, zfs.pool_scan_func_t, zfs.pool_scrub_cmd_t)
diff --git a/devel/py-libzfs/pkg-descr b/devel/py-libzfs/pkg-descr
deleted file mode 100644
index 51a23404441a..000000000000
--- a/devel/py-libzfs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Python libzfs bindings using cython.
diff --git a/devel/py-llfuse/Makefile b/devel/py-llfuse/Makefile
deleted file mode 100644
index 55c0941bb899..000000000000
--- a/devel/py-llfuse/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-PORTNAME= llfuse
-PORTVERSION= 1.5.1
-CATEGORIES= devel python
-MASTER_SITES= PYPI \
- https://github.com/python-llfuse/python-llfuse/releases/download/release-${PORTVERSION}/
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= Python bindings for the low-level FUSE API
-WWW= https://github.com/python-llfuse/python-llfuse
-
-LICENSE= LGPL20+
-LICENSE_FILE= ${WRKSRC}/LICENSE
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=0:devel/py-setuptools@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-
-USES= compiler:c++11-lang fuse pkgconfig python
-USE_PYTHON= autoplist concurrent cython pep517 pytest
-
-TEST_ENV= PYTHONPATH=${STAGEDIR}${PYTHON_SITELIBDIR}
-
-post-install:
- ${FIND} ${STAGEDIR}${PYTHON_SITELIBDIR} -name '*.so' -exec ${STRIP_CMD} {} +
-
-.include <bsd.port.mk>
diff --git a/devel/py-llfuse/distinfo b/devel/py-llfuse/distinfo
deleted file mode 100644
index 30da47d20723..000000000000
--- a/devel/py-llfuse/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1725559735
-SHA256 (llfuse-1.5.1.tar.gz) = 7c9be52289cf647e3d735104531cc23a1a89fd1be3a621613a1cc0991f1b2699
-SIZE (llfuse-1.5.1.tar.gz) = 959557
diff --git a/devel/py-llfuse/pkg-descr b/devel/py-llfuse/pkg-descr
deleted file mode 100644
index b6da83cc7e55..000000000000
--- a/devel/py-llfuse/pkg-descr
+++ /dev/null
@@ -1,3 +0,0 @@
-Python-LLFUSE is a set of Python bindings for the low level FUSE API. It
-requires at least FUSE 2.8.0 and supports both Python 2.x and 3.x. It runs
-under Linux, OS-X, FreeBSD and NetBSD.
diff --git a/devel/py-pyfakefs/Makefile b/devel/py-pyfakefs/Makefile
deleted file mode 100644
index 1565ad8b0350..000000000000
--- a/devel/py-pyfakefs/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-PORTNAME= pyfakefs
-PORTVERSION= 5.7.1
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= Implement a fake file system that mocks the Python file system modules
-WWW= https://pytest-pyfakefs.readthedocs.io/en/stable/ \
- https://github.com/jmcgeheeiv/pyfakefs
-
-LICENSE= APACHE20
-LICENSE_FILE= ${WRKSRC}/COPYING
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=0:devel/py-setuptools@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-TEST_DEPENDS= ${PYTHON_PKGNAMEPREFIX}undefined>=0:devel/py-undefined@${PY_FLAVOR}
-
-USES= python
-USE_PYTHON= autoplist concurrent pep517 pytest
-
-NO_ARCH= yes
-
-.include <bsd.port.mk>
diff --git a/devel/py-pyfakefs/distinfo b/devel/py-pyfakefs/distinfo
deleted file mode 100644
index d76b6044cd26..000000000000
--- a/devel/py-pyfakefs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1729509154
-SHA256 (pyfakefs-5.7.1.tar.gz) = 24774c632f3b67ea26fd56b08115ba7c339d5cd65655410bca8572d73a1ae9a4
-SIZE (pyfakefs-5.7.1.tar.gz) = 211163
diff --git a/devel/py-pyfakefs/pkg-descr b/devel/py-pyfakefs/pkg-descr
deleted file mode 100644
index 4e82fbb5cd45..000000000000
--- a/devel/py-pyfakefs/pkg-descr
+++ /dev/null
@@ -1,13 +0,0 @@
-pyfakefs implements a fake file system that mocks the Python file system
-modules. Using pyfakefs, your tests operate on a fake file system in memory
-without touching the real disk. The software under test requires no modification
-to work with pyfakefs.
-
-Pyfakefs creates a new empty in-memory file system at each test start, which
-replaces the real filesystem during the test. Think of pyfakefs as making a
-per-test temporary directory, except for an entire file system.
-
-There are several means to achieve this: by using the fs fixture if running
-pytest, by using fake_filesystem_unittest.TestCase as a base class if using
-unittest, by using a fake_filesystem_unittest.Patcher instance as a context
-manager, or by using the patchfs decorator.
diff --git a/devel/py-pyyaml-include/Makefile b/devel/py-pyyaml-include/Makefile
index fac502189536..76a6a7004414 100644
--- a/devel/py-pyyaml-include/Makefile
+++ b/devel/py-pyyaml-include/Makefile
@@ -16,7 +16,7 @@ LICENSE_FILE= ${WRKSRC}/LICENSE
BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=61:devel/py-setuptools@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}setuptools-scm>=8:devel/py-setuptools-scm@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec>=2021.04.0:devel/py-fsspec@${PY_FLAVOR} \
+RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec>=2021.04.0:filesystems/py-fsspec@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}pyyaml>=6.0<7.0:devel/py-pyyaml@${PY_FLAVOR}
TEST_DEPENDS= ${PYTHON_PKGNAMEPREFIX}toml>=0:textproc/py-toml@${PY_FLAVOR}
diff --git a/devel/py-s3fs/Makefile b/devel/py-s3fs/Makefile
deleted file mode 100644
index 22915d7c9cc3..000000000000
--- a/devel/py-s3fs/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-PORTNAME= s3fs
-PORTVERSION= 2024.10.0
-CATEGORIES= devel python
-MASTER_SITES= PYPI
-PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= Convenient Filesystem interface over S3
-WWW= https://s3fs.readthedocs.io/en/latest/ \
- https://github.com/fsspec/s3fs
-
-LICENSE= BSD3CLAUSE
-LICENSE_FILE= ${WRKSRC}/LICENSE.txt
-
-BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}setuptools>=0:devel/py-setuptools@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}wheel>=0:devel/py-wheel@${PY_FLAVOR}
-RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}aiobotocore>=2.5.4<3.0.0:devel/py-aiobotocore@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}aiohttp>=0:www/py-aiohttp@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}fsspec>=${PORTVERSION}<${PORTVERSION}_99:devel/py-fsspec@${PY_FLAVOR}
-
-USES= python
-USE_PYTHON= autoplist concurrent pep517
-
-NO_ARCH= yes
-
-OPTIONS_DEFINE= AWSCLI BOTO3
-AWSCLI_DESC= Use awscli
-BOTO3_DESC= Use boto3
-
-AWSCLI_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}awscli>=1.29.17:devel/py-awscli@${PY_FLAVOR}
-BOTO3_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}boto3>=1.28.17:www/py-boto3@${PY_FLAVOR}
-
-.include <bsd.port.mk>
diff --git a/devel/py-s3fs/distinfo b/devel/py-s3fs/distinfo
deleted file mode 100644
index e12bca6c5c7a..000000000000
--- a/devel/py-s3fs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1729653364
-SHA256 (s3fs-2024.10.0.tar.gz) = 58b8c3650f8b99dbedf361543da3533aac8707035a104db5d80b094617ad4a3f
-SIZE (s3fs-2024.10.0.tar.gz) = 75916
diff --git a/devel/py-s3fs/pkg-descr b/devel/py-s3fs/pkg-descr
deleted file mode 100644
index ba62075e68c6..000000000000
--- a/devel/py-s3fs/pkg-descr
+++ /dev/null
@@ -1,2 +0,0 @@
-S3FS builds on aiobotocore to provide a convenient Python filesystem interface
-for S3.
diff --git a/devel/py-uproot/Makefile b/devel/py-uproot/Makefile
index ecb63853f7fc..a2447ff202f6 100644
--- a/devel/py-uproot/Makefile
+++ b/devel/py-uproot/Makefile
@@ -16,7 +16,7 @@ BUILD_DEPENDS= ${PYTHON_PKGNAMEPREFIX}hatch-vcs>=0:devel/py-hatch-vcs@${PY_FLAVO
${PYTHON_PKGNAMEPREFIX}hatchling>=0:devel/py-hatchling@${PY_FLAVOR}
RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}awkward>=2.4.6:math/py-awkward@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}cramjam>=2.5.0:archivers/py-cramjam@${PY_FLAVOR} \
- ${PYTHON_PKGNAMEPREFIX}fsspec>=0:devel/py-fsspec@${PY_FLAVOR} \
+ ${PYTHON_PKGNAMEPREFIX}fsspec>=0:filesystems/py-fsspec@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}numpy>=0,1:math/py-numpy@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}packaging>=0:devel/py-packaging@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}xxhash>=0:devel/py-xxhash@${PY_FLAVOR}
@@ -32,8 +32,8 @@ S3_DESC= Access Amazon S3 storage
XROOTD_DESC= Access extended ROOT daemon
HTTP_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}aiohttp>=0:www/py-aiohttp@${PY_FLAVOR}
-S3_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}s3fs>=0:devel/py-s3fs@${PY_FLAVOR}
-XROOTD_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec-xrootd>=0:devel/py-fsspec-xrootd@${PY_FLAVOR}
+S3_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}s3fs>=0:filesystems/py-s3fs@${PY_FLAVOR}
+XROOTD_RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}fsspec-xrootd>=0:filesystems/py-fsspec-xrootd@${PY_FLAVOR}
.include <bsd.port.pre.mk>
diff --git a/devel/py-zarr/Makefile b/devel/py-zarr/Makefile
index dfa5af0aff18..c2f354524ca4 100644
--- a/devel/py-zarr/Makefile
+++ b/devel/py-zarr/Makefile
@@ -18,7 +18,7 @@ RUN_DEPENDS= ${PYTHON_PKGNAMEPREFIX}asciitree>0:graphics/py-asciitree@${PY_FLAVO
${PYTHON_PKGNAMEPREFIX}fasteners>0:devel/py-fasteners@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}numcodecs>=0.10.0:misc/py-numcodecs@${PY_FLAVOR} \
${PYNUMPY}
-RUN_DEPENDS+= ${PYTHON_PKGNAMEPREFIX}fsspec>0:devel/py-fsspec@${PY_FLAVOR} \
+RUN_DEPENDS+= ${PYTHON_PKGNAMEPREFIX}fsspec>0:filesystems/py-fsspec@${PY_FLAVOR} \
${PYTHON_PKGNAMEPREFIX}h5py>0:science/py-h5py@${PY_FLAVOR}
USES= python
diff --git a/devel/rubygem-aws-sdk-efs/Makefile b/devel/rubygem-aws-sdk-efs/Makefile
deleted file mode 100644
index df70496db8c7..000000000000
--- a/devel/rubygem-aws-sdk-efs/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PORTNAME= aws-sdk-efs
-PORTVERSION= 1.86.0
-CATEGORIES= devel rubygems
-MASTER_SITES= RG
-
-MAINTAINER= sunpoet@FreeBSD.org
-COMMENT= Official AWS Ruby gem for Amazon Elastic File System (EFS)
-WWW= https://github.com/aws/aws-sdk-ruby/tree/version-3/gems/aws-sdk-efs
-
-LICENSE= APACHE20
-LICENSE_FILE= ${WRKSRC}/LICENSE.txt
-
-RUN_DEPENDS= rubygem-aws-sdk-core>=3.210.0<4:devel/rubygem-aws-sdk-core \
- rubygem-aws-sigv4>=1.5<2:devel/rubygem-aws-sigv4
-
-USES= gem
-
-NO_ARCH= yes
-
-.include <bsd.port.mk>
diff --git a/devel/rubygem-aws-sdk-efs/distinfo b/devel/rubygem-aws-sdk-efs/distinfo
deleted file mode 100644
index 2b61149e6faa..000000000000
--- a/devel/rubygem-aws-sdk-efs/distinfo
+++ /dev/null
@@ -1,3 +0,0 @@
-TIMESTAMP = 1729856552
-SHA256 (rubygem/aws-sdk-efs-1.86.0.gem) = f62c273472bf5010a6e71dc87fd0cc343fc7e52ad4bfe774ee01d34ad1ea41fa
-SIZE (rubygem/aws-sdk-efs-1.86.0.gem) = 65024
diff --git a/devel/rubygem-aws-sdk-efs/pkg-descr b/devel/rubygem-aws-sdk-efs/pkg-descr
deleted file mode 100644
index 6bee43ab7c62..000000000000
--- a/devel/rubygem-aws-sdk-efs/pkg-descr
+++ /dev/null
@@ -1 +0,0 @@
-Official AWS Ruby gem for Amazon Elastic File System (EFS).
diff --git a/devel/rubygem-aws-sdk-resources/Makefile b/devel/rubygem-aws-sdk-resources/Makefile
index f3b4b5aae499..e1705fe6a383 100644
--- a/devel/rubygem-aws-sdk-resources/Makefile
+++ b/devel/rubygem-aws-sdk-resources/Makefile
@@ -137,7 +137,7 @@ RUN_DEPENDS= rubygem-aws-sdk-accessanalyzer>=1<2:devel/rubygem-aws-sdk-accessana
rubygem-aws-sdk-ecr>=1<2:devel/rubygem-aws-sdk-ecr \
rubygem-aws-sdk-ecrpublic>=1<2:devel/rubygem-aws-sdk-ecrpublic \
rubygem-aws-sdk-ecs>=1<2:devel/rubygem-aws-sdk-ecs \
- rubygem-aws-sdk-efs>=1<2:devel/rubygem-aws-sdk-efs \
+ rubygem-aws-sdk-efs>=1<2:filesystems/rubygem-aws-sdk-efs \
rubygem-aws-sdk-eks>=1<2:devel/rubygem-aws-sdk-eks \
rubygem-aws-sdk-eksauth>=1<2:devel/rubygem-aws-sdk-eksauth \
rubygem-aws-sdk-elasticache>=1<2:devel/rubygem-aws-sdk-elasticache \
diff --git a/devel/tclvfs/Makefile b/devel/tclvfs/Makefile
deleted file mode 100644
index a720e16be00d..000000000000
--- a/devel/tclvfs/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-PORTNAME= tclvfs
-PORTVERSION= 20080503
-PORTREVISION= 1
-CATEGORIES= devel
-MASTER_SITES= SF/${PORTNAME}/${PORTNAME}/${PORTNAME}-${PORTVERSION}/
-
-MAINTAINER= tcltk@FreeBSD.org
-COMMENT= Tcl VirtualFileSystem layer
-WWW= https://sourceforge.net/projects/tclvfs/
-
-LIB_DEPENDS= libTrf2.1.4.so:devel/tcl-trf
-
-USES+= gmake tcl:tea
-TCL_PKG= vfs1.3
-
-GNU_CONFIGURE_MANPREFIX=${PREFIX}/share
-
-.include <bsd.port.mk>
diff --git a/devel/tclvfs/distinfo b/devel/tclvfs/distinfo
deleted file mode 100644
index 2e2c73fc23d7..000000000000
--- a/devel/tclvfs/distinfo
+++ /dev/null
@@ -1,2 +0,0 @@
-SHA256 (tclvfs-20080503.tar.gz) = 0d90362078c8f59347b14be377e9306336b6d25d147397f845e705a6fa1d38f2
-SIZE (tclvfs-20080503.tar.gz) = 258723
diff --git a/devel/tclvfs/files/patch-Makefile.in b/devel/tclvfs/files/patch-Makefile.in
deleted file mode 100644
index f14d9d60f5a4..000000000000
--- a/devel/tclvfs/files/patch-Makefile.in
+++ /dev/null
@@ -1,12 +0,0 @@
---- Makefile.in.orig 2013-04-30 12:23:15.000000000 +0200
-+++ Makefile.in 2013-04-30 12:23:54.000000000 +0200
-@@ -256,7 +256,8 @@
-
- VPATH = $(srcdir)/generic:$(srcdir)/unix:$(srcdir)/win
-
--.c.@OBJEXT@:
-+
-+vfs.@OBJEXT@: $(srcdir)/generic/vfs.c
- $(COMPILE) -c `@CYGPATH@ $<` -o $@
-
- #========================================================================
diff --git a/devel/tclvfs/files/patch-generic_vfs.c b/devel/tclvfs/files/patch-generic_vfs.c
deleted file mode 100644
index 1c52b734021d..000000000000
--- a/devel/tclvfs/files/patch-generic_vfs.c
+++ /dev/null
@@ -1,14 +0,0 @@
---- generic/vfs.c.orig 2006-08-30 21:38:03.000000000 +0200
-+++ generic/vfs.c 2013-09-24 10:26:34.000000000 +0200
-@@ -1656,7 +1656,10 @@
- return returnVal;
- }
-
--static CONST char**
-+#ifndef CONST86
-+#define CONST86
-+#endif
-+static CONST char*CONST86 *
- VfsFileAttrStrings(pathPtr, objPtrRef)
- Tcl_Obj* pathPtr;
- Tcl_Obj** objPtrRef;
diff --git a/devel/tclvfs/pkg-descr b/devel/tclvfs/pkg-descr
deleted file mode 100644
index 462b43010f65..000000000000
--- a/devel/tclvfs/pkg-descr
+++ /dev/null
@@ -1,4 +0,0 @@
-The TclVfs project aims to provide an extension to the Tcl language which
-allows Virtual Filesystems to be built using Tcl scripts only. It is also a
-repository of such Tcl-implemented filesystems (metakit, zip, ftp, tar, http,
-webdav, namespace, url)
diff --git a/devel/tclvfs/pkg-plist b/devel/tclvfs/pkg-plist
deleted file mode 100644
index 1edebfbba711..000000000000
--- a/devel/tclvfs/pkg-plist
+++ /dev/null
@@ -1,28 +0,0 @@
-lib/%%TCL_PKG%%/ftpvfs.tcl
-lib/%%TCL_PKG%%/httpvfs.tcl
-lib/%%TCL_PKG%%/lib%%TCL_PKG%%.so
-lib/%%TCL_PKG%%/mk4vfs.tcl
-lib/%%TCL_PKG%%/mk4vfscompat.tcl
-lib/%%TCL_PKG%%/pkgIndex.tcl
-lib/%%TCL_PKG%%/starkit.tcl
-lib/%%TCL_PKG%%/tarvfs.tcl
-lib/%%TCL_PKG%%/tclIndex
-lib/%%TCL_PKG%%/tclprocvfs.tcl
-lib/%%TCL_PKG%%/template/collatevfs.tcl
-lib/%%TCL_PKG%%/template/deltavfs.tcl
-lib/%%TCL_PKG%%/template/fishvfs.tcl
-lib/%%TCL_PKG%%/template/globfind.tcl
-lib/%%TCL_PKG%%/template/quotavfs.tcl
-lib/%%TCL_PKG%%/template/tdelta.tcl
-lib/%%TCL_PKG%%/template/templatevfs.tcl
-lib/%%TCL_PKG%%/template/versionvfs.tcl
-lib/%%TCL_PKG%%/testvfs.tcl
-lib/%%TCL_PKG%%/tkvfs.tcl
-lib/%%TCL_PKG%%/vfsUrl.tcl
-lib/%%TCL_PKG%%/vfsUtils.tcl
-lib/%%TCL_PKG%%/vfslib.tcl
-lib/%%TCL_PKG%%/webdavvfs.tcl
-lib/%%TCL_PKG%%/zipvfs.tcl
-lib/%%TCL_PKG%%/zipvfscompat.tcl
-share/man/mann/vfs.n.gz
-share/man/mann/vfslib.n.gz