From 6c8c4bcaa9a193917d8eeb3c7f0544604471a766 Mon Sep 17 00:00:00 2001 From: freezed Date: Tue, 13 Dec 2022 01:11:45 +0100 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20Add=20ZFS=20legacy=20mountpoints=20?= =?UTF-8?q?notes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...-mountpoint-proxmox-ovh-installation-en.md | 66 +++++++++++++++++++ ...acy-mountpoint-proxmox-ovh-installation.md | 65 ++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation-en.md create mode 100644 content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation.md diff --git a/content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation-en.md b/content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation-en.md new file mode 100644 index 0000000..46ea806 --- /dev/null +++ b/content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation-en.md @@ -0,0 +1,66 @@ +Title: Unset ZFS legacy mountpoints +Date: 2022-12-12 23:38 +Summary: Unset ZFS legacy mountpoints on an OVH baremetal server installed with Proxmox and ZFS +Category: Bloc-notes +Tags: zfs, proxmox, ovh, admin, shell, cli, storage, backup +Status: Published +Translation: true +Lang: en +Slug: unset-zfs-legacy-mountpoint-proxmox-ovh-installation +
+[Proxmox][pve] with a [ZFS][openzfs] root file system on an [OVHcloud baremetal server][ovh-sd] uses _Legacy mountpoints_ (managed via `/etc/fstab`). This is a valid setup, but it prevents using property inheritance between datasets. Let's give mountpoint management back to ZFS with a little configuration.
+ +_initial situation on proxmox after installation_: + +```bash +root@server:~# zfs list +NAME USED AVAIL REFER MOUNTPOINT +zp0 3.29G 1.75T 96K none +zp0/zd0 91.8M 932M 91.8M legacy +zp0/zd1 3.20G 1.75T 3.20G legacy +zp0/zd2 104K 1024M 104K legacy + +root@server:~# grep zp0 /mnt/etc/fstab +zp0/zd1 / zfs defaults 0 1 +zp0/zd0 /boot zfs defaults,x-systemd.requires=zfs-import.target 0 0 +zp0/zd2 /var/lib/vz zfs defaults,x-systemd.requires=zfs-import.target 0 0 +``` +_reboot to customer rescue to set mountpoints_… + +```bash +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # modprobe zfs +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zpool import -N -R /mnt zp0 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs set mountpoint=/ zp0/zd1 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs set mountpoint=/boot zp0/zd0 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs set mountpoint=/var/lib/vz zp0/zd2 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs mount zp0/zd1 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # grep zp0 /mnt/etc/fstab +zp0/zd1 / zfs defaults 0 1 +zp0/zd0 /boot zfs defaults,x-systemd.requires=zfs-import.target 0 0 +zp0/zd2 /var/lib/vz zfs defaults,x-systemd.requires=zfs-import.target 0 0 + +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # sed -i '/^zp0/d' /mnt/etc/fstab +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # grep zp0 /mnt/etc/fstab +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zpool export -a +``` +_reboot to proxmox, et voilà!_ + +```bash +root@server:~# zfs list +NAME USED AVAIL REFER MOUNTPOINT +zp0 3.29G 1.75T 96K none +zp0/zd0 91.8M 932M 91.8M /boot +zp0/zd1 3.20G 1.75T 3.20G / +zp0/zd2 104K 1024M 104K /var/lib/vz +``` + +_References_: + +- [FreeBSD Manual Pages - `ZFS(8)`](https://www.freebsd.org/cgi/man.cgi?query=zfs&apropos=0&sektion=8&manpath=FreeBSD+14.0-CURRENT&arch=default&format=html) +- [Oracle Solaris ZFS Administration Guide - Legacy Mount 
Points](https://docs.oracle.com/cd/E19253-01/819-5461/gbaln/index.html) +- Thank you _Louis_ 🤝 + +[openzfs]: https://openzfs.org +[OVHcloud baremetal OS]: https://www.ovhcloud.com/en-ie/bare-metal/os/ +[ovh-sd]: https://www.ovhcloud.com/en-ie/bare-metal/ +[pve]: https://proxmox.com/en/ diff --git a/content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation.md b/content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation.md new file mode 100644 index 0000000..871c6a3 --- /dev/null +++ b/content/unset-zfs-legacy-mountpoint-proxmox-ovh-installation.md @@ -0,0 +1,65 @@ +Title: Déconfigurer des «Legacy mountpoints» ZFS +Date: 2022-12-12 23:38 +Summary: Déconfigurer des _Legacy mountpoints_ ZFS sur un serveur _OVH baremetal_ installé avec Proxmox et ZFS +Category: Bloc-notes +Tags: zfs, proxmox, ovh, admin, shell, cli, storage, backup +Status: Published +Lang: fr +Slug: unset-zfs-legacy-mountpoint-proxmox-ovh-installation +
+[Proxmox][pve] installé sur [ZFS][openzfs] avec un modèle d'installation [OVHcloud baremetal][ovh-sd] utilise les _Legacy mountpoints_ (gestion par `/etc/fstab`). Cette configuration est valide mais prive le système de l'héritage des propriétés entre _datasets_ ZFS. Rendons la gestion des points de montage à ZFS avec très peu de configuration.
+ +_situation initiale sur proxmox après l'installation_: + +```bash +root@server:~# zfs list +NAME USED AVAIL REFER MOUNTPOINT +zp0 3.29G 1.75T 96K none +zp0/zd0 91.8M 932M 91.8M legacy +zp0/zd1 3.20G 1.75T 3.20G legacy +zp0/zd2 104K 1024M 104K legacy + +root@server:~# grep zp0 /mnt/etc/fstab +zp0/zd1 / zfs defaults 0 1 +zp0/zd0 /boot zfs defaults,x-systemd.requires=zfs-import.target 0 0 +zp0/zd2 /var/lib/vz zfs defaults,x-systemd.requires=zfs-import.target 0 0 +``` +_reboot en customer rescue pour reconfigurer les points de montages_… + +```bash +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # modprobe zfs +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zpool import -N -R /mnt zp0 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs set mountpoint=/ zp0/zd1 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs set mountpoint=/boot zp0/zd0 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs set mountpoint=/var/lib/vz zp0/zd2 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zfs mount zp0/zd1 +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # grep zp0 /mnt/etc/fstab +zp0/zd1 / zfs defaults 0 1 +zp0/zd0 /boot zfs defaults,x-systemd.requires=zfs-import.target 0 0 +zp0/zd2 /var/lib/vz zfs defaults,x-systemd.requires=zfs-import.target 0 0 + +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # sed -i '/^zp0/d' /mnt/etc/fstab +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # grep zp0 /mnt/etc/fstab +root@rescue-customer-eu (nsxxxxx.ip-x-x-x.eu) ~ # zpool export -a +``` +_reboot sur Proxmox, et voilà!_ + +```bash +root@server:~# zfs list +NAME USED AVAIL REFER MOUNTPOINT +zp0 3.29G 1.75T 96K none +zp0/zd0 91.8M 932M 91.8M /boot +zp0/zd1 3.20G 1.75T 3.20G / +zp0/zd2 104K 1024M 104K /var/lib/vz +``` + +_References_: + +- [FreeBSD Manual Pages - `ZFS(8)`](https://www.freebsd.org/cgi/man.cgi?query=zfs&apropos=0&sektion=8&manpath=FreeBSD+14.0-CURRENT&arch=default&format=html) +- [Oracle Solaris ZFS Administration Guide - Legacy Mount 
Points](https://docs.oracle.com/cd/E19253-01/819-5461/gbaln/index.html) +- Merci _Louis_ 🤝 + +[openzfs]: https://openzfs.org +[OVHcloud baremetal OS]: https://www.ovhcloud.com/en-ie/bare-metal/os/ +[ovh-sd]: https://www.ovhcloud.com/en-ie/bare-metal/ +[pve]: https://proxmox.com/en/