Just some random notes on how to setup ZFS.
ZFS on Linux
When setting up a zpool, specify disks in a way that remains consistent across reboots and systems. Reading https://wiki.archlinux.org/index.php/Persistent_block_device_naming#by-id_and_by-path it mentions that the wwn-XXXX names should be fully persistent.
ZFS Permissions:
1
2
3
|
zfs mount mypool/myzfs
chown myuser:mygroup /mypool/myzfs
|
Encryption
1
2
3
4
5
6
7
8
9
10
11
| zfs create -o encryption=on -o keylocation=prompt -o keyformat=passphrase
zfs change-key
zfs load-key -r <dataset path>
zfs unload-key -r <dataset path>
ls -l /banshee/encrypted/child2
zfs get keystatus banshee/encrypted
zfs send -w
|
ZFS volumes
View volume list:
ZFS volume devices:
These devices are in /dev/zvol/<pool>/<dataset>
Pool can be multi-layer (e.g. rpool/data).
With a setup like the following (done via proxmox host but should apply to other ZFS systems):
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
| root@pve001:~# zfs list
NAME USED AVAIL REFER MOUNTPOINT
rpool 663G 236G 104K /rpool
rpool/ROOT 567G 236G 96K /rpool/ROOT
rpool/ROOT/pve-1 567G 236G 567G /
rpool/data 66.0G 236G 96K /rpool/data
rpool/data/subvol-102-disk-0 404M 7.61G 404M /rpool/data/subvol-102-disk-0
rpool/data/vm-100-disk-0 104K 236G 104K -
rpool/data/vm-100-disk-1 18.8G 236G 18.8G -
rpool/data/vm-100-disk-2 64K 236G 64K -
rpool/data/vm-101-disk-0 3.82G 236G 3.82G -
rpool/data/vm-103-disk-0 3.99G 236G 3.99G -
rpool/data/vm-104-disk-0 25.4G 236G 25.4G -
rpool/data/vm-105-disk-0 4.38G 236G 4.38G -
rpool/data/vm-106-disk-0 9.27G 236G 9.27G -
rpool/var-lib-vz 29.3G 236G 29.3G /var/lib/vz
|
And a VM config like the following:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
| root@pve001:~# qm config 101
balloon: 0
boot: order=scsi0;ide2;net0
cores: 2
cpu: x86-64-v2-AES
description:
ide2: none,media=cdrom
machine: q35
memory: 4096
meta: creation-qemu=9.0.2,ctime=1725865589
name: ansible001
net0: virtio=BC:24:11:23:36:AE,bridge=vmbr0,firewall=1
numa: 0
ostype: l26
scsi0: local-zfs:vm-101-disk-0,iothread=1,size=80G
scsihw: virtio-scsi-single
smbios1: uuid=5adf1be4-29ba-4fdb-86fb-e470e7042498
sockets: 1
vga: qxl
vmgenid: 84875a30-c554-423d-96d1-7602dcf93b92
|
If you want to view partition layout via fdisk:
1
2
3
4
5
6
7
8
9
10
11
| root@pve001:~# fdisk -l /dev/zvol/rpool/data/vm-101-disk-0
Disk /dev/zvol/rpool/data/vm-101-disk-0: 80 GiB, 85899345920 bytes, 167772160 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 16384 bytes
I/O size (minimum/optimal): 16384 bytes / 16384 bytes
Disklabel type: gpt
Disk identifier: E4685CB0-C04D-43E5-B7C5-45919E117E79
Device Start End Sectors Size Type
/dev/zvol/rpool/data/vm-101-disk-0p1 2048 4095 2048 1M BIOS boot
/dev/zvol/rpool/data/vm-101-disk-0p2 4096 167770111 167766016 80G Linux filesystem
|
View devices names:
1
2
3
4
| root@pve001:~# ls -lh /dev/zvol/rpool/data/vm-101-disk-0*
lrwxrwxrwx 1 root root 12 Oct 10 15:54 /dev/zvol/rpool/data/vm-101-disk-0 -> ../../../zd0
lrwxrwxrwx 1 root root 14 Oct 10 15:54 /dev/zvol/rpool/data/vm-101-disk-0-part1 -> ../../../zd0p1
lrwxrwxrwx 1 root root 14 Oct 10 15:54 /dev/zvol/rpool/data/vm-101-disk-0-part2 -> ../../../zd0p2
|
Mount a VM disk (WARNING: DO NOT DO THIS AGAINST A RUNNING VM)
1
2
3
4
5
6
| root@pve001:~# mkdir /mnt/vm-disk
root@pve001:~# mount /dev/zvol/rpool/data/vm-101-disk-0-part2 /mnt/vm-disk/
root@pve001:~# ls /mnt/vm-disk/
bin bin.usr-is-merged boot cdrom dev etc home lib lib64 lib.usr-is-merged lost+found media mnt opt proc root run sbin sbin.usr-is-merged snap srv swap.img sys tmp usr var
root@pve001:~# umount /mnt/vm-disk
|
You can also resize partitions on those disks (the built-in resize-disk operation only grows the underlying disk itself, not its partitions; after growing the disk you can either expand an existing partition or add a new one).
Monitoring
General status
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
| root@pve001:~# zpool status
pool: rpool
state: ONLINE
scan: scrub repaired 0B in 00:16:45 with 0 errors on Sun Aug 10 00:40:46 2025
config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
nvme-eui.0025382741a04ee6-part3 ONLINE 0 0 0
errors: No known data errors
pool: tank
state: ONLINE
scan: scrub repaired 0B in 00:02:47 with 0 errors on Sun Aug 10 00:26:49 2025
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
nvme-Samsung_SSD_990_EVO_1TB_S7M3NL0X720193X ONLINE 0 0 0
errors: No known data errors
|
List sizes
1
2
3
4
| root@pve001:~# zpool list
NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
rpool 928G 780G 148G - - 33% 84% 1.00x ONLINE -
tank 928G 191G 737G - - 7% 20% 1.00x ONLINE -
|
iostat
1
2
3
4
5
6
7
| root@pve001:~# zpool iostat
capacity operations bandwidth
pool alloc free read write read write
---------- ----- ----- ----- ----- ----- -----
rpool 780G 148G 31 47 3.18M 3.22M
tank 191G 737G 14 31 363K 730K
---------- ----- ----- ----- ----- ----- -----
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
| root@pve001:~# zpool iostat -r
rpool sync_read sync_write async_read async_write scrub trim rebuild
req_size ind agg ind agg ind agg ind agg ind agg ind agg ind agg
---------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
512 7.98K 0 28.2K 0 1.77M 0 501K 0 85.4K 0 0 0 0 0
1K 8.90K 0 0 0 0 0 1.00M 0 139 0 0 0 0 0
2K 6.71K 0 0 0 5 0 501K 0 130 0 0 0 0 0
4K 378K 0 1.00M 0 97.0K 0 11.3M 0 206K 0 0 0 0 0
8K 443K 68.0K 78.2K 11.3K 145K 306K 2.57M 3.98M 104K 108K 0 0 0 0
16K 173K 142K 19.3K 2.22K 62.0K 213K 1.02M 3.02M 35.1K 144K 0 0 0 0
32K 64.6K 178K 126K 2.22K 4.13M 140K 2.79M 1.72M 368K 163K 0 0 0 0
64K 31.8K 222K 86.2K 61.0K 141K 657K 637K 3.46M 48.8K 1.02M 0 0 0 0
128K 73.6K 276K 821K 1.78K 21.8M 588K 25.2M 582K 5.48M 497K 0 0 0 0
256K 0 0 0 0 0 0 0 0 0 0 0 0 0 0
512K 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1M 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2M 0 0 0 0 0 0 0 0 0 0 0 0 0 0
4M 0 0 0 0 0 0 0 0 0 0 0 0 0 0
8M 0 0 0 0 0 0 0 0 0 0 0 0 0 0
16M 0 0 0 0 0 0 0 0 0 0 0 0 0 0
------------------------------------------------------------------------------------------------------------
tank sync_read sync_write async_read async_write scrub trim rebuild
req_size ind agg ind agg ind agg ind agg ind agg ind agg ind agg
---------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
512 4 0 0 0 0 0 242 0 7 0 0 0 0 0
1K 6 0 0 0 0 0 313K 0 2 0 0 0 0 0
2K 1 0 0 0 0 0 0 0 0 0 0 0 0 0
4K 3.26M 0 1.34M 0 761K 0 11.2M 0 178K 0 0 0 0 0
8K 5.78M 37.6K 7.48M 1 1.31M 30.6K 2.48M 3.32M 42.0K 9.90K 0 0 0 0
16K 2.79M 152K 3.01M 2 420K 102K 576K 2.63M 2.14K 16.9K 1 0 0 0
32K 3.34K 241K 122K 2 3.32K 184K 1.26M 1.66M 4.95K 20.3K 443K 0 0 0
64K 301 233K 166K 3 0 513K 146K 2.79M 548 314K 341K 0 0 0
128K 0 203K 481K 2 0 583K 42 1.18M 0 689K 347K 0 0 0
256K 0 0 0 0 0 0 0 0 0 0 207K 0 0 0
512K 0 0 0 0 0 0 0 0 0 0 81.7K 0 0 0
1M 0 0 0 0 0 0 0 0 0 0 37.2K 0 0 0
2M 0 0 0 0 0 0 0 0 0 0 22.9K 0 0 0
4M 0 0 0 0 0 0 0 0 0 0 12.6K 0 0 0
8M 0 0 0 0 0 0 0 0 0 0 6.61K 0 0 0
16M 0 0 0 0 0 0 0 0 0 0 13.9K 0 0 0
------------------------------------------------------------------------------------------------------------
|
References:
- https://docs.oracle.com/cd/E18752_01/html/819-5461/gazsu.html
- https://docs.oracle.com/cd/E18752_01/html/819-5461/gbciq.html
- https://arstechnica.com/gadgets/2021/06/a-quick-start-guide-to-openzfs-native-encryption/
- https://wiki.archlinux.org/title/ZFS#Unlock_at_boot_time:_systemd