Just as a reminder: many people think of ZFS mirroring in terms of two- or three-disk configurations. But it's actually n-way mirroring:
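(By the way: if you want to play along without a pile of spare disks, you can back the pool with plain files, which is what the /testdev/test* paths in the examples below suggest anyway. A minimal sketch, assuming a /testdev directory and 128 MB per file; the sizes are my assumption, not part of the original setup:)
mkdir -p /testdev
for i in 0 1 2 3 4 5 6 7 8 9 10; do
    mkfile 128m /testdev/test$i    # 64 MB is the ZFS minimum for a vdev
done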
You could use two disks:
root@template:/testdev# zpool create testpool mirror /testdev/test0 /testdev/test1
root@template:/testdev# zpool status testpool
  pool: testpool
 state: ONLINE
  scan: none requested
config:

        NAME                STATE     READ WRITE CKSUM
        testpool            ONLINE       0     0     0
          mirror-0          ONLINE       0     0     0
            /testdev/test0  ONLINE       0     0     0
            /testdev/test1  ONLINE       0     0     0

errors: No known data errors
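A two-way mirror doesn't have to stay a two-way mirror, by the way: you can grow it later by attaching another device to one of the existing ones (a sketch, assuming the pool from above is still around):
zpool attach testpool /testdev/test1 /testdev/test2
# after resilvering, mirror-0 is a three-way mirror;
# destroy the pool again before recreating it in the next example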
You could use three disks:
root@template:/testdev# zpool create testpool mirror /testdev/test0 /testdev/test1 /testdev/test2
root@template:/testdev# zpool status testpool
  pool: testpool
 state: ONLINE
  scan: none requested
config:

        NAME                STATE     READ WRITE CKSUM
        testpool            ONLINE       0     0     0
          mirror-0          ONLINE       0     0     0
            /testdev/test0  ONLINE       0     0     0
            /testdev/test1  ONLINE       0     0     0
            /testdev/test2  ONLINE       0     0     0

errors: No known data errors
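The nice thing about a three-way mirror: it keeps serving data with two devices gone. You could simulate that with zpool offline (just a sketch, not part of the original walkthrough):
zpool offline testpool /testdev/test1
zpool offline testpool /testdev/test2
zpool status testpool    # state: DEGRADED, but the data is still readable
zpool online testpool /testdev/test1 /testdev/test2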
However, you could do an 11-way mirror as well:
root@template:/testdev# zpool create testpool mirror \
/testdev/test0 /testdev/test1 \
/testdev/test2 /testdev/test3 \
/testdev/test4 /testdev/test5 \
/testdev/test6 /testdev/test7 \
/testdev/test8 /testdev/test9 \
/testdev/test10
root@template:/testdev# zpool status testpool
  pool: testpool
 state: ONLINE
  scan: none requested
config:

        NAME                 STATE     READ WRITE CKSUM
        testpool             ONLINE       0     0     0
          mirror-0           ONLINE       0     0     0
            /testdev/test0   ONLINE       0     0     0
            /testdev/test1   ONLINE       0     0     0
            /testdev/test2   ONLINE       0     0     0
            /testdev/test3   ONLINE       0     0     0
            /testdev/test4   ONLINE       0     0     0
            /testdev/test5   ONLINE       0     0     0
            /testdev/test6   ONLINE       0     0     0
            /testdev/test7   ONLINE       0     0     0
            /testdev/test8   ONLINE       0     0     0
            /testdev/test9   ONLINE       0     0     0
            /testdev/test10  ONLINE       0     0     0

errors: No known data errors
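Keep in mind that the usable capacity of such a pool is still that of a single device; the other ten copies buy you redundancy and read bandwidth, not space. A quick way to check (output omitted):
zpool list testpool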
You could now fill up your pool with some data:
root@template:/testpool# dd if=/dev/urandom of=testfile
^C11266+0 records in
11266+0 records out
root@template:/testpool# md5sum testfile
788d23ef58a5e5f9c60d17958a6c4aca testfile
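(The dd above runs until you interrupt it with ^C. If you prefer a bounded write, something like this would do; the 16 MB is an arbitrary choice of mine:)
dd if=/dev/urandom of=/testpool/testfile bs=1024k count=16    # writes exactly 16 MB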
Why would you want something like that? Well ... for people from the belt-and-suspenders department this is really useful. But I'm thinking more of creating identical copies of your pool on several devices. I'm sure you already see some nice use cases when those disks are located in a SAN or on an iSCSI server:
root@template:/testdev# zpool split testpool testpool1_10 /testdev/test10
root@template:/testdev# zpool split testpool testpool1_9 /testdev/test9
root@template:/testdev# zpool split testpool testpool1_8 /testdev/test8
root@template:/testdev# zpool split testpool testpool1_7 /testdev/test7
root@template:/testdev# zpool split testpool testpool1_6 /testdev/test6
root@template:/testdev# zpool split testpool testpool1_5 /testdev/test5
root@template:/testdev# zpool split testpool testpool1_4 /testdev/test4
root@template:/testdev# zpool split testpool testpool1_3 /testdev/test3
root@template:/testdev# zpool split testpool testpool1_2 /testdev/test2
root@template:/testdev# zpool split testpool testpool1_1 /testdev/test1
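(Those ten splits are mechanical enough that a loop would do the same job, if you prefer:)
for i in 10 9 8 7 6 5 4 3 2 1; do
    zpool split testpool testpool1_$i /testdev/test$i
done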
Afterwards you could just take one of the disks and import it as a separate pool:
root@template:/testdev# zpool import -d /testdev testpool1_1
root@template:/testdev# zpool status testpool1_1
  pool: testpool1_1
 state: ONLINE
  scan: none requested
config:

        NAME              STATE     READ WRITE CKSUM
        testpool1_1       ONLINE       0     0     0
          /testdev/test1  ONLINE       0     0     0

errors: No known data errors
Let's check the content:
root@template:~# cd /testpool1_1/
root@template:/testpool1_1# md5sum testfile
788d23ef58a5e5f9c60d17958a6c4aca testfile
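And to convince yourself that every split really carries an identical copy, you could import and check them all in one go (a sketch, assuming the default /testpool1_* mountpoints; testpool1_1 is already imported from the step above):
for i in 2 3 4 5 6 7 8 9 10; do
    zpool import -d /testdev testpool1_$i
    md5sum /testpool1_$i/testfile    # should print 788d23ef... for each copy
done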