#!/bin/ksh -p
# SPDX-License-Identifier: CDDL-1.0
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2026 by Lawrence Livermore National Security, LLC.
# Copyright (c) 2026 by Wasabi Technologies, Inc.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/redundancy/redundancy.kshlib

#
# DESCRIPTION:
# When sequentially resilvering a dRAID pool with multiple child vdevs
# and N faulted vdevs, where N=parity, ensure that when another leaf
# vdev is marked degraded the pool can still be sequentially resilvered
# without introducing new checksum errors. Note that the available
# redundancy has been exhausted, so no errors can be silently corrected.
#
# This test is based on redundancy_draid_degraded1 and differs from it
# in two ways: 1) one faulted vdev has already been resilvered, and
# 2) the most recently faulted vdev, which is only marked degraded
# because redundancy is exhausted, is also resilvered.
#
# STRATEGY:
# 1. Create block device files for the test draid pool
# 2. For each parity value [1..3]
#    - create draid pool
#    - fill it with some directories/files
#    - fault one vdev and resilver it
#    - fault N=parity vdevs, eliminating any remaining redundancy
#    - force fault an additional vdev, causing it to be degraded
#    - replace the faulted vdevs using a sequential resilver.
#      The minimum pool redundancy requirements are met, so
#      reconstruction is possible when reading from all online vdevs.
#    - verify that the draid spares were correctly reconstructed and
#      no checksum errors were introduced
#    - destroy the draid pool
#

typeset -r devs=13
typeset -r dev_size_mb=512

typeset -a disks

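# Save the current tunable values so cleanup() can restore them.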
prefetch_disable=$(get_tunable PREFETCH_DISABLE)
rebuild_scrub_enabled=$(get_tunable REBUILD_SCRUB_ENABLED)
scan_suspend_progress=$(get_tunable SCAN_SUSPEND_PROGRESS)

function cleanup
{
	poolexists "$TESTPOOL" && destroy_pool "$TESTPOOL"

	for i in {0..$devs}; do
		rm -f "$TEST_BASE_DIR/dev-$i"
	done

	set_tunable32 PREFETCH_DISABLE $prefetch_disable
	set_tunable32 REBUILD_SCRUB_ENABLED $rebuild_scrub_enabled
	set_tunable32 SCAN_SUSPEND_PROGRESS $scan_suspend_progress
}

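#
# Fault N=parity children, force fault one additional child (which can
# only be marked degraded since no redundancy remains), then replace all
# of them with distributed spares using a sequential resilver and verify
# with a scrub that no checksum errors were introduced.
#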
function test_sequential_resilver # <pool> <parity> <dir>
{
	typeset pool=$1
	typeset nparity=$2
	typeset dir=$3

	# Fault N=parity devices
	for (( i=0; i<$nparity; i++ )); do
		log_must zpool offline -f $pool $dir/dev-$i
	done

	# Parity is exhausted, faulting another device marks it degraded
	log_must zpool offline -f $pool $dir/dev-$nparity

	# Replace all faulted vdevs with distributed spares
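	# (named draid<parity>-<top-level vdev id>-<spare id>).  Scan
	# progress is held while the replacements are issued so every
	# rebuild is started before any of them is allowed to proceed.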
	log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
	for (( i=0; i<$((nparity+1)); i++ )); do
		spare=draid${nparity}-0-$i
		log_must zpool replace -fs $pool $dir/dev-$i $spare
	done
	log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0

	log_must zpool wait -t resilver $pool

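	# A sequential resilver does not verify checksums, so an explicit
	# scrub is needed to confirm the spares were reconstructed correctly.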
	log_must zpool scrub -w $pool
	log_must zpool status $pool

	log_must check_pool_status $pool "scan" "repaired 0B"
	log_must check_pool_status $pool "errors" "No known data errors"
	log_must check_pool_status $pool "scan" "with 0 errors"
}

log_onexit cleanup

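# Disable prefetch to keep the test I/O deterministic, and disable the
# automatic scrub normally scheduled after a sequential resilver; the
# test issues its own scrub so verification happens at a known point.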
log_must set_tunable32 PREFETCH_DISABLE 1
log_must set_tunable32 REBUILD_SCRUB_ENABLED 0

# Disk files which will be used by pool
for i in {0..$(($devs - 1))}; do
	device=$TEST_BASE_DIR/dev-$i
	log_must truncate -s ${dev_size_mb}M $device
	disks[${#disks[*]}+1]=$device
done

# Disk file which will be attached
log_must truncate -s ${dev_size_mb}M $TEST_BASE_DIR/dev-$devs

for nparity in 1 2 3; do
	raid=draid${nparity}:$((nparity+2))s
	dir=$TEST_BASE_DIR

	log_must zpool create -O compression=off -f -o cachefile=none \
	    $TESTPOOL $raid ${disks[@]}
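	# Cache only metadata so file data is always read back from disk
	# rather than being satisfied from the ARC.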
	log_must zfs set primarycache=metadata $TESTPOOL

	log_must zfs create $TESTPOOL/fs
	log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R

	log_must zfs create -o compress=on $TESTPOOL/fs2
	log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R

	log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3
	log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R

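	# Export and re-import the pool to drop any cached state before
	# checking for errors and starting the resilver.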
	log_must zpool export $TESTPOOL
	log_must zpool import -o cachefile=none -d $dir $TESTPOOL

	log_must check_pool_status $TESTPOOL "errors" "No known data errors"

	test_sequential_resilver $TESTPOOL $nparity $dir

	log_must zpool destroy "$TESTPOOL"
done

log_pass "draid degraded device(s) test succeeded."