TT#118659 Do not fail when deploying SW-RAID if no RAID was present yet

Follow-up fix for commit fc9b43f92e (Fix re-deploying over existing
SW-RAID arrays).

We try to detect existing SW-RAID arrays and to identify the disks that
are part of them, so we can properly reset them later. But if no such
pre-existing SW-RAID array is found, the orig_swraid_device variable
stays unset, and since the script runs with `set -u` (nounset) every
SW-RAID deployment now fails, as observed on carrier-sp1-trunk:

| root@carrier-sp1-trunk ~ # tail -20 /tmp/deployment-installer-debug.log
| ++02:00:04 (netscript.grml:620): set_up_partition_table_swraid():  head -1
| ++02:00:04 (netscript.grml:620): set_up_partition_table_swraid():  lsblk --list --noheadings --output TYPE,NAME
| Sleeping for 10 seconds (as requested via boot option 'ngcpstatus')
| +02:00:04 (netscript.grml:620): set_up_partition_table_swraid():  raid_device=
| +02:00:04 (netscript.grml:623): set_up_partition_table_swraid():  [[ -n '' ]]
| +02:00:04 (netscript.grml:645): set_up_partition_table_swraid():  [[ -b /dev/md0 ]]
| /tmp/netscript.grml: line 669: orig_swraid_device: unbound variable
| ++02:00:04 (netscript.grml:1): set_up_partition_table_swraid():  wait_exit
| ++02:00:04 (netscript.grml:339): wait_exit():  local e_code=1
| ++02:00:04 (netscript.grml:340): wait_exit():  [[ 1 -ne 0 ]]
| ++02:00:04 (netscript.grml:341): wait_exit():  set_deploy_status error
| ++02:00:04 (netscript.grml:103): set_deploy_status():  '[' -n error ']'
| ++02:00:04 (netscript.grml:104): set_deploy_status():  echo error
| ++02:00:04 (netscript.grml:343): wait_exit():  trap '' 1 2 3 6 15 ERR EXIT
| ++02:00:04 (netscript.grml:344): wait_exit():  status_wait
| ++02:00:04 (netscript.grml:329): status_wait():  [[ -n 10 ]]
| ++02:00:04 (netscript.grml:329): status_wait():  [[ 10 != 0 ]]
| ++02:00:04 (netscript.grml:333): status_wait():  echo 'Sleeping for 10 seconds (as requested via boot option '\''ngcpstatus'\'')'
| ++02:00:04 (netscript.grml:334): status_wait():  sleep 10
| ++02:00:14 (netscript.grml:345): wait_exit():  exit 1
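
For context, the failure is plain bash nounset behaviour: with `set -u`
in effect, expanding a variable that was never assigned aborts the
script, while the `${var:-}` form substitutes an empty string and lets
execution continue. A minimal standalone sketch of the mechanism (just
an illustration, not the actual netscript.grml code):

#!/bin/bash
set -eu

# No pre-existing SW-RAID array was detected, so orig_swraid_device
# never got assigned anywhere above this point.

# [[ -n "${orig_swraid_device}" ]]    # aborts: "orig_swraid_device: unbound variable"

if [[ -n "${orig_swraid_device:-}" ]] ; then  # ":-" expands to "" when unset
  echo "restoring SWRAID_DEVICE to ${orig_swraid_device}"
fi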

FTR:

| root@carrier-sp1-trunk ~ # cat /proc/cmdline
| BOOT_IMAGE=vmlinuz initrd=initrd.img fetch=http://builder6.mgm.sipwise.com:3000/ngcp-pxe-boot-sipwise20230915/fs/grml64-small/grml64-small.squashfs boot=live ignore_bootid apm=power-off nomce net.ifnames=0 noprompt noeject vga=791 ssh=sipwise ethdevice=eth0 ethdevice-timeout=30 live-netdev=eth0 netscript=http://deb.sipwise.com/netscript/master/deployment.sh debianrelease=bookworm lowperformance enablevmservices debugmode ngcpvers=trunk ngcpnoupload  ngcppro ngcpsp1 ngcphostname=web01a ngcpcrole=mgmt ngcpnonwrecfg dns=1.1.1.1,1.0.0.1 ngcpeaddr=192.168.209.180 ip=192.168.209.180::192.168.209.1:255.255.255.0:sp1:eth0:off vagrant swraiddisk1=sda swraiddisk2=sdb ngcpnodename=sp1 ngcpstatus=10 swapfilesize=2048M rootfssize=8G fallbackfssize=10M
|
| root@carrier-sp1-trunk ~ # cat /proc/mdstat
| Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
| unused devices: <none>
|
| root@carrier-sp1-trunk ~ # lsblk
| NAME  MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
| loop0   7:0    0 428.8M  1 loop /usr/lib/live/mount/rootfs/grml64-small.squashfs
|                                 /run/live/rootfs/grml64-small.squashfs
| sda     8:0    0    16G  0 disk
| sdb     8:16   0    16G  0 disk
| sr0    11:0    1  1024M  0 rom

Change-Id: I2329aaa0754674b5d192a174b644900f09f9db84
mr12.3
Author: Michael Prokop
parent 1d59d89d04
commit e99f33e11a

@@ -666,7 +666,7 @@ set_up_partition_table_swraid() {
     fi
   fi
-  if [[ -n "${orig_swraid_device}" ]] ; then
+  if [[ -n "${orig_swraid_device:-}" ]] ; then
     echo "NOTE: modified RAID array detected, setting SWRAID_DEVICE back to original setting '${orig_swraid_device}'"
     SWRAID_DEVICE="${orig_swraid_device}"
   fi
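
The `${orig_swraid_device:-}` guard is the usual idiom for testing a
possibly-unset variable under `set -u` / `set -o nounset`. It can be
double-checked in a throwaway shell (hypothetical one-liner, not taken
from the deployment script):

bash -uc '[[ -n "${orig_swraid_device:-}" ]] && echo set || echo unset'
# prints "unset" instead of aborting with an unbound-variable error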
