#!/bin/bash -ex

# Copyright (C) 2014 Curt Brune <curt@cumulusnetworks.com>
#
# SPDX-License-Identifier: GPL-2.0

MEM=8192
DISK=$1
ONIE_RECOVERY_ISO=$2
INSTALLER=$3
DISK_SIZE=$4
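
# Positional arguments, as used below: DISK is the qcow2 VM disk image that is
# created and installed onto, ONIE_RECOVERY_ISO is the ONIE recovery ISO the VM
# boots from, INSTALLER is the SONiC ONIE installer copied onto the installer
# disk, and DISK_SIZE is the VM disk size in GB.
#
# Illustrative invocation only (all values below are placeholders):
#   ./<this-script>.sh target/sonic-vs.img onie-recovery.iso sonic-vs.bin 16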

INSTALLER_DISK="./sonic-installer.img"

# VM will listen on telnet port $KVM_PORT
KVM_PORT=9000

on_exit()
{
    rm -f $kvm_log
}
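
# on_error dumps socket and process state plus the KVM console log; it is
# wired to the ERR trap below to help debug failed runs.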
on_error()
{
    netstat -antp
    ps aux
    echo "============= kvm_log =============="
    cat $kvm_log
}

create_disk()
{
    echo "Creating SONiC KVM disk $DISK of size $DISK_SIZE GB"
    qemu-img create -f qcow2 $DISK ${DISK_SIZE}G
}
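
# prepare_installer_disk builds a small VFAT image that carries the SONiC
# installer as onie-installer.bin (assumed to be a file name ONIE's installer
# discovery recognizes), and it is attached to the VM as a second virtio disk.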
prepare_installer_disk()
{
    fallocate -l 4096M $INSTALLER_DISK

    mkfs.vfat $INSTALLER_DISK

    tmpdir=$(mktemp -d)

    mount -o loop $INSTALLER_DISK $tmpdir

    cp $INSTALLER $tmpdir/onie-installer.bin

    umount $tmpdir
}
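
# Wait for the VM's serial telnet port to start listening. A fixed 2-second
# sleep was not always enough for the port to come up, so this polls for up
# to roughly 60 seconds (30 attempts, 2 seconds apart) before proceeding.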
wait_kvm_ready()
{
    local count=30
    local waiting_in_seconds=2.0
    for ((i=1; i<=$count; i++)); do
        sleep $waiting_in_seconds
        echo "$(date) [$i/$count] waiting for port $KVM_PORT to be ready"
        if netstat -l | grep -q ":$KVM_PORT"; then
            break
        fi
    done
}
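
# net-tools provides the netstat command used by wait_kvm_ready and on_error.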
apt-get install -y net-tools

create_disk
prepare_installer_disk
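
# vs_build_prepare_mem is expected to come from the calling environment; when
# it is "yes", the host drops caches and compacts memory before starting KVM.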
echo "Prepare memory for KVM build: $vs_build_prepare_mem"
mount proc /proc -t proc || true
free -m
if [[ "$vs_build_prepare_mem" == "yes" ]]; then
    # Force the OS to drop caches and compact memory so that KVM can get the memory it needs
    bash -c 'echo 1 > /proc/sys/vm/drop_caches'
    # Not all kernels support compact_memory
    if [[ -w '/proc/sys/vm/compact_memory' ]]; then
        bash -c 'echo 1 > /proc/sys/vm/compact_memory'
    fi
    free -m
fi

kvm_log=$(mktemp)
trap on_exit EXIT
trap on_error ERR
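
# The build uses two separate KVM instances (see PR #8746): this first one
# boots the ONIE recovery ISO and installs SONiC onto $DISK via
# install_sonic.py; a second instance later boots the installed image with
# -snapshot so that nothing written during the test boot persists into the
# final image.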
echo "Installing SONiC"
|
|
|
|
|
2018-11-21 00:32:40 -06:00
|
|
|
/usr/bin/kvm -m $MEM \
|
|
|
|
-name "onie" \
|
|
|
|
-boot "order=cd,once=d" -cdrom "$ONIE_RECOVERY_ISO" \
|
|
|
|
-device e1000,netdev=onienet \
|
|
|
|
-netdev user,id=onienet,hostfwd=:0.0.0.0:3041-:22 \
|
|
|
|
-vnc 0.0.0.0:0 \
|
|
|
|
-vga std \
|
|
|
|
-drive file=$DISK,media=disk,if=virtio,index=0 \
|
|
|
|
-drive file=$INSTALLER_DISK,if=virtio,index=1 \
|
2018-11-25 13:33:00 -06:00
|
|
|
-serial telnet:127.0.0.1:$KVM_PORT,server > $kvm_log 2>&1 &
|
2018-11-21 00:32:40 -06:00
|
|
|
|
|
|
|
kvm_pid=$!
wait_kvm_ready

[ -d "/proc/$kvm_pid" ] || {
    echo "ERROR: kvm died."
    cat $kvm_log
    exit 1
}

echo "to kill kvm: sudo kill $kvm_pid"
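
# install_sonic.py performs the installation over the VM's serial console,
# which is exposed as the telnet server on 127.0.0.1:$KVM_PORT started above.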
./install_sonic.py

kill $kvm_pid
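
# Second phase: boot the freshly installed image in a new KVM instance.
# -snapshot writes VM disk changes to a temporary overlay instead of $DISK,
# so nothing from this test boot ends up in the shipped image (PR #8746).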
echo "Booting up SONiC"
|
|
|
|
|
|
|
|
/usr/bin/kvm -m $MEM \
|
|
|
|
-name "onie" \
|
|
|
|
-device e1000,netdev=onienet \
|
|
|
|
-netdev user,id=onienet,hostfwd=:0.0.0.0:3041-:22 \
|
|
|
|
-vnc 0.0.0.0:0 \
|
|
|
|
-vga std \
|
|
|
|
-snapshot \
|
|
|
|
-drive file=$DISK,media=disk,if=virtio,index=0 \
|
|
|
|
-serial telnet:127.0.0.1:$KVM_PORT,server > $kvm_log 2>&1 &
|
|
|
|
|
|
|
|
kvm_pid=$!
wait_kvm_ready

[ -d "/proc/$kvm_pid" ] || {
    echo "ERROR: kvm died."
    cat $kvm_log
    exit 1
}

echo "to kill kvm: sudo kill $kvm_pid"
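
# check_install.py verifies the installation by logging into the booted VM
# (the username, password, and telnet port are passed below); per PR #8746 it
# also removes the rw folder at the end so the image starts from a clean state.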
./check_install.py -u $SONIC_USERNAME -P $PASSWD -p $KVM_PORT

kill $kvm_pid

exit 0