2018-09-24 18:35:01 -05:00
|
|
|
#!/bin/bash
|
|
|
|
|
|
|
|
|
|
|
|
# Log a debug message to syslog (via logger) and append it, timestamped,
# to the per-device debug log file.
# $1 - message text
# Globals read: DEBUGLOG (path to the debug log file)
function debug()
{
    # Quote the message so a multi-word string reaches logger as a single
    # argument (the original unquoted $1 word-split it).
    /usr/bin/logger "$1"
    # $(date) quoted to preserve the timestamp exactly as produced;
    # ${DEBUGLOG} quoted in case the path ever contains spaces.
    /bin/echo "$(date)" "- $1" >> "${DEBUGLOG}"
}
|
|
|
|
|
|
|
|
# Serialize service state transitions (start/stop) across processes by
# taking an exclusive flock on ${LOCKFILE}.
# Globals read: LOCKFILE, SERVICE, DEV
# Globals written: LOCKFD (fd number allocated by bash; read later by
#                  unlock_service_state_change)
function lock_service_state_change()
{
    debug "Locking ${LOCKFILE} from ${SERVICE}$DEV service"

    # Open the lock file on a dynamically allocated descriptor; bash
    # stores the chosen fd number in LOCKFD.
    exec {LOCKFD}>${LOCKFILE}
    # Block until the exclusive lock is acquired.
    /usr/bin/flock -x ${LOCKFD}
    # Release the lock on normal exit (0) or INT/QUIT/TERM (2/3/15) so an
    # interrupted script does not keep the lock held.
    trap "/usr/bin/flock -u ${LOCKFD}" 0 2 3 15

    debug "Locked ${LOCKFILE} (${LOCKFD}) from ${SERVICE}$DEV service"
}
|
|
|
|
|
|
|
|
# Release the exclusive lock taken by lock_service_state_change().
# Globals read: LOCKFILE, LOCKFD, SERVICE, DEV
function unlock_service_state_change()
{
    debug "Unlocking ${LOCKFILE} (${LOCKFD}) from ${SERVICE}$DEV service"
    /usr/bin/flock -u ${LOCKFD}
}
|
|
|
|
|
|
|
|
# Determine whether the current transition is part of a warm restart.
# Reads both the system-wide and the per-service warm restart flags from
# STATE_DB and normalizes the answer into WARM_BOOT ("true"/"false") so
# callers never see an empty value.
# Globals read: SONIC_DB_CLI, SERVICE
# Globals written: SYSTEM_WARM_START, SERVICE_WARM_START, WARM_BOOT
function check_warm_boot()
{
    # $(...) instead of legacy backticks; either flag may come back empty.
    SYSTEM_WARM_START=$($SONIC_DB_CLI STATE_DB hget "WARM_RESTART_ENABLE_TABLE|system" enable)
    SERVICE_WARM_START=$($SONIC_DB_CLI STATE_DB hget "WARM_RESTART_ENABLE_TABLE|${SERVICE}" enable)
    # SYSTEM_WARM_START could be empty, always make WARM_BOOT meaningful.
    # [[ ]] needs no x-prefix guard; empty values compare safely.
    if [[ "$SYSTEM_WARM_START" == "true" ]] || [[ "$SERVICE_WARM_START" == "true" ]]; then
        WARM_BOOT="true"
    else
        WARM_BOOT="false"
    fi
}
|
|
|
|
|
|
|
|
# Block until the redis server answers PING and CONFIG_DB reports that its
# initial load has completed.
# Globals read: SONIC_DB_CLI
function wait_for_database_service()
{
    # Wait for redis server start before database clean
    until $SONIC_DB_CLI PING | grep -q PONG; do
        sleep 1
    done

    # Wait for configDB initialization; the key's value is non-empty once
    # the initial configuration has been loaded.
    until [[ -n $($SONIC_DB_CLI CONFIG_DB GET "CONFIG_DB_INITIALIZED") ]]; do
        sleep 1
    done
}
|
|
|
|
|
2018-12-15 13:36:12 -06:00
|
|
|
# Classify the current boot as warm / fastfast / fast / cold by inspecting
# the kernel command line; a fast boot additionally requires the
# FAST_REBOOT marker to still be present in STATE_DB.
# Prints the boot type on stdout.
# Globals read: SONIC_DB_CLI; written: TYPE
# same code snippet in files/build_templates/docker_image_ctl.j2
function getBootType()
{
    # Default to a cold boot; the case arms below override it.
    TYPE='cold'
    case "$(cat /proc/cmdline)" in
    *SONIC_BOOT_TYPE=warm*)
        TYPE='warm'
        ;;
    *SONIC_BOOT_TYPE=fastfast*)
        TYPE='fastfast'
        ;;
    *SONIC_BOOT_TYPE=fast*|*fast-reboot*)
        # check that the key exists; without it this is a cold boot
        if [[ $($SONIC_DB_CLI STATE_DB GET "FAST_REBOOT|system") == "1" ]]; then
            TYPE='fast'
        fi
        ;;
    esac
    echo "${TYPE}"
}
|
|
|
|
|
2018-09-24 18:35:01 -05:00
|
|
|
# Start the ${SERVICE} (syncd) container for this device/namespace.
# Flow: take the state-change lock, wait for redis/CONFIG_DB, detect warm
# boot, perform platform-specific bring-up (Mellanox / Cavium), then launch
# the service container and release the lock.
# Globals read: SERVICE, DEV, sonic_asic_platform (from environment)
# Globals written: WARM_BOOT (via check_warm_boot), BOOT_TYPE, FAST_BOOT
start() {
    debug "Starting ${SERVICE}$DEV service..."

    lock_service_state_change

    # Warm-boot marker files live under /host/warmboot.
    mkdir -p /host/warmboot

    wait_for_database_service
    check_warm_boot

    debug "Warm boot flag: ${SERVICE}$DEV ${WARM_BOOT}."

    if [[ x"$WARM_BOOT" == x"true" ]]; then
        # Leave a mark for syncd scripts running inside docker.
        touch /host/warmboot/warm-starting
    else
        rm -f /host/warmboot/warm-starting
    fi

    # platform specific tasks

    # start mellanox drivers regardless of
    # boot type
    if [[ x"$sonic_asic_platform" == x"mellanox" ]]; then
        BOOT_TYPE=`getBootType`
        if [[ x"$WARM_BOOT" == x"true" || x"$BOOT_TYPE" == x"fast" ]]; then
            export FAST_BOOT=1
        fi

        if [[ x"$WARM_BOOT" != x"true" ]]; then
            # pmon holds sx_core resources and must not be active while
            # syncd (re)initializes the driver (see stop() for the
            # matching shutdown ordering).
            if [[ x"$(/bin/systemctl is-active pmon)" == x"active" ]]; then
                /bin/systemctl stop pmon
                debug "pmon is active while syncd starting, stop it first"
            fi
        fi

        if [[ x"$BOOT_TYPE" == x"fast" ]]; then
            /usr/bin/hw-management.sh chipupdis
        fi

        /usr/bin/mst start
        /usr/bin/mlnx-fw-upgrade.sh
        /etc/init.d/sxdkernel start
    fi

    if [[ x"$WARM_BOOT" != x"true" ]]; then
        if [ x$sonic_asic_platform == x'cavium' ]; then
            /etc/init.d/xpnet.sh start
        fi
    fi

    # start service docker
    /usr/bin/${SERVICE}.sh start $DEV
    debug "Started ${SERVICE} service..."

    unlock_service_state_change
}
|
|
|
|
|
2019-03-08 12:59:41 -06:00
|
|
|
# Wait on the running service container (systemd keeps this process alive
# for the lifetime of the unit). On Mellanox, pmon is started here so it
# comes up only after syncd is running.
# NOTE: this deliberately shadows the bash builtin `wait`; it is invoked
# via the case dispatch at the bottom of the script.
# Globals read: SERVICE, DEV, sonic_asic_platform
wait() {
    if [[ x"$sonic_asic_platform" == x"mellanox" ]]; then
        debug "Starting pmon service..."
        /bin/systemctl start pmon
        debug "Started pmon service"
    fi

    /usr/bin/${SERVICE}.sh wait $DEV
}
|
|
|
|
|
|
|
|
# Stop the ${SERVICE} (syncd) container for this device/namespace.
# Flow: take the state-change lock, classify the shutdown as warm or cold,
# stop pmon ahead of syncd on Mellanox cold shutdowns, request a graceful
# syncd shutdown (bounded by a 20 s timer), stop the container, run
# platform-specific teardown, and release the lock.
# Globals read: SERVICE, DEV, sonic_asic_platform
# Globals written: WARM_BOOT (via check_warm_boot), TYPE,
#                  start_in_secs, end_in_secs, timer_threshold
stop() {
    debug "Stopping ${SERVICE}$DEV service..."

    lock_service_state_change
    check_warm_boot
    debug "Warm boot flag: ${SERVICE}$DEV ${WARM_BOOT}."

    if [[ x"$WARM_BOOT" == x"true" ]]; then
        TYPE=warm
    else
        TYPE=cold
    fi

    # On Mellanox cold shutdowns, pmon must stop before syncd so it can
    # release sx_core resources; warm/fast flows skip this to save time.
    if [[ x$sonic_asic_platform == x"mellanox" ]] && [[ x$TYPE == x"cold" ]]; then
        debug "Stopping pmon service ahead of syncd..."
        /bin/systemctl stop pmon
        debug "Stopped pmon service"
    fi

    if [[ x$sonic_asic_platform != x"mellanox" ]] || [[ x$TYPE != x"cold" ]]; then
        debug "${TYPE} shutdown syncd process ..."
        /usr/bin/docker exec -i syncd$DEV /usr/bin/syncd_request_shutdown --${TYPE}

        # wait until syncd quits gracefully or force syncd to exit after
        # waiting for 20 seconds
        start_in_secs=${SECONDS}
        end_in_secs=${SECONDS}
        timer_threshold=20
        while docker top syncd$DEV | grep -q /usr/bin/syncd \
            && [[ $((end_in_secs - start_in_secs)) -le $timer_threshold ]]; do
            sleep 0.1
            end_in_secs=${SECONDS}
        done

        if [[ $((end_in_secs - start_in_secs)) -gt $timer_threshold ]]; then
            debug "syncd process in container syncd$DEV did not exit gracefully"
        fi

        # Flush filesystem buffers inside the container before it stops.
        /usr/bin/docker exec -i syncd$DEV /bin/sync
        debug "Finished ${TYPE} shutdown syncd process ..."
    fi

    /usr/bin/${SERVICE}.sh stop $DEV
    debug "Stopped ${SERVICE}$DEV service..."

    # platform specific tasks
    if [[ x"$WARM_BOOT" != x"true" ]]; then
        if [ x$sonic_asic_platform == x'mellanox' ]; then
            /etc/init.d/sxdkernel stop
            /usr/bin/mst stop
        elif [ x$sonic_asic_platform == x'cavium' ]; then
            # restart xpnet to leave the interface in a clean state
            /etc/init.d/xpnet.sh stop
            /etc/init.d/xpnet.sh start
        fi
    fi

    unlock_service_state_change
}
|
|
|
|
|
2020-03-31 12:06:19 -05:00
|
|
|
# ---- script entry point ----
# $1: operation (start|wait|stop); $2: optional ASIC/device index used to
#     select the per-ASIC network namespace on multi-ASIC platforms.
OP=$1
DEV=$2

SERVICE="syncd"
PEER="swss"
DEBUGLOG="/tmp/swss-syncd-debug$DEV.log"
LOCKFILE="/tmp/swss-syncd-lock$DEV"
NAMESPACE_PREFIX="asic"
if [ -n "$DEV" ]; then
    # Multi-ASIC: point sonic-db-cli at this device's network namespace.
    NET_NS="$NAMESPACE_PREFIX$DEV"
    SONIC_DB_CLI="sonic-db-cli -n $NET_NS"
else
    NET_NS=""
    SONIC_DB_CLI="sonic-db-cli"
fi

case "$OP" in
    start|wait|stop)
        # Dispatch to the function named after the requested operation.
        "$OP"
        ;;
    *)
        echo "Usage: $0 {start|wait|stop}"
        exit 1
        ;;
esac
|