#!/bin/bash

# single instance containers are still supported (even though it might not look like it)
# if no instance number is passed to this script, $DEV will simply be unset, resulting in docker
# commands being sent to the base container name. E.g. `docker start database$DEV` simply starts
# the container `database` if no instance number is passed since `$DEV` is not defined

{%- if docker_container_name == "database" %}
link_namespace() {
    # Makes namespace of a docker container available in
    # /var/run/netns so it can be managed with iproute2

    mkdir -p /var/run/netns
    PID="$(docker inspect -f {{"'{{.State.Pid}}'"}} "${DOCKERNAME}")"

    PIDS=`ip netns pids "$NET_NS" 2>/dev/null`
    if [ "$?" -eq "0" ]; then # namespace exists
        if `echo $PIDS | grep --quiet -w $PID`; then # namespace is correctly linked
            return 0
        else # if it's incorrectly linked remove it
            ip netns delete $NET_NS
        fi
    fi

    ln -s /proc/$PID/ns/net /var/run/netns/$NET_NS
}
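
# Illustrative use (not executed here; names are examples): once database1's namespace has been
# linked by link_namespace as /var/run/netns/asic1, it can be managed from the host with
# iproute2, e.g. "ip netns exec asic1 ip addr show".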
{%- endif %}

function updateSyslogConf()
{
    # On multiNPU platforms, change the syslog target ip to docker0 ip to allow logs from containers
    # running on the namespace to reach the rsyslog service running on the host
    # Also update the container name
    if [[ ($NUM_ASIC -gt 1) ]]; then
        TARGET_IP=$(docker network inspect bridge --format={{ "'{{(index .IPAM.Config 0).Gateway}}'" }})
        CONTAINER_NAME="$DOCKERNAME"
        TMP_FILE="/tmp/rsyslog.$CONTAINER_NAME.conf"
{%- if docker_container_name == "database" %}
        python -c "import jinja2, os; paths=['/usr/share/sonic/templates']; loader = jinja2.FileSystemLoader(paths); env = jinja2.Environment(loader=loader, trim_blocks=True); template_file='/usr/share/sonic/templates/rsyslog-container.conf.j2'; template = env.get_template(os.path.basename(template_file)); data=template.render({\"target_ip\":\"$TARGET_IP\",\"container_name\":\"$CONTAINER_NAME\"}); print(data)" > $TMP_FILE
{%- else %}
        sonic-cfggen -t /usr/share/sonic/templates/rsyslog-container.conf.j2 -a "{\"target_ip\": \"$TARGET_IP\", \"container_name\": \"$CONTAINER_NAME\" }" > $TMP_FILE
{%- endif %}
        docker cp $TMP_FILE ${DOCKERNAME}:/etc/rsyslog.conf
        rm -rf $TMP_FILE
    fi
}
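
# For illustration only: "docker network inspect bridge" reports the docker0 gateway address
# (commonly 172.17.0.1 with Docker's default bridge settings), so on multi-ASIC systems the
# rendered rsyslog.conf inside the container forwards logs to the host's rsyslog over docker0.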

function ebtables_config()
{
    if [[ "$DEV" && $DATABASE_TYPE != "dpudb" ]]; then
        # Install ebtables filter in namespaces on multi-asic.
        ip netns exec $NET_NS ebtables-restore < /etc/ebtables.filter.cfg
    else
        if [[ ! ($NUM_ASIC -gt 1) ]]; then
            # Install ebtables filter in host for single asic.
            ebtables-restore < /etc/ebtables.filter.cfg
        fi
    fi
}

function getMountPoint()
{
    echo $1 | python -c "import sys, json, os; mnts = [x for x in json.load(sys.stdin)[0]['Mounts'] if x['Destination'] == '/usr/share/sonic/hwsku']; print('' if len(mnts) == 0 else os.path.abspath(mnts[0]['Source']))" 2>/dev/null
}
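
# Example (illustrative): given "docker inspect" JSON whose Mounts list includes an entry with
# Destination "/usr/share/sonic/hwsku", getMountPoint prints that entry's host Source path,
# e.g. /usr/share/sonic/device/$PLATFORM/$HWSKU; it prints an empty string when no such mount exists.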

function getBootType()
{
    # same code snippet in files/scripts/syncd.sh
    case "$(cat /proc/cmdline)" in
    *SONIC_BOOT_TYPE=warm*)
        TYPE='warm'
        ;;
    *SONIC_BOOT_TYPE=fastfast*)
        TYPE='fastfast'
        ;;
    *SONIC_BOOT_TYPE=fast*|*fast-reboot*)
        TYPE='fast'
        ;;
    *)
        TYPE='cold'
    esac
    echo "${TYPE}"
}
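
# Example: a kernel cmdline containing "SONIC_BOOT_TYPE=warm" yields "warm", "fastfast" yields
# "fastfast", "SONIC_BOOT_TYPE=fast" or "fast-reboot" yields "fast", and anything else falls
# through to "cold".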

function preStartAction()
{
{%- if docker_container_name == "database" %}
    WARM_DIR=/host/warmboot
    if [ "$DATABASE_TYPE" != "chassisdb" ]; then
        if [[ ("$BOOT_TYPE" == "warm" || "$BOOT_TYPE" == "fastfast" || "$BOOT_TYPE" == "fast") && -f $WARM_DIR/dump.rdb ]]; then
            # Load redis content from /host/warmboot/dump.rdb
            docker cp $WARM_DIR/dump.rdb database$DEV:/var/lib/redis/dump.rdb
        else
            # Create an empty file and overwrite any RDB if already there
            echo -n > /tmp/dump.rdb
            docker cp /tmp/dump.rdb database$DEV:/var/lib/redis/
        fi
    fi
{%- elif docker_container_name == "pde" %}
    if [[ $(/bin/systemctl status swss | grep -c running) -gt 0 ]]; then
        echo "Stopping SWSS before starting PDE"
        systemctl stop swss
    fi

    if [[ $(/bin/systemctl status watchdog-control.service | grep -c running) -gt 0 ]]; then
        echo "Stopping watchdog-control.service before starting PDE"
        systemctl stop watchdog-control.service
    fi
{%- elif docker_container_name == "snmp" %}
    $SONIC_DB_CLI STATE_DB HSET 'DEVICE_METADATA|localhost' chassis_serial_number $(decode-syseeprom -s)
{%- else %}
    : # nothing
{%- endif %}
    updateSyslogConf
}

{%- if docker_container_name == "database" %}

function setPlatformLagIdBoundaries()
{
    docker exec -i ${DOCKERNAME} $SONIC_DB_CLI CHASSIS_APP_DB SET "SYSTEM_LAG_ID_START" "$lag_id_start"
    docker exec -i ${DOCKERNAME} $SONIC_DB_CLI CHASSIS_APP_DB SET "SYSTEM_LAG_ID_END" "$lag_id_end"
}

function waitForAllInstanceDatabaseConfigJsonFilesReady()
{
    if [ ! -z "$DEV" ]; then
        cnt=0
        SONIC_DB_GLOBAL_JSON="/var/run/redis/sonic-db/database_global.json"
        if [ -f "$SONIC_DB_GLOBAL_JSON" ]; then
            # Create a separate python script to get a list of the locations of all instance database_config.json files
            redis_database_cfg_list=`/usr/bin/python -c "import sys; import os; import json; f=open(sys.argv[1]); \
                global_db_dir = os.path.dirname(sys.argv[1]); data=json.load(f); \
                print(\" \".join([os.path.normpath(global_db_dir+'/'+elem['include']) \
                for elem in data['INCLUDES'] if 'namespace' in elem])); f.close()" $SONIC_DB_GLOBAL_JSON`
            for file in $redis_database_cfg_list
            do
                while [ ! -f $file ]
                do
                    sleep 1
                    cnt=$(( $cnt + 1))
                    if [ $cnt -ge 60 ]; then
                        echo "Error: $file not found"
                        break
                    fi
                done
            done
        fi
        # Delay a second to allow all instance database_config.json files to be completely generated and fully accessible.
        # This delay is needed to make sure that the database_config.json files are correctly rendered from j2 template
        # files (rendering takes some time)
        sleep 1
    fi
}
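
# For illustration: the inline python above resolves each per-namespace "include" entry of
# database_global.json relative to that file's directory, and the loop then waits (up to roughly
# 60 seconds per file) for every referenced database_config.json to appear.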
{%- endif %}

function postStartAction()
{
{%- if docker_container_name == "database" %}
    CHASSISDB_CONF="/usr/share/sonic/device/$PLATFORM/chassisdb.conf"
    [ -f $CHASSISDB_CONF ] && source $CHASSISDB_CONF
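    # chassisdb.conf, when present, is expected to define the platform-specific settings used
    # below, e.g. midplane_subnet, lag_id_start and lag_id_end; the exact values are platform
    # specific and not assumed here.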
    if [[ "$DEV" && $DATABASE_TYPE != "dpudb" ]]; then
        # Enable forwarding on the eth0 interface in the namespace.
        SYSCTL_NET_CONFIG="/etc/sysctl.d/sysctl-net.conf"
        docker exec -i database$DEV sed -i -e "s/^net.ipv4.conf.eth0.forwarding=0/net.ipv4.conf.eth0.forwarding=1/;
                                               s/^net.ipv6.conf.eth0.forwarding=0/net.ipv6.conf.eth0.forwarding=1/" $SYSCTL_NET_CONFIG
        docker exec -i database$DEV sysctl --system -e
        link_namespace $DEV

        if [[ -n "$midplane_subnet" ]]; then
            # Use /16 for loopback interface
            ip netns exec "$NET_NS" ip addr add 127.0.0.1/16 dev lo
            ip netns exec "$NET_NS" ip addr del 127.0.0.1/8 dev lo

            slot_id=$(python3 -c 'import sonic_platform.platform; platform_chassis = sonic_platform.platform.Platform().get_chassis(); print(platform_chassis.get_my_slot())' 2>/dev/null)
            supervisor_slot_id=$(python3 -c 'import sonic_platform.platform; platform_chassis = sonic_platform.platform.Platform().get_chassis(); print(platform_chassis.get_supervisor_slot())' 2>/dev/null)

            # Create eth1 in database instance
            if [[ "${slot_id}" == "${supervisor_slot_id}" ]]; then
                ip link add name ns-eth1"$NET_NS" type veth peer name eth1@"$NET_NS"
                ip link set dev eth1@"$NET_NS" master br1
                ip link set dev eth1@"$NET_NS" up
                # For chassis system where Linux bridge is used on supervisor for midplane communication
                # assign alternate name as eth1-midplane for generic design
                ip link property add dev br1 altname eth1-midplane
            else
                ip link add name ns-eth1"$NET_NS" link eth1-midplane type macvlan mode bridge
            fi

            # Move eth1 into the database instance's namespace
            ip link set dev ns-eth1"$NET_NS" netns "$NET_NS"
            ip netns exec "$NET_NS" ip link set ns-eth1"$NET_NS" name eth1

            # Configure IP address and enable eth1
            slot_ip_address=`echo $midplane_subnet | awk -F. '{print $1 "." $2}'`.$slot_id.$(($DEV + 10))
            slot_subnet_mask=${midplane_subnet#*/}
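            # Example with hypothetical values: midplane_subnet=127.100.0.0/16, slot_id=2 and
            # DEV=1 yield slot_ip_address=127.100.2.11 and slot_subnet_mask=16, so eth1 in the
            # namespace is addressed as 127.100.2.11/16 below.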
            ip netns exec "$NET_NS" ip addr add $slot_ip_address/$slot_subnet_mask dev eth1
            ip netns exec "$NET_NS" ip link set dev eth1 up

            # Allow localnet routing on the new interfaces if midplane is using a
            # subnet in the 127/8 range.
            if [[ "${midplane_subnet#127}" != "$midplane_subnet" ]]; then
                ip netns exec "$NET_NS" bash -c "echo 1 > /proc/sys/net/ipv4/conf/eth1/route_localnet"
            fi
        fi
    fi

    # Setup ebtables configuration
{%- if sonic_asic_platform != "vs" %}
    ebtables_config
{%- endif %}

    # chassisdb starts before database starts, bypass the PING check since other
    # databases are not available until the database container is ready.
    # Also, chassisdb doesn't support warm/fast reboot; its dump.rdb is deleted
    # at service startup time, so nothing needs to be done here.
    if [[ "$DATABASE_TYPE" != "chassisdb" ]]; then
        # Wait until supervisord and redis start. This is needed because database_config.json
        # is now rendered from a jinja2 template; if we ping redis before the file is fully
        # generated, a "file not valid" python exception ends up in syslog, which is unwanted.
        # So wait until the database config is ready and then ping.
        # sonic-db-cli tries to initialize the global database. On a multi-ASIC platform the
        # initial global database will access all other instances' database_config.json files;
        # if those files are not ready yet, sonic-db-cli will generate core files.
        waitForAllInstanceDatabaseConfigJsonFilesReady
        until [[ ($(docker exec -i database$DEV pgrep -x -c supervisord) -gt 0) && ($($SONIC_DB_CLI PING | grep -c PONG) -gt 0) &&
                 ($(docker exec -i database$DEV sonic-db-cli PING | grep -c PONG) -gt 0) ]]; do
            sleep 1;
        done

        if [[ ("$BOOT_TYPE" == "warm" || "$BOOT_TYPE" == "fastfast" || "$BOOT_TYPE" == "fast") && -f $WARM_DIR/dump.rdb ]]; then
            # retain the dump file from last boot for debugging purposes
            mv $WARM_DIR/dump.rdb $WARM_DIR/dump.rdb.old
        else
            # If there is a config_db.json dump file, load it.
            if [ -r /etc/sonic/config_db$DEV.json ]; then
                if [ -r /etc/sonic/init_cfg.json ]; then
                    $SONIC_CFGGEN -j /etc/sonic/init_cfg.json -j /etc/sonic/config_db$DEV.json --write-to-db
                else
                    $SONIC_CFGGEN -j /etc/sonic/config_db$DEV.json --write-to-db
                fi
            fi

            if [[ "$BOOT_TYPE" == "fast" ]]; then
                # this is the case when the base OS version does not support fast-reboot with reconciliation logic (dump.rdb is absent)
                # In this case, we need to set the flag to indicate fast-reboot is in progress. Set the key to expire in 3 minutes
                $SONIC_DB_CLI STATE_DB SET "FAST_REBOOT|system" "1" "EX" "180"
            fi
        fi

        if [ -e /tmp/pending_config_migration ] || [ -e /tmp/pending_config_initialization ]; then
            # this is the first boot into a new image and config-setup execution is pending.
            # For the warmboot case, the DB is loaded but migration is still pending.
            # For the firstboot/fast/cold reboot case, the DB contains nothing at this point.
            # Unset CONFIG_DB_INITIALIZED to indicate pending config load and migration.
            # This flag will be set to "1" after DB migration/initialization is completed as part of config-setup.
            $SONIC_DB_CLI CONFIG_DB SET "CONFIG_DB_INITIALIZED" "0"
        else
            $SONIC_DB_CLI CONFIG_DB SET "CONFIG_DB_INITIALIZED" "0"
            # this is not a first time boot into a new image. The database container starts with old pre-existing config
            if [[ -x /usr/local/bin/db_migrator.py ]]; then
                # Migrate the DB to the latest schema version if needed
                if [ -z "$DEV" ]; then
                    /usr/local/bin/db_migrator.py -o migrate
                fi
            fi
            # set CONFIG_DB_INITIALIZED to indicate end of config load and migration
            $SONIC_DB_CLI CONFIG_DB SET "CONFIG_DB_INITIALIZED" "1"
        fi

        # Add redis UDS to the redis group and give read/write access to the group
        REDIS_SOCK="/var/run/redis${DEV}/redis.sock"
    else
        until [[ ($(docker exec -i ${DOCKERNAME} pgrep -x -c supervisord) -gt 0) &&
                 ($(docker exec -i ${DOCKERNAME} $SONIC_DB_CLI CHASSIS_APP_DB PING | grep -c True) -gt 0) ]]; do
            sleep 1
        done
        if [[ -n "$lag_id_start" && -n "$lag_id_end" ]]; then
            setPlatformLagIdBoundaries
        fi
        REDIS_SOCK="/var/run/redis-chassis/redis_chassis.sock"
    fi
    chgrp -f redis $REDIS_SOCK && chmod -f 0760 $REDIS_SOCK
{%- elif docker_container_name == "swss" %}
    # Wait until swss container state is Running
    until [[ ($(docker inspect -f {{"'{{.State.Running}}'"}} swss$DEV) == "true") ]]; do
        sleep 0.1
    done
    echo "swss container is up and running"

    docker exec swss$DEV rm -f /ready # remove cruft
    if [[ "$BOOT_TYPE" == "fast" ]] && [[ -d /host/fast-reboot ]]; then
        test -e /host/fast-reboot/fdb.json && docker cp /host/fast-reboot/fdb.json swss$DEV:/
        test -e /host/fast-reboot/arp.json && docker cp /host/fast-reboot/arp.json swss$DEV:/
        test -e /host/fast-reboot/default_routes.json && docker cp /host/fast-reboot/default_routes.json swss$DEV:/
        test -e /host/fast-reboot/media_config.json && docker cp /host/fast-reboot/media_config.json swss$DEV:/
        rm -fr /host/fast-reboot
    fi
    docker exec swss$DEV touch /ready # signal swssconfig.sh to go
    # Re-confirm that file is indeed created and log an error if not
    docker exec swss$DEV test -f /ready && echo "File swss:/ready created" || echo "Error: File swss:/ready doesn't exist"

{%- elif docker_container_name == "pmon" %}

    DEVPATH="/usr/share/sonic/device"
    REBOOT="platform_reboot"
    PSENSOR="/usr/local/bin/platform_sensors.py"
    if [ -d ${DEVPATH}/${PLATFORM} ] && [ -f $PSENSOR ]; then
        exist=`docker exec -i pmon ls /usr/bin/platform_sensors.py "$@" 2>/dev/null`
        if [ -z "$exist" ]; then
            docker cp $PSENSOR pmon:/usr/bin/
        fi
    fi
{%- else %}
    : # nothing
{%- endif %}
}

start() {
    # Obtain boot type from kernel arguments
    BOOT_TYPE=`getBootType`

    # Obtain our platform as we will mount directories with these names in each docker
    PLATFORM=${PLATFORM:-`$SONIC_CFGGEN -H -v DEVICE_METADATA.localhost.platform`}

    # Parse the device specific asic conf file, if it exists
    ASIC_CONF=/usr/share/sonic/device/$PLATFORM/asic.conf
    if [ -f "$ASIC_CONF" ]; then
        source $ASIC_CONF
    fi

    # Default rsyslog target IP for single ASIC platform
    SYSLOG_TARGET_IP=127.0.0.1
    if [[ ($NUM_ASIC -gt 1) ]]; then
        SYSLOG_TARGET_IP=$(docker network inspect bridge --format={{ "'{{(index .IPAM.Config 0).Gateway}}'" }})
    fi

    PLATFORM_ENV_CONF=/usr/share/sonic/device/$PLATFORM/platform_env.conf
    if [ -f "$PLATFORM_ENV_CONF" ]; then
        source $PLATFORM_ENV_CONF
    fi

{%- if sonic_asic_platform == "broadcom" %}
{%- if docker_container_name == "syncd" %}
    # Set the SYNCD_SHM_SIZE if this variable is not defined
    BRCM_PLATFORM_COMMON_DIR=/usr/share/sonic/device/x86_64-broadcom_common
    SYNCD_SHM_INI=$BRCM_PLATFORM_COMMON_DIR/syncd_shm.ini

    readline=$(grep '0x14e4' /proc/linux-kernel-bde)
    bcm_chip_id=${readline#*0x14e4:0x}
    bcm_chip_id=${bcm_chip_id::3}
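    # Illustration (values are examples only): a /proc/linux-kernel-bde line containing
    # "0x14e4:0xb850" yields bcm_chip_id "b85"; syncd_shm.ini is then expected to map that
    # id to a shared-memory size with a line such as "b85=256m".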

    if [ -z "$SYNCD_SHM_SIZE" ]; then
        if [ -z "$bcm_chip_id" ]; then
            echo "Cannot get Broadcom Chip Id. Skip set SYNCD_SHM_SIZE."
        elif [ -f "$SYNCD_SHM_INI" ] && [ "$(grep -m1 "^${bcm_chip_id}=" $SYNCD_SHM_INI)" ]; then
            SYNCD_SHM_SIZE=`grep -m1 "^${bcm_chip_id}=" $SYNCD_SHM_INI | awk -F= '{print $2}'`
        else
            echo "Cannot get SYNCD_SHM_SIZE for chip: [${bcm_chip_id}] in $SYNCD_SHM_INI. Skip set SYNCD_SHM_SIZE."
        fi
    fi
{%- endif %}
{%- endif %}

{%- if docker_container_name == "gbsyncd" %}
    GBSYNCD_CONF=/usr/share/sonic/device/$PLATFORM/gbsyncd.ini
    GBSYNCD_PLATFORM=gbsyncd-vs
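    # gbsyncd.ini, when present, is expected to contain key=value lines; a "platform=..." entry
    # (e.g. "platform=gbsyncd-<vendor>", placeholder shown only for illustration) overrides the
    # gbsyncd-vs default and selects which gearbox syncd image is launched below.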
    if [ -f "$GBSYNCD_CONF" ]; then
        while IFS="=" read -r key value; do
            case "$key" in
            platform)
                GBSYNCD_PLATFORM="$value"
                ;;
            esac
        done < "$GBSYNCD_CONF"
    fi
{%- endif %}

{%- if docker_container_name == "database" %}
    # Don't mount HWSKU in {{docker_container_name}} container.
    HWSKU=""
    MOUNTPATH=""
{%- else %}
    # Obtain our HWSKU as we will mount directories with these names in each docker
    HWSKU=${HWSKU:-`$SONIC_CFGGEN -d -v 'DEVICE_METADATA["localhost"]["hwsku"]'`}
    MOUNTPATH="/usr/share/sonic/device/$PLATFORM/$HWSKU"
    if [ "$DEV" ]; then
        MOUNTPATH="$MOUNTPATH/$DEV"
    fi
{%- endif %}

{%- if docker_container_name == "swss" %}
    # Insert "create_only_config_db_buffers" attribute
    HWSKU_FOLDER="/usr/share/sonic/device/$PLATFORM/$HWSKU"
    if [ -d "$HWSKU_FOLDER" ]; then
        CREATE_ONLY_CONFIG_DB_BUFFERS_JSON="$HWSKU_FOLDER/create_only_config_db_buffers.json"
        if [ -f "$CREATE_ONLY_CONFIG_DB_BUFFERS_JSON" ]; then
            $SONIC_CFGGEN -j $CREATE_ONLY_CONFIG_DB_BUFFERS_JSON --write-to-db
        fi
    fi
{%- endif %}

    DOCKERCHECK=`docker inspect --type container ${DOCKERNAME} 2>/dev/null`
    if [ "$?" -eq "0" ]; then
{%- if docker_container_name == "database" %}
        DOCKERMOUNT=""
{%- else %}
        DOCKERMOUNT=`getMountPoint "$DOCKERCHECK"`
{%- endif %}
        if [ x"$DOCKERMOUNT" == x"$MOUNTPATH" ]; then
            CONTAINER_EXISTS="yes"
            preStartAction
{%- if docker_container_name == "database" %}
            echo "Starting existing ${DOCKERNAME} container"
            docker start ${DOCKERNAME}
{%- else %}
            echo "Starting existing ${DOCKERNAME} container with HWSKU $HWSKU"
            /usr/local/bin/container start ${DOCKERNAME}
{%- endif %}
            postStartAction
            exit $?
        fi

        # docker created with a different HWSKU, remove and recreate
        echo "Removing obsolete ${DOCKERNAME} container with HWSKU $DOCKERMOUNT"
        docker rm -f ${DOCKERNAME}
    fi

{%- if docker_container_name == "database" %}

    echo "Creating new ${DOCKERNAME} container"
    if [ "$DATABASE_TYPE" != "chassisdb" ]; then
        if [ -z "$DEV" ]; then
            # if database_global exists in old_config, use it; otherwise use the default one in new image
            if [ -f /etc/sonic/old_config/database_global.json ]; then
                echo "Use database_global.json from old system..."
                mv /etc/sonic/old_config/database_global.json /etc/sonic/
            fi
        fi
        # if database_config exists in old_config, use it; otherwise use the default one in new image
        if [ -f /etc/sonic/old_config/database_config$DEV.json ]; then
            echo "Use database_config.json from old system..."
            mv /etc/sonic/old_config/database_config$DEV.json /etc/sonic/
        fi
    fi
{%- else %}
    echo "Creating new ${DOCKERNAME} container with HWSKU $HWSKU"
{%- endif %}

{%- if docker_container_name == "swss" %}
    # Generate the asic_table.json and peripheral_table.json
    if [ ! -f /etc/sonic/asic_table.json ] && [ -f /usr/share/sonic/templates/asic_table.j2 ]; then
        sonic-cfggen -d -t /usr/share/sonic/templates/asic_table.j2 > /etc/sonic/asic_table.json 2> errorlog.txt
        if [[ $? -ne 0 ]] ; then
            echo "sonic-cfggen failed to render asic_table.json"
            echo "$(cat errorlog.txt)"
            sonic-cfggen -a '{"DEVICE_METADATA":{"localhost":{"platform":"'$PLATFORM'"}}}' -t /usr/share/sonic/templates/asic_table.j2 > /etc/sonic/asic_table.json
        fi
    fi
    if [ ! -f /etc/sonic/peripheral_table.json ] && [ -f /usr/share/sonic/device/$PLATFORM/port_peripheral_config.j2 ]; then
        sonic-cfggen -d -t /usr/share/sonic/device/$PLATFORM/port_peripheral_config.j2 > /etc/sonic/peripheral_table.json 2> errorlog.txt
        if [[ $? -ne 0 ]] ; then
            echo "sonic-cfggen failed to render peripheral_table.json"
            echo "$(cat errorlog.txt)"
            sonic-cfggen -a '{"DEVICE_METADATA":{"localhost":{"platform":"'$PLATFORM'"}}}' -t /usr/share/sonic/device/$PLATFORM/port_peripheral_config.j2 > /etc/sonic/peripheral_table.json
        fi
    fi
    if [ ! -f /etc/sonic/zero_profiles.json ] && [ -f /usr/share/sonic/templates/zero_profiles.j2 ]; then
        sonic-cfggen -d -t /usr/share/sonic/device/$PLATFORM/zero_profiles.j2 > /etc/sonic/zero_profiles.json 2> errorlog.txt
        if [[ $? -ne 0 ]] ; then
            echo "sonic-cfggen failed to render zero_profiles.json"
            echo "$(cat errorlog.txt)"
            sonic-cfggen -t /usr/share/sonic/device/$PLATFORM/zero_profiles.j2 > /etc/sonic/zero_profiles.json
        fi
    fi

{%- if enable_asan == "y" %}
    mkdir -p /var/log/asan
{%- endif %}
{%- endif %}

    # In Multi ASIC platforms the global database config file database_global.json will exist.
    # Parse the file and get the include path for the database_config.json files used in
    # various namespaces. The database_config paths are relative to the DIR of SONIC_DB_GLOBAL_JSON.
    SONIC_DB_GLOBAL_JSON="/var/run/redis/sonic-db/database_global.json"
    if [ -f "$SONIC_DB_GLOBAL_JSON" ]; then
        # TODO Create a separate python script with the below logic and invoke it here.
        redis_dir_list=`/usr/bin/python -c "import sys; import os; import json; f=open(sys.argv[1]); \
            global_db_dir = os.path.dirname(sys.argv[1]); data=json.load(f); \
            print(\" \".join([os.path.normpath(global_db_dir+'/'+elem['include']).partition('sonic-db')[0]\
            for elem in data['INCLUDES'] if 'namespace' in elem])); f.close()" $SONIC_DB_GLOBAL_JSON`
    fi

{%- if docker_container_name == "database" %}
    start_chassis_db=0
    chassis_db_address=""
    chassisdb_config="/usr/share/sonic/device/$PLATFORM/chassisdb.conf"
    [ -f $chassisdb_config ] && source $chassisdb_config
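    # When present, chassisdb.conf may also set start_chassis_db and chassis_db_address,
    # which drive the redis-chassis mount and the redis_chassis.server host entry below
    # (values are platform specific; none are assumed here).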
    DB_OPT=" -v /var/run/redis-chassis:/var/run/redis-chassis:ro "
    if [[ "$start_chassis_db" != "1" ]] && [[ -z "$chassis_db_address" ]]; then
        DB_OPT=""
    else
        DB_OPT=$DB_OPT" --add-host=redis_chassis.server:$chassis_db_address "
    fi
{%- endif %}

    if [[ -z "$DEV" || $DATABASE_TYPE == "dpudb" ]]; then
        NET="host"

        # For Multi-ASIC platform we have to mount the redis paths for database instances running in different
        # namespaces, into the single instance dockers like snmp, pmon on linux host. These global dockers
        # will need to get/set tables from databases in different namespaces.
        # /var/run/redis0 ---> mounted as --> /var/run/redis0
        # /var/run/redis1 ---> mounted as --> /var/run/redis1 .. etc
        # The below logic extracts the base DIR's where database_config.json's for various namespaces exist.
        # redis_dir_list is a string of form "/var/run/redis0/ /var/run/redis1/ /var/run/redis2/"
{%- if docker_container_name != "database" %}
        if [ -n "$redis_dir_list" ]; then
            for redis_dir in $redis_dir_list
            do
                REDIS_MNT=$REDIS_MNT" -v $redis_dir:$redis_dir:rw "
            done
        fi
{%- else %}
        if [ "$DATABASE_TYPE" == "chassisdb" ]; then
            DB_OPT=${DB_OPT/redis-chassis:ro/redis-chassis:rw}
            DB_OPT=$DB_OPT" -v /var/run/redis-chassis:/var/run/redis:rw "
            DB_OPT=$DB_OPT" --env DATABASE_TYPE=$DATABASE_TYPE"
        else
            DB_OPT=$DB_OPT" -v /var/run/redis$DEV:/var/run/redis:rw "
            DB_OPT=$DB_OPT" --env DATABASE_TYPE=$DATABASE_TYPE "
            DB_OPT=$DB_OPT" --env NUM_DPU=$NUM_DPU "
            if [[ "$DEV" ]]; then
                DB_OPT=$DB_OPT" -v /var/run/redis$DEV:/var/run/redis$DEV:rw "
            fi
        fi
{%- endif %}
    else
        # This part of code is applicable for Multi-ASIC platforms. Here we mount the namespace specific
        # redis directory into the docker running in that namespace. Below eg: is for namespace "asic1"
        # /var/run/redis1 ---> mounted as --> /var/run/redis1
        # redis_dir_list is a string of form "/var/run/redis0/ /var/run/redis1/ /var/run/redis2/"
        if [ -n "$redis_dir_list" ]; then
            id=`expr $DEV + 1`
            redis_dir=`echo $redis_dir_list | cut -d " " -f $id`
            REDIS_MNT=" -v $redis_dir:$redis_dir:rw "
        fi

{%- if docker_container_name == "database" %}
        NET="bridge"
        DB_OPT=$DB_OPT" -v /var/run/redis$DEV:/var/run/redis:rw "
{%- else %}
        NET="container:database$DEV"
        DB_OPT=""
{%- endif %}
    fi

{%- if docker_container_name == "bgp" %}
    if [ "$DEV" ]; then
        if [ ! -d "/etc/sonic/frr/$DEV" ]; then
            mkdir /etc/sonic/frr/$DEV
            cp -r /etc/sonic/frr/*.conf /etc/sonic/frr/$DEV
        fi
    fi
{%- endif %}

    NAMESPACE_ID="$DEV"
    if [[ $DATABASE_TYPE == "dpudb" ]]; then
        NAMESPACE_ID=""
    fi

{%- if sonic_asic_platform == "mellanox" %}
    # TODO: Mellanox will remove the --tmpfs exception after SDK socket path changed in new SDK version
{%- endif %}
    docker create {{docker_image_run_opt}} \
{%- if docker_container_name != "dhcp_server" %}
        --net=$NET \
{%- endif %}
        -e RUNTIME_OWNER=local \
        --uts=host \{# W/A: this should be set per-docker, for those dockers which really need host's UTS namespace #}
{%- if install_debug_image == "y" %}
        -v /src:/src:ro -v /debug:/debug:rw \
{%- endif %}
{%- if '--log-driver=json-file' in docker_image_run_opt or '--log-driver' not in docker_image_run_opt %}
        --log-opt max-size=2M --log-opt max-file=5 \
{%- endif %}
{%- if sonic_asic_platform == "mellanox" %}
{%- if docker_container_name == "syncd" %}
        -v /var/log/mellanox:/var/log/mellanox:rw \
        -v mlnx_sdk_socket:/var/run/sx_sdk \
        -v /tmp/nv-syncd-shared/:/tmp \
        -v /dev/shm:/dev/shm:rw \
        -v /var/log/sai_failure_dump:/var/log/sai_failure_dump:rw \
        -e SX_API_SOCKET_FILE=/var/run/sx_sdk/sx_api.sock \
{%- elif docker_container_name == "pmon" %}
        -v /var/run/hw-management:/var/run/hw-management:rw \
        -v mlnx_sdk_socket:/var/run/sx_sdk \
        -v /tmp/nv-syncd-shared/:/tmp \
        -v /dev/shm:/dev/shm:rw \
        -e SX_API_SOCKET_FILE=/var/run/sx_sdk/sx_api.sock \
        -v /dev/shm:/dev/shm:rw \
{%- else %}
{%- if mount_default_tmpfs|default("n") == "y" %}
        --tmpfs /tmp \
{%- endif %}
{%- endif %}
{%- endif %}
{%- if sonic_asic_platform == "broadcom" %}
{%- if docker_container_name == "syncd" %}
        --shm-size=${SYNCD_SHM_SIZE:-64m} \
        -v /var/run/docker-syncd$DEV:/var/run/sswsyncd \
{%- endif %}
{%- endif %}
{%- if docker_container_name == "pmon" %}
        -v /usr/share/sonic/firmware:/usr/share/sonic/firmware:rw \
{%- endif %}
{%- if docker_container_name == "swss" %}
        -e ASIC_VENDOR={{ sonic_asic_platform }} \
{%- endif -%}
{%- if docker_container_name in ["swss", "syncd"] and enable_asan == "y" %}
        -v /var/log/asan/:/var/log/asan \
{%- endif -%}
{%- if docker_container_name == "bgp" %}
        -v /etc/sonic/frr/$DEV:/etc/frr:rw \
{%- endif %}
{%- if docker_container_name == "database" %}
        $DB_OPT \
{%- else %}
        -v /var/run/redis$DEV:/var/run/redis:rw \
        -v /var/run/redis-chassis:/var/run/redis-chassis:ro \
        -v /usr/share/sonic/device/$PLATFORM/$HWSKU/$DEV:/usr/share/sonic/hwsku:ro \
{%- endif %}
        $REDIS_MNT \
        -v /etc/fips/fips_enable:/etc/fips/fips_enable:ro \
        -v /usr/share/sonic/device/$PLATFORM:/usr/share/sonic/platform:ro \
        -v /usr/share/sonic/templates/rsyslog-container.conf.j2:/usr/share/sonic/templates/rsyslog-container.conf.j2:ro \
{%- if sonic_asic_platform != "mellanox" %}
{%- if mount_default_tmpfs|default("n") == "y" %}
        --tmpfs /tmp \
{%- endif %}
{%- endif %}
{%- if mount_default_tmpfs|default("n") == "y" %}
        --tmpfs /var/tmp \
{%- endif %}
        --env "NAMESPACE_ID"="$NAMESPACE_ID" \
        --env "NAMESPACE_PREFIX"="$NAMESPACE_PREFIX" \
        --env "NAMESPACE_COUNT"="$NUM_ASIC" \
        --env "DEV"="$DEV" \
        --env "CONTAINER_NAME"=$DOCKERNAME \
        --env "SYSLOG_TARGET_IP"=$SYSLOG_TARGET_IP \
        --env "PLATFORM"=$PLATFORM \
        --name=$DOCKERNAME \
{%- if docker_container_name == "gbsyncd" %}
        -v /var/run/docker-syncd$DEV:/var/run/sswsyncd \
        "docker-$GBSYNCD_PLATFORM":latest \
{%- elif docker_image_name is defined %}
        {{docker_image_name}}:latest \
{%- else %}
        {{docker_image_id}} \
{%- endif %}
    || {
        echo "Failed to docker run" >&2
        exit 4
    }

    preStartAction
{%- if docker_container_name == "database" %}
    docker start $DOCKERNAME
{%- else %}
    /usr/local/bin/container start ${DOCKERNAME}
{%- endif %}
    postStartAction
}

wait() {
{%- if docker_container_name == "database" %}
    docker wait $DOCKERNAME
{%- else %}
    /usr/local/bin/container wait $DOCKERNAME
{%- endif %}
}

stop() {
{%- if docker_container_name == "database" %}
    docker stop $DOCKERNAME
    if [[ "$DEV" && $DATABASE_TYPE != "dpudb" ]]; then
        ip netns delete "$NET_NS"
    fi
{%- elif docker_container_name == "teamd" %}
    # Longer timeout of 60 sec to wait for Portchannels to be cleaned.
    /usr/local/bin/container stop -t 60 $DOCKERNAME
{%- elif docker_container_name in ["swss", "syncd"] and enable_asan == "y" %}
    /usr/local/bin/container stop -t 60 $DOCKERNAME
{%- else %}
    /usr/local/bin/container stop $DOCKERNAME
{%- endif %}
}

kill() {
{%- if docker_container_name == "database" %}
    docker kill $DOCKERNAME
    if [[ "$DEV" && $DATABASE_TYPE != "dpudb" ]]; then
        ip netns delete "$NET_NS"
    fi
{%- else %}
    /usr/local/bin/container kill $DOCKERNAME
{%- endif %}
}

DOCKERNAME={{docker_container_name}}
OP=$1
DEV=$2 # namespace/device number to operate on
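
# Example invocations (illustrative): "$0 start" operates on the base container
# {{docker_container_name}}, while "$0 start 1" operates on {{docker_container_name}}1 and,
# on multi-ASIC platforms, on network namespace asic1 (see NAMESPACE_PREFIX below).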
{%- if docker_container_name == "database" %}
if [ "$DEV" == "chassisdb" ]; then
    DATABASE_TYPE="chassisdb"
    DOCKERNAME=$DOCKERNAME"-chassis"
    unset DEV
fi

if [[ "$DEV" == *"dpu"* ]]; then
    DATABASE_TYPE="dpudb"
fi
{%- endif %}

NAMESPACE_PREFIX="asic"
DOCKERNAME=$DOCKERNAME$DEV
CONTAINER_EXISTS="no"
if [[ "$DEV" && $DATABASE_TYPE != "dpudb" ]]; then
    NET_NS="$NAMESPACE_PREFIX$DEV" #name of the network namespace

    SONIC_CFGGEN="sonic-cfggen -n $NET_NS"
    SONIC_DB_CLI="sonic-db-cli -n $NET_NS"
else
    NET_NS=""
    SONIC_CFGGEN="sonic-cfggen"
    SONIC_DB_CLI="sonic-db-cli"
fi

# read SONiC immutable variables
[ -f /etc/sonic/sonic-environment ] && . /etc/sonic/sonic-environment

case "$1" in
    start|wait|stop|kill)
        $1
        ;;
    *)
        echo "Usage: $0 {start namespace(optional)|wait namespace(optional)|stop namespace(optional)|kill namespace(optional)}"
        exit 1
        ;;
esac