Merge remote-tracking branch 'origin/master' into master_python_log

This commit is contained in:
Junchao-Mellanox 2023-12-18 12:55:05 +02:00
commit 64ac9db120
65 changed files with 2039 additions and 502 deletions

View File

@ -28,13 +28,14 @@ stages:
cmake pkg-config python3-pip python cmake libgtest-dev libgmock-dev libyang-dev \
debhelper-compat dh-elpa dh-sequence-python3 python3-all \
libpython3-all-dev python3-six xmlto unzip rake-compiler gem2deb pkg-php-tools \
ant default-jdk maven-repo-helper libguava-java
ant default-jdk maven-repo-helper libguava-java \
libboost-all-dev libgtest-dev build-essential
wget http://ftp.us.debian.org/debian/pool/main/libg/libgoogle-gson-java/libgoogle-gson-java_2.8.6-1+deb11u1_all.deb
sudo dpkg -i libgoogle-gson-java_2.8.6-1+deb11u1_all.deb
mkdir -p /tmp/artifacts
displayName: "Install dependencies"
- script: |
SONIC_CONFIG_MAKE_JOBS=$(nproc) CONFIGURED_ARCH=amd64 DEST=/tmp/artifacts make -f ../rules/protobuf.mk -f protobuf/Makefile
BLDENV=bullseye SONIC_CONFIG_MAKE_JOBS=$(nproc) CONFIGURED_ARCH=amd64 DEST=/tmp/artifacts make -f ../rules/protobuf.mk -f protobuf/Makefile
workingDirectory: src
displayName: "Build protobuf"
- script: |

View File

@ -114,12 +114,19 @@ jobs:
docker_syncd_rpc_image: yes
platform_rpc: nephos
- name: pensando
pool: sonicbld-arm64
variables:
PLATFORM_ARCH: arm64
buildSteps:
- template: .azure-pipelines/template-skipvstest.yml@buildimage
- template: .azure-pipelines/template-daemon.yml@buildimage
- bash: |
set -ex
if [ $(GROUP_NAME) == vs ]; then
if [ $(GROUP_NAME) == pensando ]; then
make $BUILD_OPTIONS target/sonic-pensando.tar
elif [ $(GROUP_NAME) == vs ]; then
if [ $(dbg_image) == yes ]; then
make $BUILD_OPTIONS INSTALL_DEBUG_TOOLS=y target/sonic-vs.img.gz
mv target/sonic-vs.img.gz target/sonic-vs-dbg.img.gz

View File

@ -65,6 +65,7 @@ stages:
- name: broadcom
variables:
swi_image: yes
INCLUDE_RESTAPI: y
- name: mellanox
variables:
dbg_image: yes
@ -79,6 +80,7 @@ stages:
timeoutInMinutes: 1200
variables:
PLATFORM_ARCH: armhf
INCLUDE_RESTAPI: y
- stage: Test
dependsOn: BuildVS

View File

@ -1,68 +1,32 @@
#!/bin/bash
#!/usr/bin/python3
declare -r SYSLOG_LOGGER="/usr/bin/logger"
declare -r SYSLOG_IDENTIFIER="platform_wait"
declare -r SYSLOG_ERROR="error"
declare -r SYSLOG_NOTICE="notice"
declare -r SYSLOG_INFO="info"
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
declare -r HW_MGMT_CONFIG="/var/run/hw-management/config"
import sys
from sonic_platform.device_data import DeviceDataManager
from sonic_py_common.logger import Logger
declare -r ASIC_INIT_DONE="${HW_MGMT_CONFIG}/asics_init_done"
declare -r NUM_ASICS="${HW_MGMT_CONFIG}/asic_num"
declare -r ASIC_CHIPUP_COMPLETED="${HW_MGMT_CONFIG}/asic_chipup_completed"
declare -r EXIT_SUCCESS="0"
declare -r EXIT_TIMEOUT="1"
function log_error() {
eval "${SYSLOG_LOGGER} -t ${SYSLOG_IDENTIFIER} -p ${SYSLOG_ERROR} $@"
}
function log_notice() {
eval "${SYSLOG_LOGGER} -t ${SYSLOG_IDENTIFIER} -p ${SYSLOG_NOTICE} $@"
}
function log_info() {
eval "${SYSLOG_LOGGER} -t ${SYSLOG_IDENTIFIER} -p ${SYSLOG_INFO} $@"
}
function wait_for_asic_chipup() {
local _ASIC_INIT="0"
local _ASIC_COUNT="0"
local _ASICS_CHIPUP="0"
local -i _WDOG_CNT="1"
local -ir _WDOG_MAX="300"
local -r _TIMEOUT="1s"
while [[ "${_WDOG_CNT}" -le "${_WDOG_MAX}" ]]; do
_ASIC_INIT="$(cat ${ASIC_INIT_DONE} 2>&1)"
_ASIC_COUNT="$(cat ${NUM_ASICS} 2>&1)"
_ASICS_CHIPUP="$(cat ${ASIC_CHIPUP_COMPLETED} 2>&1)"
if [[ "${_ASIC_INIT}" -eq 1 && "${_ASIC_COUNT}" -eq "${_ASICS_CHIPUP}" ]]; then
return "${EXIT_SUCCESS}"
fi
let "_WDOG_CNT++"
sleep "${_TIMEOUT}"
done
log_error "Mellanox ASIC is not ready: INIT: ${_ASIC_INIT}, NUM_ASIC: ${_ASIC_COUNT}, CHIPUP: ${_ASICS_CHIPUP} timeout...."
return "${EXIT_TIMEOUT}"
}
log_info "Wait for Mellanox ASIC to be ready"
wait_for_asic_chipup
EXIT_CODE="$?"
if [[ "${EXIT_CODE}" != "${EXIT_SUCCESS}" ]]; then
exit "${EXIT_CODE}"
fi
log_notice "Mellanox ASIC is ready"
exit "${EXIT_SUCCESS}"
logger = Logger(log_identifier='platform_wait')
logger.log_notice('Nvidia: Wait for PMON dependencies to be ready')
if DeviceDataManager.wait_platform_ready():
logger.log_notice('Nvidia: PMON dependencies are ready')
sys.exit(0)
else:
logger.log_error('Nvidia: PMON dependencies are not ready: timeout')
sys.exit(-1)

View File

@ -1,9 +1,7 @@
[group:dhcp-relay]
programs=dhcprelayd,
programs=dhcprelayd
{%- set relay_for_ipv6 = { 'flag': False } %}
{%- set add_preceding_comma = { 'flag': False } %}
{% if dhcp_server_ipv4_enabled %}
{%- endif %}
{%- set add_preceding_comma = { 'flag': True } %}
{% for vlan_name in VLAN_INTERFACE %}
{% if DHCP_RELAY and vlan_name in DHCP_RELAY and DHCP_RELAY[vlan_name]['dhcpv6_servers']|length > 0 %}
{% set _dummy = relay_for_ipv6.update({'flag': True}) %}

View File

@ -224,7 +224,7 @@ RUN mkdir -p /tmp/protobuf \
# Install dash-api
RUN cd /tmp \
&& mkdir -p /usr/lib/python3/dist-packages/dash_api \
&& wget https://raw.githubusercontent.com/sonic-net/sonic-buildimage/master/src/sonic-dash-api/pypkg/__init__.py -O /usr/lib/python3/dist-packages/dash_api/__init__.py \
&& wget https://raw.githubusercontent.com/sonic-net/sonic-dash-api/master/misc/pypkg/dash_api/__init__.py -O /usr/lib/python3/dist-packages/dash_api/__init__.py \
&& git clone https://github.com/sonic-net/sonic-dash-api.git \
&& protoc -I=sonic-dash-api/proto --python_out=/usr/lib/python3/dist-packages/dash_api sonic-dash-api/proto/*.proto \
&& rm -rf /tmp/sonic-dash-api

View File

@ -157,5 +157,15 @@
"memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M",
"num_dumps": "3"
}
},
"NTP": {
"global": {
"authentication": "disabled",
"dhcp": "enabled",
"server_role": "disabled",
"src_intf": "eth0",
"admin_state": "enabled",
"vrf": "default"
}
}
}

View File

@ -373,10 +373,14 @@ sudo dpkg --root=$FILESYSTEM_ROOT -i $debs_path/flashrom_*.deb
sudo cp -f $IMAGE_CONFIGS/cron.d/* $FILESYSTEM_ROOT/etc/cron.d/
# Copy NTP configuration files and templates
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT \
apt-get -y install ntpdate
sudo rm -f $FILESYSTEM_ROOT/etc/network/if-up.d/ntpsec-ntpdate
sudo cp $IMAGE_CONFIGS/ntp/ntp-config.service $FILESYSTEM_ROOT_USR_LIB_SYSTEMD_SYSTEM
echo "ntp-config.service" | sudo tee -a $GENERATED_SERVICE_FILE
sudo cp $IMAGE_CONFIGS/ntp/ntp-config.sh $FILESYSTEM_ROOT/usr/bin/
sudo cp $IMAGE_CONFIGS/ntp/ntp.conf.j2 $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/
sudo cp $IMAGE_CONFIGS/ntp/ntp.keys.j2 $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/
sudo cp $IMAGE_CONFIGS/ntp/ntp-systemd-wrapper $FILESYSTEM_ROOT/usr/libexec/ntpsec/
sudo mkdir $FILESYSTEM_ROOT_USR_LIB_SYSTEMD_SYSTEM/ntpsec.service.d
sudo cp $IMAGE_CONFIGS/ntp/sonic-target.conf $FILESYSTEM_ROOT_USR_LIB_SYSTEMD_SYSTEM/ntpsec.service.d/

View File

@ -24,6 +24,8 @@ function modify_ntp_default
}
sonic-cfggen -d -t /usr/share/sonic/templates/ntp.conf.j2 >/etc/ntpsec/ntp.conf
sonic-cfggen -d -t /usr/share/sonic/templates/ntp.keys.j2 >/etc/ntpsec/ntp.keys
chmod o-r /etc/ntp.keys
get_database_reboot_type
echo "Disabling NTP long jump for reboot type ${reboot_type} ..."

View File

@ -13,7 +13,8 @@ if [ -r /etc/default/ntpsec ]; then
. /etc/default/ntpsec
fi
if [ "$IGNORE_DHCP" != "yes" ] && [ -e /run/ntpsec/ntp.conf.dhcp ]; then
dhcp=$(/usr/local/bin/sonic-cfggen -d -v 'NTP["global"]["dhcp"]' 2> /dev/null)
if [ "$IGNORE_DHCP" != "yes" ] && [ -e /run/ntpsec/ntp.conf.dhcp ] && [ "$dhcp" = "enabled" ]; then
NTPD_OPTS="$NTPD_OPTS -c /run/ntpsec/ntp.conf.dhcp"
else
# List the default -c first, so if the admin has specified -c in
@ -26,6 +27,14 @@ NTPD_OPTS="$NTPD_OPTS -u ntpsec:ntpsec"
# Protect the service startup against concurrent ntpdate ifup hooks
(
if flock -w 180 9; then
ntpEnabled=$(/usr/local/bin/sonic-cfggen -d -v 'NTP["global"]["admin_state"]' 2> /dev/null)
if [ "$ntpEnabled" = "disabled" ]
then
echo "Stopping NTP daemon"
kill -9 $(cat $PIDFILE)
exit 0
fi
# when mgmt vrf is configured, ntp starts in mgmt vrf by default unless user configures otherwise
vrfEnabled=$(/usr/local/bin/sonic-cfggen -d -v 'MGMT_VRF_CONFIG["vrf_global"]["mgmtVrfEnabled"]' 2> /dev/null)
vrfConfigured=$(/usr/local/bin/sonic-cfggen -d -v 'NTP["global"]["vrf"]' 2> /dev/null)

View File

@ -1,9 +1,9 @@
###############################################################################
# Managed by Ansible
# file: ansible/roles/acs/templates/ntp.conf.j2
# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY.
# Controlled by ntp-config.service
###############################################################################
# /etc/ntpsec/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
# To avoid ntpd from panic and exit if the drift between new time and
# current system time is large.
@ -12,35 +12,82 @@ tinker panic 0
driftfile /var/lib/ntpsec/ntp.drift
leapfile /usr/share/zoneinfo/leap-seconds.list
# To enable Network Time Security support as a server, obtain a certificate
# (e.g. with Let's Encrypt), configure the paths below, and uncomment:
# nts cert CERT_FILE
# nts key KEY_FILE
# nts enable
{# Getting NTP global configuration -#}
{% set global = (NTP | d({})).get('global', {}) -%}
# You must create /var/log/ntpsec (owned by ntpsec:ntpsec) to enable logging.
#statsdir /var/log/ntpsec/
#statistics loopstats peerstats clockstats
#filegen loopstats file loopstats type day enable
#filegen peerstats file peerstats type day enable
#filegen clockstats file clockstats type day enable
{# Adding NTP servers. We need to know if we have some pools, to set proper
config -#}
{% set ns = namespace(is_pools=false) %}
{% for server in NTP_SERVER if NTP_SERVER[server].admin_state != 'disabled' and
NTP_SERVER[server].resolve_as and
NTP_SERVER[server].association_type -%}
{% set config = NTP_SERVER[server] -%}
{# Server options -#}
{% set soptions = '' -%}
{# Server access control options -#}
{% set aoptions = '' -%}
# Specify one or more NTP servers.
{# Authentication key -#}
{% if global.authentication == 'enabled' -%}
{% if config.key -%}
{% set soptions = soptions ~ ' key ' ~ config.key -%}
{% endif -%}
{% endif -%}
# Public NTP servers supporting Network Time Security:
# server time.cloudflare.com nts
{% for ntp_server in NTP_SERVER %}
server {{ ntp_server }} iburst
{# Aggressive polling -#}
{% if config.iburst -%}
{% set soptions = soptions ~ ' iburst' -%}
{% endif -%}
{# Protocol version -#}
{% if config.version -%}
{% set soptions = soptions ~ ' version ' ~ config.version -%}
{% endif -%}
{# Check if there are any pool configured. BTW it doesn't matter what was
configured as "resolve_as" for pools. If they were configured with FQDN they
must remain like that -#}
{% set config_as = config.resolve_as -%}
{% if config.association_type == 'pool' -%}
{% set ns.is_pools = true -%}
{% set config_as = server -%}
{% else -%}
{% set aoptions = aoptions ~ ' nopeer' -%}
{% endif -%}
{{ config.association_type }} {{ config_as }}{{ soptions }}
{% if global.server_role == 'disabled' %}
restrict {{ config_as }} kod limited nomodify notrap noquery{{ aoptions }}
{% endif %}
{% endfor -%}
{% set trusted_keys_arr = [] -%}
{% for key in NTP_KEY -%}
{% set keydata = NTP_KEY[key] -%}
{% if keydata.trusted == 'yes' -%}
{% set trusted_keys_arr = trusted_keys_arr.append(key) -%}
{% endif -%}
{% endfor %}
# pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will
# pick a different set every time it starts up. Please consider joining the
# pool: <https://www.pool.ntp.org/join.html>
{% if global.authentication == 'enabled' %}
keys /etc/ntpsec/ntp.keys
{% if trusted_keys_arr != [] %}
trustedkey {{ trusted_keys_arr|join(' ') }}
{% endif %}
{% endif %}
#listen on source interface if configured, else
#only listen on MGMT_INTERFACE, LOOPBACK_INTERFACE ip when MGMT_INTERFACE is not defined, or eth0
# if we don't have both of them (default is to listen on all ip addresses)
{# listen on source interface if configured, else only listen on MGMT_INTERFACE,
LOOPBACK_INTERFACE ip when MGMT_INTERFACE is not defined, or eth0 if we don't
have both of them (default is to listen on all ip addresses) -#}
interface ignore wildcard
{# Set interface to listen on:
* Set global variable for configured source interface name.
* Set global boolean to indicate if the ip of the configured source
interface is configured.
* If the source interface is configured but no ip on that
interface, then listen on another interface based on existing logic. -#}
{%- macro check_ip_on_interface(interface_name, table_name) %}
{%- set ns = namespace(valid_intf = 'false') %}
{%- if table_name %}
@ -55,8 +102,8 @@ interface ignore wildcard
{% set ns = namespace(source_intf = "") %}
{%- set ns = namespace(source_intf_ip = 'false') %}
{%- if (NTP) and (NTP['global']['src_intf']) %}
{%- set ns.source_intf = (NTP['global']['src_intf']) %}
{%- if global.src_intf %}
{%- set ns.source_intf = global.src_intf %}
{%- if ns.source_intf != "" %}
{%- if ns.source_intf == "eth0" %}
{%- set ns.source_intf_ip = 'true' %}
@ -91,16 +138,19 @@ interface listen eth0
{% endif %}
interface listen 127.0.0.1
# Access control configuration; see /usr/share/doc/ntpsec-doc/html/accopt.html
# for details.
#
# Note that "restrict" applies to both servers and clients, so a configuration
# that might be intended to block requests from certain clients could also end
# up blocking replies from your own upstream servers.
{# Access control options -#}
{% set options = '' -%}
{# Disable NTP server functionality. Should stay on when dhcp is enabled -#}
{# {% if global.server_role == 'disabled' and global.dhcp == 'disabled' -%}
{% set options = options ~ ' ignore' -%}
{% endif -%} #}
# Access control configuration
# By default, exchange time with everybody, but don't allow configuration.
# NTPsec doesn't establish peer associations, and so nopeer has no effect, and has been removed from here
restrict default kod nomodify noquery limited
# NTPsec doesn't establish peer associations, and so nopeer has no effect, and
# has been removed from here
restrict default kod nomodify noquery limited{{ options }}
# Local users may interrogate the ntp server more closely.
restrict 127.0.0.1

View File

@ -0,0 +1,18 @@
###############################################################################
# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY.
# Controlled by ntp-config.service
###############################################################################
{# We can connect only to the servers we trust. Determine those servers -#}
{% set trusted_arr = [] -%}
{% for server in NTP_SERVER if NTP_SERVER[server].trusted == 'yes' and
NTP_SERVER[server].resolve_as -%}
{% set _ = trusted_arr.append(NTP_SERVER[server].resolve_as) -%}
{% endfor -%}
{# Define authentication keys inventory -#}
{% set trusted_str = ' ' ~ trusted_arr|join(',') -%}
{% for keyid in NTP_KEY if NTP_KEY[keyid].type and NTP_KEY[keyid].value %}
{% set keyval = NTP_KEY[keyid].value | b64decode %}
{{ keyid }} {{ NTP_KEY[keyid].type }} {{ keyval }}{{trusted_str}}
{% endfor -%}

View File

@ -1,100 +0,0 @@
#!/bin/sh
# This file was originally created automatically as part of default NTP application installation from debian package.
# This is now manually modified for supporting NTP in management VRF.
# When management VRF is enabled, the NTP application should be started using "cgexec -g l3mdev:mgmt".
# Check has been added to verify the management VRF enabled status and use cgexec when it is enabled.
# This file will be copied on top of the etc/init.d/ntpsec file that gets created during build process.
### BEGIN INIT INFO
# Provides: ntpsec
# Required-Start: $network $remote_fs $syslog
# Required-Stop: $network $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Start NTP daemon
# Description: NTP, the Network Time Protocol, is used to keep computer
# clocks accurate by synchronizing them over the Internet or
# a local network, or by following an accurate hardware
# receiver that interprets GPS, DCF-77, or similar time
# signals.
### END INIT INFO
PATH=/sbin:/bin:/usr/sbin:/usr/bin
. /lib/lsb/init-functions
DAEMON=/usr/sbin/ntpd
PIDFILE=/run/ntpd.pid
test -x $DAEMON || exit 5
if [ -r /etc/default/ntpsec ]; then
. /etc/default/ntpsec
fi
if [ "$IGNORE_DHCP" != "yes" ] && [ -e /run/ntpsec/ntp.conf.dhcp ]; then
NTPD_OPTS="$NTPD_OPTS -c /run/ntpsec/ntp.conf.dhcp"
else
# List the default -c first, so if the admin has specified -c in
# NTPD_OPTS, it is honored.
NTPD_OPTS="-c /etc/ntpsec/ntp.conf $NTPD_OPTS"
fi
NTPD_OPTS="$NTPD_OPTS -u ntpsec:ntpsec"
LOCKFILE=/run/lock/ntpsec-ntpdate
case $1 in
start)
log_daemon_msg "Starting NTP server" "ntpd"
(
flock -w 180 9
# when mgmt vrf is configured, ntp starts in mgmt vrf by default unless user configures otherwise
vrfEnabled=$(/usr/local/bin/sonic-cfggen -d -v 'MGMT_VRF_CONFIG["vrf_global"]["mgmtVrfEnabled"]' 2> /dev/null)
vrfConfigured=$(/usr/local/bin/sonic-cfggen -d -v 'NTP["global"]["vrf"]' 2> /dev/null)
if [ "$vrfEnabled" = "true" ]
then
if [ "$vrfConfigured" = "default" ]
then
log_daemon_msg "Starting NTP server in default-vrf for default set as NTP vrf" "ntpd"
start-stop-daemon --start --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- -p $PIDFILE $NTPD_OPTS
else
log_daemon_msg "Starting NTP server in mgmt-vrf" "ntpd"
cgexec -g l3mdev:mgmt start-stop-daemon --start --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- -p $PIDFILE $NTPD_OPTS
fi
else
log_daemon_msg "Starting NTP server in default-vrf" "ntpd"
start-stop-daemon --start --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- -p $PIDFILE $NTPD_OPTS
fi
) 9>$LOCKFILE
log_end_msg $?
;;
stop)
log_daemon_msg "Stopping NTP server" "ntpd"
start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE --retry=TERM/30/KILL/5 --exec $DAEMON
log_end_msg $?
rm -f $PIDFILE
;;
restart|force-reload)
$0 stop && sleep 2 && $0 start
;;
try-restart)
if $0 status >/dev/null; then
$0 restart
else
exit 0
fi
;;
reload)
exit 3
;;
status)
status_of_proc $DAEMON "NTP server"
;;
*)
echo "Usage: $0 {start|stop|restart|try-restart|force-reload|status}"
exit 2
;;
esac

View File

@ -20,6 +20,11 @@ function waitplatform() {
}
function stopplatform1() {
if ! docker top gbsyncd$DEV | grep -q /usr/bin/syncd; then
debug "syncd process in container gbsyncd$DEV is not running"
return
fi
# Invoke platform specific pre shutdown routine.
PLATFORM=`$SONIC_DB_CLI CONFIG_DB hget 'DEVICE_METADATA|localhost' platform`
PLATFORM_PRE_SHUTDOWN="/usr/share/sonic/device/$PLATFORM/plugins/gbsyncd_request_pre_shutdown"

View File

@ -264,14 +264,6 @@ demo_install_grub()
exit 1
}
# Create a blank environment block file.
if [ ! -f "$onie_initrd_tmp/$demo_mnt/grub/grubenv" ]; then
grub-editenv "$onie_initrd_tmp/$demo_mnt/grub/grubenv" create || {
echo "ERROR: grub-editenv failed on: $blk_dev"
exit 1
}
fi
if [ "$demo_type" = "DIAG" ] ; then
# Install GRUB in the partition also. This allows for
# chainloading the DIAG image from another OS.
@ -354,14 +346,6 @@ demo_install_uefi_grub()
}
rm -f $grub_install_log
# Create a blank environment block file.
if [ ! -f "$demo_mnt/grub/grubenv" ]; then
grub-editenv "$demo_mnt/grub/grubenv" create || {
echo "ERROR: grub-editenv failed on: $blk_dev"
exit 1
}
fi
# Configure EFI NVRAM Boot variables. --create also sets the
# new boot number as active.
grub=$(find /boot/efi/EFI/$demo_volume_label/ -name grub*.efi -exec basename {} \;)
@ -631,6 +615,14 @@ EOF
umount $demo_mnt
else
cp $grub_cfg $onie_initrd_tmp/$demo_mnt/grub/grub.cfg
# Create a blank environment block file.
if [ ! -f "$onie_initrd_tmp/$demo_mnt/grub/grubenv" ]; then
grub-editenv "$onie_initrd_tmp/$demo_mnt/grub/grubenv" create || {
echo "ERROR: grub-editenv failed on: $blk_dev"
exit 1
}
fi
fi
cd /

View File

@ -1,6 +1,6 @@
LIBSAIBCM_XGS_VERSION = 8.4.31.0
LIBSAIBCM_XGS_VERSION = 10.1.0.0
LIBSAIBCM_DNX_VERSION = 7.1.111.1
LIBSAIBCM_XGS_BRANCH_NAME = SAI_8.4.0_GA
LIBSAIBCM_XGS_BRANCH_NAME = SAI_10.1.0_GA
LIBSAIBCM_DNX_BRANCH_NAME = REL_7.0_SAI_1.11
LIBSAIBCM_XGS_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/sai-broadcom/$(LIBSAIBCM_XGS_BRANCH_NAME)/$(LIBSAIBCM_XGS_VERSION)/xgs"
LIBSAIBCM_DNX_URL_PREFIX = "https://sonicstorage.blob.core.windows.net/public/sai/bcmsai/$(LIBSAIBCM_DNX_BRANCH_NAME)/$(LIBSAIBCM_DNX_VERSION)"

View File

@ -25,29 +25,29 @@ SIMX_VERSION = 24.1-1007
FW_FROM_URL = y
MLNX_FW_ASSETS_RELEASE_TAG = fw-2012.2104
MLNX_FW_ASSETS_RELEASE_TAG = fw-2012.2134
MLNX_FW_ASSETS_URL = $(MLNX_ASSETS_GITHUB_URL)/releases/download/$(MLNX_FW_ASSETS_RELEASE_TAG)
ifeq ($(MLNX_FW_BASE_URL), )
MLNX_FW_BASE_URL = $(MLNX_FW_ASSETS_URL)
endif
MLNX_SPC_FW_VERSION = 13.2012.2104
MLNX_SPC_FW_VERSION = 13.2012.2134
MLNX_SPC_FW_FILE = fw-SPC-rel-$(subst .,_,$(MLNX_SPC_FW_VERSION))-EVB.mfa
$(MLNX_SPC_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH)
$(MLNX_SPC_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC_FW_FILE)
MLNX_SPC2_FW_VERSION = 29.2012.2104
MLNX_SPC2_FW_VERSION = 29.2012.2134
MLNX_SPC2_FW_FILE = fw-SPC2-rel-$(subst .,_,$(MLNX_SPC2_FW_VERSION))-EVB.mfa
$(MLNX_SPC2_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH)
$(MLNX_SPC2_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC2_FW_FILE)
MLNX_SPC3_FW_VERSION = 30.2012.2104
MLNX_SPC3_FW_VERSION = 30.2012.2134
MLNX_SPC3_FW_FILE = fw-SPC3-rel-$(subst .,_,$(MLNX_SPC3_FW_VERSION))-EVB.mfa
$(MLNX_SPC3_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH)
$(MLNX_SPC3_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC3_FW_FILE)
MLNX_SPC4_FW_VERSION = 34.2012.2104
MLNX_SPC4_FW_VERSION = 34.2012.2134
MLNX_SPC4_FW_FILE = fw-SPC4-rel-$(subst .,_,$(MLNX_SPC4_FW_VERSION))-EVB.mfa
$(MLNX_SPC4_FW_FILE)_PATH = $(MLNX_FW_BASE_PATH)
$(MLNX_SPC4_FW_FILE)_URL = $(MLNX_FW_BASE_URL)/$(MLNX_SPC4_FW_FILE)

View File

@ -82,6 +82,8 @@ class Chassis(ChassisBase):
# System UID LED
_led_uid = None
chassis_instance = None
def __init__(self):
super(Chassis, self).__init__()
@ -122,11 +124,14 @@ class Chassis(ChassisBase):
self.reboot_cause_initialized = False
self.sfp_module = None
self.sfp_lock = threading.Lock()
# Build the RJ45 port list from platform.json and hwsku.json
self._RJ45_port_inited = False
self._RJ45_port_list = None
Chassis.chassis_instance = self
self.modules_mgmt_thread = threading.Thread()
self.modules_changes_queue = queue.Queue()
self.modules_mgmt_task_stopping_event = threading.Event()
@ -273,38 +278,49 @@ class Chassis(ChassisBase):
def initialize_single_sfp(self, index):
sfp_count = self.get_num_sfps()
# Use double checked locking mechanism for:
# 1. protect shared resource self._sfp_list
# 2. performance (avoid locking every time)
if index < sfp_count:
if not self._sfp_list:
self._sfp_list = [None] * sfp_count
if not self._sfp_list or not self._sfp_list[index]:
with self.sfp_lock:
if not self._sfp_list:
self._sfp_list = [None] * sfp_count
if not self._sfp_list[index]:
sfp_module = self._import_sfp_module()
if self.RJ45_port_list and index in self.RJ45_port_list:
self._sfp_list[index] = sfp_module.RJ45Port(index)
else:
self._sfp_list[index] = sfp_module.SFP(index)
self.sfp_initialized_count += 1
if not self._sfp_list[index]:
sfp_module = self._import_sfp_module()
if self.RJ45_port_list and index in self.RJ45_port_list:
self._sfp_list[index] = sfp_module.RJ45Port(index)
else:
self._sfp_list[index] = sfp_module.SFP(index)
self.sfp_initialized_count += 1
def initialize_sfp(self):
if not self._sfp_list:
sfp_module = self._import_sfp_module()
sfp_count = self.get_num_sfps()
for index in range(sfp_count):
if self.RJ45_port_list and index in self.RJ45_port_list:
sfp_object = sfp_module.RJ45Port(index)
else:
sfp_object = sfp_module.SFP(index)
self._sfp_list.append(sfp_object)
self.sfp_initialized_count = sfp_count
elif self.sfp_initialized_count != len(self._sfp_list):
sfp_module = self._import_sfp_module()
for index in range(len(self._sfp_list)):
if self._sfp_list[index] is None:
if self.RJ45_port_list and index in self.RJ45_port_list:
self._sfp_list[index] = sfp_module.RJ45Port(index)
else:
self._sfp_list[index] = sfp_module.SFP(index)
self.sfp_initialized_count = len(self._sfp_list)
sfp_count = self.get_num_sfps()
# Use double checked locking mechanism for:
# 1. protect shared resource self._sfp_list
# 2. performance (avoid locking every time)
if sfp_count != self.sfp_initialized_count:
with self.sfp_lock:
if sfp_count != self.sfp_initialized_count:
if not self._sfp_list:
sfp_module = self._import_sfp_module()
for index in range(sfp_count):
if self.RJ45_port_list and index in self.RJ45_port_list:
sfp_object = sfp_module.RJ45Port(index)
else:
sfp_object = sfp_module.SFP(index)
self._sfp_list.append(sfp_object)
self.sfp_initialized_count = sfp_count
elif self.sfp_initialized_count != len(self._sfp_list):
sfp_module = self._import_sfp_module()
for index in range(len(self._sfp_list)):
if self._sfp_list[index] is None:
if self.RJ45_port_list and index in self.RJ45_port_list:
self._sfp_list[index] = sfp_module.RJ45Port(index)
else:
self._sfp_list[index] = sfp_module.SFP(index)
self.sfp_initialized_count = len(self._sfp_list)
def get_num_sfps(self):
"""

View File

@ -17,6 +17,7 @@
import glob
import os
import time
from . import utils
@ -167,8 +168,11 @@ class DeviceDataManager:
@classmethod
@utils.read_only_cache()
def get_sfp_count(cls):
sfp_count = utils.read_int_from_file('/run/hw-management/config/sfp_counter')
return sfp_count if sfp_count > 0 else len(glob.glob('/sys/module/sx_core/asic0/module*'))
from sonic_py_common import device_info
platform_path = device_info.get_path_to_platform_dir()
platform_json_path = os.path.join(platform_path, 'platform.json')
platform_data = utils.load_json_file(platform_json_path)
return len(platform_data['chassis']['sfps'])
@classmethod
def get_linecard_sfp_count(cls, lc_index):
@ -244,3 +248,23 @@ class DeviceDataManager:
sai_profile_file = os.path.join(hwsku_dir, 'sai.profile')
data = utils.read_key_value_file(sai_profile_file, delimeter='=')
return data.get('SAI_INDEPENDENT_MODULE_MODE') == '1'
@classmethod
def wait_platform_ready(cls):
"""
Wait for Nvidia platform related services(SDK, hw-management) ready
Returns:
bool: True if wait success else timeout
"""
conditions = []
sysfs_nodes = ['power_mode', 'power_mode_policy', 'present', 'reset', 'status', 'statuserror']
if cls.is_independent_mode():
sysfs_nodes.extend(['control', 'frequency', 'frequency_support', 'hw_present', 'hw_reset',
'power_good', 'power_limit', 'power_on', 'temperature/input'])
else:
conditions.append(lambda: utils.read_int_from_file('/var/run/hw-management/config/asics_init_done') == 1)
sfp_count = cls.get_sfp_count()
for sfp_index in range(sfp_count):
for sysfs_node in sysfs_nodes:
conditions.append(lambda: os.path.exists(f'/sys/module/sx_core/asic0/module{sfp_index}/{sysfs_node}'))
return utils.wait_until_conditions(conditions, 300, 1)

View File

@ -26,11 +26,14 @@ try:
import ctypes
import subprocess
import os
import threading
from sonic_py_common.logger import Logger
from sonic_py_common.general import check_output_pipe
from . import utils
from .device_data import DeviceDataManager
from sonic_platform_base.sonic_xcvr.sfp_optoe_base import SfpOptoeBase
from sonic_platform_base.sonic_xcvr.fields import consts
from sonic_platform_base.sonic_xcvr.api.public import sff8636, sff8436
except ImportError as e:
raise ImportError (str(e) + "- required module not found")
@ -155,6 +158,10 @@ SFP_TYPE_SFF8636 = 'sff8636'
# SFP stderr
SFP_EEPROM_NOT_AVAILABLE = 'Input/output error'
SFP_DEFAULT_TEMP_WARNNING_THRESHOLD = 70.0
SFP_DEFAULT_TEMP_CRITICAL_THRESHOLD = 80.0
SFP_TEMPERATURE_SCALE = 8.0
# SFP EEPROM limited bytes
limited_eeprom = {
SFP_TYPE_CMIS: {
@ -213,6 +220,9 @@ class SdkHandleContext(object):
deinitialize_sdk_handle(self.sdk_handle)
class NvidiaSFPCommon(SfpOptoeBase):
sfp_index_to_logical_port_dict = {}
sfp_index_to_logical_lock = threading.Lock()
def __init__(self, sfp_index):
super(NvidiaSFPCommon, self).__init__()
self.index = sfp_index + 1
@ -241,7 +251,31 @@ class NvidiaSFPCommon(SfpOptoeBase):
error_type = utils.read_int_from_file(status_error_file_path)
return oper_state, error_type
@classmethod
def get_sfp_index_to_logical_port(cls, force=False):
if not cls.sfp_index_to_logical_port_dict or force:
config_db = utils.DbUtils.get_db_instance('CONFIG_DB')
port_data = config_db.get_table('PORT')
for key, data in port_data.items():
if data['index'] not in cls.sfp_index_to_logical_port_dict:
cls.sfp_index_to_logical_port_dict[int(data['index']) - 1] = key
@classmethod
def get_logical_port_by_sfp_index(cls, sfp_index):
with cls.sfp_index_to_logical_lock:
cls.get_sfp_index_to_logical_port()
logical_port_name = cls.sfp_index_to_logical_port_dict.get(sfp_index)
if not logical_port_name:
cls.get_sfp_index_to_logical_port(force=True)
else:
config_db = utils.DbUtils.get_db_instance('CONFIG_DB')
current_index = int(config_db.get('CONFIG_DB', f'PORT|{logical_port_name}', 'index'))
if current_index != sfp_index:
cls.get_sfp_index_to_logical_port(force=True)
logical_port_name = cls.sfp_index_to_logical_port_dict.get(sfp_index)
return logical_port_name
class SFP(NvidiaSFPCommon):
"""Platform-specific SFP class"""
@ -264,7 +298,7 @@ class SFP(NvidiaSFPCommon):
if slot_id == 0: # For non-modular chassis
from .thermal import initialize_sfp_thermal
self._thermal_list = initialize_sfp_thermal(sfp_index)
self._thermal_list = initialize_sfp_thermal(self)
else: # For modular chassis
# (slot_id % MAX_LC_CONUNT - 1) * MAX_PORT_COUNT + (sfp_index + 1) * (MAX_PORT_COUNT / LC_PORT_COUNT)
max_linecard_count = DeviceDataManager.get_linecard_count()
@ -293,6 +327,17 @@ class SFP(NvidiaSFPCommon):
Returns:
bool: True if device is present, False if not
"""
if DeviceDataManager.is_independent_mode():
if utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/control') != 0:
if not utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/hw_present'):
return False
if not utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/power_good'):
return False
if not utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/power_on'):
return False
if utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/hw_reset') == 1:
return False
eeprom_raw = self._read_eeprom(0, 1, log_on_error=False)
return eeprom_raw is not None
@ -449,8 +494,17 @@ class SFP(NvidiaSFPCommon):
refer plugins/sfpreset.py
"""
file_path = SFP_SDK_MODULE_SYSFS_ROOT_TEMPLATE.format(self.sdk_index) + SFP_SYSFS_RESET
return utils.write_file(file_path, '1')
try:
if not self.is_sw_control():
file_path = SFP_SDK_MODULE_SYSFS_ROOT_TEMPLATE.format(self.sdk_index) + SFP_SYSFS_RESET
return utils.write_file(file_path, '1')
else:
file_path = SFP_SDK_MODULE_SYSFS_ROOT_TEMPLATE.format(self.sdk_index) + SFP_SYSFS_HWRESET
return utils.write_file(file_path, '0') and utils.write_file(file_path, '1')
except Exception as e:
print(f'Failed to reset module - {e}')
logger.log_error(f'Failed to reset module - {e}')
return False
@classmethod
@ -771,6 +825,13 @@ class SFP(NvidiaSFPCommon):
Returns:
bool: True if the limited bytes is hit
"""
try:
if self.is_sw_control():
return False
except Exception as e:
logger.log_notice(f'Module is under initialization, cannot write module EEPROM - {e}')
return True
eeprom_path = self._get_eeprom_path()
limited_data = limited_eeprom.get(self._get_sfp_type_str(eeprom_path))
if not limited_data:
@ -815,6 +876,77 @@ class SFP(NvidiaSFPCommon):
api = self.get_xcvr_api()
return [False] * api.NUM_CHANNELS if api else None
def get_temperature(self):
try:
if not self.is_sw_control():
temp_file = f'/sys/module/sx_core/asic0/module{self.sdk_index}/temperature/input'
if not os.path.exists(temp_file):
logger.log_error(f'Failed to read from file {temp_file} - not exists')
return None
temperature = utils.read_int_from_file(temp_file,
log_func=None)
return temperature / SFP_TEMPERATURE_SCALE if temperature is not None else None
except:
return 0.0
self.reinit()
temperature = super().get_temperature()
return temperature if temperature is not None else None
def get_temperature_warning_threashold(self):
"""Get temperature warning threshold
Returns:
int: temperature warning threshold
"""
try:
if not self.is_sw_control():
emergency = utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/temperature/emergency',
log_func=None,
default=None)
return emergency / SFP_TEMPERATURE_SCALE if emergency is not None else SFP_DEFAULT_TEMP_WARNNING_THRESHOLD
except:
return SFP_DEFAULT_TEMP_WARNNING_THRESHOLD
thresh = self._get_temperature_threshold()
if thresh and consts.TEMP_HIGH_WARNING_FIELD in thresh:
return thresh[consts.TEMP_HIGH_WARNING_FIELD]
return SFP_DEFAULT_TEMP_WARNNING_THRESHOLD
def get_temperature_critical_threashold(self):
"""Get temperature critical threshold
Returns:
int: temperature critical threshold
"""
try:
if not self.is_sw_control():
critical = utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/temperature/critical',
log_func=None,
default=None)
return critical / SFP_TEMPERATURE_SCALE if critical is not None else SFP_DEFAULT_TEMP_CRITICAL_THRESHOLD
except:
return SFP_DEFAULT_TEMP_CRITICAL_THRESHOLD
thresh = self._get_temperature_threshold()
if thresh and consts.TEMP_HIGH_ALARM_FIELD in thresh:
return thresh[consts.TEMP_HIGH_ALARM_FIELD]
return SFP_DEFAULT_TEMP_CRITICAL_THRESHOLD
def _get_temperature_threshold(self):
self.reinit()
api = self.get_xcvr_api()
if not api:
return None
thresh_support = api.get_transceiver_thresholds_support()
if thresh_support:
if isinstance(api, sff8636.Sff8636Api) or isinstance(api, sff8436.Sff8436Api):
return api.xcvr_eeprom.read(consts.TEMP_THRESHOLDS_FIELD)
return api.xcvr_eeprom.read(consts.THRESHOLDS_FIELD)
else:
return None
def get_xcvr_api(self):
"""
Retrieves the XcvrApi associated with this SFP
@ -834,15 +966,15 @@ class SFP(NvidiaSFPCommon):
return False
db = utils.DbUtils.get_db_instance('STATE_DB')
control_type = db.get('STATE_DB', f'TRANSCEIVER_MODULES_MGMT|{self.sdk_index}', 'control_type')
control_file_value = utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/control')
if control_type == 'SW_CONTROL' and control_file_value == 1:
return True
elif control_type == 'FW_CONTROL' and control_file_value == 0:
return False
else:
raise Exception(f'Module {self.sdk_index} is in initialization, please retry later')
logical_port = NvidiaSFPCommon.get_logical_port_by_sfp_index(self.sdk_index)
if not logical_port:
raise Exception(f'Module {self.sdk_index} is not present or in initialization')
initialized = db.exists('STATE_DB', f'TRANSCEIVER_STATUS|{logical_port}')
if not initialized:
raise Exception(f'Module {self.sdk_index} is not present or in initialization')
return utils.read_int_from_file(f'/sys/module/sx_core/asic0/module{self.sdk_index}/control') == 1
class RJ45Port(NvidiaSFPCommon):

View File

@ -36,6 +36,8 @@ except ImportError as e:
# Global logger class instance
logger = Logger()
DEFAULT_TEMP_SCALE = 1000
"""
The most important information for creating a Thermal object is 3 sysfs files: temperature file, high threshold file and
high critical threshold file. There is no common naming rule for thermal objects on Nvidia platform. There are two types
@ -72,9 +74,11 @@ THERMAL_NAMING_RULE = {
"chassis thermals": [
{
"name": "ASIC",
"temperature": "asic",
"high_threshold": "asic_temp_emergency",
"high_critical_threshold": "asic_temp_trip_crit"
"temperature": "input",
"high_threshold_default": 105,
"high_critical_threshold_default": 120,
"sysfs_folder": "/sys/module/sx_core/asic0/temperature",
"scale": 8
},
{
"name": "Ambient Port Side Temp",
@ -187,8 +191,8 @@ def initialize_psu_thermal(psu_index, presence_cb):
return [create_indexable_thermal(THERMAL_NAMING_RULE['psu thermals'], psu_index, CHASSIS_THERMAL_SYSFS_FOLDER, 1, presence_cb)]
def initialize_sfp_thermal(sfp_index):
return [create_indexable_thermal(THERMAL_NAMING_RULE['sfp thermals'], sfp_index, CHASSIS_THERMAL_SYSFS_FOLDER, 1)]
def initialize_sfp_thermal(sfp):
return [ModuleThermal(sfp)]
def initialize_linecard_thermals(lc_name, lc_index):
@ -214,6 +218,7 @@ def initialize_linecard_sfp_thermal(lc_name, lc_index, sfp_index):
def create_indexable_thermal(rule, index, sysfs_folder, position, presence_cb=None):
index += rule.get('start_index', 1)
name = rule['name'].format(index)
sysfs_folder = rule.get('sysfs_folder', sysfs_folder)
temp_file = os.path.join(sysfs_folder, rule['temperature'].format(index))
_check_thermal_sysfs_existence(temp_file)
if 'high_threshold' in rule:
@ -226,10 +231,13 @@ def create_indexable_thermal(rule, index, sysfs_folder, position, presence_cb=No
_check_thermal_sysfs_existence(high_crit_th_file)
else:
high_crit_th_file = None
high_th_default = rule.get('high_threshold_default')
high_crit_th_default = rule.get('high_critical_threshold_default')
scale = rule.get('scale', DEFAULT_TEMP_SCALE)
if not presence_cb:
return Thermal(name, temp_file, high_th_file, high_crit_th_file, position)
return Thermal(name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position)
else:
return RemovableThermal(name, temp_file, high_th_file, high_crit_th_file, position, presence_cb)
return RemovableThermal(name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position, presence_cb)
def create_single_thermal(rule, sysfs_folder, position, presence_cb=None):
@ -243,6 +251,7 @@ def create_single_thermal(rule, sysfs_folder, position, presence_cb=None):
elif not default_present:
return None
sysfs_folder = rule.get('sysfs_folder', sysfs_folder)
temp_file = os.path.join(sysfs_folder, temp_file)
_check_thermal_sysfs_existence(temp_file)
if 'high_threshold' in rule:
@ -255,11 +264,14 @@ def create_single_thermal(rule, sysfs_folder, position, presence_cb=None):
_check_thermal_sysfs_existence(high_crit_th_file)
else:
high_crit_th_file = None
high_th_default = rule.get('high_threshold_default')
high_crit_th_default = rule.get('high_critical_threshold_default')
scale = rule.get('scale', DEFAULT_TEMP_SCALE)
name = rule['name']
if not presence_cb:
return Thermal(name, temp_file, high_th_file, high_crit_th_file, position)
return Thermal(name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position)
else:
return RemovableThermal(name, temp_file, high_th_file, high_crit_th_file, position, presence_cb)
return RemovableThermal(name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position, presence_cb)
def _check_thermal_sysfs_existence(file_path):
@ -268,7 +280,7 @@ def _check_thermal_sysfs_existence(file_path):
class Thermal(ThermalBase):
def __init__(self, name, temp_file, high_th_file, high_crit_th_file, position):
def __init__(self, name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position):
"""
index should be a string for category ambient and int for other categories
"""
@ -278,6 +290,9 @@ class Thermal(ThermalBase):
self.temperature = temp_file
self.high_threshold = high_th_file
self.high_critical_threshold = high_crit_th_file
self.high_th_default = high_th_default
self.high_crit_th_default = high_crit_th_default
self.scale = scale
def get_name(self):
"""
@ -297,7 +312,7 @@ class Thermal(ThermalBase):
of one degree Celsius, e.g. 30.125
"""
value = utils.read_float_from_file(self.temperature, None, log_func=logger.log_info)
return value / 1000.0 if (value is not None and value != 0) else None
return value / self.scale if (value is not None and value != 0) else None
def get_high_threshold(self):
"""
@ -308,9 +323,9 @@ class Thermal(ThermalBase):
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
if self.high_threshold is None:
return None
return self.high_th_default
value = utils.read_float_from_file(self.high_threshold, None, log_func=logger.log_info)
return value / 1000.0 if (value is not None and value != 0) else None
return value / self.scale if (value is not None and value != 0) else self.high_th_default
def get_high_critical_threshold(self):
"""
@ -321,9 +336,9 @@ class Thermal(ThermalBase):
up to nearest thousandth of one degree Celsius, e.g. 30.125
"""
if self.high_critical_threshold is None:
return None
return self.high_crit_th_default
value = utils.read_float_from_file(self.high_critical_threshold, None, log_func=logger.log_info)
return value / 1000.0 if (value is not None and value != 0) else None
return value / self.scale if (value is not None and value != 0) else self.high_crit_th_default
def get_position_in_parent(self):
"""
@ -343,8 +358,8 @@ class Thermal(ThermalBase):
class RemovableThermal(Thermal):
def __init__(self, name, temp_file, high_th_file, high_crit_th_file, position, presence_cb):
super(RemovableThermal, self).__init__(name, temp_file, high_th_file, high_crit_th_file, position)
def __init__(self, name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position, presence_cb):
super(RemovableThermal, self).__init__(name, temp_file, high_th_file, high_crit_th_file, high_th_default, high_crit_th_default, scale, position)
self.presence_cb = presence_cb
def get_temperature(self):
@ -388,3 +403,68 @@ class RemovableThermal(Thermal):
logger.log_debug("get_high_critical_threshold for {} failed due to {}".format(self.name, hint))
return None
return super(RemovableThermal, self).get_high_critical_threshold()
class ModuleThermal(ThermalBase):
    """Thermal sensor of a single xSFP module.

    Every reading is delegated to the owning SFP object, which knows how to
    fetch temperature data regardless of whether the module is firmware or
    software controlled.
    """

    def __init__(self, sfp):
        """
        Args:
            sfp: the SFP object this sensor belongs to.
        """
        super(ModuleThermal, self).__init__()
        # Human-readable sensor name; module numbering is 1-based.
        self.sfp = sfp
        self.name = f'xSFP module {sfp.sdk_index + 1} Temp'

    def get_name(self):
        """Return the name of this thermal sensor (string)."""
        return self.name

    def get_temperature(self):
        """Return the current module temperature in Celsius (float),
        delegated to the SFP object."""
        return self.sfp.get_temperature()

    def get_high_threshold(self):
        """Return the high (warning) temperature threshold in Celsius,
        delegated to the SFP object."""
        return self.sfp.get_temperature_warning_threashold()

    def get_high_critical_threshold(self):
        """Return the high critical temperature threshold in Celsius,
        delegated to the SFP object."""
        return self.sfp.get_temperature_critical_threashold()

    def get_position_in_parent(self):
        """Return the 1-based physical position in the parent device."""
        return 1

    def is_replaceable(self):
        """Return False: this sensor is not independently replaceable."""
        return False

View File

@ -15,9 +15,36 @@
# limitations under the License.
#
from sonic_platform_base.sonic_thermal_control.thermal_manager_base import ThermalManagerBase
from . import thermal_updater
from .device_data import DeviceDataManager
class ThermalManager(ThermalManagerBase):
    """Nvidia platform thermal manager.

    In independent (SW-control) mode, a background ThermalUpdater task is
    started to push ASIC/module thermal data to hw-management, since the
    thermal-control daemon cannot read module temperatures by itself.
    """
    # Background task pushing thermal data to hw-management; created only in
    # independent mode by initialize().
    thermal_updater_task = None

    @classmethod
    def run_policy(cls, chassis):
        # Thermal policy is handled by the hw-management thermal-control
        # daemon on this platform; nothing to do here.
        pass

    @classmethod
    def initialize(cls):
        """
        Initialize thermal manager, including register thermal condition types and thermal action types
        and any other vendor specific initialization.
        :return:
        """
        if DeviceDataManager.is_independent_mode():
            # Imported here to avoid a circular import at module load time.
            from .chassis import Chassis
            cls.thermal_updater_task = thermal_updater.ThermalUpdater(Chassis.chassis_instance.get_all_sfps())
            cls.thermal_updater_task.start()

    @classmethod
    def deinitialize(cls):
        """
        Destroy thermal manager, including any vendor specific cleanup. The default behavior of this function
        is a no-op.
        :return:
        """
        # Guard against deinitialize() being called without a successful
        # initialize() (e.g. initialization failed before the task was
        # created), which would raise AttributeError on None.
        if DeviceDataManager.is_independent_mode() and cls.thermal_updater_task is not None:
            cls.thermal_updater_task.stop()

View File

@ -0,0 +1,213 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from . import utils
from sonic_py_common import logger
import sys
import time
sys.path.append('/run/hw-management/bin')
try:
import hw_management_independent_mode_update
except ImportError:
# For unit test only
from unittest import mock
hw_management_independent_mode_update = mock.MagicMock()
hw_management_independent_mode_update.module_data_set_module_counter = mock.MagicMock()
hw_management_independent_mode_update.thermal_data_set_asic = mock.MagicMock()
hw_management_independent_mode_update.thermal_data_set_module = mock.MagicMock()
hw_management_independent_mode_update.thermal_data_clean_asic = mock.MagicMock()
hw_management_independent_mode_update.thermal_data_clean_module = mock.MagicMock()
SFP_TEMPERATURE_SCALE = 1000
ASIC_TEMPERATURE_SCALE = 125
ASIC_DEFAULT_TEMP_WARNNING_THRESHOLD = 105000
ASIC_DEFAULT_TEMP_CRITICAL_THRESHOLD = 120000
ERROR_READ_THERMAL_DATA = 254000
TC_CONFIG_FILE = '/run/hw-management/config/tc_config.json'
logger = logger.Logger('thermal-updater')
class ThermalUpdater:
    """Push ASIC and xSFP-module thermal data to hw-management thermal control.

    In independent (SW-control) mode the hw-management thermal-control (TC)
    daemon cannot read module temperatures on its own; this class polls them
    periodically and feeds the values through the
    hw_management_independent_mode_update API.
    """

    def __init__(self, sfp_list):
        """
        Args:
            sfp_list: SFP objects whose thermal data shall be reported.
        """
        self._sfp_list = sfp_list
        # sdk_index -> last reported presence; used to clean stale data
        # exactly once when a module is removed.
        self._sfp_status = {}
        self._timer = utils.Timer()

    def load_tc_config(self):
        """Read polling intervals from the TC config file (falling back to
        defaults when the file is absent) and schedule the periodic jobs."""
        asic_poll_interval = 1
        sfp_poll_interval = 10
        data = utils.load_json_file(TC_CONFIG_FILE)
        if not data:
            logger.log_notice(f'{TC_CONFIG_FILE} does not exist, use default polling interval')

        if data:
            dev_parameters = data.get('dev_parameters')
            if dev_parameters is not None:
                asic_parameter = dev_parameters.get('asic')
                if asic_parameter is not None:
                    asic_poll_interval_config = asic_parameter.get('poll_time')
                    if asic_poll_interval_config:
                        # Poll at half the TC period so TC never sees stale data.
                        asic_poll_interval = int(asic_poll_interval_config) / 2
                # The TC config keys module settings by the literal regex-like
                # name 'module\d+'.
                module_parameter = dev_parameters.get('module\\d+')
                if module_parameter is not None:
                    sfp_poll_interval_config = module_parameter.get('poll_time')
                    if sfp_poll_interval_config:
                        sfp_poll_interval = int(sfp_poll_interval_config) / 2

        logger.log_notice(f'ASIC polling interval: {asic_poll_interval}')
        self._timer.schedule(asic_poll_interval, self.update_asic)
        logger.log_notice(f'Module polling interval: {sfp_poll_interval}')
        self._timer.schedule(sfp_poll_interval, self.update_module)

    def start(self):
        """Start the updater: resume TC and begin periodic updates. If the
        SFPs do not become ready in time, TC is suspended instead."""
        self.clean_thermal_data()
        if not self.wait_all_sfp_ready():
            logger.log_error('Failed to wait for all SFP ready, will put hw-management-tc to suspend')
            self.control_tc(True)
            return
        self.control_tc(False)
        self.load_tc_config()
        self._timer.start()

    def stop(self):
        """Stop the periodic updates and suspend hw-management TC."""
        self._timer.stop()
        self.control_tc(True)

    def control_tc(self, suspend):
        """Suspend or resume the hw-management thermal-control daemon.

        Args:
            suspend (bool): True to suspend TC, False to resume it.
        """
        logger.log_notice(f'Set hw-management-tc to {"suspend" if suspend else "resume"}')
        utils.write_file('/run/hw-management/config/suspend', 1 if suspend else 0)

    def clean_thermal_data(self):
        """Reset the module counter and clear previously reported data."""
        hw_management_independent_mode_update.module_data_set_module_counter(len(self._sfp_list))
        hw_management_independent_mode_update.thermal_data_clean_asic(0)
        for sfp in self._sfp_list:
            hw_management_independent_mode_update.thermal_data_clean_module(
                0,
                sfp.sdk_index + 1
            )

    def wait_all_sfp_ready(self):
        """Wait up to 60 seconds for every SFP to finish initialization.

        Returns:
            bool: True if all modules became ready within the time limit.
        """
        logger.log_notice('Waiting for all SFP modules ready...')
        max_wait_time = 60
        ready_set = set()
        while len(ready_set) != len(self._sfp_list):
            for sfp in self._sfp_list:
                try:
                    # is_sw_control raises while the module is still
                    # initializing; returning (either mode) means ready.
                    sfp.is_sw_control()
                    ready_set.add(sfp)
                except:
                    continue
            max_wait_time -= 1
            if max_wait_time == 0:
                return False
            time.sleep(1)

        logger.log_notice('All SFP modules are ready')
        return True

    def get_asic_temp(self):
        """Return the ASIC temperature in mCelsius, or None on read failure."""
        temperature = utils.read_int_from_file('/sys/module/sx_core/asic0/temperature/input', default=None)
        return temperature * ASIC_TEMPERATURE_SCALE if temperature is not None else None

    def get_asic_temp_warning_threashold(self):
        """Return the ASIC warning threshold in mCelsius (default if unreadable)."""
        emergency = utils.read_int_from_file('/sys/module/sx_core/asic0/temperature/emergency', default=None, log_func=None)
        return emergency * ASIC_TEMPERATURE_SCALE if emergency is not None else ASIC_DEFAULT_TEMP_WARNNING_THRESHOLD

    def get_asic_temp_critical_threashold(self):
        """Return the ASIC critical threshold in mCelsius (default if unreadable)."""
        critical = utils.read_int_from_file('/sys/module/sx_core/asic0/temperature/critical', default=None, log_func=None)
        return critical * ASIC_TEMPERATURE_SCALE if critical is not None else ASIC_DEFAULT_TEMP_CRITICAL_THRESHOLD

    def update_single_module(self, sfp):
        """Report thermal data of one module; report a fault value on error."""
        try:
            presence = sfp.get_presence()
            pre_presence = self._sfp_status.get(sfp.sdk_index)
            if presence:
                temperature = sfp.get_temperature()
                if temperature == 0:
                    # 0 means "no temperature data" for this module.
                    warning_thresh = 0
                    critical_thresh = 0
                    fault = 0
                else:
                    warning_thresh = sfp.get_temperature_warning_threashold()
                    critical_thresh = sfp.get_temperature_critical_threashold()
                    fault = ERROR_READ_THERMAL_DATA if (temperature is None or warning_thresh is None or critical_thresh is None) else 0
                    temperature = 0 if temperature is None else int(temperature * SFP_TEMPERATURE_SCALE)
                    warning_thresh = 0 if warning_thresh is None else int(warning_thresh * SFP_TEMPERATURE_SCALE)
                    critical_thresh = 0 if critical_thresh is None else int(critical_thresh * SFP_TEMPERATURE_SCALE)

                hw_management_independent_mode_update.thermal_data_set_module(
                    0, # ASIC index always 0 for now
                    sfp.sdk_index + 1,
                    temperature,
                    critical_thresh,
                    warning_thresh,
                    fault
                )
            else:
                if pre_presence != presence:
                    # Module was just removed: clear its stale data once.
                    hw_management_independent_mode_update.thermal_data_clean_module(0, sfp.sdk_index + 1)

            if pre_presence != presence:
                self._sfp_status[sfp.sdk_index] = presence
        except Exception as e:
            # Bug fix: this message was a plain string, so '{sfp.sdk_index}'
            # and '{e}' were logged literally; it must be an f-string.
            logger.log_error(f'Failed to update module {sfp.sdk_index} thermal data - {e}')
            hw_management_independent_mode_update.thermal_data_set_module(
                0, # ASIC index always 0 for now
                sfp.sdk_index + 1,
                0,
                0,
                0,
                ERROR_READ_THERMAL_DATA
            )

    def update_module(self):
        """Report thermal data for every known module."""
        for sfp in self._sfp_list:
            self.update_single_module(sfp)

    def update_asic(self):
        """Report ASIC thermal data; report a fault value on error."""
        try:
            asic_temp = self.get_asic_temp()
            warn_threshold = self.get_asic_temp_warning_threashold()
            critical_threshold = self.get_asic_temp_critical_threashold()
            fault = 0
            if asic_temp is None:
                logger.log_error('Failed to read ASIC temperature, send fault to hw-management-tc')
                asic_temp = warn_threshold
                fault = ERROR_READ_THERMAL_DATA

            hw_management_independent_mode_update.thermal_data_set_asic(
                0, # ASIC index always 0 for now
                asic_temp,
                critical_threshold,
                warn_threshold,
                fault
            )
        except Exception as e:
            # Bug fix: plain string -> f-string so the exception is logged.
            logger.log_error(f'Failed to update ASIC thermal data - {e}')
            hw_management_independent_mode_update.thermal_data_set_asic(
                0, # ASIC index always 0 for now
                0,
                0,
                0,
                ERROR_READ_THERMAL_DATA
            )

View File

@ -18,6 +18,7 @@ import ctypes
import functools
import subprocess
import json
import queue
import sys
import threading
import time
@ -289,6 +290,84 @@ def wait_until(predict, timeout, interval=1, *args, **kwargs):
return False
def wait_until_conditions(conditions, timeout, interval=1):
    """Wait until every callable in ``conditions`` returns True.

    Args:
        conditions (list): a list of callables which return True|False
        timeout (int): total wait time in seconds
        interval (int, optional): seconds between checks. Defaults to 1.

    Returns:
        bool: True if all conditions became true within the timeout, else False
    """
    remaining = timeout
    pending = conditions
    while remaining > 0:
        # Re-evaluate only the conditions that are still unsatisfied;
        # a condition that returned True once is never checked again.
        pending = [check for check in pending if not check()]
        if not pending:
            return True
        time.sleep(interval)
        remaining -= interval
    return False
class TimerEvent:
    """A single schedulable callback fired by a Timer.

    Attributes:
        interval: seconds between firings (read by the owning Timer).
        repeat: True if the event should be rescheduled after it fires.
    """

    def __init__(self, interval, cb, repeat):
        # Public scheduling metadata is read by Timer; the callback itself
        # stays private to this object.
        self.interval = interval
        self.repeat = repeat
        self._cb = cb

    def execute(self):
        """Invoke the wrapped callback once."""
        self._cb()
class Timer(threading.Thread):
    # A background thread that drives TimerEvent callbacks from a priority
    # queue keyed by each event's next fire timestamp.
    def __init__(self):
        super(Timer, self).__init__()
        # (timestamp, TimerEvent) pairs, ordered by earliest fire time.
        # NOTE(review): if two events ever share an identical timestamp,
        # PriorityQueue falls back to comparing TimerEvent objects, which do
        # not define ordering — confirm this cannot happen in practice.
        self._timestamp_queue = queue.PriorityQueue()
        # Set to wake the run loop early: either a sooner event was scheduled
        # or stop() was requested.
        self._wait_event = threading.Event()
        self._stop_event = threading.Event()
        # Fire time of the event the run loop is currently waiting on,
        # or None before the first iteration.
        self._min_timestamp = None

    def schedule(self, interval, cb, repeat=True, run_now=True):
        # Convenience wrapper: wrap the callback in a TimerEvent and enqueue.
        timer_event = TimerEvent(interval, cb, repeat)
        self.add_timer_event(timer_event, run_now)

    def add_timer_event(self, timer_event, run_now=True):
        # run_now=True fires at the current time; otherwise after one interval.
        timestamp = time.time()
        if not run_now:
            timestamp += timer_event.interval
        self._timestamp_queue.put_nowait((timestamp, timer_event))
        # If this event fires sooner than the one the run loop is sleeping
        # on, wake the loop so it re-picks the earliest event.
        if self._min_timestamp is not None and timestamp < self._min_timestamp:
            self._wait_event.set()

    def stop(self):
        # Signal the run loop to exit and wait for the thread to finish.
        # NOTE(review): run() blocks in _timestamp_queue.get() when the queue
        # is empty; stop() would then hang in join() — confirm at least one
        # event is always scheduled before start().
        if self.is_alive():
            self._wait_event.set()
            self._stop_event.set()
            self.join()

    def run(self):
        while not self._stop_event.is_set():
            now = time.time()
            # Blocks until an item is available (see NOTE in stop()).
            item = self._timestamp_queue.get()
            self._min_timestamp = item[0]
            if self._min_timestamp > now:
                # Not due yet: sleep until due time unless woken early,
                # then requeue the item and re-pick the earliest event.
                self._wait_event.wait(self._min_timestamp - now)
                self._wait_event.clear()
                self._timestamp_queue.put(item)
                continue
            timer_event = item[1]
            timer_event.execute()
            if timer_event.repeat:
                # Reschedule one interval from now (run_now=False).
                self.add_timer_event(timer_event, False)
class DbUtils:
lock = threading.Lock()
db_instances = threading.local()
@ -302,9 +381,9 @@ class DbUtils:
cls.db_instances.data = {}
if db_name not in cls.db_instances.data:
from swsscommon.swsscommon import SonicV2Connector
db = SonicV2Connector(use_unix_socket_path=True)
db.connect(db_name)
from swsscommon.swsscommon import ConfigDBConnector
db = ConfigDBConnector(use_unix_socket_path=True)
db.db_connect(db_name)
cls.db_instances.data[db_name] = db
return cls.db_instances.data[db_name]
except Exception as e:

View File

@ -16,8 +16,10 @@
#
import os
import random
import sys
import subprocess
import threading
from mock import MagicMock
if sys.version_info.major == 3:
@ -167,6 +169,30 @@ class TestChassis:
assert len(sfp_list) == 3
assert chassis.sfp_initialized_count == 3
def test_create_sfp_in_multi_thread(self):
DeviceDataManager.get_sfp_count = mock.MagicMock(return_value=3)
iteration_num = 100
while iteration_num > 0:
chassis = Chassis()
assert chassis.sfp_initialized_count == 0
t1 = threading.Thread(target=lambda: chassis.get_sfp(1))
t2 = threading.Thread(target=lambda: chassis.get_sfp(1))
t3 = threading.Thread(target=lambda: chassis.get_all_sfps())
t4 = threading.Thread(target=lambda: chassis.get_all_sfps())
threads = [t1, t2, t3, t4]
random.shuffle(threads)
for t in threads:
t.start()
for t in threads:
t.join()
assert len(chassis.get_all_sfps()) == 3
assert chassis.sfp_initialized_count == 3
for index, s in enumerate(chassis.get_all_sfps()):
assert s.sdk_index == index
iteration_num -= 1
@mock.patch('sonic_platform.device_data.DeviceDataManager.get_sfp_count', MagicMock(return_value=3))
def test_change_event(self):
chassis = Chassis()

View File

@ -60,6 +60,26 @@ class TestDeviceData:
mock_read.return_value = {'SAI_INDEPENDENT_MODULE_MODE': '1'}
assert DeviceDataManager.is_independent_mode()
@mock.patch('sonic_py_common.device_info.get_path_to_platform_dir', mock.MagicMock(return_value='/tmp'))
@mock.patch('sonic_platform.device_data.utils.load_json_file')
def test_get_sfp_count(self, mock_load_json):
mock_load_json.return_value = {
'chassis': {
'sfps': [1,2,3]
}
}
assert DeviceDataManager.get_sfp_count() == 3
@mock.patch('sonic_platform.device_data.time.sleep', mock.MagicMock())
@mock.patch('sonic_platform.device_data.DeviceDataManager.get_sfp_count', mock.MagicMock(return_value=3))
@mock.patch('sonic_platform.device_data.utils.read_int_from_file', mock.MagicMock(return_value=1))
@mock.patch('sonic_platform.device_data.os.path.exists')
@mock.patch('sonic_platform.device_data.DeviceDataManager.is_independent_mode')
def test_wait_platform_ready(self, mock_is_indep, mock_exists):
mock_exists.return_value = True
mock_is_indep.return_value = True
assert DeviceDataManager.wait_platform_ready()
mock_is_indep.return_value = False
assert DeviceDataManager.wait_platform_ready()
mock_exists.return_value = False
assert not DeviceDataManager.wait_platform_ready()

View File

@ -162,8 +162,13 @@ class TestSfp:
@mock.patch('sonic_platform.sfp.SFP._get_eeprom_path', mock.MagicMock(return_value = None))
@mock.patch('sonic_platform.sfp.SFP._get_sfp_type_str')
def test_is_write_protected(self, mock_get_type_str):
@mock.patch('sonic_platform.sfp.SFP.is_sw_control')
def test_is_write_protected(self, mock_sw_control, mock_get_type_str):
sfp = SFP(0)
mock_sw_control.return_value = True
assert not sfp._is_write_protected(page=0, page_offset=26, num_bytes=1)
mock_sw_control.return_value = False
mock_get_type_str.return_value = 'cmis'
assert sfp._is_write_protected(page=0, page_offset=26, num_bytes=1)
assert not sfp._is_write_protected(page=0, page_offset=27, num_bytes=1)
@ -261,9 +266,14 @@ class TestSfp:
@mock.patch('sonic_platform.utils.write_file')
def test_reset(self, mock_write):
sfp = SFP(0)
sfp.is_sw_control = mock.MagicMock(return_value=False)
mock_write.return_value = True
assert sfp.reset()
mock_write.assert_called_with('/sys/module/sx_core/asic0/module0/reset', '1')
sfp.is_sw_control.return_value = True
assert sfp.reset()
sfp.is_sw_control.side_effect = Exception('')
assert not sfp.reset()
@mock.patch('sonic_platform.sfp.SFP.read_eeprom')
def test_get_xcvr_api(self, mock_read):
@ -287,30 +297,72 @@ class TestSfp:
assert sfp.get_transceiver_threshold_info()
sfp.reinit()
@mock.patch('os.path.exists')
@mock.patch('sonic_platform.utils.read_int_from_file')
def test_get_temperature(self, mock_read, mock_exists):
sfp = SFP(0)
sfp.is_sw_control = mock.MagicMock(return_value=True)
mock_exists.return_value = False
assert sfp.get_temperature() == None
mock_exists.return_value = True
assert sfp.get_temperature() == None
mock_read.return_value = None
sfp.is_sw_control.return_value = False
assert sfp.get_temperature() == None
mock_read.return_value = 448
assert sfp.get_temperature() == 56.0
def test_get_temperature_threshold(self):
sfp = SFP(0)
sfp.is_sw_control = mock.MagicMock(return_value=True)
assert sfp.get_temperature_warning_threashold() == 70.0
assert sfp.get_temperature_critical_threashold() == 80.0
mock_api = mock.MagicMock()
mock_api.get_transceiver_thresholds_support = mock.MagicMock(return_value=False)
sfp.get_xcvr_api = mock.MagicMock(return_value=mock_api)
assert sfp.get_temperature_warning_threashold() == 70.0
assert sfp.get_temperature_critical_threashold() == 80.0
from sonic_platform_base.sonic_xcvr.fields import consts
mock_api.get_transceiver_thresholds_support.return_value = True
mock_api.xcvr_eeprom = mock.MagicMock()
mock_api.xcvr_eeprom.read = mock.MagicMock(return_value={
consts.TEMP_HIGH_ALARM_FIELD: 85.0,
consts.TEMP_HIGH_WARNING_FIELD: 75.0
})
assert sfp.get_temperature_warning_threashold() == 75.0
assert sfp.get_temperature_critical_threashold() == 85.0
@mock.patch('sonic_platform.sfp.NvidiaSFPCommon.get_logical_port_by_sfp_index')
@mock.patch('sonic_platform.utils.read_int_from_file')
@mock.patch('sonic_platform.device_data.DeviceDataManager.is_independent_mode')
@mock.patch('sonic_platform.utils.DbUtils.get_db_instance')
def test_is_sw_control(self, mock_get_db, mock_mode, mock_read):
def test_is_sw_control(self, mock_get_db, mock_mode, mock_read, mock_get_logical):
sfp = SFP(0)
mock_mode.return_value = False
assert not sfp.is_sw_control()
mock_mode.return_value = True
mock_get_logical.return_value = None
with pytest.raises(Exception):
sfp.is_sw_control()
mock_get_logical.return_value = 'Ethernet0'
mock_db = mock.MagicMock()
mock_get_db.return_value = mock_db
mock_db.get = mock.MagicMock(return_value=None)
mock_db.exists = mock.MagicMock(return_value=False)
with pytest.raises(Exception):
sfp.is_sw_control()
mock_db.exists.return_value = True
mock_read.return_value = 0
mock_db.get.return_value = 'FW_CONTROL'
assert not sfp.is_sw_control()
mock_read.return_value = 1
mock_db.get.return_value = 'SW_CONTROL'
assert sfp.is_sw_control()
mock_read.return_value = 0
with pytest.raises(Exception):
sfp.is_sw_control()
@mock.patch('sonic_platform.device_data.DeviceDataManager.is_independent_mode', mock.MagicMock(return_value=False))
@mock.patch('sonic_platform.sfp.SFP.is_sw_control', mock.MagicMock(return_value=False))

View File

@ -31,6 +31,7 @@ sys.path.insert(0, modules_path)
import sonic_platform.chassis
from sonic_platform.chassis import Chassis
from sonic_platform.device_data import DeviceDataManager
from sonic_platform.sfp import SFP
sonic_platform.chassis.extract_RJ45_ports_index = mock.MagicMock(return_value=[])
@ -148,23 +149,27 @@ class TestThermal:
@mock.patch('os.path.exists', mock.MagicMock(return_value=True))
def test_sfp_thermal(self):
from sonic_platform.thermal import initialize_sfp_thermal, THERMAL_NAMING_RULE
thermal_list = initialize_sfp_thermal(0)
from sonic_platform.thermal import THERMAL_NAMING_RULE
sfp = SFP(0)
thermal_list = sfp.get_all_thermals()
assert len(thermal_list) == 1
thermal = thermal_list[0]
rule = THERMAL_NAMING_RULE['sfp thermals']
start_index = rule.get('start_index', 1)
assert thermal.get_name() == rule['name'].format(start_index)
assert rule['temperature'].format(start_index) in thermal.temperature
assert rule['high_threshold'].format(start_index) in thermal.high_threshold
assert rule['high_critical_threshold'].format(start_index) in thermal.high_critical_threshold
assert thermal.get_position_in_parent() == 1
assert thermal.is_replaceable() == False
sfp.get_temperature = mock.MagicMock(return_value=35.4)
sfp.get_temperature_warning_threashold = mock.MagicMock(return_value=70)
sfp.get_temperature_critical_threashold = mock.MagicMock(return_value=80)
assert thermal.get_temperature() == 35.4
assert thermal.get_high_threshold() == 70
assert thermal.get_high_critical_threshold() == 80
@mock.patch('sonic_platform.utils.read_float_from_file')
def test_get_temperature(self, mock_read):
from sonic_platform.thermal import Thermal
thermal = Thermal('test', 'temp_file', None, None, 1)
thermal = Thermal('test', 'temp_file', None, None, None, None, 1000, 1)
mock_read.return_value = 35727
assert thermal.get_temperature() == 35.727
@ -177,7 +182,7 @@ class TestThermal:
@mock.patch('sonic_platform.utils.read_float_from_file')
def test_get_high_threshold(self, mock_read):
from sonic_platform.thermal import Thermal
thermal = Thermal('test', None, None, None, 1)
thermal = Thermal('test', None, None, None, None, None, 1000, 1)
assert thermal.get_high_threshold() is None
thermal.high_threshold = 'high_th_file'
@ -193,7 +198,7 @@ class TestThermal:
@mock.patch('sonic_platform.utils.read_float_from_file')
def test_get_high_critical_threshold(self, mock_read):
from sonic_platform.thermal import Thermal
thermal = Thermal('test', None, None, None, 1)
thermal = Thermal('test', None, None, None, None, None, 1000, 1)
assert thermal.get_high_critical_threshold() is None
thermal.high_critical_threshold = 'high_th_file'

View File

@ -0,0 +1,128 @@
#
# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from unittest import mock
from sonic_platform import utils
from sonic_platform.thermal_updater import ThermalUpdater, hw_management_independent_mode_update
from sonic_platform.thermal_updater import ASIC_DEFAULT_TEMP_WARNNING_THRESHOLD, \
ASIC_DEFAULT_TEMP_CRITICAL_THRESHOLD
mock_tc_config = """
{
"dev_parameters": {
"asic": {
"pwm_min": 20,
"pwm_max": 100,
"val_min": "!70000",
"val_max": "!105000",
"poll_time": 3
},
"module\\\\d+": {
"pwm_min": 20,
"pwm_max": 100,
"val_min": 60000,
"val_max": 80000,
"poll_time": 20
}
}
}
"""
class TestThermalUpdater:
def test_load_tc_config_non_exists(self):
updater = ThermalUpdater(None)
updater.load_tc_config()
assert updater._timer._timestamp_queue.qsize() == 2
def test_load_tc_config_mocked(self):
updater = ThermalUpdater(None)
mock_os_open = mock.mock_open(read_data=mock_tc_config)
with mock.patch('sonic_platform.utils.open', mock_os_open):
updater.load_tc_config()
assert updater._timer._timestamp_queue.qsize() == 2
@mock.patch('sonic_platform.thermal_updater.ThermalUpdater.update_asic', mock.MagicMock())
@mock.patch('sonic_platform.thermal_updater.ThermalUpdater.update_module', mock.MagicMock())
@mock.patch('sonic_platform.thermal_updater.ThermalUpdater.wait_all_sfp_ready')
@mock.patch('sonic_platform.utils.write_file')
def test_start_stop(self, mock_write, mock_wait):
mock_wait.return_value = True
mock_sfp = mock.MagicMock()
mock_sfp.sdk_index = 1
updater = ThermalUpdater([mock_sfp])
updater.start()
mock_write.assert_called_once_with('/run/hw-management/config/suspend', 0)
utils.wait_until(updater._timer.is_alive, timeout=5)
mock_write.reset_mock()
updater.stop()
assert not updater._timer.is_alive()
mock_write.assert_called_once_with('/run/hw-management/config/suspend', 1)
mock_wait.return_value = False
mock_write.reset_mock()
updater.start()
mock_write.assert_called_once_with('/run/hw-management/config/suspend', 1)
updater.stop()
@mock.patch('sonic_platform.thermal_updater.time.sleep', mock.MagicMock())
def test_wait_all_sfp_ready(self):
mock_sfp = mock.MagicMock()
mock_sfp.is_sw_control = mock.MagicMock(return_value=True)
updater = ThermalUpdater([mock_sfp])
assert updater.wait_all_sfp_ready()
mock_sfp.is_sw_control.side_effect = Exception('')
assert not updater.wait_all_sfp_ready()
@mock.patch('sonic_platform.utils.read_int_from_file')
def test_update_asic(self, mock_read):
mock_read.return_value = 8
updater = ThermalUpdater(None)
assert updater.get_asic_temp() == 1000
assert updater.get_asic_temp_warning_threashold() == 1000
assert updater.get_asic_temp_critical_threashold() == 1000
updater.update_asic()
hw_management_independent_mode_update.thermal_data_set_asic.assert_called_once()
mock_read.return_value = None
assert updater.get_asic_temp() is None
assert updater.get_asic_temp_warning_threashold() == ASIC_DEFAULT_TEMP_WARNNING_THRESHOLD
assert updater.get_asic_temp_critical_threashold() == ASIC_DEFAULT_TEMP_CRITICAL_THRESHOLD
def test_update_module(self):
mock_sfp = mock.MagicMock()
mock_sfp.sdk_index = 10
mock_sfp.get_presence = mock.MagicMock(return_value=True)
mock_sfp.get_temperature = mock.MagicMock(return_value=55.0)
mock_sfp.get_temperature_warning_threashold = mock.MagicMock(return_value=70.0)
mock_sfp.get_temperature_critical_threashold = mock.MagicMock(return_value=80.0)
updater = ThermalUpdater([mock_sfp])
updater.update_module()
hw_management_independent_mode_update.thermal_data_set_module.assert_called_once_with(0, 11, 55000, 80000, 70000, 0)
mock_sfp.get_temperature = mock.MagicMock(return_value=0.0)
hw_management_independent_mode_update.reset_mock()
updater.update_module()
hw_management_independent_mode_update.thermal_data_set_module.assert_called_once_with(0, 11, 0, 0, 0, 0)
mock_sfp.get_presence = mock.MagicMock(return_value=False)
updater.update_module()
hw_management_independent_mode_update.thermal_data_clean_module.assert_called_once_with(0, 11)

View File

@ -191,6 +191,33 @@ class TestUtils:
mock_os_open = mock.mock_open(read_data='a:b')
with mock.patch('sonic_platform.utils.open', mock_os_open):
assert utils.read_key_value_file('some_file') == {'a':'b'}
mock_os_open = mock.mock_open(read_data='a=b')
with mock.patch('sonic_platform.utils.open', mock_os_open):
assert utils.read_key_value_file('some_file', delimeter='=') == {'a':'b'}
@mock.patch('sonic_platform.utils.time.sleep', mock.MagicMock())
def test_wait_until_conditions(self):
conditions = [lambda: True]
assert utils.wait_until_conditions(conditions, 1)
conditions = [lambda: False]
assert not utils.wait_until_conditions(conditions, 1)
def test_timer(self):
timer = utils.Timer()
timer.start()
mock_cb_1000_run_now = mock.MagicMock()
mock_cb_1000_run_future = mock.MagicMock()
mock_cb_1_run_future_once = mock.MagicMock()
mock_cb_1_run_future_repeat = mock.MagicMock()
timer.schedule(1000, cb=mock_cb_1000_run_now, repeat=False, run_now=True)
timer.schedule(1000, cb=mock_cb_1000_run_future, repeat=False, run_now=False)
timer.schedule(1, cb=mock_cb_1_run_future_once, repeat=False, run_now=False)
timer.schedule(1, cb=mock_cb_1_run_future_repeat, repeat=True, run_now=False)
time.sleep(3)
timer.stop()
mock_cb_1000_run_now.assert_called_once()
mock_cb_1000_run_future.assert_not_called()
mock_cb_1_run_future_once.assert_called_once()
assert mock_cb_1_run_future_repeat.call_count > 1

View File

@ -1,6 +1,6 @@
# Mellanox SAI
MLNX_SAI_VERSION = SAIBuild2311.25.0.36
MLNX_SAI_VERSION = SAIBuild2311.26.0.28
MLNX_SAI_ASSETS_GITHUB_URL = https://github.com/Mellanox/Spectrum-SDK-Drivers-SONiC-Bins
MLNX_SAI_ASSETS_RELEASE_TAG = sai-$(MLNX_SAI_VERSION)-$(BLDENV)-$(CONFIGURED_ARCH)
MLNX_SAI_ASSETS_URL = $(MLNX_SAI_ASSETS_GITHUB_URL)/releases/download/$(MLNX_SAI_ASSETS_RELEASE_TAG)

View File

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
MLNX_SDK_VERSION = 4.6.2104
MLNX_SDK_VERSION = 4.6.2134
MLNX_SDK_ISSU_VERSION = 101
MLNX_SDK_DRIVERS_GITHUB_URL = https://github.com/Mellanox/Spectrum-SDK-Drivers

View File

@ -144,7 +144,7 @@ INCLUDE_MGMT_FRAMEWORK = y
ENABLE_HOST_SERVICE_ON_START = y
# INCLUDE_RESTAPI - build docker-sonic-restapi for configuring the switch using REST APIs
INCLUDE_RESTAPI = n
INCLUDE_RESTAPI ?= n
# INCLUDE_NAT - build docker-nat for nat support
INCLUDE_NAT = y

@ -1 +1 @@
Subproject commit e420df41a08ea9ec421687fb06d1f96535ebf151
Subproject commit 79c3872b38b8ae464dd04907610657bb2a611360

View File

@ -26,6 +26,7 @@ import os
import sys
import yaml
import ipaddress
import base64
from collections import OrderedDict
from config_samples import generate_sample_config, get_available_config
@ -139,6 +140,29 @@ def ip_network(value):
return "Invalid ip address %s" % value
return r_v.network
def b64encode(value):
"""Base64 encoder
Return:
encoded string or the same value in case of error
"""
try:
ret = base64.b64encode(value.encode()).decode()
except:
return value
return ret
def b64decode(value):
"""Base64 decoder
Return:
decoded string or the same value in case of error
"""
try:
ret = base64.b64decode(value.encode()).decode()
except:
return value
return ret
def get_primary_addr(value):
if not value:
return ""
@ -274,6 +298,10 @@ def _get_jinja2_env(paths):
for attr in ['ip', 'network', 'prefixlen', 'netmask', 'broadcast']:
env.filters[attr] = partial(prefix_attr, attr)
# Base64 encoder/decoder
env.filters['b64encode'] = b64encode
env.filters['b64decode'] = b64decode
return env
def main():

View File

@ -1,7 +1,49 @@
{
"NTP": {
"global": {
"src_intf": "Ethernet0"
"src_intf": "eth0",
"vrf": "default",
"authentication": "enabled",
"dhcp": "disabled",
"server_role": "disabled",
"admin_state": "enabled"
}
},
"NTP_SERVER": {
"my_ntp_server": {
"association_type": "server",
"iburst": "off",
"admin_state": "disabled",
"version": 3,
"resolve_as": "10.20.30.40"
},
"server2": {
"association_type": "server",
"iburst": "off",
"admin_state": "enabled",
"version": 3,
"resolve_as": "10.20.30.50",
"key": 42,
"trusted": "no"
},
"pool.ntp.org": {
"association_type": "pool",
"iburst": "on",
"admin_state": "enabled",
"version": 3,
"resolve_as": "pool.ntp.org"
}
},
"NTP_KEY": {
"1": {
"type": "md5",
"trusted": "no",
"value": "blabla"
},
"42": {
"type": "sha1",
"trusted": "yes",
"value": "the_answer"
}
},
"INTERFACE": {

View File

@ -0,0 +1 @@
../../../files/image_config/ntp/ntp.keys.j2

View File

@ -1,58 +0,0 @@
###############################################################################
# Managed by Ansible
# file: ansible/roles/acs/templates/ntp.conf.j2
###############################################################################
# /etc/ntpsec/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
# To avoid ntpd from panic and exit if the drift between new time and
# current system time is large.
tinker panic 0
driftfile /var/lib/ntpsec/ntp.drift
leapfile /usr/share/zoneinfo/leap-seconds.list
# To enable Network Time Security support as a server, obtain a certificate
# (e.g. with Let's Encrypt), configure the paths below, and uncomment:
# nts cert CERT_FILE
# nts key KEY_FILE
# nts enable
# You must create /var/log/ntpsec (owned by ntpsec:ntpsec) to enable logging.
#statsdir /var/log/ntpsec/
#statistics loopstats peerstats clockstats
#filegen loopstats file loopstats type day enable
#filegen peerstats file peerstats type day enable
#filegen clockstats file clockstats type day enable
# Specify one or more NTP servers.
# Public NTP servers supporting Network Time Security:
# server time.cloudflare.com nts
# pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will
# pick a different set every time it starts up. Please consider joining the
# pool: <https://www.pool.ntp.org/join.html>
#listen on source interface if configured, else
#only listen on MGMT_INTERFACE, LOOPBACK_INTERFACE ip when MGMT_INTERFACE is not defined, or eth0
# if we don't have both of them (default is to listen on all ip addresses)
interface ignore wildcard
interface listen Ethernet0
interface listen 127.0.0.1
# Access control configuration; see /usr/share/doc/ntpsec-doc/html/accopt.html
# for details.
#
# Note that "restrict" applies to both servers and clients, so a configuration
# that might be intended to block requests from certain clients could also end
# up blocking replies from your own upstream servers.
# By default, exchange time with everybody, but don't allow configuration.
# NTPsec doesn't establish peer associations, and so nopeer has no effect, and has been removed from here
restrict default kod nomodify noquery limited
# Local users may interrogate the ntp server more closely.
restrict 127.0.0.1
restrict ::1

View File

@ -0,0 +1 @@
../py3/ntp.conf

View File

@ -0,0 +1 @@
../py3/ntp.keys

View File

@ -1,9 +1,9 @@
###############################################################################
# Managed by Ansible
# file: ansible/roles/acs/templates/ntp.conf.j2
# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY.
# Controlled by ntp-config.service
###############################################################################
# /etc/ntpsec/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
# To avoid ntpd from panic and exit if the drift between new time and
# current system time is large.
@ -12,45 +12,28 @@ tinker panic 0
driftfile /var/lib/ntpsec/ntp.drift
leapfile /usr/share/zoneinfo/leap-seconds.list
# To enable Network Time Security support as a server, obtain a certificate
# (e.g. with Let's Encrypt), configure the paths below, and uncomment:
# nts cert CERT_FILE
# nts key KEY_FILE
# nts enable
server 10.20.30.50 key 42 iburst version 3
restrict 10.20.30.50 kod limited nomodify notrap noquery nopeer
# You must create /var/log/ntpsec (owned by ntpsec:ntpsec) to enable logging.
#statsdir /var/log/ntpsec/
#statistics loopstats peerstats clockstats
#filegen loopstats file loopstats type day enable
#filegen peerstats file peerstats type day enable
#filegen clockstats file clockstats type day enable
pool pool.ntp.org iburst version 3
restrict pool.ntp.org kod limited nomodify notrap noquery
# Specify one or more NTP servers.
# Public NTP servers supporting Network Time Security:
# server time.cloudflare.com nts
keys /etc/ntpsec/ntp.keys
trustedkey 42
# pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will
# pick a different set every time it starts up. Please consider joining the
# pool: <https://www.pool.ntp.org/join.html>
#listen on source interface if configured, else
#only listen on MGMT_INTERFACE, LOOPBACK_INTERFACE ip when MGMT_INTERFACE is not defined, or eth0
# if we don't have both of them (default is to listen on all ip addresses)
interface ignore wildcard
interface listen Ethernet0
interface listen eth0
interface listen 127.0.0.1
# Access control configuration; see /usr/share/doc/ntpsec-doc/html/accopt.html
# for details.
#
# Note that "restrict" applies to both servers and clients, so a configuration
# that might be intended to block requests from certain clients could also end
# up blocking replies from your own upstream servers.
# Access control configuration
# By default, exchange time with everybody, but don't allow configuration.
# NTPsec doesn't establish peer associations, and so nopeer has no effect, and has been removed from here
# NTPsec doesn't establish peer associations, and so nopeer has no effect, and
# has been removed from here
restrict default kod nomodify noquery limited
# Local users may interrogate the ntp server more closely.

View File

@ -0,0 +1,8 @@
###############################################################################
# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY.
# Controlled by ntp-config.service
###############################################################################
1 md5 blabla
42 sha1 the_answer

View File

@ -675,10 +675,19 @@ class TestJ2Files(TestCase):
def test_ntp_conf(self):
conf_template = os.path.join(self.test_dir, "ntp.conf.j2")
ntp_interfaces_json = os.path.join(self.test_dir, "data", "ntp", "ntp_interfaces.json")
config_db_ntp_json = os.path.join(self.test_dir, "data", "ntp", "ntp_interfaces.json")
expected = os.path.join(self.test_dir, "sample_output", utils.PYvX_DIR, "ntp.conf")
argument = ['-j', ntp_interfaces_json, '-t', conf_template]
argument = ['-j', config_db_ntp_json, '-t', conf_template]
self.run_script(argument, output_file=self.output_file)
assert utils.cmp(expected, self.output_file), self.run_diff(expected, self.output_file)
def test_ntp_keys(self):
conf_template = os.path.join(self.test_dir, "ntp.keys.j2")
config_db_ntp_json = os.path.join(self.test_dir, "data", "ntp", "ntp_interfaces.json")
expected = os.path.join(self.test_dir, "sample_output", utils.PYvX_DIR, "ntp.keys")
argument = ['-j', config_db_ntp_json, '-t', conf_template]
self.run_script(argument, output_file=self.output_file)
assert utils.cmp(expected, self.output_file), self.run_diff(expected, self.output_file)

@ -1 +1 @@
Subproject commit d4448c78b4e0afd1ec6dfaa390aef5c650cee4b3
Subproject commit 910814ffb4fd2e44e183d8d92086a724c62f5f1d

View File

@ -0,0 +1,57 @@
From c13964525dae96299dc54daf635609971576a09e Mon Sep 17 00:00:00 2001
From: Donald Sharp <sharpd@nvidia.com>
Date: Mon, 11 Dec 2023 13:41:36 -0500
Subject: [PATCH] zebra: The dplane_fpm_nl return path leaks memory
The route entry created when using a ctx to pass route
entry data backup to the master pthread in zebra is
being leaked. Prevent this from happening.
Signed-off-by: Donald Sharp <sharpd@nvidia.com>
diff --git a/zebra/rib.h b/zebra/rib.h
index 016106312..e99eee67c 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -352,6 +352,8 @@ extern void _route_entry_dump(const char *func, union prefixconstptr pp,
union prefixconstptr src_pp,
const struct route_entry *re);
+void zebra_rib_route_entry_free(struct route_entry *re);
+
struct route_entry *
zebra_rib_route_entry_new(vrf_id_t vrf_id, int type, uint8_t instance,
uint32_t flags, uint32_t nhe_id, uint32_t table_id,
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 6bdc15592..fc9e8c457 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -1001,6 +1001,8 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h,
re, ng, startup, ctx);
if (ng)
nexthop_group_delete(&ng);
+ if (ctx)
+ zebra_rib_route_entry_free(re);
} else {
/*
* I really don't see how this is possible
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index f2f20bcf7..1cefdfae7 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -4136,6 +4136,12 @@ struct route_entry *zebra_rib_route_entry_new(vrf_id_t vrf_id, int type,
return re;
}
+
+void zebra_rib_route_entry_free(struct route_entry *re)
+{
+ XFREE(MTYPE_RE, re);
+}
+
/*
* Internal route-add implementation; there are a couple of different public
* signatures. Callers in this path are responsible for the memory they
--
2.17.1

View File

@ -32,3 +32,4 @@ cross-compile-changes.patch
0030-bgpd-Treat-EOR-as-withdrawn-to-avoid-unwanted-handli.patch
0031-bgpd-Ignore-handling-NLRIs-if-we-received-MP_UNREACH.patch
0032-zebra-Fix-fpm-multipath-encap-addition.patch
0033-zebra-The-dplane_fpm_nl-return-path-leaks-memory.patch

@ -1 +1 @@
Subproject commit 07e0b364375c9b867ae1f5d600d0785f30e3f4d3
Subproject commit 88e82d4bc40658bc100a9a0508b1d1289a924a61

@ -1 +1 @@
Subproject commit e8ae2afd612ef7fd08b7d855c48c78fe54b34ec4
Subproject commit 722b7962f95e8d602a9a60e77356826e9f68891c

@ -1 +1 @@
Subproject commit b2601c7b1a10f42488d0f21c6c133f5c60c2387d
Subproject commit 63705029d7d3bbf480a18839a1e12524bd949fa5

@ -1 +1 @@
Subproject commit 427217bd4c869915f43f4e23f877aeb09db6bc3a
Subproject commit 0f72932a4cbfdf16cccc75e72304449b4df67d16

@ -1 +1 @@
Subproject commit 502c0b6622008363cb1ed6d1b7c85b4093997093
Subproject commit b2b890540d291d76b8763c1f6c22b3899385ff76

@ -1 +1 @@
Subproject commit e7ad3566a8265e1b4cc96a1ed50978eee319dc6a
Subproject commit 92c050ab86aac8b95ab83dc6aa06f2886038540e

@ -1 +1 @@
Subproject commit 6026b6d63b686f5f7cef1522715141b4c725cfba
Subproject commit 194daa04e05696afdb1f44765a57f6803ddc1c4c

@ -1 +1 @@
Subproject commit 3037959da0db3639952d15c9cb0ba1b7a04c1e64
Subproject commit 19ea8493388536921ad204833157dac2cd2bf3ba

View File

@ -70,6 +70,7 @@ Table of Contents
* [TC to Priority group map](#tc-to-priority-group-map)
* [TC to Queue map](#tc-to-queue-map)
* [Telemetry](#telemetry)
* [Telemetry client](#telemetry-client)
* [Tunnel](#tunnel)
* [Versions](#versions)
* [VLAN](#vlan)
@ -1538,6 +1539,35 @@ These configuration options are used to modify the way that
ntp binds to the ports on the switch and which port it uses to
make ntp update requests from.
***NTP Admin state***
If this option is set to `enabled` then ntp client will try to sync system time with configured NTP servers.
Otherwise, NTP client feature will be disabled.
```
{
"NTP": {
"global": {
"admin_state": "enabled"
}
}
}
```
***NTP Server role***
This option is used to control NTP server state on the switch.
If this option is set to `enabled` switch will act as NTP server.
By default `server_role` is `disabled`.
```
{
"NTP": {
"global": {
"server_role": "enabled"
}
}
}
```
***NTP VRF***
If this option is set to `default` then ntp will run within the default vrf
@ -1575,6 +1605,36 @@ for that address.
}
```
***NTP Authentication***
If this option is set to `enabled` then ntp will try to verify NTP servers it connects to.
This option **has no effect** if key is not set for NTP server.
By default it is `disabled`
```
{
"NTP": {
"global": {
"authentication": "enabled"
}
}
}
```
***NTP DHCP leases***
If this option is set to `enabled` then ntp client will try to use NTP servers provided by DHCP server.
If this option is set to `disabled` you will be able to use the user-configured NTP servers.
By default it is `enabled`
```
{
"NTP": {
"global": {
"dhcp": "enabled"
}
}
}
```
### NTP servers
These information are configured in individual tables. Domain name or IP
@ -1585,18 +1645,77 @@ attributes in those objects.
```
{
"NTP_SERVER": {
"2.debian.pool.ntp.org": {},
"1.debian.pool.ntp.org": {},
"3.debian.pool.ntp.org": {},
"0.debian.pool.ntp.org": {}
"2.debian.pool.ntp.org": {
"association_type": "pool",
"iburst": "on",
"admin_state": "enabled",
"version": 4
},
"1.debian.pool.ntp.org": {
"association_type": "pool",
"iburst": "off",
"admin_state": "enabled",
"version": 3
},
"3.debian.pool.ntp.org": {
"association_type": "pool",
"iburst": "on",
"admin_state": "disabled",
"version": 4
},
"0.debian.pool.ntp.org": {
"association_type": "pool",
"iburst": "off",
"admin_state": "disabled",
"version": 3
}
},
"NTP_SERVER": {
"23.92.29.245": {},
"204.2.134.164": {}
"23.92.29.245": {
"association_type": "server",
"iburst": "on",
"admin_state": "enabled",
"version": 4,
"key": 3,
"trusted": "yes"
},
"204.2.134.164": {
"association_type": "server",
"iburst": "on",
"admin_state": "enabled",
"version": 3
}
}
}
```
* `association_type` - is used to control the type of the server. It can be `server` or `pool`.
* `iburst` - agressive server polling `{on, off}`.
* `version` - NTP protool version to use `[3..4]`.
* `key` - authentication key id `[1..65535]` to use to auth the server.
* `admin_state` - enable or disable specific server.
* `trusted` - trust this server when auth is enabled.
***NTP keys***
```
{
"NTP_KEY": {
"1": {
"type": "md5",
"value": "bXlwYXNzd29yZA==",
"trusted": "yes"
},
"42": {
"type": "sha1",
"value": "dGhlYW5zd2Vy",
"trusted": "no"
}
}
}
```
* `type` - key type to use `{md5, sha1, sha256, sha384, sha512}`.
* `value` - base64 encoded key value.
* `trusted` - trust this NTP key `{yes, no}`.
### Peer Switch
@ -2103,6 +2222,31 @@ and is listed in this table.
}
```
### Telemetry client
```
{
"TELEMETRY_CLIENT": {
"Global": {
"encoding": "JSON_IETF",
"retry_interval": "30",
"src_ip": "30.57.185.38",
"unidirectional": "true"
},
"DestinationGroup|HS": {
"dst_addr": "30.57.186.214:8081,30.57.185.39:8081"
},
"Subscription|HS_RDMA": {
"dst_group": "HS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",
"report_interval": "5000",
"report_type": "periodic"
}
}
}
```
### Tunnel
This table configures the MUX tunnel for Dual-ToR setup

View File

@ -477,14 +477,36 @@
},
"NTP": {
"global": {
"authentication": "disabled",
"dhcp": "enabled",
"server_role": "disabled",
"admin_state": "enabled",
"vrf": "mgmt",
"src_intf": "eth0;Loopback0"
}
},
"NTP_SERVER": {
"0.debian.pool.ntp.org": {},
"23.92.29.245": {},
"2001:aa:aa::aa": {}
"0.debian.pool.ntp.org": {
"association_type": "pool",
"resolve_as": "0.debian.pool.ntp.org"
},
"time.google.com": {
"association_type": "server",
"resolve_as": "216.239.35.4"
},
"23.92.29.245": {
"admin_state": "enabled",
"association_type": "server",
"resolve_as": "23.92.29.245",
"iburst": "off",
"trusted": "yes"
},
"2001:aa:aa::aa": {
"admin_state": "disabled",
"iburst": "on",
"association_type": "server",
"resolve_as": "2001:aa:aa::aa"
}
},
"SYSLOG_SERVER" : {
"10.13.14.17": {
@ -1213,10 +1235,10 @@
"src_ip": "30.57.185.38",
"unidirectional": "true"
},
"DestinationGroup_HS": {
"DestinationGroup|HS": {
"dst_addr": "30.57.186.214:8081,30.57.185.39:8081"
},
"Subscription_HS_RDMA": {
"Subscription|HS_RDMA": {
"dst_group": "HS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",

View File

@ -58,5 +58,82 @@
"desc": "CONFIGURE NON-EXISTING MGMT INTERFACE AS NTP SOURCE INTERFACE.",
"eStrKey": "InvalidValue",
"eStr": ["src"]
},
"NTP_GLOB_VALID1": {
"desc": "NTP global params valid config 1"
},
"NTP_GLOB_VALID2": {
"desc": "NTP global params valid config 2"
},
"NTP_AUTH_INVALID1": {
"desc": "NTP authentication state invalid 1",
"eStrKey": "InvalidValue"
},
"NTP_AUTH_INVALID2": {
"desc": "NTP authentication state invalid 2",
"eStrKey": "InvalidValue"
},
"NTP_DHCP_INVALID1": {
"desc": "NTP DHCP state invalid 1",
"eStrKey": "InvalidValue"
},
"NTP_DHCP_INVALID2": {
"desc": "NTP DHCP state invalid 2",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_ROLE_INVALID1": {
"desc": "NTP server role state invalid 1",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_ROLE_INVALID2": {
"desc": "NTP server role state invalid 2",
"eStrKey": "InvalidValue"
},
"NTP_STATE_INVALID1": {
"desc": "NTP daemon state invalid 1",
"eStrKey": "InvalidValue"
},
"NTP_STATE_INVALID2": {
"desc": "NTP daemon state invalid 2",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_ASSOCIATION_INVALID": {
"desc": "NTP server type invalid",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_IBURST_INVALID": {
"desc": "NTP server aggressive mode invalid",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_KEY_INVALID": {
"desc": "NTP server authentication key invalid",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_STATE_INVALID": {
"desc": "NTP server state invalid",
"eStrKey": "InvalidValue"
},
"NTP_SERVER_TRUSTED_INVALID": {
"desc": "NTP server trusted mode invalid",
"eStrKey": "InvalidValue"
},
"NTP_KEY_VALID": {
"desc": "NTP authentication keys inventory"
},
"NTP_KEY_ID_INVALID": {
"desc": "NTP authentication keys invalid key id",
"eStrKey": "InvalidValue"
},
"NTP_KEY_TRUSTED_INVALID": {
"desc": "NTP authentication keys invalid trustiness",
"eStrKey": "InvalidValue"
},
"NTP_KEY_TYPE_INVALID": {
"desc": "NTP authentication keys invalid key type",
"eStrKey": "InvalidValue"
},
"NTP_KEY_VALUE_INVALID": {
"desc": "NTP authentication keys bad key value",
"eStrKey": "Range"
}
}

View File

@ -4,13 +4,38 @@
"sonic-ntp:NTP_SERVER": {
"NTP_SERVER_LIST": [
{
"server_address": "10.11.12.13"
"server_address": "10.11.12.13",
"association_type": "server",
"iburst": "on",
"key": 10,
"admin_state": "enabled",
"trusted": "no"
},
{
"server_address": "2001:aa:aa::aa"
"server_address": "2001:aa:aa::aa",
"association_type": "server",
"iburst": "off",
"key": 15,
"admin_state": "disabled",
"trusted": "yes"
},
{
"server_address": "pool.ntp.org"
"server_address": "pool.ntp.org",
"association_type": "pool",
"iburst": "on",
"admin_state": "enabled"
}
]
},
"sonic-ntp:NTP_KEY": {
"NTP_KEYS_LIST": [
{
"id": 10,
"value": "bHVtb3M="
},
{
"id": 15,
"value": "Ym9tYmFyZGE="
}
]
}
@ -237,5 +262,234 @@
]
}
}
},
"NTP_GLOB_VALID1": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"authentication": "enabled",
"dhcp": "enabled",
"server_role": "enabled",
"admin_state": "enabled"
}
}
}
},
"NTP_GLOB_VALID2": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"authentication": "disabled",
"dhcp": "disabled",
"server_role": "disabled",
"admin_state": "disabled"
}
}
}
},
"NTP_AUTH_INVALID1": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"authentication": ""
}
}
}
},
"NTP_AUTH_INVALID2": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"authentication": "blahblah"
}
}
}
},
"NTP_DHCP_INVALID1": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"dhcp": ""
}
}
}
},
"NTP_DHCP_INVALID2": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"dhcp": "abracadabra"
}
}
}
},
"NTP_SERVER_ROLE_INVALID1": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"server_role": ""
}
}
}
},
"NTP_SERVER_ROLE_INVALID2": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"server_role": "olololo"
}
}
}
},
"NTP_STATE_INVALID1": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"admin_state": ""
}
}
}
},
"NTP_STATE_INVALID2": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP": {
"sonic-ntp:global": {
"admin_state": "azazaza"
}
}
}
},
"NTP_SERVER_ASSOCIATION_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_SERVER": {
"NTP_SERVER_LIST": [
{
"server_address": "2001:aa:aa:aa",
"association_type": "puul"
}
]
}
}
},
"NTP_SERVER_IBURST_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_SERVER": {
"NTP_SERVER_LIST": [
{
"server_address": "2001:aa:aa:aa",
"iburst": "of"
}
]
}
}
},
"NTP_SERVER_KEY_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_SERVER": {
"NTP_SERVER_LIST": [
{
"server_address": "2001:aa:aa:aa",
"key": 0
}
]
}
}
},
"NTP_SERVER_STATE_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_SERVER": {
"NTP_SERVER_LIST": [
{
"server_address": "2001:aa:aa:aa",
"admin_state": "enable"
}
]
}
}
},
"NTP_SERVER_TRUSTED_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_SERVER": {
"NTP_SERVER_LIST": [
{
"server_address": "2001:aa:aa:aa",
"trusted": "not"
}
]
}
}
},
"NTP_KEY_VALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_KEY": {
"NTP_KEYS_LIST": [
{
"id": 20,
"type": "md5",
"value": "anNkZjg4MzIwZnNkMkBANDQ1",
"trusted": "no"
},
{
"id": 30,
"type": "sha1",
"value": "YWFiYmNjZGRlZWZm",
"trusted": "yes"
},
{
"id": 42,
"type": "md5",
"value": "dGhlYW5zd2Vy",
"trusted": "yes"
}
]
}
}
},
"NTP_KEY_ID_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_KEY": {
"NTP_KEYS_LIST": [
{
"id": 100000
}
]
}
}
},
"NTP_KEY_TRUSTED_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_KEY": {
"NTP_KEYS_LIST": [
{
"id": 20,
"trusted": "nope"
}
]
}
}
},
"NTP_KEY_TYPE_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_KEY": {
"NTP_KEYS_LIST": [
{
"id": 20,
"type": "md6"
}
]
}
}
},
"NTP_KEY_VALUE_INVALID": {
"sonic-ntp:sonic-ntp": {
"sonic-ntp:NTP_KEY": {
"NTP_KEYS_LIST": [
{
"id": 20,
"value": ""
}
]
}
}
}
}

View File

@ -180,9 +180,9 @@
"SONIC_EVENTS_SWSS_CHK_CRM_THRESHOLD_VALID": {
"sonic-events-swss:sonic-events-swss": {
"sonic-events-swss:chk_crm_threshold": {
"percent": 0,
"used_cnt": 0,
"free_cnt": 0,
"percent": 80,
"used_cnt": 6414,
"free_cnt": 65300,
"timestamp": "1985-04-12T23:20:50.52Z"
}
}

View File

@ -8,15 +8,15 @@
"src_ip": "30.57.185.38",
"unidirectional": "true"
},
"TELEMETRY_CLIENT_DS_LIST": [
"TELEMETRY_CLIENT_LIST": [
{
"prefix": "DestinationGroup_HS",
"prefix": "DestinationGroup",
"name": "HS",
"dst_addr": "30.57.186.214:8081,30.57.185.39:8081"
}
],
"TELEMETRY_CLIENT_SUB_LIST": [
},
{
"prefix": "Subscription_HS_RDMA",
"prefix": "Subscription",
"name": "HS_RDMA",
"dst_group": "HS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",
@ -36,15 +36,15 @@
"src_ip": "30.57.185.38",
"unidirectional": "true"
},
"TELEMETRY_CLIENT_DS_LIST": [
"TELEMETRY_CLIENT_LIST": [
{
"prefix": "DestinationGroup_HS",
"prefix": "DestinationGroup",
"name": "HS",
"dst_addr": "30.57.186.214:8081,30.57.185.39:8081"
}
],
"TELEMETRY_CLIENT_SUB_LIST": [
},
{
"prefix": "Subscription_HS_RDMA",
"prefix": "Subscription",
"name": "HS_RDMA",
"dst_group": "FS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",
@ -64,15 +64,15 @@
"src_ip": "30.57.185.388",
"unidirectional": "true"
},
"TELEMETRY_CLIENT_DS_LIST": [
"TELEMETRY_CLIENT_LIST": [
{
"prefix": "DestinationGroup_HS",
"prefix": "DestinationGroup",
"name": "HS",
"dst_addr": "30.57.186.214:8081,30.57.185.39:8081"
}
],
"TELEMETRY_CLIENT_SUB_LIST": [
},
{
"prefix": "Subscription_HS_RDMA",
"prefix": "Subscription",
"name": "HS_RDMA",
"dst_group": "HS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",
@ -92,15 +92,15 @@
"src_ip": "30.57.185.38",
"unidirectional": "true"
},
"TELEMETRY_CLIENT_DS_LIST": [
"TELEMETRY_CLIENT_LIST": [
{
"prefix": "DestinationGroup_HS",
"prefix": "DestinationGroup",
"name": "HS",
"dst_addr": "30.57.186.214:8081,30.57.185.39:8081"
}
],
"TELEMETRY_CLIENT_SUB_LIST": [
},
{
"prefix": "Subscription_HS_RDMA",
"prefix": "Subscription",
"name": "HS_RDMA",
"dst_group": "HS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",
@ -120,15 +120,15 @@
"src_ip": "30.57.185.38",
"unidirectional": "true"
},
"TELEMETRY_CLIENT_DS_LIST": [
"TELEMETRY_CLIENT_LIST": [
{
"prefix": "DestinationGroup_HS",
"prefix": "DestinationGroup",
"name": "HS",
"dst_addr": "30.57.186.214:80819,30.57.185.39:8081"
}
],
"TELEMETRY_CLIENT_SUB_LIST": [
},
{
"prefix": "Subscription_HS_RDMA",
"prefix": "Subscription",
"name": "HS_RDMA",
"dst_group": "HS",
"path_target": "COUNTERS_DB",
"paths": "COUNTERS/Ethernet*,COUNTERS_PORT_NAME_MAP",
@ -139,4 +139,4 @@
}
}
}
}
}

View File

@ -102,11 +102,11 @@ module sonic-events-swss {
}
leaf used_cnt {
type uint8;
type uint32;
}
leaf free_cnt {
type uint64;
type uint32;
}
uses evtcmn:sonic-events-cmn;

View File

@ -33,6 +33,10 @@ module sonic-ntp {
prefix mprt;
}
import sonic-types {
prefix stypes;
}
description
"NTP yang Module for SONiC OS";
@ -41,6 +45,39 @@ module sonic-ntp {
"First revision";
}
revision 2023-03-20 {
description
"Add extended configuration options";
}
typedef association-type {
description "NTP server association type";
type enumeration {
enum server;
enum pool;
}
}
typedef key-type {
description "NTP key encryption type";
type enumeration {
enum md5;
enum sha1;
enum sha256;
enum sha384;
enum sha512;
}
}
typedef key-id {
description "NTP key ID";
type uint16 {
range 1..65535 {
error-message "Failed NTP key ID";
}
}
}
container sonic-ntp {
container NTP {
@ -68,6 +105,9 @@ module sonic-ntp {
type leafref {
path /mprt:sonic-mgmt_port/mprt:MGMT_PORT/mprt:MGMT_PORT_LIST/mprt:name;
}
type string {
pattern 'eth0';
}
}
description
@ -92,6 +132,30 @@ module sonic-ntp {
default VRF or Management VRF.";
}
leaf authentication {
type stypes:admin_mode;
default disabled;
description "NTP authentication state";
}
leaf dhcp {
type stypes:admin_mode;
default enabled;
description "Use NTP servers distributed by DHCP";
}
leaf server_role {
type stypes:admin_mode;
default enabled;
description "NTP server functionality state";
}
leaf admin_state {
type stypes:admin_mode;
default enabled;
description "NTP feature state";
}
} /* end of container global */
} /* end of container NTP */
@ -112,10 +176,95 @@ module sonic-ntp {
leaf server_address {
type inet:host;
}
leaf association_type {
type association-type;
default server;
description "NTP remote association type: server or pool.";
}
leaf iburst {
type stypes:on-off;
default on;
description "NTP aggressive polling";
}
leaf key {
description "NTP server key ID";
type leafref {
path /ntp:sonic-ntp/ntp:NTP_KEY/ntp:NTP_KEYS_LIST/ntp:id;
}
}
leaf resolve_as {
type inet:host;
description "Server resolved IP address";
}
leaf admin_state {
type stypes:admin_mode;
default enabled;
description "NTP server state";
}
leaf trusted {
type stypes:yes-no;
default no;
description "Trust this server. It will force time
synchronization only to this server when
authentication is enabled";
}
leaf version {
type uint8 {
range "3..4" {
error-message "Failed NTP version";
}
}
default 4;
description "NTP proto version to communicate with NTP
server";
}
} /* end of list NTP_SERVER_LIST */
} /* end of container NTP_SERVER */
container NTP_KEY {
description "NTP authentication keys inventory";
list NTP_KEYS_LIST {
description "NTP authentication keys inventory";
key "id";
leaf id {
type key-id;
description "NTP key ID";
}
leaf trusted {
type stypes:yes-no;
default no;
description "Trust this NTP key";
}
leaf value {
type string {
length 1..64;
}
description "NTP encrypted authentication key";
}
leaf type {
type key-type;
default md5;
description "NTP authentication key type";
}
} /* end of list NTP_KEYS_LIST */
} /* end of container NTP_KEY */
} /* end of container sonic-ntp */
} /* end of module sonic-ntp */

View File

@ -86,33 +86,26 @@ module sonic-telemetry_client {
}
}
list TELEMETRY_CLIENT_DS_LIST {
list TELEMETRY_CLIENT_LIST {
ordered-by user;
key "prefix";
key "prefix name";
leaf prefix {
type string {
pattern "DestinationGroup_" + ".*";
pattern 'Subscription|DestinationGroup';
}
}
leaf name {
type string;
}
leaf dst_addr {
type ipv4-port;
}
}
list TELEMETRY_CLIENT_SUB_LIST {
ordered-by user;
key "prefix";
leaf prefix {
type string {
pattern "Subscription_" + ".*";
}
}
leaf dst_group {
must "(contains(../../TELEMETRY_CLIENT_DS_LIST/prefix, current()))";
must "(contains(../../TELEMETRY_CLIENT_LIST/name, current()))";
type string;
}

View File

@ -360,6 +360,22 @@ module sonic-types {
"BCP 175: Procedures for Maintaining the Time Zone Database";
}
typedef yes-no {
description "Yes/No configuration";
type enumeration {
enum yes;
enum no;
}
}
typedef on-off {
description "On/Off configuration";
type enumeration {
enum on;
enum off;
}
}
{% if yang_model_type == "cvl" %}
/* Required for CVL */
container operation {

View File

@ -102,37 +102,39 @@ class HardwareChecker(HealthChecker):
if not self._ignore_check(config.ignore_devices, 'fan', name, 'speed'):
speed = data_dict.get('speed', None)
speed_target = data_dict.get('speed_target', None)
speed_tolerance = data_dict.get('speed_tolerance', None)
is_under_speed = data_dict.get('is_under_speed', None)
is_over_speed = data_dict.get('is_over_speed', None)
if not speed:
self.set_object_not_ok('Fan', name, 'Failed to get actual speed data for {}'.format(name))
continue
elif not speed_target:
self.set_object_not_ok('Fan', name, 'Failed to get target speed date for {}'.format(name))
continue
elif not speed_tolerance:
self.set_object_not_ok('Fan', name, 'Failed to get speed tolerance for {}'.format(name))
elif is_under_speed is None:
self.set_object_not_ok('Fan', name, 'Failed to get under speed threshold check for {}'.format(name))
continue
elif is_over_speed is None:
self.set_object_not_ok('Fan', name, 'Failed to get over speed threshold check for {}'.format(name))
continue
else:
try:
speed = float(speed)
speed_target = float(speed_target)
speed_tolerance = float(speed_tolerance)
speed_min_th = speed_target * (1 - float(speed_tolerance) / 100)
speed_max_th = speed_target * (1 + float(speed_tolerance) / 100)
if speed < speed_min_th or speed > speed_max_th:
if 'true' in (is_under_speed.lower(), is_over_speed.lower()):
self.set_object_not_ok('Fan', name,
'{} speed is out of range, speed={}, range=[{},{}]'.format(name,
speed,
speed_min_th,
speed_max_th))
'{} speed is out of range, speed={}, target={}'.format(
name,
speed,
speed_target))
continue
except ValueError:
self.set_object_not_ok('Fan', name,
'Invalid fan speed data for {}, speed={}, target={}, tolerance={}'.format(
'Invalid fan speed data for {}, speed={}, target={}, is_under_speed={}, is_over_speed={}'.format(
name,
speed,
speed_target,
speed_tolerance))
is_under_speed,
is_over_speed))
continue
if not self._ignore_check(config.ignore_devices, 'fan', name, 'direction'):

View File

@ -298,7 +298,8 @@ def test_hardware_checker():
'status': 'True',
'speed': '60',
'speed_target': '60',
'speed_tolerance': '20',
'is_under_speed': 'False',
'is_over_speed': 'False',
'direction': 'intake'
},
'FAN_INFO|fan2': {
@ -306,28 +307,40 @@ def test_hardware_checker():
'status': 'True',
'speed': '60',
'speed_target': '60',
'speed_tolerance': '20'
'is_under_speed': 'False',
'is_over_speed': 'False',
},
'FAN_INFO|fan3': {
'presence': 'True',
'status': 'False',
'speed': '60',
'speed_target': '60',
'speed_tolerance': '20'
'is_under_speed': 'False',
'is_over_speed': 'False',
},
'FAN_INFO|fan4': {
'presence': 'True',
'status': 'True',
'speed': '20',
'speed_target': '60',
'speed_tolerance': '20'
'is_under_speed': 'True',
'is_over_speed': 'False',
},
'FAN_INFO|fan5': {
'presence': 'True',
'status': 'True',
'speed': '90',
'speed_target': '60',
'is_under_speed': 'False',
'is_over_speed': 'True',
},
'FAN_INFO|fan6': {
'presence': 'True',
'status': 'True',
'speed': '60',
'speed_target': '60',
'speed_tolerance': '20',
'is_under_speed': 'False',
'is_over_speed': 'False',
'direction': 'exhaust'
}
})
@ -426,7 +439,10 @@ def test_hardware_checker():
assert 'fan5' in checker._info
assert checker._info['fan5'][HealthChecker.INFO_FIELD_OBJECT_STATUS] == HealthChecker.STATUS_NOT_OK
assert checker._info['fan5'][HealthChecker.INFO_FIELD_OBJECT_MSG] == 'fan5 direction exhaust is not aligned with fan1 direction intake'
assert 'fan6' in checker._info
assert checker._info['fan6'][HealthChecker.INFO_FIELD_OBJECT_STATUS] == HealthChecker.STATUS_NOT_OK
assert checker._info['fan6'][HealthChecker.INFO_FIELD_OBJECT_MSG] == 'fan6 direction exhaust is not aligned with fan1 direction intake'
assert 'PSU 1' in checker._info
assert checker._info['PSU 1'][HealthChecker.INFO_FIELD_OBJECT_STATUS] == HealthChecker.STATUS_OK