#### Why I did it
Fixes https://github.com/Azure/sonic-buildimage/issues/8619

#### How I did it
1) Listening to CFG_DB notifications was migrated from ConfigDBConnector to SubscriberStateTable & Select.
2) This change in design made it possible to remove `update_all_features_config`, which took roughly 5-10 seconds to execute and was the reason for the blackout.
3) Edited the FeatureHandler, Feature & NtpCfg classes to suit this design.
4) Added corresponding mocks and UTs.

**Changes made to classes other than HostConfigDaemon:**

With the previous design, the data initially read from CONFIG_DB was applied through hardcoded methods even before `config_db.listen()` was called, e.g. `update_all_features_config` for FeatureHandler and the `load()` methods for NtpCfg, etc. With this design, the existing data is read and delivered as notifications by SubscriberStateTable, so these hardcoded methods have been removed. The changes made to these classes are therefore limited to adapting them to the new design; the actual functionality is unchanged.

#### How to verify it
UTs:
```
tests/determine-reboot-cause_test.py ......... [ 29%]
tests/procdockerstatsd_test.py . [ 32%]
tests/caclmgrd/caclmgrd_dhcp_test.py ...... [ 51%]
tests/hostcfgd/hostcfgd_radius_test.py .. [ 58%]
tests/hostcfgd/hostcfgd_test.py ............. [100%]
```

Verified manually:
```
Sep 10 22:53:25.662621 sonic INFO systemd[1]: hostcfgd.service: Succeeded.
Sep 10 22:55:04.127719 sonic INFO /hostcfgd: ConfigDB connect success
Sep 10 22:55:04.128108 sonic INFO /hostcfgd: KdumpCfg init ...
Sep 10 22:55:04.148819 sonic INFO /hostcfgd: Waiting for systemctl to finish initialization
Sep 10 22:55:04.163452 sonic INFO /hostcfgd: systemctl has finished initialization -- proceeding ...
Sep 10 22:55:04.163834 sonic INFO /hostcfgd: Kdump handler...
Sep 10 22:55:04.164019 sonic INFO /hostcfgd: Kdump global configuration update
Sep 10 22:55:04.758784 sonic INFO hostcfgd[184471]: kdump is already disabled
Sep 10 22:55:04.758876 sonic INFO hostcfgd[184471]: Kdump is already disabled
Sep 10 22:55:05.182021 sonic INFO hostcfgd[184511]: Kdump configuration has been updated in the startup configuration
Sep 10 22:55:05.596919 sonic INFO hostcfgd[184528]: Kdump configuration has been updated in the startup configuration
Sep 10 22:55:06.140627 sonic INFO /hostcfgd: Feature nat is stopped and disabled
Sep 10 22:55:06.642629 sonic INFO /hostcfgd: Feature telemetry is enabled and started
Sep 10 22:55:07.101297 sonic INFO /hostcfgd: Feature pmon is enabled and started
Sep 10 22:55:07.554366 sonic INFO /hostcfgd: Feature database is enabled and started
Sep 10 22:55:08.009329 sonic INFO /hostcfgd: Feature mgmt-framework is enabled and started
Sep 10 22:55:08.394952 sonic INFO /hostcfgd: Feature macsec is stopped and disabled
Sep 10 22:55:08.782853 sonic INFO /hostcfgd: Feature snmp is enabled and started
Sep 10 22:55:09.205381 sonic INFO /hostcfgd: Feature teamd is enabled and started
Sep 10 22:55:09.224877 sonic INFO /hostcfgd: Feature what-just-happened is enabled and started
Sep 10 22:55:09.627929 sonic INFO /hostcfgd: Feature lldp is enabled and started
Sep 10 22:55:10.086993 sonic INFO /hostcfgd: Feature swss is enabled and started
Sep 10 22:55:10.170312 sonic INFO /hostcfgd: cmd - service aaastatsd stop
Sep 10 22:55:11.012236 sonic INFO /hostcfgd: cmd - service aaastatsd stop
Sep 10 22:55:12.225946 sonic INFO /hostcfgd: Feature bgp is enabled and started
Sep 10 22:55:12.712792 sonic INFO /hostcfgd: Feature dhcp_relay is enabled and started
Sep 10 22:55:13.166656 sonic INFO /hostcfgd: Feature sflow is stopped and disabled
Sep 10 22:55:13.593639 sonic INFO /hostcfgd: Feature radv is enabled and started
Sep 10 22:55:14.034106 sonic INFO /hostcfgd: Feature syncd is enabled and started
Sep 10 22:55:14.113064 sonic INFO /hostcfgd: cmd - service aaastatsd stop
Sep 10 22:55:14.863601 sonic INFO /hostcfgd: RADIUS_SERVER update: key: 10.10.10.1, op: SET, data: {'auth_type': 'pap', 'passkey': 'p*****', 'retransmit': '1', 'timeout': '1'}
Sep 10 22:55:14.938605 sonic INFO /hostcfgd: cmd - service aaastatsd stop
Sep 10 22:55:15.667545 sonic INFO /hostcfgd: RADIUS_SERVER update: key: 10.10.10.3, op: SET, data: {'auth_type': 'chap', 'passkey': 'p*****', 'retransmit': '2', 'timeout': '2'}
Sep 10 22:55:15.667801 sonic INFO /hostcfgd: RADIUS (NAS) IP change - key:eth0, current global info {}
Sep 10 22:55:15.746531 sonic INFO /hostcfgd: cmd - service aaastatsd stop
Sep 10 23:04:47.435340 sonic INFO /hostcfgd: ntp server update key 0.debian.pool.ntp.org
Sep 10 23:04:47.435661 sonic INFO /hostcfgd: ntp server update, restarting ntp-config, ntp servers configured {'0.debian.pool.ntp.org'}
Sep 10 23:04:47.866394 sonic INFO /hostcfgd: NTP GLOBAL Update
Sep 10 23:04:47.866557 sonic INFO /hostcfgd: ntp global update for source intf old {''} new {'eth0', 'Loopback0'}, restarting ntp-config
Sep 10 23:16:25.157600 sonic INFO /hostcfgd: Running cmd: 'sudo systemctl unmask sflow.service'
Sep 10 23:16:25.178472 sonic INFO hostcfgd[192106]: Removed /etc/systemd/system/sflow.service.
Sep 10 23:16:25.582018 sonic INFO /hostcfgd: Running cmd: 'sudo systemctl enable sflow.service'
Sep 10 23:16:25.604534 sonic INFO hostcfgd[192123]: Created symlink /etc/systemd/system/sonic.target.wants/sflow.service → /lib/systemd/system/sflow.service.
Sep 10 23:16:26.029416 sonic INFO /hostcfgd: Running cmd: 'sudo systemctl start sflow.service'
Sep 10 23:16:26.691927 sonic INFO /hostcfgd: Feature sflow is enabled and started
```
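For reference, the snippet below is a minimal, self-contained sketch of the SubscriberStateTable + Select pattern this PR moves to; the table choice (`NTP_SERVER`) and the `print` handler are illustrative only, not the daemon's actual code. On startup the already-existing CONFIG_DB entries of the subscribed table are replayed as `SET` notifications, which is why no separate "apply everything" pass is needed.

```python
from swsscommon.swsscommon import DBConnector, Select, SubscriberStateTable, TableConsumable

SELECT_TIMEOUT_MS = 1000

db = DBConnector("CONFIG_DB", 0)
sel = Select()
# One subscriber per CONFIG_DB table; the last argument is the Selectable priority.
sub = SubscriberStateTable(db, "NTP_SERVER", TableConsumable.DEFAULT_POP_BATCH_SIZE, 0)
sel.addSelectable(sub)

while True:
    state, selectable = sel.select(SELECT_TIMEOUT_MS)
    if state == sel.TIMEOUT or state == sel.ERROR:
        continue
    # Existing entries arrive as SET notifications on startup, runtime changes later.
    key, op, fvs = sub.pop()
    print("NTP_SERVER", key, op, dict(fvs))
```

The full hostcfgd source is included below for reference.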
#!/usr/bin/env python3

import ast
import copy
import ipaddress
import os
import sys
import subprocess
import syslog
import signal

import jinja2
from sonic_py_common import device_info
from swsscommon.swsscommon import SubscriberStateTable, DBConnector, Select
from swsscommon.swsscommon import ConfigDBConnector, TableConsumable

# FILE
PAM_AUTH_CONF = "/etc/pam.d/common-auth-sonic"
PAM_AUTH_CONF_TEMPLATE = "/usr/share/sonic/templates/common-auth-sonic.j2"
NSS_TACPLUS_CONF = "/etc/tacplus_nss.conf"
NSS_TACPLUS_CONF_TEMPLATE = "/usr/share/sonic/templates/tacplus_nss.conf.j2"
NSS_RADIUS_CONF = "/etc/radius_nss.conf"
NSS_RADIUS_CONF_TEMPLATE = "/usr/share/sonic/templates/radius_nss.conf.j2"
PAM_RADIUS_AUTH_CONF_TEMPLATE = "/usr/share/sonic/templates/pam_radius_auth.conf.j2"
NSS_CONF = "/etc/nsswitch.conf"
ETC_PAMD_SSHD = "/etc/pam.d/sshd"
ETC_PAMD_LOGIN = "/etc/pam.d/login"

# TACACS+
TACPLUS_SERVER_PASSKEY_DEFAULT = ""
TACPLUS_SERVER_TIMEOUT_DEFAULT = "5"
TACPLUS_SERVER_AUTH_TYPE_DEFAULT = "pap"

# RADIUS
RADIUS_SERVER_AUTH_PORT_DEFAULT = "1812"
RADIUS_SERVER_PASSKEY_DEFAULT = ""
RADIUS_SERVER_RETRANSMIT_DEFAULT = "3"
RADIUS_SERVER_TIMEOUT_DEFAULT = "5"
RADIUS_SERVER_AUTH_TYPE_DEFAULT = "pap"
RADIUS_PAM_AUTH_CONF_DIR = "/etc/pam_radius_auth.d/"

# MISC Constants
CFG_DB = "CONFIG_DB"
HOSTCFGD_MAX_PRI = 10  # Used to enforce ordering b/w daemons under Hostcfgd
DEFAULT_SELECT_TIMEOUT = 1000

def safe_eval(val, default_value=False):
    """ Safely evaluate the boolean expression, without raising an exception """
    try:
        ret = ast.literal_eval(val)
    except ValueError:
        ret = default_value
    return ret


def signal_handler(sig, frame):
    if sig == signal.SIGHUP:
        syslog.syslog(syslog.LOG_INFO, "HostCfgd: signal 'SIGHUP' is caught and ignoring..")
    elif sig == signal.SIGINT:
        syslog.syslog(syslog.LOG_INFO, "HostCfgd: signal 'SIGINT' is caught and exiting...")
        sys.exit(128 + sig)
    elif sig == signal.SIGTERM:
        syslog.syslog(syslog.LOG_INFO, "HostCfgd: signal 'SIGTERM' is caught and exiting...")
        sys.exit(128 + sig)
    else:
        syslog.syslog(syslog.LOG_INFO, "HostCfgd: invalid signal - ignoring..")


def run_cmd(cmd, log_err=True, raise_exception=False):
    try:
        subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError as err:
        if log_err:
            syslog.syslog(syslog.LOG_ERR, "{} - failed: return code - {}, output:\n{}"
                          .format(err.cmd, err.returncode, err.output))
        if raise_exception:
            raise


def is_true(val):
    if val == 'True' or val == 'true':
        return True
    else:
        return False


def is_vlan_sub_interface(ifname):
    ifname_split = ifname.split(".")
    return (len(ifname_split) == 2)


def sub(l, start, end):
    return l[start:end]


def obfuscate(data):
    if data:
        return data[0] + '*****'
    else:
        return data

class Feature(object):
    """ Represents a feature configuration from CONFIG_DB data. """

    def __init__(self, feature_name, feature_cfg, device_config=None):
        """ Initialize Feature object based on CONFIG_DB data.

        Args:
            feature_name (str): Feature name string
            feature_cfg (dict): Feature CONFIG_DB configuration
            device_config (dict): DEVICE_METADATA section of CONFIG_DB
        """

        self.name = feature_name
        self.state = self._get_target_state(feature_cfg.get('state'), device_config or {})
        self.auto_restart = feature_cfg.get('auto_restart', 'disabled')
        self.has_timer = safe_eval(feature_cfg.get('has_timer', 'False'))
        self.has_global_scope = safe_eval(feature_cfg.get('has_global_scope', 'True'))
        self.has_per_asic_scope = safe_eval(feature_cfg.get('has_per_asic_scope', 'False'))

    def _get_target_state(self, state_configuration, device_config):
        """ Returns the target state for the feature by rendering the state field as J2 template.

        Args:
            state_configuration (str): State configuration from CONFIG_DB
            device_config (dict): DEVICE_METADATA section of CONFIG_DB
        Returns:
            (str): Target feature state
        """

        if state_configuration is None:
            return None

        template = jinja2.Template(state_configuration)
        target_state = template.render(device_config)
        if target_state not in ('enabled', 'disabled', 'always_enabled', 'always_disabled'):
            raise ValueError('Invalid state rendered for feature {}: {}'.format(self.name, target_state))
        return target_state

    def compare_state(self, feature_name, feature_cfg):
        if self.name != feature_name or not isinstance(feature_cfg, dict):
            return False

        if self.state != feature_cfg.get('state', ''):
            return False
        return True

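# FeatureHandler.handle() is invoked per FEATURE-table key from the notification
# loop in HostConfigDaemon.start(); with SubscriberStateTable the pre-existing
# FEATURE entries are delivered the same way at startup, so no separate bulk
# "apply all features" pass is needed (see the PR description above).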
class FeatureHandler(object):
    """ Handles FEATURE table updates. """

    SYSTEMD_SYSTEM_DIR = '/etc/systemd/system/'
    SYSTEMD_SERVICE_CONF_DIR = os.path.join(SYSTEMD_SYSTEM_DIR, '{}.service.d/')

    def __init__(self, config_db, device_config):
        self._config_db = config_db
        self._device_config = device_config
        self._cached_config = {}
        self.is_multi_npu = device_info.is_multi_npu()

    def handle(self, feature_name, op, feature_cfg):
        if not feature_cfg:
            self._cached_config.pop(feature_name)
            syslog.syslog(syslog.LOG_INFO, "Deregistering feature {}".format(feature_name))
            return

        feature = Feature(feature_name, feature_cfg, self._device_config)
        self._cached_config.setdefault(feature_name, Feature(feature_name, {}))

        # Change auto-restart configuration first.
        # If service reached failed state before this configuration applies (e.g. on boot)
        # the next called self.update_feature_state will start it again. If it will fail
        # again the auto restart will kick-in. Another order may leave it in failed state
        # and not auto restart.
        self.update_feature_auto_restart(feature, feature_name)

        # Enable/disable the container service if the feature state was changed from its previous state.
        if self._cached_config[feature_name].state != feature.state:
            if self.update_feature_state(feature):
                self._cached_config[feature_name].state = feature.state
            else:
                self.resync_feature_state(self._cached_config[feature_name])

    def sync_state_field(self):
        """
        Summary:
        Updates the state field in the FEATURE|* tables as the state field
        might have to be rendered based on DEVICE_METADATA table
        """
        feature_table = self._config_db.get_table('FEATURE')
        for feature_name in feature_table.keys():
            if not feature_name:
                syslog.syslog(syslog.LOG_WARNING, "Feature is None")
                continue

            feature = Feature(feature_name, feature_table[feature_name], self._device_config)
            if not feature.compare_state(feature_name, feature_table.get(feature_name, {})):
                self.resync_feature_state(feature)

    def update_feature_state(self, feature):
        cached_feature = self._cached_config[feature.name]
        enable = False
        disable = False

        # Allowed transitions:
        # None           -> always_enabled
        #                -> always_disabled
        #                -> enabled
        #                -> disabled
        # always_enabled -> always_disabled
        # enabled        -> disabled
        # disabled       -> enabled
        if cached_feature.state is None:
            enable = feature.state in ("always_enabled", "enabled")
            disable = feature.state in ("always_disabled", "disabled")
        elif cached_feature.state in ("always_enabled", "always_disabled"):
            disable = feature.state == "always_disabled"
            enable = feature.state == "always_enabled"
        elif cached_feature.state in ("enabled", "disabled"):
            enable = feature.state == "enabled"
            disable = feature.state == "disabled"
        else:
            syslog.syslog(syslog.LOG_INFO, "Feature {} service is {}".format(feature.name, cached_feature.state))
            return False

        if not enable and not disable:
            syslog.syslog(syslog.LOG_ERR, "Unexpected state value '{}' for feature {}"
                          .format(feature.state, feature.name))
            return False

        if enable:
            self.enable_feature(feature)
            syslog.syslog(syslog.LOG_INFO, "Feature {} is enabled and started".format(feature.name))

        if disable:
            self.disable_feature(feature)
            syslog.syslog(syslog.LOG_INFO, "Feature {} is stopped and disabled".format(feature.name))

        return True

    def update_feature_auto_restart(self, feature, feature_name):

        dir_name = self.SYSTEMD_SERVICE_CONF_DIR.format(feature_name)
        auto_restart_conf = os.path.join(dir_name, 'auto_restart.conf')

        write_conf = False
        if not os.path.exists(auto_restart_conf):  # if the auto_restart_conf file is not found, set it
            write_conf = True

        if self._cached_config[feature_name].auto_restart != feature.auto_restart:
            write_conf = True

        if not write_conf:
            return

        self._cached_config[feature_name].auto_restart = feature.auto_restart  # Update Cache

        restart_config = "always" if feature.auto_restart == "enabled" else "no"
        service_conf = "[Service]\nRestart={}\n".format(restart_config)
        feature_names, feature_suffixes = self.get_feature_attribute(feature)

        for name in feature_names:
            dir_name = self.SYSTEMD_SERVICE_CONF_DIR.format(name)
            auto_restart_conf = os.path.join(dir_name, 'auto_restart.conf')
            if not os.path.exists(dir_name):
                os.mkdir(dir_name)
            with open(auto_restart_conf, 'w') as cfgfile:
                cfgfile.write(service_conf)

        try:
            run_cmd("sudo systemctl daemon-reload", raise_exception=True)
        except Exception as err:
            syslog.syslog(syslog.LOG_ERR, "Feature '{}' failed to configure auto_restart".format(feature.name))
            return

    def get_feature_attribute(self, feature):
        # Create feature name suffix depending feature is running in host or namespace or in both
        feature_names = (
            ([feature.name] if feature.has_global_scope or not self.is_multi_npu else []) +
            ([(feature.name + '@' + str(asic_inst)) for asic_inst in range(device_info.get_num_npus())
              if feature.has_per_asic_scope and self.is_multi_npu])
        )

        if not feature_names:
            syslog.syslog(syslog.LOG_ERR, "Feature '{}' service not available"
                          .format(feature.name))

        feature_suffixes = ["service"] + (["timer"] if feature.has_timer else [])

        return feature_names, feature_suffixes

    def get_systemd_unit_state(self, unit):
        """ Returns service configuration """

        cmd = "sudo systemctl show {} --property UnitFileState".format(unit)
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            syslog.syslog(syslog.LOG_ERR, "Failed to get status of {}: rc={} stderr={}".format(unit, proc.returncode, stderr))
            return 'invalid'  # same as systemd's "invalid indicates that it could not be determined whether the unit file is enabled".

        props = dict([line.split("=") for line in stdout.decode().strip().splitlines()])
        return props["UnitFileState"]

    def enable_feature(self, feature):
        cmds = []
        feature_names, feature_suffixes = self.get_feature_attribute(feature)
        for feature_name in feature_names:
            # Check if it is already enabled, if yes skip the system call
            unit_file_state = self.get_systemd_unit_state("{}.{}".format(feature_name, feature_suffixes[-1]))
            if unit_file_state == "enabled":
                continue

            for suffix in feature_suffixes:
                cmds.append("sudo systemctl unmask {}.{}".format(feature_name, suffix))

            # If feature has timer associated with it, start/enable corresponding systemd .timer unit
            # otherwise, start/enable corresponding systemd .service unit

            cmds.append("sudo systemctl enable {}.{}".format(feature_name, feature_suffixes[-1]))
            cmds.append("sudo systemctl start {}.{}".format(feature_name, feature_suffixes[-1]))

        for cmd in cmds:
            syslog.syslog(syslog.LOG_INFO, "Running cmd: '{}'".format(cmd))
            try:
                run_cmd(cmd, raise_exception=True)
            except Exception as err:
                syslog.syslog(syslog.LOG_ERR, "Feature '{}.{}' failed to be enabled and started"
                              .format(feature.name, feature_suffixes[-1]))
                return

    def disable_feature(self, feature):
        cmds = []
        feature_names, feature_suffixes = self.get_feature_attribute(feature)
        for feature_name in feature_names:
            # Check if it is already disabled, if yes skip the system call
            unit_file_state = self.get_systemd_unit_state("{}.{}".format(feature_name, feature_suffixes[-1]))
            if unit_file_state in ("disabled", "masked"):
                continue

            for suffix in reversed(feature_suffixes):
                cmds.append("sudo systemctl stop {}.{}".format(feature_name, suffix))
            cmds.append("sudo systemctl disable {}.{}".format(feature_name, feature_suffixes[-1]))
            cmds.append("sudo systemctl mask {}.{}".format(feature_name, feature_suffixes[-1]))

        for cmd in cmds:
            syslog.syslog(syslog.LOG_INFO, "Running cmd: '{}'".format(cmd))
            try:
                run_cmd(cmd, raise_exception=True)
            except Exception as err:
                syslog.syslog(syslog.LOG_ERR, "Feature '{}.{}' failed to be stopped and disabled"
                              .format(feature.name, feature_suffixes[-1]))
                return

    def resync_feature_state(self, feature):
        self._config_db.mod_entry('FEATURE', feature.name, {'state': feature.state})

class Iptables(object):
    def __init__(self):
        '''
        Default MSS to 1460 - (MTU 1500 - 40 (TCP/IP Overhead))
        For IPv6, it would be 1440 - (MTU 1500 - 60 octets)
        '''
        self.tcpmss = 1460
        self.tcp6mss = 1440

    def is_ip_prefix_in_key(self, key):
        '''
        Function to check if IP address is present in the key. If it
        is present, then the key would be a tuple or else, it shall be
        a string
        '''
        return (isinstance(key, tuple))

    def command(self, chain, ip, ver, op):
        cmd = 'iptables' if ver == '4' else 'ip6tables'
        cmd += ' -t mangle --{} {} -p tcp --tcp-flags SYN SYN'.format(op, chain)
        cmd += ' -d' if chain == 'PREROUTING' else ' -s'
        mss = self.tcpmss if ver == '4' else self.tcp6mss
        cmd += ' {} -j TCPMSS --set-mss {}'.format(ip, mss)

        return cmd

    def iptables_handler(self, key, data, add=True):
        if not self.is_ip_prefix_in_key(key):
            return

        iface, ip = key
        ip_str = ip.split("/")[0]
        ip_addr = ipaddress.ip_address(ip_str)
        if isinstance(ip_addr, ipaddress.IPv6Address):
            ver = '6'
        else:
            ver = '4'

        self.mangle_handler(ip_str, ver, add)

    def mangle_handler(self, ip, ver, add):
        if not add:
            op = 'delete'
        else:
            op = 'check'

        iptables_cmds = []
        chains = ['PREROUTING', 'POSTROUTING']
        for chain in chains:
            cmd = self.command(chain, ip, ver, op)
            if not add:
                iptables_cmds.append(cmd)
            else:
                '''
                For add case, first check if rule exists. Iptables just appends to the chain
                as a new rule even if it is the same as an existing one. Check this and
                do nothing if rule exists
                '''
                ret = subprocess.call(cmd, shell=True)
                if ret == 0:
                    syslog.syslog(syslog.LOG_INFO, "{} rule exists in {}".format(ip, chain))
                else:
                    # Modify command from Check to Append
                    iptables_cmds.append(cmd.replace("check", "append"))

        for cmd in iptables_cmds:
            syslog.syslog(syslog.LOG_INFO, "Running cmd - {}".format(cmd))
            run_cmd(cmd)

class AaaCfg(object):
    def __init__(self):
        self.auth_default = {
            'login': 'local',
        }
        self.tacplus_global_default = {
            'auth_type': TACPLUS_SERVER_AUTH_TYPE_DEFAULT,
            'timeout': TACPLUS_SERVER_TIMEOUT_DEFAULT,
            'passkey': TACPLUS_SERVER_PASSKEY_DEFAULT
        }
        self.tacplus_global = {}
        self.tacplus_servers = {}

        self.radius_global_default = {
            'priority': 0,
            'auth_port': RADIUS_SERVER_AUTH_PORT_DEFAULT,
            'auth_type': RADIUS_SERVER_AUTH_TYPE_DEFAULT,
            'retransmit': RADIUS_SERVER_RETRANSMIT_DEFAULT,
            'timeout': RADIUS_SERVER_TIMEOUT_DEFAULT,
            'passkey': RADIUS_SERVER_PASSKEY_DEFAULT
        }
        self.radius_global = {}
        self.radius_servers = {}

        self.auth = {}
        self.debug = False
        self.trace = False

        self.hostname = ""

    # Load conf from ConfigDb
    def load(self, aaa_conf, tac_global_conf, tacplus_conf, rad_global_conf, radius_conf):
        for row in aaa_conf:
            self.aaa_update(row, aaa_conf[row], modify_conf=False)
        for row in tac_global_conf:
            self.tacacs_global_update(row, tac_global_conf[row], modify_conf=False)
        for row in tacplus_conf:
            self.tacacs_server_update(row, tacplus_conf[row], modify_conf=False)

        for row in rad_global_conf:
            self.radius_global_update(row, rad_global_conf[row], modify_conf=False)
        for row in radius_conf:
            self.radius_server_update(row, radius_conf[row], modify_conf=False)

        self.modify_conf_file()

    def aaa_update(self, key, data, modify_conf=True):
        if key == 'authentication':
            self.auth = data
            if 'failthrough' in data:
                self.auth['failthrough'] = is_true(data['failthrough'])
            if 'debug' in data:
                self.debug = is_true(data['debug'])
        if modify_conf:
            self.modify_conf_file()

    def pick_src_intf_ipaddrs(self, keys, src_intf):
        new_ipv4_addr = ""
        new_ipv6_addr = ""

        for it in keys:
            if src_intf != it[0] or (isinstance(it, tuple) == False):
                continue
            if new_ipv4_addr != "" and new_ipv6_addr != "":
                break
            ip_str = it[1].split("/")[0]
            ip_addr = ipaddress.ip_address(ip_str)
            # Pick the first IP address from the table that matches the source interface
            if isinstance(ip_addr, ipaddress.IPv6Address):
                if new_ipv6_addr != "":
                    continue
                new_ipv6_addr = ip_str
            else:
                if new_ipv4_addr != "":
                    continue
                new_ipv4_addr = ip_str

        return (new_ipv4_addr, new_ipv6_addr)

    def tacacs_global_update(self, key, data, modify_conf=True):
        if key == 'global':
            self.tacplus_global = data
            if modify_conf:
                self.modify_conf_file()

    def tacacs_server_update(self, key, data, modify_conf=True):
        if data == {}:
            if key in self.tacplus_servers:
                del self.tacplus_servers[key]
        else:
            self.tacplus_servers[key] = data

        if modify_conf:
            self.modify_conf_file()

    def handle_radius_source_intf_ip_chg(self, key):
        modify_conf = False
        if 'src_intf' in self.radius_global:
            if key[0] == self.radius_global['src_intf']:
                modify_conf = True
        for addr in self.radius_servers:
            if ('src_intf' in self.radius_servers[addr]) and \
                    (key[0] == self.radius_servers[addr]['src_intf']):
                modify_conf = True
                break

        if not modify_conf:
            return

        syslog.syslog(syslog.LOG_INFO, 'RADIUS IP change - key:{}, current server info {}'.format(key, self.radius_servers))
        self.modify_conf_file()

    def handle_radius_nas_ip_chg(self, key):
        modify_conf = False
        # Mgmt IP configuration affects only the default nas_ip
        if 'nas_ip' not in self.radius_global:
            for addr in self.radius_servers:
                if 'nas_ip' not in self.radius_servers[addr]:
                    modify_conf = True
                    break

        if not modify_conf:
            return

        syslog.syslog(syslog.LOG_INFO, 'RADIUS (NAS) IP change - key:{}, current global info {}'.format(key, self.radius_global))
        self.modify_conf_file()

    def radius_global_update(self, key, data, modify_conf=True):
        if key == 'global':
            self.radius_global = data
            if 'statistics' in data:
                self.radius_global['statistics'] = is_true(data['statistics'])
            if modify_conf:
                self.modify_conf_file()

    def radius_server_update(self, key, data, modify_conf=True):
        if data == {}:
            if key in self.radius_servers:
                del self.radius_servers[key]
        else:
            self.radius_servers[key] = data

        if modify_conf:
            self.modify_conf_file()

    def hostname_update(self, hostname, modify_conf=True):
        if self.hostname == hostname:
            return

        self.hostname = hostname

        # Currently only used for RADIUS
        if len(self.radius_servers) == 0:
            return

        if modify_conf:
            self.modify_conf_file()

    def get_hostname(self):
        return self.hostname

    def get_interface_ip(self, source, addr=None):
        keys = None
        try:
            if source.startswith("Eth"):
                if is_vlan_sub_interface(source):
                    keys = self.config_db.get_keys('VLAN_SUB_INTERFACE')
                else:
                    keys = self.config_db.get_keys('INTERFACE')
            elif source.startswith("Po"):
                if is_vlan_sub_interface(source):
                    keys = self.config_db.get_keys('VLAN_SUB_INTERFACE')
                else:
                    keys = self.config_db.get_keys('PORTCHANNEL_INTERFACE')
            elif source.startswith("Vlan"):
                keys = self.config_db.get_keys('VLAN_INTERFACE')
            elif source.startswith("Loopback"):
                keys = self.config_db.get_keys('LOOPBACK_INTERFACE')
            elif source == "eth0":
                keys = self.config_db.get_keys('MGMT_INTERFACE')
        except Exception as e:
            pass

        interface_ip = ""
        if keys != None:
            ipv4_addr, ipv6_addr = self.pick_src_intf_ipaddrs(keys, source)
            # Based on the type of addr, return v4 or v6
            if addr and isinstance(addr, ipaddress.IPv6Address):
                interface_ip = ipv6_addr
            else:
                # This could be tuned, but that involves a DNS query, so
                # offline configuration might trip (or cause delays).
                interface_ip = ipv4_addr
        return interface_ip

    def modify_single_file(self, filename, operations=None):
        if operations:
            cmd = "sed -e {0} {1} > {1}.new; mv -f {1} {1}.old; mv -f {1}.new {1}".format(' -e '.join(operations), filename)
            os.system(cmd)

    def modify_conf_file(self):
        auth = self.auth_default.copy()
        auth.update(self.auth)
        tacplus_global = self.tacplus_global_default.copy()
        tacplus_global.update(self.tacplus_global)
        if 'src_ip' in tacplus_global:
            src_ip = tacplus_global['src_ip']
        else:
            src_ip = None

        servers_conf = []
        if self.tacplus_servers:
            for addr in self.tacplus_servers:
                server = tacplus_global.copy()
                server['ip'] = addr
                server.update(self.tacplus_servers[addr])
                servers_conf.append(server)
            servers_conf = sorted(servers_conf, key=lambda t: int(t['priority']), reverse=True)

        radius_global = self.radius_global_default.copy()
        radius_global.update(self.radius_global)

        # RADIUS: Set the default nas_ip, and nas_id
        if 'nas_ip' not in radius_global:
            nas_ip = self.get_interface_ip("eth0")
            if len(nas_ip) > 0:
                radius_global['nas_ip'] = nas_ip
        if 'nas_id' not in radius_global:
            nas_id = self.get_hostname()
            if len(nas_id) > 0:
                radius_global['nas_id'] = nas_id

        radsrvs_conf = []
        if self.radius_servers:
            for addr in self.radius_servers:
                server = radius_global.copy()
                server['ip'] = addr
                server.update(self.radius_servers[addr])

                if 'src_intf' in server:
                    # RADIUS: Log a message if src_ip is already defined.
                    if 'src_ip' in server:
                        syslog.syslog(syslog.LOG_INFO, \
                                      "RADIUS_SERVER|{}: src_intf found. Ignoring src_ip".format(addr))
                    # RADIUS: If server.src_intf, then get the corresponding
                    # src_ip based on the server.ip, and set it.
                    src_ip = self.get_interface_ip(server['src_intf'], addr)
                    if len(src_ip) > 0:
                        server['src_ip'] = src_ip
                    elif 'src_ip' in server:
                        syslog.syslog(syslog.LOG_INFO, \
                                      "RADIUS_SERVER|{}: src_intf has no usable IP addr.".format(addr))
                        del server['src_ip']

                radsrvs_conf.append(server)
            radsrvs_conf = sorted(radsrvs_conf, key=lambda t: int(t['priority']), reverse=True)

        template_file = os.path.abspath(PAM_AUTH_CONF_TEMPLATE)
        env = jinja2.Environment(loader=jinja2.FileSystemLoader('/'), trim_blocks=True)
        env.filters['sub'] = sub
        template = env.get_template(template_file)
        if 'radius' in auth['login']:
            pam_conf = template.render(debug=self.debug, trace=self.trace, auth=auth, servers=radsrvs_conf)
        else:
            pam_conf = template.render(auth=auth, src_ip=src_ip, servers=servers_conf)

        # Use rename(), which is atomic (on the same fs) to avoid empty file
        with open(PAM_AUTH_CONF + ".tmp", 'w') as f:
            f.write(pam_conf)
        os.chmod(PAM_AUTH_CONF + ".tmp", 0o644)
        os.rename(PAM_AUTH_CONF + ".tmp", PAM_AUTH_CONF)

        # Modify common-auth include file in /etc/pam.d/login, sshd.
        # /etc/pam.d/sudo is not handled, because it would change the existing
        # behavior. It can be modified once a config knob is added for sudo.
        if os.path.isfile(PAM_AUTH_CONF):
            self.modify_single_file(ETC_PAMD_SSHD, [ "'/^@include/s/common-auth$/common-auth-sonic/'" ])
            self.modify_single_file(ETC_PAMD_LOGIN, [ "'/^@include/s/common-auth$/common-auth-sonic/'" ])
        else:
            self.modify_single_file(ETC_PAMD_SSHD, [ "'/^@include/s/common-auth-sonic$/common-auth/'" ])
            self.modify_single_file(ETC_PAMD_LOGIN, [ "'/^@include/s/common-auth-sonic$/common-auth/'" ])

        # Add tacplus/radius in nsswitch.conf if TACACS+/RADIUS enable
        if 'tacacs+' in auth['login']:
            if os.path.isfile(NSS_CONF):
                self.modify_single_file(NSS_CONF, [ "'/^passwd/s/ radius//'" ])
                self.modify_single_file(NSS_CONF, [ "'/tacplus/b'", "'/^passwd/s/compat/tacplus &/'", "'/^passwd/s/files/tacplus &/'" ])
        elif 'radius' in auth['login']:
            if os.path.isfile(NSS_CONF):
                self.modify_single_file(NSS_CONF, [ "'/^passwd/s/tacplus //'" ])
                self.modify_single_file(NSS_CONF, [ "'/radius/b'", "'/^passwd/s/compat/& radius/'", "'/^passwd/s/files/& radius/'" ])
        else:
            if os.path.isfile(NSS_CONF):
                self.modify_single_file(NSS_CONF, [ "'/^passwd/s/tacplus //g'" ])
                self.modify_single_file(NSS_CONF, [ "'/^passwd/s/ radius//'" ])

        # Set tacacs+ server in nss-tacplus conf
        template_file = os.path.abspath(NSS_TACPLUS_CONF_TEMPLATE)
        template = env.get_template(template_file)
        nss_tacplus_conf = template.render(debug=self.debug, src_ip=src_ip, servers=servers_conf)
        with open(NSS_TACPLUS_CONF, 'w') as f:
            f.write(nss_tacplus_conf)

        # Set debug in nss-radius conf
        template_file = os.path.abspath(NSS_RADIUS_CONF_TEMPLATE)
        template = env.get_template(template_file)
        nss_radius_conf = template.render(debug=self.debug, trace=self.trace, servers=radsrvs_conf)
        with open(NSS_RADIUS_CONF, 'w') as f:
            f.write(nss_radius_conf)

        # Create the per server pam_radius_auth.conf
        if radsrvs_conf:
            for srv in radsrvs_conf:
                # Configuration File
                pam_radius_auth_file = RADIUS_PAM_AUTH_CONF_DIR + srv['ip'] + "_" + srv['auth_port'] + ".conf"
                template_file = os.path.abspath(PAM_RADIUS_AUTH_CONF_TEMPLATE)
                template = env.get_template(template_file)
                pam_radius_auth_conf = template.render(server=srv)

                open(pam_radius_auth_file, 'a').close()
                os.chmod(pam_radius_auth_file, 0o600)
                with open(pam_radius_auth_file, 'w+') as f:
                    f.write(pam_radius_auth_conf)

        # Start the statistics service. Only RADIUS implemented
        if ('radius' in auth['login']) and ('statistics' in radius_global) and \
                radius_global['statistics']:
            cmd = 'service aaastatsd start'
        else:
            cmd = 'service aaastatsd stop'
        syslog.syslog(syslog.LOG_INFO, "cmd - {}".format(cmd))
        try:
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as err:
            syslog.syslog(syslog.LOG_ERR,
                          "{} - failed: return code - {}, output:\n{}"
                          .format(err.cmd, err.returncode, err.output))

class KdumpCfg(object):
    def __init__(self, CfgDb):
        self.config_db = CfgDb
        self.kdump_defaults = { "enabled" : "false",
                                "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M",
                                "num_dumps": "3" }

    def load(self, kdump_table):
        """
        Set the KDUMP table in CFG DB to kdump_defaults if not set by the user
        """
        syslog.syslog(syslog.LOG_INFO, "KdumpCfg init ...")
        kdump_conf = kdump_table.get("config", {})
        for row in self.kdump_defaults:
            value = self.kdump_defaults.get(row)
            if not kdump_conf.get(row):
                self.config_db.mod_entry("KDUMP", "config", {row : value})

    def kdump_update(self, key, data):
        syslog.syslog(syslog.LOG_INFO, "Kdump global configuration update")
        if key == "config":
            # Admin mode
            kdump_enabled = self.kdump_defaults["enabled"]
            if data.get("enabled") is not None:
                kdump_enabled = data.get("enabled")
            if kdump_enabled.lower() == "true":
                enabled = True
            else:
                enabled = False
            if enabled:
                run_cmd("sonic-kdump-config --enable")
            else:
                run_cmd("sonic-kdump-config --disable")

            # Memory configuration
            memory = self.kdump_defaults["memory"]
            if data.get("memory") is not None:
                memory = data.get("memory")
            if data.get("memory") is not None:
                run_cmd("sonic-kdump-config --memory " + memory)

            # Num dumps
            num_dumps = self.kdump_defaults["num_dumps"]
            if data.get("num_dumps") is not None:
                num_dumps = data.get("num_dumps")
            if data.get("num_dumps") is not None:
                run_cmd("sonic-kdump-config --num_dumps " + num_dumps)

class NtpCfg(object):
    """
    NtpCfg Config Daemon
    1) ntp-config.service handles the configuration updates and then starts ntp.service
    2) Both of them start after all the feature services start
    3) Purpose of this daemon is to propagate runtime config changes in
       NTP, NTP_SERVER and LOOPBACK_INTERFACE
    """
    def __init__(self):
        self.ntp_global = {}
        self.ntp_servers = set()

    def handle_ntp_source_intf_chg(self, intf_name):
        # if no ntp server configured, do nothing
        if not self.ntp_servers:
            return

        # check only the intf configured as source interface
        if intf_name not in self.ntp_global.get('src_intf', '').split(';'):
            return
        else:
            # just restart ntp config
            cmd = 'systemctl restart ntp-config'
            run_cmd(cmd)

    def ntp_global_update(self, key, data):
        syslog.syslog(syslog.LOG_INFO, 'NTP GLOBAL Update')
        orig_src = self.ntp_global.get('src_intf', '')
        orig_src_set = set(orig_src.split(";"))
        orig_vrf = self.ntp_global.get('vrf', '')

        new_src = data.get('src_intf', '')
        new_src_set = set(new_src.split(";"))
        new_vrf = data.get('vrf', '')

        # Update the Local Cache
        self.ntp_global = data

        # check if ntp server configured, if not, do nothing
        if not self.ntp_servers:
            syslog.syslog(syslog.LOG_INFO, "No ntp server when global config change, do nothing")
            return

        if orig_src_set != new_src_set:
            syslog.syslog(syslog.LOG_INFO, "ntp global update for source intf old {} new {}, restarting ntp-config"
                          .format(orig_src_set, new_src_set))
            cmd = 'systemctl restart ntp-config'
            run_cmd(cmd)
        elif new_vrf != orig_vrf:
            syslog.syslog(syslog.LOG_INFO, "ntp global update for vrf old {} new {}, restarting ntp service"
                          .format(orig_vrf, new_vrf))
            cmd = 'service ntp restart'
            run_cmd(cmd)

    def ntp_server_update(self, key, op):
        syslog.syslog(syslog.LOG_INFO, 'ntp server update key {}'.format(key))

        restart_config = False
        if op == "SET" and key not in self.ntp_servers:
            restart_config = True
            self.ntp_servers.add(key)
        elif op == "DEL" and key in self.ntp_servers:
            restart_config = True
            self.ntp_servers.remove(key)

        if restart_config:
            cmd = 'systemctl restart ntp-config'
            syslog.syslog(syslog.LOG_INFO, 'ntp server update, restarting ntp-config, ntp servers configured {}'.format(self.ntp_servers))
            run_cmd(cmd)

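# Design note (see the PR description above): HostConfigDaemon no longer applies
# the initial CONFIG_DB contents through bulk helpers such as
# update_all_features_config. SubscriberStateTable replays the existing entries
# of each subscribed table as SET notifications, so the initial snapshot and
# runtime changes are handled by the same per-table callbacks, removing the
# 5-10 second blackout window at startup.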
class HostConfigDaemon:
    def __init__(self):
        # Just a sanity check to verify if the CONFIG_DB has been initialized
        # before moving forward
        self.config_db = ConfigDBConnector()
        self.config_db.connect(wait_for_init=True, retry_on=True)
        self.dbconn = DBConnector(CFG_DB, 0)
        self.selector = Select()
        syslog.syslog(syslog.LOG_INFO, 'ConfigDB connect success')

        self.select = Select()
        self.callbacks = dict()
        self.subscriber_map = dict()

        # Load DEVICE metadata configurations
        self.device_config = {}
        self.device_config['DEVICE_METADATA'] = self.config_db.get_table('DEVICE_METADATA')

        # Initialize KDump Config and set the config to default if nothing is provided
        self.kdumpCfg = KdumpCfg(self.config_db)
        self.kdumpCfg.load(self.config_db.get_table('KDUMP'))

        # Initialize IpTables
        self.iptables = Iptables()

        # Initialize Feature Handler
        self.feature_handler = FeatureHandler(self.config_db, self.device_config)
        self.feature_handler.sync_state_field()

        # Initialize Ntp Config Handler
        self.ntpcfg = NtpCfg()

        self.is_multi_npu = device_info.is_multi_npu()

        # Initialize AAACfg
        self.hostname_cache = ""
        self.aaacfg = AaaCfg()

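    # load() pushes the one-time startup snapshot that is still read directly via
    # ConfigDBConnector (the AAA/TACACS+/RADIUS tables and the cached hostname)
    # into AaaCfg; subsequent updates to these tables arrive through the
    # SubscriberStateTable callbacks registered in register_callbacks().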
    def load(self):
        aaa = self.config_db.get_table('AAA')
        tacacs_global = self.config_db.get_table('TACPLUS')
        tacacs_server = self.config_db.get_table('TACPLUS_SERVER')
        radius_global = self.config_db.get_table('RADIUS')
        radius_server = self.config_db.get_table('RADIUS_SERVER')
        self.aaacfg.load(aaa, tacacs_global, tacacs_server, radius_global, radius_server)

        try:
            dev_meta = self.config_db.get_table('DEVICE_METADATA')
            if 'localhost' in dev_meta:
                if 'hostname' in dev_meta['localhost']:
                    self.hostname_cache = dev_meta['localhost']['hostname']
        except Exception as e:
            pass

        # Update AAA with the hostname
        self.aaacfg.hostname_update(self.hostname_cache)

    def __get_intf_name(self, key):
        if isinstance(key, tuple) and key:
            intf = key[0]
        else:
            intf = key
        return intf

    def aaa_handler(self, key, op, data):
        self.aaacfg.aaa_update(key, data)
        syslog.syslog(syslog.LOG_INFO, 'AAA Update: key: {}, op: {}, data: {}'.format(key, op, data))

    def tacacs_server_handler(self, key, op, data):
        self.aaacfg.tacacs_server_update(key, data)
        log_data = copy.deepcopy(data)
        if 'passkey' in log_data:
            log_data['passkey'] = obfuscate(log_data['passkey'])
        syslog.syslog(syslog.LOG_INFO, 'TACPLUS_SERVER update: key: {}, op: {}, data: {}'.format(key, op, log_data))

    def tacacs_global_handler(self, key, op, data):
        self.aaacfg.tacacs_global_update(key, data)
        log_data = copy.deepcopy(data)
        if 'passkey' in log_data:
            log_data['passkey'] = obfuscate(log_data['passkey'])
        syslog.syslog(syslog.LOG_INFO, 'TACPLUS Global update: key: {}, op: {}, data: {}'.format(key, op, log_data))

    def radius_server_handler(self, key, op, data):
        self.aaacfg.radius_server_update(key, data)
        log_data = copy.deepcopy(data)
        if 'passkey' in log_data:
            log_data['passkey'] = obfuscate(log_data['passkey'])
        syslog.syslog(syslog.LOG_INFO, 'RADIUS_SERVER update: key: {}, op: {}, data: {}'.format(key, op, log_data))

    def radius_global_handler(self, key, op, data):
        self.aaacfg.radius_global_update(key, data)
        log_data = copy.deepcopy(data)
        if 'passkey' in log_data:
            log_data['passkey'] = obfuscate(log_data['passkey'])
        syslog.syslog(syslog.LOG_INFO, 'RADIUS Global update: key: {}, op: {}, data: {}'.format(key, op, log_data))

    def mgmt_intf_handler(self, key, op, data):
        key = ConfigDBConnector.deserialize_key(key)
        mgmt_intf_name = self.__get_intf_name(key)
        self.aaacfg.handle_radius_source_intf_ip_chg(mgmt_intf_name)
        self.aaacfg.handle_radius_nas_ip_chg(mgmt_intf_name)

    def lpbk_handler(self, key, op, data):
        key = ConfigDBConnector.deserialize_key(key)
        if op == "DEL":
            add = False
        else:
            add = True

        self.iptables.iptables_handler(key, data, add)
        lpbk_name = self.__get_intf_name(key)
        self.ntpcfg.handle_ntp_source_intf_chg(lpbk_name)
        self.aaacfg.handle_radius_source_intf_ip_chg(key)

    def vlan_intf_handler(self, key, op, data):
        key = ConfigDBConnector.deserialize_key(key)
        self.aaacfg.handle_radius_source_intf_ip_chg(key)

    def vlan_sub_intf_handler(self, key, op, data):
        key = ConfigDBConnector.deserialize_key(key)
        self.aaacfg.handle_radius_source_intf_ip_chg(key)

    def portchannel_intf_handler(self, key, op, data):
        key = ConfigDBConnector.deserialize_key(key)
        self.aaacfg.handle_radius_source_intf_ip_chg(key)

    def phy_intf_handler(self, key, op, data):
        key = ConfigDBConnector.deserialize_key(key)
        self.aaacfg.handle_radius_source_intf_ip_chg(key)

    def ntp_server_handler(self, key, op, data):
        self.ntpcfg.ntp_server_update(key, op)

    def ntp_global_handler(self, key, op, data):
        self.ntpcfg.ntp_global_update(key, data)

    def kdump_handler(self, key, op, data):
        syslog.syslog(syslog.LOG_INFO, 'Kdump handler...')
        self.kdumpCfg.kdump_update(key, data)

    def wait_till_system_init_done(self):
        # No need to print the output in the log file so using the "--quiet"
        # flag
        systemctl_cmd = "sudo systemctl is-system-running --wait --quiet"
        subprocess.call(systemctl_cmd, shell=True)

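    # subscribe() creates one SubscriberStateTable per CONFIG_DB table, adds it
    # to the Select object and records an fd -> (subscriber, table) entry in
    # self.subscriber_map so that start() can map a ready file descriptor back
    # to its table and dispatch the popped (key, op, fvs) tuple to the callbacks
    # registered for that table. 'pri' is the Selectable priority used when
    # multiple tables have pending notifications (HOSTCFGD_MAX_PRI is served first).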
    def subscribe(self, table, callback, pri):
        try:
            if table not in self.callbacks:
                self.callbacks[table] = []
                subscriber = SubscriberStateTable(self.dbconn, table, TableConsumable.DEFAULT_POP_BATCH_SIZE, pri)
                self.selector.addSelectable(subscriber)  # Add to the Selector
                self.subscriber_map[subscriber.getFd()] = (subscriber, table)  # Maintain a mapping b/w subscriber & fd

            self.callbacks[table].append(callback)
        except Exception as err:
            syslog.syslog(syslog.LOG_ERR, "Subscribe to table {} failed with error {}".format(table, err))

    def register_callbacks(self):
        self.subscribe('KDUMP', lambda table, key, op, data: self.kdump_handler(key, op, data), HOSTCFGD_MAX_PRI)
        # Handle FEATURE updates before other tables
        self.subscribe('FEATURE', lambda table, key, op, data: self.feature_handler.handle(key, op, data), HOSTCFGD_MAX_PRI-1)
        # Handle AAA, TACACS and RADIUS related tables
        self.subscribe('AAA', lambda table, key, op, data: self.aaa_handler(key, op, data), HOSTCFGD_MAX_PRI-2)
        self.subscribe('TACPLUS', lambda table, key, op, data: self.tacacs_global_handler(key, op, data), HOSTCFGD_MAX_PRI-2)
        self.subscribe('TACPLUS_SERVER', lambda table, key, op, data: self.tacacs_server_handler(key, op, data), HOSTCFGD_MAX_PRI-2)
        self.subscribe('RADIUS', lambda table, key, op, data: self.radius_global_handler(key, op, data), HOSTCFGD_MAX_PRI-2)
        self.subscribe('RADIUS_SERVER', lambda table, key, op, data: self.radius_server_handler(key, op, data), HOSTCFGD_MAX_PRI-2)
        # Handle IPTables configuration
        self.subscribe('LOOPBACK_INTERFACE', lambda table, key, op, data: self.lpbk_handler(key, op, data), HOSTCFGD_MAX_PRI-3)
        # Handle NTP & NTP_SERVER updates
        self.subscribe('NTP', lambda table, key, op, data: self.ntp_global_handler(key, op, data), HOSTCFGD_MAX_PRI-4)
        self.subscribe('NTP_SERVER', lambda table, key, op, data: self.ntp_server_handler(key, op, data), HOSTCFGD_MAX_PRI-4)
        # Handle updates to src intf changes in radius
        self.subscribe('MGMT_INTERFACE', lambda table, key, op, data: self.mgmt_intf_handler(key, op, data), HOSTCFGD_MAX_PRI-5)
        self.subscribe('VLAN_INTERFACE', lambda table, key, op, data: self.vlan_intf_handler(key, op, data), HOSTCFGD_MAX_PRI-5)
        self.subscribe('VLAN_SUB_INTERFACE', lambda table, key, op, data: self.vlan_sub_intf_handler(key, op, data), HOSTCFGD_MAX_PRI-5)
        self.subscribe('PORTCHANNEL_INTERFACE', lambda table, key, op, data: self.portchannel_intf_handler(key, op, data), HOSTCFGD_MAX_PRI-5)
        self.subscribe('INTERFACE', lambda table, key, op, data: self.phy_intf_handler(key, op, data), HOSTCFGD_MAX_PRI-5)

        syslog.syslog(syslog.LOG_INFO,
                      "Waiting for systemctl to finish initialization")
        self.wait_till_system_init_done()
        syslog.syslog(syslog.LOG_INFO,
                      "systemctl has finished initialization -- proceeding ...")

    def start(self):
        while True:
            state, selectable_ = self.selector.select(DEFAULT_SELECT_TIMEOUT)
            if state == self.selector.TIMEOUT:
                continue
            elif state == self.selector.ERROR:
                syslog.syslog(syslog.LOG_ERR,
                              "error returned by select")
                continue

            fd = selectable_.getFd()
            # Get the corresponding subscriber & table
            subscriber, table = self.subscriber_map.get(fd, (None, ""))
            if not subscriber:
                syslog.syslog(syslog.LOG_ERR,
                              "No Subscriber object found for fd: {}, subscriber map: {}".format(fd, self.subscriber_map))
                continue
            key, op, fvs = subscriber.pop()
            # Get the registered callbacks
            cbs = self.callbacks.get(table, None)
            for callback in cbs:
                callback(table, key, op, dict(fvs))

def main():
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    daemon = HostConfigDaemon()
    daemon.register_callbacks()
    daemon.load()
    daemon.start()


if __name__ == "__main__":
    main()